From b477ca17fedca779a8b5895eef652096548474f2 Mon Sep 17 00:00:00 2001 From: NikVolf Date: Thu, 29 Sep 2016 13:19:39 +0300 Subject: [PATCH 01/23] bloom filter crate --- Cargo.lock | 5 + util/Cargo.toml | 1 + util/bloom/Cargo.toml | 9 ++ util/bloom/src/lib.rs | 240 ++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 255 insertions(+) create mode 100644 util/bloom/Cargo.toml create mode 100644 util/bloom/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 840684b2d..15cb17c88 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -309,6 +309,10 @@ dependencies = [ "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "ethcore-bloom-journal" +version = "0.1.0" + [[package]] name = "ethcore-dapps" version = "1.4.0" @@ -528,6 +532,7 @@ dependencies = [ "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)", "ethcore-bigint 0.1.0", + "ethcore-bloom-journal 0.1.0", "ethcore-devtools 1.4.0", "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/util/Cargo.toml b/util/Cargo.toml index 81916555c..520a4e003 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -34,6 +34,7 @@ using_queue = { path = "using_queue" } table = { path = "table" } ansi_term = "0.7" tiny-keccak= "1.0" +ethcore-bloom-journal = { path = "bloom" } [features] default = [] diff --git a/util/bloom/Cargo.toml b/util/bloom/Cargo.toml new file mode 100644 index 000000000..5397c691b --- /dev/null +++ b/util/bloom/Cargo.toml @@ -0,0 +1,9 @@ +[project] +name = "ethcore-bloom-journal" +version = "0.1.0" +authors = ["Ethcore"] +description = "Journaling bloom filter" +license = "GPL3" + +[lib] +path = "src/lib.rs" diff --git a/util/bloom/src/lib.rs b/util/bloom/src/lib.rs new file mode 100644 index 000000000..b2b926166 --- /dev/null +++ b/util/bloom/src/lib.rs @@ -0,0 +1,240 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+
+use std::cmp;
+use std::f64;
+use std::hash::{Hash, Hasher, SipHasher};
+use std::collections::HashSet;
+
+/// BitVec structure with journalling
+/// Every time any of the blocks is set, its index is tracked
+/// and can then be drained by the `drain` method
+struct BitVecJournal {
+	elems: Vec<u64>,
+	journal: HashSet<usize>,
+}
+
+impl BitVecJournal {
+	pub fn new(size: usize) -> BitVecJournal {
+		let extra = if size % 8 > 0 { 1 } else { 0 };
+		BitVecJournal {
+			elems: vec![0u64; size / 8 + extra],
+			journal: HashSet::new(),
+		}
+	}
+
+	pub fn from_parts(parts: &[u64]) -> BitVecJournal {
+		BitVecJournal {
+			elems: parts.to_vec(),
+			journal: HashSet::new(),
+		}
+	}
+
+	pub fn set(&mut self, index: usize) {
+		let e_index = index / 64;
+		let bit_index = index % 64;
+		let val = self.elems.get_mut(e_index).unwrap();
+		*val |= 1u64 << bit_index;
+		self.journal.insert(e_index);
+	}
+
+	pub fn get(&self, index: usize) -> bool {
+		let e_index = index / 64;
+		let bit_index = index % 64;
+		self.elems[e_index] & (1 << bit_index) != 0
+	}
+
+	pub fn drain(&mut self) -> Vec<(usize, u64)> {
+		let journal = self.journal.drain().collect::<Vec<usize>>();
+		journal.iter().map(|idx| (*idx, self.elems[*idx])).collect::<Vec<(usize, u64)>>()
+	}
+
+	pub fn how_full(&self) -> f64 {
+		self.elems.iter().fold(0u64, |acc, e| acc + e.count_ones() as u64) as f64 / (self.elems.len() * 64) as f64
+	}
+}
+
+/// Bloom filter structure
+pub struct Bloom {
+	bitmap: BitVecJournal,
+	bitmap_bits: u64,
+	k_num: u32,
+	sips: [SipHasher; 2],
+}
+
+impl Bloom {
+	/// Create a new bloom filter structure.
+	/// bitmap_size is the size in bytes (not bits) that will be allocated in memory
+	/// items_count is an estimation of the maximum number of items to store.
+	pub fn new(bitmap_size: usize, items_count: usize) -> Bloom {
+		assert!(bitmap_size > 0 && items_count > 0);
+		let bitmap_bits = (bitmap_size as u64) * 8u64;
+		let k_num = Bloom::optimal_k_num(bitmap_bits, items_count);
+		let bitmap = BitVecJournal::new(bitmap_bits as usize);
+		let sips = [Bloom::sip_new(), Bloom::sip_new()];
+		Bloom {
+			bitmap: bitmap,
+			bitmap_bits: bitmap_bits,
+			k_num: k_num,
+			sips: sips,
+		}
+	}
+
+	/// Initializes bloom filter from saved state
+	pub fn from_parts(parts: &[u64], k_num: u32) -> Bloom {
+		let bitmap_size = parts.len()*8;
+		let bitmap_bits = (bitmap_size as u64) * 8u64;
+		let bitmap = BitVecJournal::from_parts(parts);
+		let sips = [Bloom::sip_new(), Bloom::sip_new()];
+		Bloom {
+			bitmap: bitmap,
+			bitmap_bits: bitmap_bits,
+			k_num: k_num,
+			sips: sips,
+		}
+	}
+
+	/// Create a new bloom filter structure.
+	/// items_count is an estimation of the maximum number of items to store.
+	/// fp_p is the wanted rate of false positives, in ]0.0, 1.0[
+	pub fn new_for_fp_rate(items_count: usize, fp_p: f64) -> Bloom {
+		let bitmap_size = Bloom::compute_bitmap_size(items_count, fp_p);
+		Bloom::new(bitmap_size, items_count)
+	}
+
+	/// Compute a recommended bitmap size for items_count items
+	/// and a fp_p rate of false positives.
+	/// fp_p obviously has to be within the ]0.0, 1.0[ range.
+	pub fn compute_bitmap_size(items_count: usize, fp_p: f64) -> usize {
+		assert!(items_count > 0);
+		assert!(fp_p > 0.0 && fp_p < 1.0);
+		let log2 = f64::consts::LN_2;
+		let log2_2 = log2 * log2;
+		((items_count as f64) * f64::ln(fp_p) / (-8.0 * log2_2)).ceil() as usize
+	}
+
+	/// Records the presence of an item.
+ pub fn set(&mut self, item: T) + where T: Hash + { + let mut hashes = [0u64, 0u64]; + for k_i in 0..self.k_num { + let bit_offset = (self.bloom_hash(&mut hashes, &item, k_i) % self.bitmap_bits) as usize; + self.bitmap.set(bit_offset); + } + } + + /// Check if an item is present in the set. + /// There can be false positives, but no false negatives. + pub fn check(&self, item: T) -> bool + where T: Hash + { + let mut hashes = [0u64, 0u64]; + for k_i in 0..self.k_num { + let bit_offset = (self.bloom_hash(&mut hashes, &item, k_i) % self.bitmap_bits) as usize; + if !self.bitmap.get(bit_offset) { + return false; + } + } + true + } + + /// Return the number of bits in the filter + pub fn number_of_bits(&self) -> u64 { + self.bitmap_bits + } + + /// Return the number of hash functions used for `check` and `set` + pub fn number_of_hash_functions(&self) -> u32 { + self.k_num + } + + fn optimal_k_num(bitmap_bits: u64, items_count: usize) -> u32 { + let m = bitmap_bits as f64; + let n = items_count as f64; + let k_num = (m / n * f64::ln(2.0f64)).ceil() as u32; + cmp::max(k_num, 1) + } + + fn bloom_hash(&self, hashes: &mut [u64; 2], item: &T, k_i: u32) -> u64 + where T: Hash + { + if k_i < 2 { + let sip = &mut self.sips[k_i as usize].clone(); + item.hash(sip); + let hash = sip.finish(); + hashes[k_i as usize] = hash; + hash + } else { + hashes[0].wrapping_add((k_i as u64).wrapping_mul(hashes[1]) % 0xffffffffffffffc5) + } + } + + fn sip_new() -> SipHasher { + SipHasher::new() + } + + /// Drains the bloom journal returning the updated bloom part + pub fn drain_journal(&mut self) -> BloomJournal { + BloomJournal { + entries: self.bitmap.drain(), + hash_functions: self.k_num, + } + } + + /// Returns the ratio of set bits in the bloom filter to the total bits + pub fn how_full(&self) -> f64 { + self.bitmap.how_full() + } +} + +/// Bloom journal +/// Returns the tuple of (bloom part index, bloom part value) where each one is representing +/// an index of bloom parts that was updated since the last drain +pub struct BloomJournal { + pub hash_functions: u32, + pub entries: Vec<(usize, u64)>, +} + +#[test] +fn bloom_test_set() { + let mut bloom = Bloom::new(10, 80); + let key = vec![115u8, 99]; + assert!(!bloom.check(&key)); + bloom.set(&key); + assert!(bloom.check(&key)); +} + +#[test] +fn bloom_journalling() { + let initial = vec![0u64; 8]; + let mut bloom = Bloom::from_parts(&initial, 3); + bloom.set(&vec![5u8, 4]); + let drain = bloom.drain_journal(); + + assert_eq!(2, drain.entries.len()) +} + +#[test] +fn bloom_howfull() { + let initial = vec![0u64; 8]; + let mut bloom = Bloom::from_parts(&initial, 3); + bloom.set(&vec![5u8, 4]); + + let full = bloom.how_full(); + // 2/8/64 = 0.00390625 + assert!(full >= 0.0039f64 && full <= 0.004f64); +} From 59c0551ff468ed36e9c6e9dc450ff811978bd28c Mon Sep 17 00:00:00 2001 From: NikVolf Date: Thu, 29 Sep 2016 13:39:13 +0300 Subject: [PATCH 02/23] separate mod for tests --- util/bloom/src/lib.rs | 62 ++++++++++++++++++++++++------------------- 1 file changed, 34 insertions(+), 28 deletions(-) diff --git a/util/bloom/src/lib.rs b/util/bloom/src/lib.rs index b2b926166..9cf0e89f1 100644 --- a/util/bloom/src/lib.rs +++ b/util/bloom/src/lib.rs @@ -209,32 +209,38 @@ pub struct BloomJournal { pub entries: Vec<(usize, u64)>, } -#[test] -fn bloom_test_set() { - let mut bloom = Bloom::new(10, 80); - let key = vec![115u8, 99]; - assert!(!bloom.check(&key)); - bloom.set(&key); - assert!(bloom.check(&key)); -} - -#[test] -fn bloom_journalling() { - let initial = vec![0u64; 8]; - 
let mut bloom = Bloom::from_parts(&initial, 3); - bloom.set(&vec![5u8, 4]); - let drain = bloom.drain_journal(); - - assert_eq!(2, drain.entries.len()) -} - -#[test] -fn bloom_howfull() { - let initial = vec![0u64; 8]; - let mut bloom = Bloom::from_parts(&initial, 3); - bloom.set(&vec![5u8, 4]); - - let full = bloom.how_full(); - // 2/8/64 = 0.00390625 - assert!(full >= 0.0039f64 && full <= 0.004f64); + +#[cfg(test)] +mod tests { + use super::Bloom; + + #[test] + fn bloom_test_set() { + let mut bloom = Bloom::new(10, 80); + let key = vec![115u8, 99]; + assert!(!bloom.check(&key)); + bloom.set(&key); + assert!(bloom.check(&key)); + } + + #[test] + fn bloom_journalling() { + let initial = vec![0u64; 8]; + let mut bloom = Bloom::from_parts(&initial, 3); + bloom.set(&vec![5u8, 4]); + let drain = bloom.drain_journal(); + + assert_eq!(2, drain.entries.len()) + } + + #[test] + fn bloom_howfull() { + let initial = vec![0u64; 8]; + let mut bloom = Bloom::from_parts(&initial, 3); + bloom.set(&vec![5u8, 4]); + + let full = bloom.how_full(); + // 2/8/64 = 0.00390625 + assert!(full >= 0.0039f64 && full <= 0.004f64); + } } From 43e0970a54d8c879aaa87713ed0b6e0a7c4bce8e Mon Sep 17 00:00:00 2001 From: "Denis S. Soldatov aka General-Beck" Date: Fri, 30 Sep 2016 21:02:33 +0700 Subject: [PATCH 03/23] Update gitlab-ci add checksum --- .gitlab-ci.yml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6c3b5389b..0ead0a91a 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -19,9 +19,11 @@ linux-stable: script: - cargo build --release --verbose - strip target/release/parity + - md5sum target/release/parity >> checksum - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu/parity --body target/release/parity + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu/checksum --body checksum tags: - rust - rust-stable @@ -40,9 +42,11 @@ linux-stable-14.04: script: - cargo build --release --verbose - strip target/release/parity + - md5sum target/release/parity >> checksum - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-ubuntu_14_04-gnu/parity --body target/release/parity + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-ubuntu_14_04-gnu/checksum --body checksum tags: - rust - rust-14.04 @@ -101,9 +105,11 @@ linux-centos: - export CC="gcc" - cargo build --release --verbose - strip target/release/parity + - md5sum target/release/parity >> checksum - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/parity --body target/release/parity + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/checksum --body checksum tags: - rust - rust-centos @@ -127,9 +133,11 @@ linux-armv7: - cat .cargo/config - cargo build --target armv7-unknown-linux-gnueabihf --release --verbose - arm-linux-gnueabihf-strip target/armv7-unknown-linux-gnueabihf/release/parity + - md5sum target/armv7-unknown-linux-gnueabihf/release/parity >> checksum - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api 
put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/armv7-unknown-linux-gnueabihf/parity --body target/armv7-unknown-linux-gnueabihf/release/parity + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/armv7-unknown-linux-gnueabihf/checksum --body checksum tags: - rust - rust-arm @@ -154,9 +162,11 @@ linux-arm: - cat .cargo/config - cargo build --target arm-unknown-linux-gnueabihf --release --verbose - arm-linux-gnueabihf-strip target/arm-unknown-linux-gnueabihf/release/parity + - md5sum target/arm-unknown-linux-gnueabihf/release/parity >> checksum - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/arm-unknown-linux-gnueabihf/parity --body target/arm-unknown-linux-gnueabihf/release/parity + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/arm-unknown-linux-gnueabihf/checksum --body checksum tags: - rust - rust-arm @@ -181,9 +191,11 @@ linux-armv6: - cat .cargo/config - cargo build --target arm-unknown-linux-gnueabi --release --verbose - arm-linux-gnueabi-strip target/arm-unknown-linux-gnueabi/release/parity + - md5sum target/arm-unknown-linux-gnueabi/release/parity >> checksum - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/arm-unknown-linux-gnueabi/parity --body target/arm-unknown-linux-gnueabi/release/parity + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/arm-unknown-linux-gnueabi/checksum --body checksum tags: - rust - rust-arm @@ -208,9 +220,11 @@ linux-aarch64: - cat .cargo/config - cargo build --target aarch64-unknown-linux-gnu --release --verbose - aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/parity + - md5sum target/aarch64-unknown-linux-gnu/release/parity >> checksum - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/aarch64-unknown-linux-gnu/parity --body target/aarch64-unknown-linux-gnu/release/parity + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/aarch64-unknown-linux-gnu/checksum --body checksum tags: - rust - rust-arm @@ -228,9 +242,11 @@ darwin: - stable script: - cargo build --release --verbose + - md5sum target/release/parity >> checksum - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-apple-darwin/parity --body target/release/parity + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-apple-darwin/checksum --body checksum tags: - osx artifacts: @@ -250,10 +266,12 @@ windows: - set RUST_BACKTRACE=1 - rustup default stable-x86_64-pc-windows-msvc - cargo build --release --verbose + - md5sum target/release/parity >> checksum - aws configure set aws_access_key_id %s3_key% - aws configure set aws_secret_access_key %s3_secret% - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity --body target/release/parity.exe - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity --body target/release/parity.pdb + - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/checksum --body checksum tags: - rust-windows artifacts: From 
fa050246afbef8e259e06218615d594be7f9fdbf Mon Sep 17 00:00:00 2001 From: NikVolf Date: Fri, 30 Sep 2016 20:43:57 +0300 Subject: [PATCH 04/23] removed redundant memcopy --- util/bloom/src/lib.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/util/bloom/src/lib.rs b/util/bloom/src/lib.rs index 9cf0e89f1..9d637965a 100644 --- a/util/bloom/src/lib.rs +++ b/util/bloom/src/lib.rs @@ -15,6 +15,7 @@ // along with Parity. If not, see . use std::cmp; +use std::mem; use std::f64; use std::hash::{Hash, Hasher, SipHasher}; use std::collections::HashSet; @@ -58,8 +59,8 @@ impl BitVecJournal { } pub fn drain(&mut self) -> Vec<(usize, u64)> { - let journal = self.journal.drain().collect::>(); - journal.iter().map(|idx| (*idx, self.elems[*idx])).collect::>() + let journal = mem::replace(&mut self.journal, HashSet::new()).into_iter(); + journal.map(|idx| (idx, self.elems[idx])).collect::>() } pub fn how_full(&self) -> f64 { From 18630496d5a24a70843a19ef0898f30cdbc79bea Mon Sep 17 00:00:00 2001 From: NikVolf Date: Fri, 30 Sep 2016 21:02:16 +0300 Subject: [PATCH 05/23] asterisk space --- util/bloom/src/lib.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/util/bloom/src/lib.rs b/util/bloom/src/lib.rs index 9d637965a..582437651 100644 --- a/util/bloom/src/lib.rs +++ b/util/bloom/src/lib.rs @@ -63,7 +63,7 @@ impl BitVecJournal { journal.map(|idx| (idx, self.elems[idx])).collect::>() } - pub fn how_full(&self) -> f64 { + pub fn saturation(&self) -> f64 { self.elems.iter().fold(0u64, |acc, e| acc + e.count_ones() as u64) as f64 / (self.elems.len() * 64) as f64 } } @@ -96,7 +96,7 @@ impl Bloom { /// Initializes bloom filter from saved state pub fn from_parts(parts: &[u64], k_num: u32) -> Bloom { - let bitmap_size = parts.len()*8; + let bitmap_size = parts.len() * 8; let bitmap_bits = (bitmap_size as u64) * 8u64; let bitmap = BitVecJournal::from_parts(parts); let sips = [Bloom::sip_new(), Bloom::sip_new()]; @@ -197,8 +197,8 @@ impl Bloom { } /// Returns the ratio of set bits in the bloom filter to the total bits - pub fn how_full(&self) -> f64 { - self.bitmap.how_full() + pub fn saturation(&self) -> f64 { + self.bitmap.saturation() } } @@ -216,7 +216,7 @@ mod tests { use super::Bloom; #[test] - fn bloom_test_set() { + fn get_set() { let mut bloom = Bloom::new(10, 80); let key = vec![115u8, 99]; assert!(!bloom.check(&key)); @@ -225,7 +225,7 @@ mod tests { } #[test] - fn bloom_journalling() { + fn journalling() { let initial = vec![0u64; 8]; let mut bloom = Bloom::from_parts(&initial, 3); bloom.set(&vec![5u8, 4]); @@ -235,12 +235,12 @@ mod tests { } #[test] - fn bloom_howfull() { + fn saturation() { let initial = vec![0u64; 8]; let mut bloom = Bloom::from_parts(&initial, 3); bloom.set(&vec![5u8, 4]); - let full = bloom.how_full(); + let full = bloom.saturation(); // 2/8/64 = 0.00390625 assert!(full >= 0.0039f64 && full <= 0.004f64); } From 1029f8438cafc21d8a08883a8852f4b3df9b0b50 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Sat, 1 Oct 2016 16:33:19 +0400 Subject: [PATCH 06/23] using arc (#2420) --- ethcore/src/migrations/state/v7.rs | 9 +++++---- ethcore/src/migrations/v9.rs | 3 ++- parity/migration.rs | 5 +++-- util/src/migration/mod.rs | 13 +++++++------ util/src/migration/tests.rs | 2 +- 5 files changed, 18 insertions(+), 14 deletions(-) diff --git a/ethcore/src/migrations/state/v7.rs b/ethcore/src/migrations/state/v7.rs index 9327decef..9af75a8ed 100644 --- a/ethcore/src/migrations/state/v7.rs +++ b/ethcore/src/migrations/state/v7.rs @@ -24,6 +24,7 
@@ use util::{Address, FixedHash, H256};
 use util::kvdb::Database;
 use util::migration::{Batch, Config, Error, Migration, SimpleMigration, Progress};
 use util::sha3::Hashable;
+use std::sync::Arc;
 
 use rlp::{decode, Rlp, RlpStream, Stream, View};
 
@@ -107,7 +108,7 @@ pub struct OverlayRecentV7 {
 impl OverlayRecentV7 {
 	// walk all journal entries in the database backwards.
 	// find migrations for any possible inserted keys.
-	fn walk_journal(&mut self, source: &Database) -> Result<(), Error> {
+	fn walk_journal(&mut self, source: Arc<Database>) -> Result<(), Error> {
 		if let Some(val) = try!(source.get(None, V7_LATEST_ERA_KEY).map_err(Error::Custom)) {
 			let mut era = decode::<u64>(&val);
 			loop {
@@ -151,7 +152,7 @@ impl OverlayRecentV7 {
 	// walk all journal entries in the database backwards.
 	// replace all possible inserted/deleted keys with their migrated counterparts
 	// and commit the altered entries.
-	fn migrate_journal(&self, source: &Database, mut batch: Batch, dest: &mut Database) -> Result<(), Error> {
+	fn migrate_journal(&self, source: Arc<Database>, mut batch: Batch, dest: &mut Database) -> Result<(), Error> {
 		if let Some(val) = try!(source.get(None, V7_LATEST_ERA_KEY).map_err(Error::Custom)) {
 			try!(batch.insert(V7_LATEST_ERA_KEY.into(), val.to_owned(), dest));
 
@@ -228,7 +229,7 @@ impl Migration for OverlayRecentV7 {
 	// walk all records in the database, attempting to migrate any possible and
 	// keeping records of those that we do. then migrate the journal using
 	// this information.
-	fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> {
+	fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> {
 		let mut batch = Batch::new(config, col);
 
 		// check version metadata.
@@ -257,7 +258,7 @@ impl Migration for OverlayRecentV7 {
 			try!(batch.insert(key, value.into_vec(), dest));
 		}
 
-		try!(self.walk_journal(source));
+		try!(self.walk_journal(source.clone()));
 		self.migrate_journal(source, batch, dest)
 	}
 }
diff --git a/ethcore/src/migrations/v9.rs b/ethcore/src/migrations/v9.rs
index d4070d0c0..83729dc55 100644
--- a/ethcore/src/migrations/v9.rs
+++ b/ethcore/src/migrations/v9.rs
@@ -20,6 +20,7 @@ use rlp::{Rlp, RlpStream, View, Stream};
 use util::kvdb::Database;
 use util::migration::{Batch, Config, Error, Migration, Progress};
+use std::sync::Arc;
 
 /// Which part of block to preserve
 pub enum Extract {
@@ -55,7 +56,7 @@ impl Migration for ToV9 {
 
 	fn version(&self) -> u32 { 9 }
 
-	fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> {
+	fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> {
 		let mut batch = Batch::new(config, self.column);
 
 		for (key, value) in source.iter(col) {
diff --git a/parity/migration.rs b/parity/migration.rs
index 66e1d8010..084ade676 100644
--- a/parity/migration.rs
+++ b/parity/migration.rs
@@ -19,6 +19,7 @@ use std::fs::File;
 use std::io::{Read, Write, Error as IoError, ErrorKind};
 use std::path::{Path, PathBuf};
 use std::fmt::{Display, Formatter, Error as FmtError};
+use std::sync::Arc;
 use util::journaldb::Algorithm;
 use util::migration::{Manager as MigrationManager, Config as MigrationConfig, Error as MigrationError, Migration};
 use util::kvdb::{CompactionProfile, Database, DatabaseConfig};
@@ -172,13 +173,13 @@ fn consolidate_database(
 	let old_path_str = try!(old_db_path.to_str().ok_or(Error::MigrationImpossible));
 	let new_path_str = try!(new_db_path.to_str().ok_or(Error::MigrationImpossible));
 
-	let cur_db = try!(Database::open(&db_config, old_path_str).map_err(db_error));
+	let cur_db = Arc::new(try!(Database::open(&db_config, old_path_str).map_err(db_error)));
 	// open new DB with proper number of columns
 	db_config.columns = migration.columns();
 	let mut new_db = try!(Database::open(&db_config, new_path_str).map_err(db_error));
 
 	// Migrate to new database (default column only)
-	try!(migration.migrate(&cur_db, &config, &mut new_db, None));
+	try!(migration.migrate(cur_db, &config, &mut new_db, None));
 
 	Ok(())
 }
diff --git a/util/src/migration/mod.rs b/util/src/migration/mod.rs
index 0d34f4198..cd2b7fae1 100644
--- a/util/src/migration/mod.rs
+++ b/util/src/migration/mod.rs
@@ -22,6 +22,7 @@ use std::collections::BTreeMap;
 use std::fs;
 use std::fmt;
 use std::path::{Path, PathBuf};
+use std::sync::Arc;
 
 use ::kvdb::{CompactionProfile, Database, DatabaseConfig, DBTransaction};
 
@@ -123,7 +124,7 @@ pub trait Migration: 'static {
 	/// Version of the database after the migration.
 	fn version(&self) -> u32;
 	/// Migrate a source to a destination.
-	fn migrate(&mut self, source: &Database, config: &Config, destination: &mut Database, col: Option<u32>) -> Result<(), Error>;
+	fn migrate(&mut self, source: Arc<Database>, config: &Config, destination: &mut Database, col: Option<u32>) -> Result<(), Error>;
 }
 
 /// A simple migration over key-value pairs.
@@ -142,7 +143,7 @@ impl<T: SimpleMigration> Migration for T {
 
 	fn version(&self) -> u32 { SimpleMigration::version(self) }
 
-	fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> {
+	fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> {
 		let mut batch = Batch::new(config, col);
 
 		for (key, value) in source.iter(col) {
@@ -239,7 +240,7 @@ impl Manager {
 		// start with the old db.
 		let old_path_str = try!(old_path.to_str().ok_or(Error::MigrationImpossible));
-		let mut cur_db = try!(Database::open(&db_config, old_path_str).map_err(Error::Custom));
+		let mut cur_db = Arc::new(try!(Database::open(&db_config, old_path_str).map_err(Error::Custom)));
 
 		for migration in migrations {
 			// Change number of columns in new db
@@ -254,16 +255,16 @@ impl Manager {
 			// perform the migration from cur_db to new_db.
 			match current_columns {
 				// migrate only default column
-				None => try!(migration.migrate(&cur_db, &config, &mut new_db, None)),
+				None => try!(migration.migrate(cur_db.clone(), &config, &mut new_db, None)),
 				Some(v) => {
 					// Migrate all columns in previous DB
 					for col in 0..v {
-						try!(migration.migrate(&cur_db, &config, &mut new_db, Some(col)))
+						try!(migration.migrate(cur_db.clone(), &config, &mut new_db, Some(col)))
 					}
 				}
 			}
 			// next iteration, we will migrate from this db into the other temp.
-			cur_db = new_db;
+			cur_db = Arc::new(new_db);
 			temp_idx.swap();
 
 			// remove the other temporary migration database.
diff --git a/util/src/migration/tests.rs b/util/src/migration/tests.rs
index 05229bee5..57a5a9e32 100644
--- a/util/src/migration/tests.rs
+++ b/util/src/migration/tests.rs
@@ -91,7 +91,7 @@ impl Migration for AddsColumn {
 
	fn version(&self) -> u32 { 1 }
 
-	fn migrate(&mut self, source: &Database, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> {
+	fn migrate(&mut self, source: Arc<Database>, config: &Config, dest: &mut Database, col: Option<u32>) -> Result<(), Error> {
 		let mut batch = Batch::new(config, col);
 
 		for (key, value) in source.iter(col) {

From b1d8b84eb96ebd8a8fe4da1014a344fcd8b43085 Mon Sep 17 00:00:00 2001
From: Jaco Greeff
Date: Sat, 1 Oct 2016 15:16:23 +0200
Subject: [PATCH 07/23] update Morden registry address (#2417)

---
 ethcore/res/ethereum/morden.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ethcore/res/ethereum/morden.json b/ethcore/res/ethereum/morden.json
index 0d643e4c0..ef18df97d 100644
--- a/ethcore/res/ethereum/morden.json
+++ b/ethcore/res/ethereum/morden.json
@@ -8,7 +8,7 @@
 		"difficultyBoundDivisor": "0x0800",
 		"durationLimit": "0x0d",
 		"blockReward": "0x4563918244F40000",
-		"registrar": "0x8e4e9b13d4b45cb0befc93c3061b1408f67316b2",
+		"registrar": "0x52dff57a8a1532e6afb3dc07e2af58bb9eb05b3d",
 		"frontierCompatibilityModeLimit": "0x789b0"
 	}
 }

From 80afb78c7f41d8af5b4da757be0a25d772d958f4 Mon Sep 17 00:00:00 2001
From: Arkadiy Paronyan
Date: Sun, 2 Oct 2016 09:40:54 +0200
Subject: [PATCH 08/23] Disabling debug symbols due to rustc 1.12 memory usage

---
 Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Cargo.toml b/Cargo.toml
index 84edb6c1e..edcb145af 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -76,6 +76,6 @@ path = "parity/main.rs"
 name = "parity"
 
 [profile.release]
-debug = true
+debug = false
 lto = false
 

From 0dcdaa7a2a51e92bca6977c71871af34330b8864 Mon Sep 17 00:00:00 2001
From: Arkadiy Paronyan
Date: Sun, 2 Oct 2016 18:45:36 +0200
Subject: [PATCH 09/23] Jumptable cache (#2427)

* Jumptable cache

* Updated registrar address

---
ethcore/src/action_params.rs | 8 +- ethcore/src/client/client.rs | 2 +- ethcore/src/evm/ext.rs | 2 +- ethcore/src/evm/factory.rs | 22 ++-- ethcore/src/evm/interpreter/mod.rs | 47 +++---- ethcore/src/evm/interpreter/shared_cache.rs | 84 +++++++++++++ ethcore/src/evm/tests.rs | 80 ++++++------ ethcore/src/executive.rs | 24 ++-- ethcore/src/externalities.rs | 8 +- ethcore/src/json_tests/executive.rs | 4 +- ethcore/src/miner/miner.rs | 2 +- ethcore/src/state/account.rs | 130 ++++++++++---------- ethcore/src/state/mod.rs | 16 ++- ethcore/src/tests/client.rs | 2 +- ethcore/src/types/trace_types/trace.rs | 2 +- ethcore/src/verification/queue/kind.rs | 3 +- rpc/src/v1/tests/helpers/miner_service.rs | 2 +- util/src/misc.rs | 2 +- util/src/standard.rs | 2 +- 19 files changed, 266 insertions(+), 176 deletions(-) create mode 100644 ethcore/src/evm/interpreter/shared_cache.rs diff --git a/ethcore/src/action_params.rs b/ethcore/src/action_params.rs index 1886c3d36..46c159269 100644 --- a/ethcore/src/action_params.rs +++ b/ethcore/src/action_params.rs @@ -43,6 +43,8 @@ impl ActionValue { pub struct ActionParams { /// Address of currently executed code. pub code_address: Address, + /// Hash of currently executed code. + pub code_hash: H256, /// Receive address. Usually equal to code_address, /// except when called using CALLCODE. pub address: Address, @@ -57,7 +59,7 @@ pub struct ActionParams { /// Transaction value. pub value: ActionValue, /// Code being executed. - pub code: Option, + pub code: Option>, /// Input data. pub data: Option, /// Type of call @@ -70,6 +72,7 @@ impl Default for ActionParams { fn default() -> ActionParams { ActionParams { code_address: Address::new(), + code_hash: SHA3_EMPTY, address: Address::new(), sender: Address::new(), origin: Address::new(), @@ -88,10 +91,11 @@ impl From for ActionParams { let address: Address = t.address.into(); ActionParams { code_address: Address::new(), + code_hash: (&*t.code).sha3(), address: address, sender: t.sender.into(), origin: t.origin.into(), - code: Some(t.code.into()), + code: Some(Arc::new(t.code.into())), data: Some(t.data.into()), gas: t.gas.into(), gas_price: t.gas_price.into(), diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index f8b4259d5..32a363210 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -831,7 +831,7 @@ impl BlockChainClient for Client { } fn code(&self, address: &Address, id: BlockID) -> Option> { - self.state_at(id).map(|s| s.code(address)) + self.state_at(id).map(|s| s.code(address).map(|c| (*c).clone())) } fn balance(&self, address: &Address, id: BlockID) -> Option { diff --git a/ethcore/src/evm/ext.rs b/ethcore/src/evm/ext.rs index 2bbc7035b..6397f067e 100644 --- a/ethcore/src/evm/ext.rs +++ b/ethcore/src/evm/ext.rs @@ -81,7 +81,7 @@ pub trait Ext { ) -> MessageCallResult; /// Returns code at given address - fn extcode(&self, address: &Address) -> Bytes; + fn extcode(&self, address: &Address) -> Arc; /// Returns code size at given address fn extcodesize(&self, address: &Address) -> usize; diff --git a/ethcore/src/evm/factory.rs b/ethcore/src/evm/factory.rs index 94800c7de..629b423da 100644 --- a/ethcore/src/evm/factory.rs +++ b/ethcore/src/evm/factory.rs @@ -18,8 +18,10 @@ //! //! TODO: consider spliting it into two separate files. use std::fmt; +use std::sync::Arc; use evm::Evm; use util::{U256, Uint}; +use super::interpreter::SharedCache; #[derive(Debug, PartialEq, Clone)] /// Type of EVM to use. @@ -82,7 +84,8 @@ impl VMType { /// Evm factory. 
Creates appropriate Evm. #[derive(Clone)] pub struct Factory { - evm: VMType + evm: VMType, + evm_cache: Arc, } impl Factory { @@ -95,9 +98,9 @@ impl Factory { Box::new(super::jit::JitEvm::default()) }, VMType::Interpreter => if Self::can_fit_in_usize(gas) { - Box::new(super::interpreter::Interpreter::::default()) + Box::new(super::interpreter::Interpreter::::new(self.evm_cache.clone())) } else { - Box::new(super::interpreter::Interpreter::::default()) + Box::new(super::interpreter::Interpreter::::new(self.evm_cache.clone())) } } } @@ -108,9 +111,9 @@ impl Factory { pub fn create(&self, gas: U256) -> Box { match self.evm { VMType::Interpreter => if Self::can_fit_in_usize(gas) { - Box::new(super::interpreter::Interpreter::::default()) + Box::new(super::interpreter::Interpreter::::new(self.evm_cache.clone())) } else { - Box::new(super::interpreter::Interpreter::::default()) + Box::new(super::interpreter::Interpreter::::new(self.evm_cache.clone())) } } } @@ -118,7 +121,8 @@ impl Factory { /// Create new instance of specific `VMType` factory pub fn new(evm: VMType) -> Self { Factory { - evm: evm + evm: evm, + evm_cache: Arc::new(SharedCache::default()), } } @@ -132,7 +136,8 @@ impl Default for Factory { #[cfg(all(feature = "jit", not(test)))] fn default() -> Factory { Factory { - evm: VMType::Jit + evm: VMType::Jit, + evm_cache: Arc::new(SharedCache::default()), } } @@ -140,7 +145,8 @@ impl Default for Factory { #[cfg(any(not(feature = "jit"), test))] fn default() -> Factory { Factory { - evm: VMType::Interpreter + evm: VMType::Interpreter, + evm_cache: Arc::new(SharedCache::default()), } } } diff --git a/ethcore/src/evm/interpreter/mod.rs b/ethcore/src/evm/interpreter/mod.rs index ad2d5cd34..2a6ab8460 100644 --- a/ethcore/src/evm/interpreter/mod.rs +++ b/ethcore/src/evm/interpreter/mod.rs @@ -31,10 +31,12 @@ macro_rules! evm_debug { mod gasometer; mod stack; mod memory; +mod shared_cache; use self::gasometer::Gasometer; use self::stack::{Stack, VecStack}; use self::memory::Memory; +pub use self::shared_cache::SharedCache; use std::marker::PhantomData; use common::*; @@ -98,9 +100,9 @@ enum InstructionResult { /// Intepreter EVM implementation -#[derive(Default)] pub struct Interpreter { mem: Vec, + cache: Arc, _type: PhantomData, } @@ -109,7 +111,7 @@ impl evm::Evm for Interpreter { self.mem.clear(); let code = ¶ms.code.as_ref().unwrap(); - let valid_jump_destinations = self.find_jump_destinations(code); + let valid_jump_destinations = self.cache.jump_destinations(¶ms.code_hash, code); let mut gasometer = Gasometer::::new(try!(Cost::from_u256(params.gas))); let mut stack = VecStack::with_capacity(ext.schedule().stack_limit, U256::zero()); @@ -188,6 +190,14 @@ impl evm::Evm for Interpreter { } impl Interpreter { + /// Create a new `Interpreter` instance with shared cache. 
+ pub fn new(cache: Arc) -> Interpreter { + Interpreter { + mem: Vec::new(), + cache: cache, + _type: PhantomData::default(), + } + } fn verify_instruction(&self, ext: &evm::Ext, instruction: Instruction, info: &InstructionInfo, stack: &Stack) -> evm::Result<()> { let schedule = ext.schedule(); @@ -486,10 +496,10 @@ impl Interpreter { stack.push(U256::from(len)); }, instructions::CALLDATACOPY => { - self.copy_data_to_memory(stack, ¶ms.data.clone().unwrap_or_else(|| vec![])); + self.copy_data_to_memory(stack, params.data.as_ref().map_or_else(|| &[] as &[u8], |d| &*d as &[u8])); }, instructions::CODECOPY => { - self.copy_data_to_memory(stack, ¶ms.code.clone().unwrap_or_else(|| vec![])); + self.copy_data_to_memory(stack, params.code.as_ref().map_or_else(|| &[] as &[u8], |c| &**c as &[u8])); }, instructions::EXTCODECOPY => { let address = u256_to_address(&stack.pop_back()); @@ -790,23 +800,6 @@ impl Interpreter { Ok(()) } - fn find_jump_destinations(&self, code: &[u8]) -> BitSet { - let mut jump_dests = BitSet::with_capacity(code.len()); - let mut position = 0; - - while position < code.len() { - let instruction = code[position]; - - if instruction == instructions::JUMPDEST { - jump_dests.insert(position); - } else if instructions::is_push(instruction) { - position += instructions::get_push_bytes(instruction); - } - position += 1; - } - - jump_dests - } } fn get_and_reset_sign(value: U256) -> (U256, bool) { @@ -833,15 +826,3 @@ fn address_to_u256(value: Address) -> U256 { U256::from(&*H256::from(value)) } -#[test] -fn test_find_jump_destinations() { - // given - let interpreter = Interpreter::::default(); - let code = "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5b01600055".from_hex().unwrap(); - - // when - let valid_jump_destinations = interpreter.find_jump_destinations(&code); - - // then - assert!(valid_jump_destinations.contains(66)); -} diff --git a/ethcore/src/evm/interpreter/shared_cache.rs b/ethcore/src/evm/interpreter/shared_cache.rs new file mode 100644 index 000000000..76360138b --- /dev/null +++ b/ethcore/src/evm/interpreter/shared_cache.rs @@ -0,0 +1,84 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::Arc; +use lru_cache::LruCache; +use util::{H256, Mutex}; +use util::sha3::*; +use bit_set::BitSet; +use super::super::instructions; + +const CACHE_CODE_ITEMS: usize = 4096; + +/// GLobal cache for EVM interpreter +pub struct SharedCache { + jump_destinations: Mutex>> +} + +impl SharedCache { + /// Get jump destincations bitmap for a contract. 
+ pub fn jump_destinations(&self, code_hash: &H256, code: &[u8]) -> Arc { + if code_hash == &SHA3_EMPTY { + return Self::find_jump_destinations(code); + } + if let Some(d) = self.jump_destinations.lock().get_mut(code_hash) { + return d.clone(); + } + + let d = Self::find_jump_destinations(code); + self.jump_destinations.lock().insert(code_hash.clone(), d.clone()); + d + } + + fn find_jump_destinations(code: &[u8]) -> Arc { + let mut jump_dests = BitSet::with_capacity(code.len()); + let mut position = 0; + + while position < code.len() { + let instruction = code[position]; + + if instruction == instructions::JUMPDEST { + jump_dests.insert(position); + } else if instructions::is_push(instruction) { + position += instructions::get_push_bytes(instruction); + } + position += 1; + } + Arc::new(jump_dests) + } +} + +impl Default for SharedCache { + fn default() -> SharedCache { + SharedCache { + jump_destinations: Mutex::new(LruCache::new(CACHE_CODE_ITEMS)), + } + } +} + + +#[test] +fn test_find_jump_destinations() { + use util::FromHex; + // given + let code = "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5b01600055".from_hex().unwrap(); + + // when + let valid_jump_destinations = SharedCache::find_jump_destinations(&code); + + // then + assert!(valid_jump_destinations.contains(66)); +} diff --git a/ethcore/src/evm/tests.rs b/ethcore/src/evm/tests.rs index 89a4c4ba9..eb7d168cf 100644 --- a/ethcore/src/evm/tests.rs +++ b/ethcore/src/evm/tests.rs @@ -49,7 +49,7 @@ pub struct FakeExt { depth: usize, store: HashMap, blockhashes: HashMap, - codes: HashMap, + codes: HashMap>, logs: Vec, _suicides: HashSet
, info: EnvInfo, @@ -136,8 +136,8 @@ impl Ext for FakeExt { MessageCallResult::Success(*gas) } - fn extcode(&self, address: &Address) -> Bytes { - self.codes.get(address).unwrap_or(&Bytes::new()).clone() + fn extcode(&self, address: &Address) -> Arc { + self.codes.get(address).unwrap_or(&Arc::new(Bytes::new())).clone() } fn extcodesize(&self, address: &Address) -> usize { @@ -184,11 +184,11 @@ fn test_stack_underflow() { let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let err = { - let mut vm : Box = Box::new(super::interpreter::Interpreter::::default()); + let mut vm : Box = Box::new(super::interpreter::Interpreter::::new(Arc::new(super::interpreter::SharedCache::default()))); test_finalize(vm.exec(params, &mut ext)).unwrap_err() }; @@ -211,7 +211,7 @@ fn test_add(factory: super::Factory) { let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -231,7 +231,7 @@ fn test_sha3(factory: super::Factory) { let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -251,7 +251,7 @@ fn test_address(factory: super::Factory) { let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -273,7 +273,7 @@ fn test_origin(factory: super::Factory) { params.address = address.clone(); params.origin = origin.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -295,7 +295,7 @@ fn test_sender(factory: super::Factory) { params.address = address.clone(); params.sender = sender.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -329,9 +329,9 @@ fn test_extcodecopy(factory: super::Factory) { params.address = address.clone(); params.sender = sender.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); - ext.codes.insert(sender, sender_code); + ext.codes.insert(sender, Arc::new(sender_code)); let gas_left = { let mut vm = factory.create(params.gas); @@ -350,7 +350,7 @@ fn test_log_empty(factory: super::Factory) { let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -382,7 +382,7 @@ fn test_log_sender(factory: super::Factory) { params.address = address.clone(); params.sender = sender.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -406,7 +406,7 @@ fn test_blockhash(factory: super::Factory) { let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); 
ext.blockhashes.insert(U256::zero(), blockhash.clone()); @@ -428,7 +428,7 @@ fn test_calldataload(factory: super::Factory) { let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); params.data = Some(data); let mut ext = FakeExt::new(); @@ -449,7 +449,7 @@ fn test_author(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); ext.info.author = author; @@ -469,7 +469,7 @@ fn test_timestamp(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); ext.info.timestamp = timestamp; @@ -489,7 +489,7 @@ fn test_number(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); ext.info.number = number; @@ -509,7 +509,7 @@ fn test_difficulty(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); ext.info.difficulty = difficulty; @@ -529,7 +529,7 @@ fn test_gas_limit(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); ext.info.gas_limit = gas_limit; @@ -548,7 +548,7 @@ fn test_mul(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -566,7 +566,7 @@ fn test_sub(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -584,7 +584,7 @@ fn test_div(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -602,7 +602,7 @@ fn test_div_zero(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -620,7 +620,7 @@ fn test_mod(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -639,7 +639,7 @@ fn test_smod(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -658,7 +658,7 @@ fn test_sdiv(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -677,7 +677,7 @@ fn test_exp(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = 
U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -697,7 +697,7 @@ fn test_comparison(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -718,7 +718,7 @@ fn test_signed_comparison(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -739,7 +739,7 @@ fn test_bitops(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(150_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -762,7 +762,7 @@ fn test_addmod_mulmod(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -783,7 +783,7 @@ fn test_byte(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -802,7 +802,7 @@ fn test_signextend(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -822,7 +822,7 @@ fn test_badinstruction_int() { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let err = { @@ -842,7 +842,7 @@ fn test_pop(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(100_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -862,7 +862,7 @@ fn test_extops(factory: super::Factory) { params.gas = U256::from(150_000); params.gas_price = U256::from(0x32); params.value = ActionValue::Transfer(U256::from(0x99)); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -885,7 +885,7 @@ fn test_jumps(factory: super::Factory) { let mut params = ActionParams::default(); params.gas = U256::from(150_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); let mut ext = FakeExt::new(); let gas_left = { @@ -908,7 +908,7 @@ fn test_calls(factory: super::Factory) { let code_address = Address::from(0x998); let mut params = ActionParams::default(); params.gas = U256::from(150_000); - params.code = Some(code); + params.code = Some(Arc::new(code)); params.address = address.clone(); let mut ext = FakeExt::new(); ext.balances = { diff --git a/ethcore/src/executive.rs b/ethcore/src/executive.rs index 6a7e4672a..b0b0b58c8 100644 --- a/ethcore/src/executive.rs +++ b/ethcore/src/executive.rs @@ -168,13 +168,14 @@ impl<'a> Executive<'a> { let new_address = contract_address(&sender, &nonce); let params = ActionParams { code_address: new_address.clone(), + code_hash: t.data.sha3(), address: new_address, sender: sender.clone(), origin: sender.clone(), gas: init_gas, gas_price: t.gas_price, value: ActionValue::Transfer(t.value), - code: Some(t.data.clone()), 
+ code: Some(Arc::new(t.data.clone())), data: None, call_type: CallType::None, }; @@ -190,6 +191,7 @@ impl<'a> Executive<'a> { gas_price: t.gas_price, value: ActionValue::Transfer(t.value), code: self.state.code(address), + code_hash: self.state.code_hash(address), data: Some(t.data.clone()), call_type: CallType::Call, }; @@ -511,7 +513,7 @@ mod tests { params.address = address.clone(); params.sender = sender.clone(); params.gas = U256::from(100_000); - params.code = Some("3331600055".from_hex().unwrap()); + params.code = Some(Arc::new("3331600055".from_hex().unwrap())); params.value = ActionValue::Transfer(U256::from(0x7)); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); @@ -570,7 +572,7 @@ mod tests { params.sender = sender.clone(); params.origin = sender.clone(); params.gas = U256::from(100_000); - params.code = Some(code.clone()); + params.code = Some(Arc::new(code)); params.value = ActionValue::Transfer(U256::from(100)); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); @@ -628,7 +630,7 @@ mod tests { params.sender = sender.clone(); params.origin = sender.clone(); params.gas = U256::from(100_000); - params.code = Some(code.clone()); + params.code = Some(Arc::new(code)); params.value = ActionValue::Transfer(U256::from(100)); params.call_type = CallType::Call; let mut state_result = get_temp_state(); @@ -740,7 +742,7 @@ mod tests { params.sender = sender.clone(); params.origin = sender.clone(); params.gas = U256::from(100_000); - params.code = Some(code.clone()); + params.code = Some(Arc::new(code)); params.value = ActionValue::Transfer(100.into()); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); @@ -828,7 +830,7 @@ mod tests { params.sender = sender.clone(); params.origin = sender.clone(); params.gas = U256::from(100_000); - params.code = Some(code.clone()); + params.code = Some(Arc::new(code)); params.value = ActionValue::Transfer(U256::from(100)); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); @@ -880,7 +882,7 @@ mod tests { params.sender = sender.clone(); params.origin = sender.clone(); params.gas = U256::from(100_000); - params.code = Some(code.clone()); + params.code = Some(Arc::new(code)); params.value = ActionValue::Transfer(U256::from(100)); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); @@ -937,7 +939,7 @@ mod tests { params.address = address_a.clone(); params.sender = sender.clone(); params.gas = U256::from(100_000); - params.code = Some(code_a.clone()); + params.code = Some(Arc::new(code_a.clone())); params.value = ActionValue::Transfer(U256::from(100_000)); let mut state_result = get_temp_state(); @@ -987,10 +989,10 @@ mod tests { let mut params = ActionParams::default(); params.address = address.clone(); params.gas = U256::from(100_000); - params.code = Some(code.clone()); + params.code = Some(Arc::new(code.clone())); let mut state_result = get_temp_state(); let mut state = state_result.reference_mut(); - state.init_code(&address, code.clone()); + state.init_code(&address, code); let info = EnvInfo::default(); let engine = TestEngine::new(0); let mut substate = Substate::new(); @@ -1188,7 +1190,7 @@ mod tests { params.sender = sender.clone(); params.origin = sender.clone(); params.gas = U256::from(0x0186a0); - params.code = Some(code.clone()); + params.code = Some(Arc::new(code)); params.value = ActionValue::Transfer(U256::from_str("0de0b6b3a7640000").unwrap()); let mut 
state_result = get_temp_state(); let mut state = state_result.reference_mut(); diff --git a/ethcore/src/externalities.rs b/ethcore/src/externalities.rs index 82b946ffc..67c04aefb 100644 --- a/ethcore/src/externalities.rs +++ b/ethcore/src/externalities.rs @@ -146,7 +146,8 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT gas: *gas, gas_price: self.origin_info.gas_price, value: ActionValue::Transfer(*value), - code: Some(code.to_vec()), + code: Some(Arc::new(code.to_vec())), + code_hash: code.sha3(), data: None, call_type: CallType::None, }; @@ -185,6 +186,7 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT gas: *gas, gas_price: self.origin_info.gas_price, code: self.state.code(code_address), + code_hash: self.state.code_hash(code_address), data: Some(data.to_vec()), call_type: call_type, }; @@ -201,8 +203,8 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT } } - fn extcode(&self, address: &Address) -> Bytes { - self.state.code(address).unwrap_or_else(|| vec![]) + fn extcode(&self, address: &Address) -> Arc { + self.state.code(address).unwrap_or_else(|| Arc::new(vec![])) } fn extcodesize(&self, address: &Address) -> usize { diff --git a/ethcore/src/json_tests/executive.rs b/ethcore/src/json_tests/executive.rs index 1fe98acdb..5576f9ad4 100644 --- a/ethcore/src/json_tests/executive.rs +++ b/ethcore/src/json_tests/executive.rs @@ -127,7 +127,7 @@ impl<'a, T, V> Ext for TestExt<'a, T, V> where T: Tracer, V: VMTracer { MessageCallResult::Success(*gas) } - fn extcode(&self, address: &Address) -> Bytes { + fn extcode(&self, address: &Address) -> Arc { self.ext.extcode(address) } @@ -232,7 +232,7 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8]) -> Vec { for (address, account) in vm.post_state.unwrap().into_iter() { let address = address.into(); let code: Vec = account.code.into(); - fail_unless(state.code(&address).unwrap_or_else(Vec::new) == code, "code is incorrect"); + fail_unless(state.code(&address).as_ref().map_or_else(|| code.is_empty(), |c| &**c == &code), "code is incorrect"); fail_unless(state.balance(&address) == account.balance.into(), "balance is incorrect"); fail_unless(state.nonce(&address) == account.nonce.into(), "nonce is incorrect"); account.storage.into_iter().foreach(|(k, v)| { diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index 2b0585db7..77183d452 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -587,7 +587,7 @@ impl MinerService for Miner { fn code(&self, chain: &MiningBlockChainClient, address: &Address) -> Option { let sealing_work = self.sealing_work.lock(); - sealing_work.queue.peek_last_ref().map_or_else(|| chain.latest_code(address), |b| b.block().fields().state.code(address)) + sealing_work.queue.peek_last_ref().map_or_else(|| chain.latest_code(address), |b| b.block().fields().state.code(address).map(|c| (*c).clone())) } fn set_author(&self, author: Address) { diff --git a/ethcore/src/state/account.rs b/ethcore/src/state/account.rs index 07478220a..bd7ed810b 100644 --- a/ethcore/src/state/account.rs +++ b/ethcore/src/state/account.rs @@ -40,14 +40,16 @@ pub struct Account { // Modified storage. Accumulates changes to storage made in `set_storage` // Takes precedence over `storage_cache`. storage_changes: HashMap, - // Code hash of the account. If None, means that it's a contract whose code has not yet been set. - code_hash: Option, + // Code hash of the account. + code_hash: H256, // Size of the accoun code. 
code_size: Option, // Code cache of the account. - code_cache: Bytes, - // Account is new or has been modified + code_cache: Arc, + // Account is new or has been modified. filth: Filth, + // Account code new or has been modified. + code_filth: Filth, // Cached address hash. address_hash: Cell>, } @@ -62,10 +64,11 @@ impl Account { storage_root: SHA3_NULL_RLP, storage_cache: Self::empty_storage_cache(), storage_changes: storage, - code_hash: Some(code.sha3()), + code_hash: code.sha3(), code_size: Some(code.len()), - code_cache: code, + code_cache: Arc::new(code), filth: Filth::Dirty, + code_filth: Filth::Dirty, address_hash: Cell::new(None), } } @@ -82,9 +85,10 @@ impl Account { storage_root: SHA3_NULL_RLP, storage_cache: Self::empty_storage_cache(), storage_changes: pod.storage.into_iter().collect(), - code_hash: pod.code.as_ref().map(|c| c.sha3()), + code_hash: pod.code.as_ref().map_or(SHA3_EMPTY, |c| c.sha3()), + code_filth: Filth::Dirty, code_size: Some(pod.code.as_ref().map_or(0, |c| c.len())), - code_cache: pod.code.map_or_else(|| { warn!("POD account with unknown code is being created! Assuming no code."); vec![] }, |c| c), + code_cache: Arc::new(pod.code.map_or_else(|| { warn!("POD account with unknown code is being created! Assuming no code."); vec![] }, |c| c)), filth: Filth::Dirty, address_hash: Cell::new(None), } @@ -98,10 +102,11 @@ impl Account { storage_root: SHA3_NULL_RLP, storage_cache: Self::empty_storage_cache(), storage_changes: HashMap::new(), - code_hash: Some(SHA3_EMPTY), - code_cache: vec![], + code_hash: SHA3_EMPTY, + code_cache: Arc::new(vec![]), code_size: Some(0), filth: Filth::Dirty, + code_filth: Filth::Clean, address_hash: Cell::new(None), } } @@ -115,10 +120,11 @@ impl Account { storage_root: r.val_at(2), storage_cache: Self::empty_storage_cache(), storage_changes: HashMap::new(), - code_hash: Some(r.val_at(3)), - code_cache: vec![], + code_hash: r.val_at(3), + code_cache: Arc::new(vec![]), code_size: None, filth: Filth::Clean, + code_filth: Filth::Clean, address_hash: Cell::new(None), } } @@ -132,10 +138,11 @@ impl Account { storage_root: SHA3_NULL_RLP, storage_cache: Self::empty_storage_cache(), storage_changes: HashMap::new(), - code_hash: None, - code_cache: vec![], + code_hash: SHA3_EMPTY, + code_cache: Arc::new(vec![]), code_size: None, filth: Filth::Dirty, + code_filth: Filth::Clean, address_hash: Cell::new(None), } } @@ -143,16 +150,15 @@ impl Account { /// Set this account's code to the given code. /// NOTE: Account should have been created with `new_contract()` pub fn init_code(&mut self, code: Bytes) { - assert!(self.code_hash.is_none()); - self.code_cache = code; + self.code_hash = code.sha3(); + self.code_cache = Arc::new(code); self.code_size = Some(self.code_cache.len()); self.filth = Filth::Dirty; + self.code_filth = Filth::Dirty; } /// Reset this account's code to the given code. pub fn reset_code(&mut self, code: Bytes) { - self.code_hash = None; - self.code_size = Some(0); self.init_code(code); } @@ -209,10 +215,9 @@ impl Account { /// return the nonce associated with this account. pub fn nonce(&self) -> &U256 { &self.nonce } - #[cfg(test)] /// return the code hash associated with this account. pub fn code_hash(&self) -> H256 { - self.code_hash.clone().unwrap_or(SHA3_EMPTY) + self.code_hash.clone() } /// return the code hash associated with this account. @@ -227,13 +232,11 @@ impl Account { /// returns the account's code. If `None` then the code cache isn't available - /// get someone who knows to call `note_code`. 
- pub fn code(&self) -> Option<&[u8]> { - match self.code_hash { - Some(c) if c == SHA3_EMPTY && self.code_cache.is_empty() => Some(&self.code_cache), - Some(_) if !self.code_cache.is_empty() => Some(&self.code_cache), - None => Some(&self.code_cache), - _ => None, + pub fn code(&self) -> Option> { + if self.code_hash != SHA3_EMPTY && self.code_cache.is_empty() { + return None; } + Some(self.code_cache.clone()) } /// returns the account's code size. If `None` then the code cache or code size cache isn't available - @@ -246,24 +249,23 @@ impl Account { /// Provide a byte array which hashes to the `code_hash`. returns the hash as a result. pub fn note_code(&mut self, code: Bytes) -> Result<(), H256> { let h = code.sha3(); - match self.code_hash { - Some(ref i) if h == *i => { - self.code_cache = code; - self.code_size = Some(self.code_cache.len()); - Ok(()) - }, - _ => Err(h) + if self.code_hash == h { + self.code_cache = Arc::new(code); + self.code_size = Some(self.code_cache.len()); + Ok(()) + } else { + Err(h) } } /// Is `code_cache` valid; such that code is going to return Some? pub fn is_cached(&self) -> bool { - !self.code_cache.is_empty() || (self.code_cache.is_empty() && self.code_hash == Some(SHA3_EMPTY)) + !self.code_cache.is_empty() || (self.code_cache.is_empty() && self.code_hash == SHA3_EMPTY) } /// Is this a new or modified account? pub fn is_dirty(&self) -> bool { - self.filth == Filth::Dirty || !self.storage_is_clean() + self.filth == Filth::Dirty || self.code_filth == Filth::Dirty || !self.storage_is_clean() } /// Mark account as clean. @@ -277,20 +279,17 @@ impl Account { // TODO: fill out self.code_cache; trace!("Account::cache_code: ic={}; self.code_hash={:?}, self.code_cache={}", self.is_cached(), self.code_hash, self.code_cache.pretty()); self.is_cached() || - match self.code_hash { - Some(ref h) => match db.get(h) { - Some(x) => { - self.code_cache = x.to_vec(); - self.code_size = Some(x.len()); - true - }, - _ => { - warn!("Failed reverse get of {}", h); - false - }, - }, - _ => false, - } + match db.get(&self.code_hash) { + Some(x) => { + self.code_cache = Arc::new(x.to_vec()); + self.code_size = Some(x.len()); + true + }, + _ => { + warn!("Failed reverse get of {}", self.code_hash); + false + }, + } } /// Provide a database to get `code_size`. Should not be called if it is a contract without code. @@ -298,18 +297,19 @@ impl Account { // TODO: fill out self.code_cache; trace!("Account::cache_code_size: ic={}; self.code_hash={:?}, self.code_cache={}", self.is_cached(), self.code_hash, self.code_cache.pretty()); self.code_size.is_some() || - match self.code_hash { - Some(ref h) if h != &SHA3_EMPTY => match db.get(h) { + if self.code_hash != SHA3_EMPTY { + match db.get(&self.code_hash) { Some(x) => { self.code_size = Some(x.len()); true }, _ => { - warn!("Failed reverse get of {}", h); + warn!("Failed reverse get of {}", self.code_hash); false }, - }, - _ => false, + } + } else { + false } } @@ -370,15 +370,16 @@ impl Account { /// Commit any unsaved code. `code_hash` will always return the hash of the `code_cache` after this. 
pub fn commit_code(&mut self, db: &mut HashDB) { - trace!("Commiting code of {:?} - {:?}, {:?}", self, self.code_hash.is_none(), self.code_cache.is_empty()); - match (self.code_hash.is_none(), self.code_cache.is_empty()) { + trace!("Commiting code of {:?} - {:?}, {:?}", self, self.code_filth == Filth::Dirty, self.code_cache.is_empty()); + match (self.code_filth == Filth::Dirty, self.code_cache.is_empty()) { (true, true) => { - self.code_hash = Some(SHA3_EMPTY); self.code_size = Some(0); + self.code_filth = Filth::Clean; }, (true, false) => { - self.code_hash = Some(db.insert(&self.code_cache)); + db.emplace(self.code_hash.clone(), (*self.code_cache).clone()); self.code_size = Some(self.code_cache.len()); + self.code_filth = Filth::Clean; }, (false, _) => {}, } @@ -390,7 +391,7 @@ impl Account { stream.append(&self.nonce); stream.append(&self.balance); stream.append(&self.storage_root); - stream.append(self.code_hash.as_ref().unwrap_or(&SHA3_EMPTY)); + stream.append(&self.code_hash); stream.out() } @@ -404,8 +405,9 @@ impl Account { storage_changes: HashMap::new(), code_hash: self.code_hash.clone(), code_size: self.code_size.clone(), - code_cache: Bytes::new(), + code_cache: self.code_cache.clone(), filth: self.filth, + code_filth: self.code_filth, address_hash: self.address_hash.clone(), } } @@ -433,6 +435,7 @@ impl Account { self.nonce = other.nonce; self.storage_root = other.storage_root; self.code_hash = other.code_hash; + self.code_filth = other.code_filth; self.code_cache = other.code_cache; self.code_size = other.code_size; self.address_hash = other.address_hash; @@ -536,7 +539,7 @@ mod tests { let mut db = MemoryDB::new(); let mut db = AccountDBMut::new(&mut db, &Address::new()); a.init_code(vec![0x55, 0x44, 0xffu8]); - assert_eq!(a.code_hash(), SHA3_EMPTY); + assert_eq!(a.code_filth, Filth::Dirty); assert_eq!(a.code_size(), Some(3)); a.commit_code(&mut db); assert_eq!(a.code_hash().hex(), "af231e631776a517ca23125370d542873eca1fb4d613ed9b5d5335a46ae5b7eb"); @@ -548,11 +551,12 @@ mod tests { let mut db = MemoryDB::new(); let mut db = AccountDBMut::new(&mut db, &Address::new()); a.init_code(vec![0x55, 0x44, 0xffu8]); - assert_eq!(a.code_hash(), SHA3_EMPTY); + assert_eq!(a.code_filth, Filth::Dirty); a.commit_code(&mut db); + assert_eq!(a.code_filth, Filth::Clean); assert_eq!(a.code_hash().hex(), "af231e631776a517ca23125370d542873eca1fb4d613ed9b5d5335a46ae5b7eb"); a.reset_code(vec![0x55]); - assert_eq!(a.code_hash(), SHA3_EMPTY); + assert_eq!(a.code_filth, Filth::Dirty); a.commit_code(&mut db); assert_eq!(a.code_hash().hex(), "37bf2238b11b68cdc8382cece82651b59d3c3988873b6e0f33d79694aa45f1be"); } diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index 79d7cba54..0661420ed 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -319,9 +319,14 @@ impl State { } /// Get accounts' code. - pub fn code(&self, a: &Address) -> Option { + pub fn code(&self, a: &Address) -> Option> { self.ensure_cached(a, RequireCache::Code, - |a| a.as_ref().map_or(None, |a| a.code().map(|x|x.to_vec()))) + |a| a.as_ref().map_or(None, |a| a.code().clone())) + } + + pub fn code_hash(&self, a: &Address) -> H256 { + self.ensure_cached(a, RequireCache::None, + |a| a.as_ref().map_or(SHA3_EMPTY, |a| a.code_hash())) } /// Get accounts' code size. 
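A brief aside on the `Arc` refactor running through the hunks above: contract code is now handed out as a shared, reference-counted buffer, so `Account::code()` and `Ext::extcode()` return a cheap pointer clone, and only the call sites that really need an owned `Bytes` (the miner's `code()` and the trace conversion, via `(*c).clone()`) copy the bytecode. A minimal, self-contained sketch of that distinction, using plain std types rather than Parity's:

    use std::sync::Arc;

    fn main() {
        // Stand-in for the cached contract code now held by an `Account`.
        let code: Arc<Vec<u8>> = Arc::new(vec![0x60, 0x60, 0x60, 0x40]);

        // Cheap: bumps a reference count, copies no bytes. This is what
        // `Account::code()` and `extcode()` hand out after this change.
        let shared = code.clone();
        assert!(Arc::ptr_eq(&code, &shared));

        // Expensive: a deep copy of the bytecode, only done where an owned
        // buffer is unavoidable, mirroring `(*c).clone()` in the miner.
        let owned: Vec<u8> = (*code).clone();
        assert_eq!(owned, *shared);
    }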
@@ -640,6 +645,7 @@ impl Clone for State { #[cfg(test)] mod tests { +use std::sync::Arc; use std::str::FromStr; use rustc_serialize::hex::FromHex; use super::*; @@ -1504,14 +1510,14 @@ fn code_from_database() { let mut state = get_temp_state_in(temp.as_path()); state.require_or_from(&a, false, ||Account::new_contract(42.into(), 0.into()), |_|{}); state.init_code(&a, vec![1, 2, 3]); - assert_eq!(state.code(&a), Some([1u8, 2, 3].to_vec())); + assert_eq!(state.code(&a), Some(Arc::new([1u8, 2, 3].to_vec()))); state.commit().unwrap(); - assert_eq!(state.code(&a), Some([1u8, 2, 3].to_vec())); + assert_eq!(state.code(&a), Some(Arc::new([1u8, 2, 3].to_vec()))); state.drop() }; let state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap(); - assert_eq!(state.code(&a), Some([1u8, 2, 3].to_vec())); + assert_eq!(state.code(&a), Some(Arc::new([1u8, 2, 3].to_vec()))); } #[test] diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs index dc95e8267..59e3699ac 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -57,7 +57,7 @@ fn should_return_registrar() { IoChannel::disconnected(), &db_config ).unwrap(); - assert_eq!(client.additional_params().get("registrar"), Some(&"8e4e9b13d4b45cb0befc93c3061b1408f67316b2".to_owned())); + assert_eq!(client.additional_params().get("registrar"), Some(&"52dff57a8a1532e6afb3dc07e2af58bb9eb05b3d".to_owned())); } #[test] diff --git a/ethcore/src/types/trace_types/trace.rs b/ethcore/src/types/trace_types/trace.rs index 9efeaa001..2571805a6 100644 --- a/ethcore/src/types/trace_types/trace.rs +++ b/ethcore/src/types/trace_types/trace.rs @@ -181,7 +181,7 @@ impl From for Create { from: p.sender, value: p.value.value(), gas: p.gas, - init: p.code.unwrap_or_else(Vec::new), + init: p.code.map_or_else(Vec::new, |c| (*c).clone()), } } } diff --git a/ethcore/src/verification/queue/kind.rs b/ethcore/src/verification/queue/kind.rs index 7585f1e6d..b6b6c5cf6 100644 --- a/ethcore/src/verification/queue/kind.rs +++ b/ethcore/src/verification/queue/kind.rs @@ -164,6 +164,7 @@ pub mod headers { } /// A mode for verifying headers. + #[allow(dead_code)] pub struct Headers; impl Kind for Headers { @@ -179,4 +180,4 @@ pub mod headers { engine.verify_block_unordered(&unverified, None).map(|_| unverified) } } -} \ No newline at end of file +} diff --git a/rpc/src/v1/tests/helpers/miner_service.rs b/rpc/src/v1/tests/helpers/miner_service.rs index 0f36b4f54..ddc0b057b 100644 --- a/rpc/src/v1/tests/helpers/miner_service.rs +++ b/rpc/src/v1/tests/helpers/miner_service.rs @@ -249,7 +249,7 @@ impl MinerService for TestMinerService { } fn code(&self, _chain: &MiningBlockChainClient, address: &Address) -> Option { - self.latest_closed_block.lock().as_ref().map_or(None, |b| b.block().fields().state.code(address).clone()) + self.latest_closed_block.lock().as_ref().map_or(None, |b| b.block().fields().state.code(address).map(|c| (*c).clone())) } } diff --git a/util/src/misc.rs b/util/src/misc.rs index 50b2e7e8d..b0452e85e 100644 --- a/util/src/misc.rs +++ b/util/src/misc.rs @@ -23,7 +23,7 @@ use target_info::Target; include!(concat!(env!("OUT_DIR"), "/version.rs")); include!(concat!(env!("OUT_DIR"), "/rustc_version.rs")); -#[derive(PartialEq,Eq,Clone,Copy)] +#[derive(PartialEq, Eq, Clone, Copy, Debug)] /// Boolean type for clean/dirty status. pub enum Filth { /// Data has not been changed. 
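On the state test updates at the top of this group of hunks: comparing `Option<Arc<Vec<u8>>>` values with `assert_eq!` still checks the bytes themselves, because `PartialEq` for `Arc<T>` compares the pointed-to values rather than the pointers. A quick standard-library check of that behaviour (illustration only):

    use std::sync::Arc;

    fn main() {
        let a = Arc::new(vec![1u8, 2, 3]);
        let b = Arc::new(vec![1u8, 2, 3]);
        assert_eq!(a, b);               // contents compare equal...
        assert!(!Arc::ptr_eq(&a, &b));  // ...even though the allocations differ
    }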
diff --git a/util/src/standard.rs b/util/src/standard.rs index 3d6c93e1a..0693dcd23 100644 --- a/util/src/standard.rs +++ b/util/src/standard.rs @@ -46,4 +46,4 @@ pub use rustc_serialize::hex::{FromHex, FromHexError}; pub use heapsize::HeapSizeOf; pub use itertools::Itertools; -pub use parking_lot::{Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}; \ No newline at end of file +pub use parking_lot::{Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}; From 06fe768ac2a23c6815e25be7e8564099fc726293 Mon Sep 17 00:00:00 2001 From: Marek Kotewicz Date: Mon, 3 Oct 2016 11:13:10 +0200 Subject: [PATCH 10/23] Polishing Actually enable fat db pr (#1974) (#2048) * Actually enable fat db, and do RPCs for it. * Implement HashDB traits for AccountDB. * user defaults * finished user defaults * user defaults are network-dependent * added tests for newly added functions, logger is initialized first * dir cleanup in progress * user_file is placed next to snapshots * fixing requested change --- ethcore/src/account_db.rs | 16 +++++++++++ ethcore/src/client/client.rs | 47 +++++++++++++++++++++++++++---- ethcore/src/client/config.rs | 5 ++-- ethcore/src/client/test_client.rs | 4 +++ ethcore/src/client/traits.rs | 3 ++ evmbin/src/main.rs | 1 - parity/blockchain.rs | 26 +++++++++++------ parity/cli/config.full.toml | 2 +- parity/cli/config.toml | 2 +- parity/cli/mod.rs | 8 +++--- parity/cli/usage.txt | 5 +++- parity/configuration.rs | 10 +++++++ parity/helpers.rs | 2 ++ parity/params.rs | 13 +++++++++ parity/run.rs | 26 ++++++++++++++--- parity/snapshot.rs | 14 +++++---- parity/user_defaults.rs | 6 ++++ rpc/src/v1/impls/ethcore.rs | 19 +++++++++++++ rpc/src/v1/traits/ethcore.rs | 10 +++++++ util/src/trie/mod.rs | 3 ++ 20 files changed, 188 insertions(+), 34 deletions(-) diff --git a/ethcore/src/account_db.rs b/ethcore/src/account_db.rs index a02b670d0..2d00f8ed5 100644 --- a/ethcore/src/account_db.rs +++ b/ethcore/src/account_db.rs @@ -121,6 +121,10 @@ impl<'db> HashDB for AccountDB<'db>{ fn remove(&mut self, _key: &H256) { unimplemented!() } + + fn get_aux(&self, hash: &[u8]) -> Option> { + self.db.get_aux(hash) + } } /// DB backend wrapper for Account trie @@ -193,6 +197,18 @@ impl<'db> HashDB for AccountDBMut<'db>{ let key = combine_key(&self.address_hash, key); self.db.remove(&key) } + + fn insert_aux(&mut self, hash: Vec, value: Vec) { + self.db.insert_aux(hash, value); + } + + fn get_aux(&self, hash: &[u8]) -> Option> { + self.db.get_aux(hash) + } + + fn remove_aux(&mut self, hash: &[u8]) { + self.db.remove_aux(hash); + } } struct Wrapping<'db>(&'db HashDB); diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 32a363210..6b1cc0c65 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -23,9 +23,9 @@ use time::precise_time_ns; // util use util::{Bytes, PerfTimer, Itertools, Mutex, RwLock}; -use util::journaldb; -use util::{U256, H256, Address, H2048, Uint}; -use util::TrieFactory; +use util::{journaldb, TrieFactory, Trie}; +use util::trie::TrieSpec; +use util::{U256, H256, Address, H2048, Uint, FixedHash}; use util::kvdb::*; // other @@ -51,7 +51,7 @@ use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute}; use client::{ BlockID, TransactionID, UncleID, TraceId, ClientConfig, BlockChainClient, MiningBlockChainClient, TraceFilter, CallAnalytics, BlockImportError, Mode, - ChainNotify + ChainNotify, }; use client::Error as ClientError; use env_info::EnvInfo; @@ -171,6 +171,11 @@ impl Client { let chain = 
Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone())); let tracedb = RwLock::new(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone())); + let trie_spec = match config.fat_db { + true => TrieSpec::Fat, + false => TrieSpec::Secure, + }; + let journal_db = journaldb::new(db.clone(), config.pruning, ::db::COL_STATE); let mut state_db = StateDB::new(journal_db); if state_db.journal_db().is_empty() && try!(spec.ensure_db_good(&mut state_db)) { @@ -193,7 +198,7 @@ impl Client { let factories = Factories { vm: EvmFactory::new(config.vm_type.clone()), - trie: TrieFactory::new(config.trie_spec.clone()), + trie: TrieFactory::new(trie_spec), accountdb: Default::default(), }; @@ -842,6 +847,38 @@ impl BlockChainClient for Client { self.state_at(id).map(|s| s.storage_at(address, position)) } + fn list_accounts(&self, id: BlockID) -> Option> { + if !self.factories.trie.is_fat() { + trace!(target: "fatdb", "list_accounts: Not a fat DB"); + return None; + } + + let state = match self.state_at(id) { + Some(state) => state, + _ => return None, + }; + + let (root, db) = state.drop(); + let trie = match self.factories.trie.readonly(db.as_hashdb(), &root) { + Ok(trie) => trie, + _ => { + trace!(target: "fatdb", "list_accounts: Couldn't open the DB"); + return None; + } + }; + + let iter = match trie.iter() { + Ok(iter) => iter, + _ => return None, + }; + + let accounts = iter.filter_map(|item| { + item.ok().map(|(addr, _)| Address::from_slice(&addr)) + }).collect(); + + Some(accounts) + } + fn transaction(&self, id: TransactionID) -> Option { self.transaction_address(id).and_then(|address| self.chain.read().transaction(&address)) } diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs index 399132108..8cf54387b 100644 --- a/ethcore/src/client/config.rs +++ b/ethcore/src/client/config.rs @@ -22,7 +22,6 @@ pub use evm::VMType; use verification::{VerifierType, QueueConfig}; use util::{journaldb, CompactionProfile}; -use util::trie::TrieSpec; /// Client state db compaction profile #[derive(Debug, PartialEq)] @@ -91,8 +90,8 @@ pub struct ClientConfig { pub tracing: TraceConfig, /// VM type. pub vm_type: VMType, - /// Trie type. - pub trie_spec: TrieSpec, + /// Fat DB enabled? + pub fat_db: bool, /// The JournalDB ("pruning") algorithm to use. pub pruning: journaldb::Algorithm, /// The name of the client instance. diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 971f7c448..bd74eb958 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -384,6 +384,10 @@ impl BlockChainClient for TestBlockChainClient { } } + fn list_accounts(&self, _id: BlockID) -> Option> { + None + } + fn transaction(&self, _id: TransactionID) -> Option { None // Simple default. } diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index 4da84bcbb..c2af744dd 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -112,6 +112,9 @@ pub trait BlockChainClient : Sync + Send { Therefore storage_at has returned Some; qed") } + /// Get a list of all accounts in the block `id`, if fat DB is in operation, otherwise `None`. + fn list_accounts(&self, id: BlockID) -> Option>; + /// Get transaction with given hash. fn transaction(&self, id: TransactionID) -> Option; diff --git a/evmbin/src/main.rs b/evmbin/src/main.rs index 94684129c..bc24afa1e 100644 --- a/evmbin/src/main.rs +++ b/evmbin/src/main.rs @@ -123,7 +123,6 @@ impl Args { } } - fn die(msg: &'static str) -> ! 
{ println!("{}", msg); ::std::process::exit(-1) diff --git a/parity/blockchain.rs b/parity/blockchain.rs index 3dfdac804..d4a4d8217 100644 --- a/parity/blockchain.rs +++ b/parity/blockchain.rs @@ -30,8 +30,8 @@ use ethcore::client::{Mode, DatabaseCompactionProfile, VMType, BlockImportError, use ethcore::error::ImportError; use ethcore::miner::Miner; use cache::CacheConfig; -use params::{SpecType, Pruning, Switch, tracing_switch_to_bool}; use informant::{Informant, MillisecondDuration}; +use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool}; use io_handler::ImportIoHandler; use helpers::{to_client_config, execute_upgrades}; use dir::Directories; @@ -81,6 +81,7 @@ pub struct ImportBlockchain { pub wal: bool, pub mode: Mode, pub tracing: Switch, + pub fat_db: Switch, pub vm_type: VMType, } @@ -96,6 +97,7 @@ pub struct ExportBlockchain { pub compaction: DatabaseCompactionProfile, pub wal: bool, pub mode: Mode, + pub fat_db: Switch, pub tracing: Switch, pub from_block: BlockID, pub to_block: BlockID, @@ -135,14 +137,17 @@ fn execute_import(cmd: ImportBlockchain) -> Result { // load user defaults let mut user_defaults = try!(UserDefaults::load(&user_defaults_path)); - // check if tracing is on - let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults)); - fdlimit::raise_fd_limit(); // select pruning algorithm let algorithm = cmd.pruning.to_algorithm(&user_defaults); + // check if tracing is on + let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults)); + + // check if fatdb is on + let fat_db = try!(fatdb_switch_to_bool(cmd.fat_db, &user_defaults, algorithm)); + // prepare client and snapshot paths. let client_path = db_dirs.client_path(algorithm); let snapshot_path = db_dirs.snapshot_path(); @@ -151,7 +156,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result { try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile())); // prepare client config - let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), algorithm); + let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, fat_db, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), algorithm); // build client let service = try!(ClientService::start( @@ -283,14 +288,17 @@ fn execute_export(cmd: ExportBlockchain) -> Result { // load user defaults let user_defaults = try!(UserDefaults::load(&user_defaults_path)); - // check if tracing is on - let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults)); - fdlimit::raise_fd_limit(); // select pruning algorithm let algorithm = cmd.pruning.to_algorithm(&user_defaults); + // check if tracing is on + let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults)); + + // check if fatdb is on + let fat_db = try!(fatdb_switch_to_bool(cmd.fat_db, &user_defaults, algorithm)); + // prepare client and snapshot paths. 
let client_path = db_dirs.client_path(algorithm); let snapshot_path = db_dirs.snapshot_path(); @@ -299,7 +307,7 @@ fn execute_export(cmd: ExportBlockchain) -> Result { try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile())); // prepare client config - let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, cmd.compaction, cmd.wal, VMType::default(), "".into(), algorithm); + let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, fat_db, cmd.compaction, cmd.wal, VMType::default(), "".into(), algorithm); let service = try!(ClientService::start( client_config, diff --git a/parity/cli/config.full.toml b/parity/cli/config.full.toml index ec5dfbe35..a411e6767 100644 --- a/parity/cli/config.full.toml +++ b/parity/cli/config.full.toml @@ -82,7 +82,7 @@ cache_size_queue = 50 cache_size = 128 # Overrides above caches with total size fast_and_loose = false db_compaction = "ssd" -fat_db = false +fat_db = "auto" [snapshots] disable_periodic = false diff --git a/parity/cli/config.toml b/parity/cli/config.toml index 11ec333aa..a5ad55d40 100644 --- a/parity/cli/config.toml +++ b/parity/cli/config.toml @@ -49,7 +49,7 @@ cache_size_db = 128 cache_size_blocks = 16 cache_size_queue = 100 db_compaction = "ssd" -fat_db = true +fat_db = "off" [snapshots] disable_periodic = true diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index b8b10ec1d..082dbe8e4 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -217,7 +217,7 @@ usage! { or |c: &Config| otry!(c.footprint).fast_and_loose.clone(), flag_db_compaction: String = "ssd", or |c: &Config| otry!(c.footprint).db_compaction.clone(), - flag_fat_db: bool = false, + flag_fat_db: String = "auto", or |c: &Config| otry!(c.footprint).fat_db.clone(), // -- Import/Export Options @@ -362,7 +362,7 @@ struct Footprint { cache_size_blocks: Option, cache_size_queue: Option, db_compaction: Option, - fat_db: Option, + fat_db: Option, } #[derive(Default, Debug, PartialEq, RustcDecodable)] @@ -535,7 +535,7 @@ mod tests { flag_cache_size: Some(128), flag_fast_and_loose: false, flag_db_compaction: "ssd".into(), - flag_fat_db: false, + flag_fat_db: "auto".into(), // -- Import/Export Options flag_from: "1".into(), @@ -687,7 +687,7 @@ mod tests { cache_size_blocks: Some(16), cache_size_queue: Some(100), db_compaction: Some("ssd".into()), - fat_db: Some(true), + fat_db: Some("off".into()), }), snapshots: Some(Snapshots { disable_periodic: Some(true), diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index 4c5d3b94b..861b7dafc 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -217,7 +217,10 @@ Footprint Options: --db-compaction TYPE Database compaction type. TYPE may be one of: ssd - suitable for SSDs and fast HDDs; hdd - suitable for slow HDDs (default: {flag_db_compaction}). - --fat-db Fat database. (default: {flag_fat_db}) + --fat-db BOOL Build appropriate information to allow enumeration + of all accounts and storage keys. Doubles the size + of the state database. BOOL may be one of on, off + or auto. 
(default: {flag_fat_db}) Import/Export Options: --from BLOCK Export from block BLOCK, which may be an index or diff --git a/parity/configuration.rs b/parity/configuration.rs index 1aa338c26..8eb617d2b 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -84,6 +84,7 @@ impl Configuration { let cache_config = self.cache_config(); let spec = try!(self.chain().parse()); let tracing = try!(self.args.flag_tracing.parse()); + let fat_db = try!(self.args.flag_fat_db.parse()); let compaction = try!(self.args.flag_db_compaction.parse()); let wal = !self.args.flag_fast_and_loose; let enable_network = self.enable_network(&mode); @@ -140,6 +141,7 @@ impl Configuration { wal: wal, mode: mode, tracing: tracing, + fat_db: fat_db, vm_type: vm_type, }; Cmd::Blockchain(BlockchainCmd::Import(import_cmd)) @@ -156,6 +158,7 @@ impl Configuration { wal: wal, mode: mode, tracing: tracing, + fat_db: fat_db, from_block: try!(to_block_id(&self.args.flag_from)), to_block: try!(to_block_id(&self.args.flag_to)), }; @@ -169,6 +172,7 @@ impl Configuration { logger_config: logger_config, mode: mode, tracing: tracing, + fat_db: fat_db, compaction: compaction, file_path: self.args.arg_file.clone(), wal: wal, @@ -185,6 +189,7 @@ impl Configuration { logger_config: logger_config, mode: mode, tracing: tracing, + fat_db: fat_db, compaction: compaction, file_path: self.args.arg_file.clone(), wal: wal, @@ -216,6 +221,7 @@ impl Configuration { miner_extras: try!(self.miner_extras()), mode: mode, tracing: tracing, + fat_db: fat_db, compaction: compaction, wal: wal, vm_type: vm_type, @@ -717,6 +723,7 @@ mod tests { wal: true, mode: Default::default(), tracing: Default::default(), + fat_db: Default::default(), vm_type: VMType::Interpreter, }))); } @@ -737,6 +744,7 @@ mod tests { wal: true, mode: Default::default(), tracing: Default::default(), + fat_db: Default::default(), from_block: BlockID::Number(1), to_block: BlockID::Latest, }))); @@ -758,6 +766,7 @@ mod tests { wal: true, mode: Default::default(), tracing: Default::default(), + fat_db: Default::default(), from_block: BlockID::Number(1), to_block: BlockID::Latest, }))); @@ -804,6 +813,7 @@ mod tests { ui: false, name: "".into(), custom_bootnodes: false, + fat_db: Default::default(), no_periodic_snapshot: false, })); } diff --git a/parity/helpers.rs b/parity/helpers.rs index 0649e7fe9..abdd5daa5 100644 --- a/parity/helpers.rs +++ b/parity/helpers.rs @@ -191,6 +191,7 @@ pub fn to_client_config( cache_config: &CacheConfig, mode: Mode, tracing: bool, + fat_db: bool, compaction: DatabaseCompactionProfile, wal: bool, vm_type: VMType, @@ -217,6 +218,7 @@ pub fn to_client_config( client_config.mode = mode; client_config.tracing.enabled = tracing; + client_config.fat_db = fat_db; client_config.pruning = pruning; client_config.db_compaction = compaction; client_config.db_wal = wal; diff --git a/parity/params.rs b/parity/params.rs index 160b50866..df0730b59 100644 --- a/parity/params.rs +++ b/parity/params.rs @@ -252,6 +252,19 @@ pub fn tracing_switch_to_bool(switch: Switch, user_defaults: &UserDefaults) -> R } } +pub fn fatdb_switch_to_bool(switch: Switch, user_defaults: &UserDefaults, algorithm: Algorithm) -> Result { + if algorithm != Algorithm::Archive { + return Err("Fat DB is not supported with the chosen pruning option. 
Please rerun with `--pruning=archive`".into()); + } + + match (user_defaults.is_first_launch, switch, user_defaults.fat_db) { + (false, Switch::On, false) => Err("FatDB resync required".into()), + (_, Switch::On, _) => Ok(true), + (_, Switch::Off, _) => Ok(false), + (_, Switch::Auto, def) => Ok(def), + } +} + #[cfg(test)] mod tests { use util::journaldb::Algorithm; diff --git a/parity/run.rs b/parity/run.rs index 4b458d4a6..d108ec53c 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -35,7 +35,10 @@ use rpc::{HttpServer, IpcServer, HttpConfiguration, IpcConfiguration}; use signer::SignerServer; use dapps::WebappServer; use io_handler::ClientIoHandler; -use params::{SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch, tracing_switch_to_bool}; +use params::{ + SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch, + tracing_switch_to_bool, fatdb_switch_to_bool, +}; use helpers::{to_client_config, execute_upgrades, passwords_from_files}; use dir::Directories; use cache::CacheConfig; @@ -72,6 +75,7 @@ pub struct RunCmd { pub miner_extras: MinerExtras, pub mode: Mode, pub tracing: Switch, + pub fat_db: Switch, pub compaction: DatabaseCompactionProfile, pub wal: bool, pub vm_type: VMType, @@ -115,11 +119,14 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { // load user defaults let mut user_defaults = try!(UserDefaults::load(&user_defaults_path)); + // select pruning algorithm + let algorithm = cmd.pruning.to_algorithm(&user_defaults); + // check if tracing is on let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults)); - // select pruning algorithm - let algorithm = cmd.pruning.to_algorithm(&user_defaults); + // check if fatdb is on + let fat_db = try!(fatdb_switch_to_bool(cmd.fat_db, &user_defaults, algorithm)); // prepare client and snapshot paths. 
let client_path = db_dirs.client_path(algorithm); @@ -135,7 +142,17 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { // display info about used pruning algorithm info!("Starting {}", Colour::White.bold().paint(version())); - info!("Using state DB journalling strategy {}", Colour::White.bold().paint(algorithm.as_str())); + info!("State DB configuation: {}{}{}", + Colour::White.bold().paint(algorithm.as_str()), + match fat_db { + true => Colour::White.bold().paint(" +Fat").to_string(), + false => "".to_owned(), + }, + match tracing { + true => Colour::White.bold().paint(" +Trace").to_string(), + false => "".to_owned(), + } + ); // display warning about using experimental journaldb alorithm if !algorithm.is_stable() { @@ -171,6 +188,7 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { &cmd.cache_config, cmd.mode, tracing, + fat_db, cmd.compaction, cmd.wal, cmd.vm_type, diff --git a/parity/snapshot.rs b/parity/snapshot.rs index f3a8a45d3..6b2efeed5 100644 --- a/parity/snapshot.rs +++ b/parity/snapshot.rs @@ -30,7 +30,7 @@ use ethcore::miner::Miner; use ethcore::ids::BlockID; use cache::CacheConfig; -use params::{SpecType, Pruning, Switch, tracing_switch_to_bool}; +use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool}; use helpers::{to_client_config, execute_upgrades}; use dir::Directories; use user_defaults::UserDefaults; @@ -57,6 +57,7 @@ pub struct SnapshotCommand { pub logger_config: LogConfig, pub mode: Mode, pub tracing: Switch, + pub fat_db: Switch, pub compaction: DatabaseCompactionProfile, pub file_path: Option, pub wal: bool, @@ -139,9 +140,6 @@ impl SnapshotCommand { // load user defaults let user_defaults = try!(UserDefaults::load(&user_defaults_path)); - // check if tracing is on - let tracing = try!(tracing_switch_to_bool(self.tracing, &user_defaults)); - // Setup logging let _logger = setup_log(&self.logger_config); @@ -150,6 +148,12 @@ impl SnapshotCommand { // select pruning algorithm let algorithm = self.pruning.to_algorithm(&user_defaults); + // check if tracing is on + let tracing = try!(tracing_switch_to_bool(self.tracing, &user_defaults)); + + // check if fatdb is on + let fat_db = try!(fatdb_switch_to_bool(self.fat_db, &user_defaults, algorithm)); + // prepare client and snapshot paths. 
let client_path = db_dirs.client_path(algorithm); let snapshot_path = db_dirs.snapshot_path(); @@ -158,7 +162,7 @@ impl SnapshotCommand { try!(execute_upgrades(&db_dirs, algorithm, self.compaction.compaction_profile())); // prepare client config - let client_config = to_client_config(&self.cache_config, self.mode, tracing, self.compaction, self.wal, VMType::default(), "".into(), algorithm); + let client_config = to_client_config(&self.cache_config, self.mode, tracing, fat_db, self.compaction, self.wal, VMType::default(), "".into(), algorithm); let service = try!(ClientService::start( client_config, diff --git a/parity/user_defaults.rs b/parity/user_defaults.rs index 8a1feebae..b7fc3d929 100644 --- a/parity/user_defaults.rs +++ b/parity/user_defaults.rs @@ -30,6 +30,7 @@ pub struct UserDefaults { pub is_first_launch: bool, pub pruning: Algorithm, pub tracing: bool, + pub fat_db: bool, } impl Serialize for UserDefaults { @@ -38,6 +39,7 @@ impl Serialize for UserDefaults { let mut map: BTreeMap = BTreeMap::new(); map.insert("pruning".into(), Value::String(self.pruning.as_str().into())); map.insert("tracing".into(), Value::Bool(self.tracing)); + map.insert("fat_db".into(), Value::Bool(self.fat_db)); map.serialize(serializer) } } @@ -62,11 +64,14 @@ impl Visitor for UserDefaultsVisitor { let pruning = try!(pruning.parse().map_err(|_| Error::custom("invalid pruning method"))); let tracing: Value = try!(map.remove("tracing".into()).ok_or_else(|| Error::custom("missing tracing"))); let tracing = try!(tracing.as_bool().ok_or_else(|| Error::custom("invalid tracing value"))); + let fat_db: Value = map.remove("fat_db".into()).unwrap_or_else(|| Value::Bool(false)); + let fat_db = try!(fat_db.as_bool().ok_or_else(|| Error::custom("invalid fat_db value"))); let user_defaults = UserDefaults { is_first_launch: false, pruning: pruning, tracing: tracing, + fat_db: fat_db, }; Ok(user_defaults) @@ -79,6 +84,7 @@ impl Default for UserDefaults { is_first_launch: true, pruning: Algorithm::default(), tracing: false, + fat_db: false, } } } diff --git a/rpc/src/v1/impls/ethcore.rs b/rpc/src/v1/impls/ethcore.rs index 684ce61a4..56f9f6fc4 100644 --- a/rpc/src/v1/impls/ethcore.rs +++ b/rpc/src/v1/impls/ethcore.rs @@ -29,6 +29,7 @@ use ethstore::random_phrase; use ethsync::{SyncProvider, ManageNetwork}; use ethcore::miner::MinerService; use ethcore::client::{MiningBlockChainClient}; +use ethcore::ids::BlockID; use jsonrpc_core::{from_params, to_value, Value, Error, Params, Ready}; use v1::traits::Ethcore; @@ -251,6 +252,24 @@ impl Ethcore for EthcoreClient where ) } + fn list_accounts(&self, params: Params) -> Result { + try!(self.active()); + try!(expect_no_params(params)); + + take_weak!(self.client) + .list_accounts(BlockID::Latest) + .map(|a| Ok(to_value(&a.into_iter().map(Into::into).collect::>()))) + .unwrap_or(Ok(Value::Null)) + } + + fn list_storage_keys(&self, params: Params) -> Result { + try!(self.active()); + + from_params::<(H160,)>(params).and_then(|(_addr,)| + Ok(Value::Null) + ) + } + fn encrypt_message(&self, params: Params) -> Result { try!(self.active()); from_params::<(H512, Bytes)>(params).and_then(|(key, phrase)| { diff --git a/rpc/src/v1/traits/ethcore.rs b/rpc/src/v1/traits/ethcore.rs index 0565da04a..011b78c8b 100644 --- a/rpc/src/v1/traits/ethcore.rs +++ b/rpc/src/v1/traits/ethcore.rs @@ -76,6 +76,14 @@ pub trait Ethcore: Sized + Send + Sync + 'static { /// Returns the value of the registrar for this network. 
fn registry_address(&self, _: Params) -> Result; + /// Returns all addresses if Fat DB is enabled (`--fat-db`), or null if not. + /// Takes no parameters. + fn list_accounts(&self, _: Params) -> Result; + + /// Returns all storage keys of the given address (first parameter) if Fat DB is enabled (`--fat-db`), + /// or null if not. + fn list_storage_keys(&self, _: Params) -> Result; + /// Encrypt some data with a public key under ECIES. /// First parameter is the 512-byte destination public key, second is the message. fn encrypt_message(&self, _: Params) -> Result; @@ -108,6 +116,8 @@ pub trait Ethcore: Sized + Send + Sync + 'static { delegate.add_method("ethcore_generateSecretPhrase", Ethcore::generate_secret_phrase); delegate.add_method("ethcore_phraseToAddress", Ethcore::phrase_to_address); delegate.add_method("ethcore_registryAddress", Ethcore::registry_address); + delegate.add_method("ethcore_listAccounts", Ethcore::list_accounts); + delegate.add_method("ethcore_listStorageKeys", Ethcore::list_storage_keys); delegate.add_method("ethcore_encryptMessage", Ethcore::encrypt_message); delegate.add_method("ethcore_pendingTransactions", Ethcore::pending_transactions); delegate.add_async_method("ethcore_hashContent", Ethcore::hash_content); diff --git a/util/src/trie/mod.rs b/util/src/trie/mod.rs index 6eebd8f5d..952eb8894 100644 --- a/util/src/trie/mod.rs +++ b/util/src/trie/mod.rs @@ -233,4 +233,7 @@ impl TrieFactory { TrieSpec::Fat => Ok(Box::new(try!(FatDBMut::from_existing(db, root)))), } } + + /// Returns true iff the trie DB is a fat DB (allows enumeration of keys). + pub fn is_fat(&self) -> bool { self.spec == TrieSpec::Fat } } From e1d3b3fff8a43cc842b8b552947d034cf46a6ab5 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 3 Oct 2016 14:02:43 +0400 Subject: [PATCH 11/23] Accounts bloom in master (#2426) * bloom crate link * database layout and outdated tests * state db alterations * v10 migration run * using arc * bloom migration * migration fixes and mess * fix tests --- Cargo.lock | 8 ++ ethcore/Cargo.toml | 2 + ethcore/src/client/test_client.rs | 7 +- ethcore/src/db.rs | 4 +- ethcore/src/lib.rs | 2 + ethcore/src/migrations/mod.rs | 3 + ethcore/src/migrations/v10.rs | 117 ++++++++++++++++++++++++++++++ ethcore/src/snapshot/mod.rs | 10 ++- ethcore/src/spec/spec.rs | 1 + ethcore/src/state/mod.rs | 26 +++++-- ethcore/src/state_db.rs | 73 ++++++++++++++++++- ethcore/src/tests/helpers.rs | 3 +- parity/migration.rs | 5 +- util/src/migration/mod.rs | 10 ++- 14 files changed, 255 insertions(+), 16 deletions(-) create mode 100644 ethcore/src/migrations/v10.rs diff --git a/Cargo.lock b/Cargo.lock index 15cb17c88..bb91080c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -120,6 +120,11 @@ name = "bloomchain" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "byteorder" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "bytes" version = "0.3.0" @@ -270,10 +275,12 @@ version = "1.4.0" dependencies = [ "bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 1.4.0", + "ethcore-bloom-journal 
0.1.0", "ethcore-devtools 1.4.0", "ethcore-io 1.4.0", "ethcore-ipc 1.4.0", @@ -1901,6 +1908,7 @@ dependencies = [ "checksum bitflags 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4f67931368edf3a9a51d29886d245f1c3db2f1ef0dcc9e35ff70341b78c10d23" "checksum blastfig 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "09640e0509d97d5cdff03a9f5daf087a8e04c735c3b113a75139634a19cfc7b2" "checksum bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3f421095d2a76fc24cd3fb3f912b90df06be7689912b1bdb423caefae59c258d" +"checksum byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0fc10e8cc6b2580fda3f36eb6dc5316657f812a3df879a44a66fc9f0fdbc4855" "checksum bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c129aff112dcc562970abb69e2508b40850dd24c274761bb50fb8a0067ba6c27" "checksum bytes 0.4.0-dev (git+https://github.com/carllerche/bytes)" = "" "checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c" diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index 2f1291d56..96dcba3f8 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -38,6 +38,8 @@ ethcore-ipc-nano = { path = "../ipc/nano" } rlp = { path = "../util/rlp" } rand = "0.3" lru-cache = "0.0.7" +ethcore-bloom-journal = { path = "../util/bloom" } +byteorder = "0.5" [dependencies.hyper] git = "https://github.com/ethcore/hyper" diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index bd74eb958..11929294f 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -25,8 +25,9 @@ use transaction::{Transaction, LocalizedTransaction, SignedTransaction, Action}; use blockchain::TreeRoute; use client::{ BlockChainClient, MiningBlockChainClient, BlockChainInfo, BlockStatus, BlockID, - TransactionID, UncleID, TraceId, TraceFilter, LastHashes, CallAnalytics, BlockImportError + TransactionID, UncleID, TraceId, TraceFilter, LastHashes, CallAnalytics, BlockImportError, }; +use db::{NUM_COLUMNS, COL_STATE}; use header::{Header as BlockHeader, BlockNumber}; use filter::Filter; use log_entry::LocalizedLogEntry; @@ -286,8 +287,8 @@ impl TestBlockChainClient { pub fn get_temp_state_db() -> GuardedTempResult { let temp = RandomTempPath::new(); - let db = Database::open_default(temp.as_str()).unwrap(); - let journal_db = journaldb::new(Arc::new(db), journaldb::Algorithm::EarlyMerge, None); + let db = Database::open(&DatabaseConfig::with_columns(NUM_COLUMNS), temp.as_str()).unwrap(); + let journal_db = journaldb::new(Arc::new(db), journaldb::Algorithm::EarlyMerge, COL_STATE); let state_db = StateDB::new(journal_db); GuardedTempResult { _temp: temp, diff --git a/ethcore/src/db.rs b/ethcore/src/db.rs index c8c24cc5f..10672d730 100644 --- a/ethcore/src/db.rs +++ b/ethcore/src/db.rs @@ -34,8 +34,10 @@ pub const COL_BODIES: Option = Some(2); pub const COL_EXTRA: Option = Some(3); /// Column for Traces pub const COL_TRACE: Option = Some(4); +/// Column for Traces +pub const COL_ACCOUNT_BLOOM: Option = Some(5); /// Number of columns in DB -pub const NUM_COLUMNS: Option = Some(5); +pub const NUM_COLUMNS: Option = Some(6); /// Modes for updating caches. 
#[derive(Clone, Copy)] diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index cc398a2f5..c72a977cf 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -99,6 +99,8 @@ extern crate ethcore_devtools as devtools; extern crate rand; extern crate bit_set; extern crate rlp; +extern crate ethcore_bloom_journal as bloom_journal; +extern crate byteorder; #[macro_use] extern crate log; diff --git a/ethcore/src/migrations/mod.rs b/ethcore/src/migrations/mod.rs index 5c0c6f420..7ccafac74 100644 --- a/ethcore/src/migrations/mod.rs +++ b/ethcore/src/migrations/mod.rs @@ -23,3 +23,6 @@ pub mod extras; mod v9; pub use self::v9::ToV9; pub use self::v9::Extract; + +mod v10; +pub use self::v10::ToV10; diff --git a/ethcore/src/migrations/v10.rs b/ethcore/src/migrations/v10.rs new file mode 100644 index 000000000..88884fb26 --- /dev/null +++ b/ethcore/src/migrations/v10.rs @@ -0,0 +1,117 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Bloom upgrade + +use std::sync::Arc; +use db::{COL_EXTRA, COL_HEADERS, COL_STATE}; +use state_db::{ACCOUNT_BLOOM_SPACE, DEFAULT_ACCOUNT_PRESET, StateDB}; +use util::trie::TrieDB; +use views::HeaderView; +use bloom_journal::Bloom; +use util::migration::{Error, Migration, Progress, Batch, Config}; +use util::journaldb; +use util::{H256, FixedHash, Trie}; +use util::{Database, DBTransaction}; + +/// Account bloom upgrade routine. If bloom already present, does nothing. +/// If database empty (no best block), does nothing. +/// Can be called on upgraded database with no issues (will do nothing). 
+pub fn generate_bloom(source: Arc, dest: &mut Database) -> Result<(), Error> { + trace!(target: "migration", "Account bloom upgrade started"); + let best_block_hash = match try!(source.get(COL_EXTRA, b"best")) { + // no migration needed + None => { + trace!(target: "migration", "No best block hash, skipping"); + return Ok(()); + }, + Some(hash) => hash, + }; + let best_block_header = match try!(source.get(COL_HEADERS, &best_block_hash)) { + // no best block, nothing to do + None => { + trace!(target: "migration", "No best block header, skipping"); + return Ok(()) + }, + Some(x) => x, + }; + let state_root = HeaderView::new(&best_block_header).state_root(); + + trace!("Adding accounts bloom (one-time upgrade)"); + let bloom_journal = { + let mut bloom = Bloom::new(ACCOUNT_BLOOM_SPACE, DEFAULT_ACCOUNT_PRESET); + // no difference what algorithm is passed, since there will be no writes + let state_db = journaldb::new( + source.clone(), + journaldb::Algorithm::OverlayRecent, + COL_STATE); + let account_trie = try!(TrieDB::new(state_db.as_hashdb(), &state_root).map_err(|e| Error::Custom(format!("Cannot open trie: {:?}", e)))); + for item in try!(account_trie.iter().map_err(|_| Error::MigrationImpossible)) { + let (ref account_key, _) = try!(item.map_err(|_| Error::MigrationImpossible)); + let account_key_hash = H256::from_slice(&account_key); + bloom.set(&*account_key_hash); + } + + bloom.drain_journal() + }; + + trace!(target: "migration", "Generated {} bloom updates", bloom_journal.entries.len()); + + let mut batch = DBTransaction::new(dest); + try!(StateDB::commit_bloom(&mut batch, bloom_journal).map_err(|_| Error::Custom("Failed to commit bloom".to_owned()))); + try!(dest.write(batch)); + + trace!(target: "migration", "Finished bloom update"); + + + Ok(()) +} + +/// Account bloom migration. 
+#[derive(Default)] +pub struct ToV10 { + progress: Progress, +} + +impl ToV10 { + /// New v10 migration + pub fn new() -> ToV10 { ToV10 { progress: Progress::default() } } +} + +impl Migration for ToV10 { + fn version(&self) -> u32 { + 10 + } + + fn pre_columns(&self) -> Option { Some(5) } + + fn columns(&self) -> Option { Some(6) } + + fn migrate(&mut self, source: Arc, config: &Config, dest: &mut Database, col: Option) -> Result<(), Error> { + let mut batch = Batch::new(config, col); + for (key, value) in source.iter(col) { + self.progress.tick(); + try!(batch.insert(key.to_vec(), value.to_vec(), dest)); + } + try!(batch.commit(dest)); + + if col == COL_STATE { + try!(generate_bloom(source, dest)); + } + + Ok(()) + } +} diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 2074f8174..2150ee226 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -43,6 +43,8 @@ use self::account::Account; use self::block::AbridgedBlock; use self::io::SnapshotWriter; +use super::state_db::StateDB; + use crossbeam::{scope, ScopedJoinHandle}; use rand::{Rng, OsRng}; @@ -454,6 +456,10 @@ impl StateRebuilder { self.code_map.insert(code_hash, code); } + let backing = self.db.backing().clone(); + + // bloom has to be updated + let mut bloom = StateDB::load_bloom(&backing); // batch trie writes { @@ -464,12 +470,14 @@ impl StateRebuilder { }; for (hash, thin_rlp) in pairs { + bloom.set(&*hash); try!(account_trie.insert(&hash, &thin_rlp)); } } - let backing = self.db.backing().clone(); + let bloom_journal = bloom.drain_journal(); let mut batch = backing.transaction(); + try!(StateDB::commit_bloom(&mut batch, bloom_journal)); try!(self.db.inject(&mut batch)); try!(backing.write(batch).map_err(::util::UtilError::SimpleString)); trace!(target: "snapshot", "current state root: {:?}", self.state_root); diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index b133c7181..34f7afff4 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -247,6 +247,7 @@ impl Spec { } trace!(target: "spec", "ensure_db_good: Populated sec trie; root is {}", root); for (address, account) in self.genesis_state.get().iter() { + db.note_account_bloom(address); account.insert_additional(&mut AccountDBMut::new(db.as_hashdb_mut(), address)); } assert!(db.as_hashdb().contains(&self.state_root())); diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index 0661420ed..a2fe25b91 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -300,7 +300,10 @@ impl State { } } } - + + // check bloom before any requests to trie + if !self.db.check_account_bloom(address) { return H256::zero() } + // account is not found in the global cache, get from the DB and insert into local let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); let maybe_acc = match db.get(address) { @@ -405,6 +408,7 @@ impl State { for (address, ref mut a) in accounts.iter_mut() { match a { &mut&mut AccountEntry::Cached(ref mut account) if account.is_dirty() => { + db.note_account_bloom(&address); let addr_hash = account.address_hash(address); let mut account_db = factories.accountdb.create(db.as_hashdb_mut(), addr_hash); account.commit_storage(&factories.trie, account_db.as_hashdb_mut()); @@ -468,6 +472,7 @@ impl State { pub fn populate_from(&mut self, accounts: PodState) { assert!(self.snapshots.borrow().is_empty()); for (add, acc) in accounts.drain().into_iter() { + self.db.note_account_bloom(&add); self.cache.borrow_mut().insert(add, 
AccountEntry::Cached(Account::from_pod(acc))); } } @@ -543,6 +548,9 @@ impl State { match result { Some(r) => r, None => { + // first check bloom if it is not in database for sure + if !self.db.check_account_bloom(a) { return f(None); } + // not found in the global cache, get from the DB and insert into local let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); let mut maybe_acc = match db.get(a) { @@ -579,11 +587,17 @@ impl State { Some(Some(acc)) => self.insert_cache(a, AccountEntry::Cached(acc)), Some(None) => self.insert_cache(a, AccountEntry::Missing), None => { - let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); - let maybe_acc = match db.get(a) { - Ok(Some(acc)) => AccountEntry::Cached(Account::from_rlp(acc)), - Ok(None) => AccountEntry::Missing, - Err(e) => panic!("Potential DB corruption encountered: {}", e), + let maybe_acc = if self.db.check_account_bloom(a) { + let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); + let maybe_acc = match db.get(a) { + Ok(Some(acc)) => AccountEntry::Cached(Account::from_rlp(acc)), + Ok(None) => AccountEntry::Missing, + Err(e) => panic!("Potential DB corruption encountered: {}", e), + }; + maybe_acc + } + else { + AccountEntry::Missing }; self.insert_cache(a, maybe_acc); } diff --git a/ethcore/src/state_db.rs b/ethcore/src/state_db.rs index af6780cdc..7a1206801 100644 --- a/ethcore/src/state_db.rs +++ b/ethcore/src/state_db.rs @@ -18,11 +18,19 @@ use lru_cache::LruCache; use util::journaldb::JournalDB; use util::hash::{H256}; use util::hashdb::HashDB; -use util::{Arc, Address, DBTransaction, UtilError, Mutex}; use state::Account; +use util::{Arc, Address, Database, DBTransaction, UtilError, Mutex, Hashable}; +use bloom_journal::{Bloom, BloomJournal}; +use db::COL_ACCOUNT_BLOOM; +use byteorder::{LittleEndian, ByteOrder}; const STATE_CACHE_ITEMS: usize = 65536; +pub const ACCOUNT_BLOOM_SPACE: usize = 1048576; +pub const DEFAULT_ACCOUNT_PRESET: usize = 1000000; + +pub const ACCOUNT_BLOOM_HASHCOUNT_KEY: &'static [u8] = b"account_hash_count"; + struct AccountCache { /// DB Account cache. `None` indicates that account is known to be missing. 
accounts: LruCache>, @@ -39,22 +47,83 @@ pub struct StateDB { account_cache: Arc>, cache_overlay: Vec<(Address, Option)>, is_canon: bool, + account_bloom: Arc>, } impl StateDB { + /// Create a new instance wrapping `JournalDB` pub fn new(db: Box) -> StateDB { + let bloom = Self::load_bloom(db.backing()); StateDB { db: db, account_cache: Arc::new(Mutex::new(AccountCache { accounts: LruCache::new(STATE_CACHE_ITEMS) })), cache_overlay: Vec::new(), is_canon: false, + account_bloom: Arc::new(Mutex::new(bloom)), } } + /// Loads accounts bloom from the database + /// This bloom is used to handle request for the non-existant account fast + pub fn load_bloom(db: &Database) -> Bloom { + let hash_count_entry = db.get(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY) + .expect("Low-level database error"); + + if hash_count_entry.is_none() { + return Bloom::new(ACCOUNT_BLOOM_SPACE, DEFAULT_ACCOUNT_PRESET); + } + let hash_count_bytes = hash_count_entry.unwrap(); + assert_eq!(hash_count_bytes.len(), 1); + let hash_count = hash_count_bytes[0]; + + let mut bloom_parts = vec![0u64; ACCOUNT_BLOOM_SPACE / 8]; + let mut key = [0u8; 8]; + for i in 0..ACCOUNT_BLOOM_SPACE / 8 { + LittleEndian::write_u64(&mut key, i as u64); + bloom_parts[i] = db.get(COL_ACCOUNT_BLOOM, &key).expect("low-level database error") + .and_then(|val| Some(LittleEndian::read_u64(&val[..]))) + .unwrap_or(0u64); + } + + let bloom = Bloom::from_parts(&bloom_parts, hash_count as u32); + trace!(target: "account_bloom", "Bloom is {:?} full, hash functions count = {:?}", bloom.saturation(), hash_count); + bloom + } + + pub fn check_account_bloom(&self, address: &Address) -> bool { + trace!(target: "account_bloom", "Check account bloom: {:?}", address); + let bloom = self.account_bloom.lock(); + bloom.check(&*address.sha3()) + } + + pub fn note_account_bloom(&self, address: &Address) { + trace!(target: "account_bloom", "Note account bloom: {:?}", address); + let mut bloom = self.account_bloom.lock(); + bloom.set(&*address.sha3()); + } + + pub fn commit_bloom(batch: &mut DBTransaction, journal: BloomJournal) -> Result<(), UtilError> { + assert!(journal.hash_functions <= 255); + batch.put(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY, &vec![journal.hash_functions as u8]); + let mut key = [0u8; 8]; + let mut val = [0u8; 8]; + + for (bloom_part_index, bloom_part_value) in journal.entries { + LittleEndian::write_u64(&mut key, bloom_part_index as u64); + LittleEndian::write_u64(&mut val, bloom_part_value); + batch.put(COL_ACCOUNT_BLOOM, &key, &val); + } + Ok(()) + } + /// Commit all recent insert operations and canonical historical commits' removals from the /// old era to the backing database, reverting any non-canonical historical commit's inserts. 
pub fn commit(&mut self, batch: &mut DBTransaction, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + { + let mut bloom_lock = self.account_bloom.lock(); + try!(Self::commit_bloom(batch, bloom_lock.drain_journal())); + } let records = try!(self.db.commit(batch, now, id, end)); if self.is_canon { self.commit_cache(); @@ -81,6 +150,7 @@ impl StateDB { account_cache: self.account_cache.clone(), cache_overlay: Vec::new(), is_canon: false, + account_bloom: self.account_bloom.clone(), } } @@ -91,6 +161,7 @@ impl StateDB { account_cache: self.account_cache.clone(), cache_overlay: Vec::new(), is_canon: true, + account_bloom: self.account_bloom.clone(), } } diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index 6504ef8a9..acbf4e641 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -29,6 +29,7 @@ use ethereum; use devtools::*; use miner::Miner; use rlp::{self, RlpStream, Stream}; +use db::COL_STATE; #[cfg(feature = "json-tests")] pub enum ChainEra { @@ -344,7 +345,7 @@ pub fn get_temp_state() -> GuardedTempResult { pub fn get_temp_state_db_in(path: &Path) -> StateDB { let db = new_db(path.to_str().expect("Only valid utf8 paths for tests.")); - let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, None); + let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, COL_STATE); StateDB::new(journal_db) } diff --git a/parity/migration.rs b/parity/migration.rs index 084ade676..26bb606bc 100644 --- a/parity/migration.rs +++ b/parity/migration.rs @@ -30,7 +30,7 @@ use ethcore::migrations::Extract; /// Database is assumed to be at default version, when no version file is found. const DEFAULT_VERSION: u32 = 5; /// Current version of database models. -const CURRENT_VERSION: u32 = 9; +const CURRENT_VERSION: u32 = 10; /// First version of the consolidated database. const CONSOLIDATION_VERSION: u32 = 9; /// Defines how many items are migrated to the new version of database at once. @@ -144,7 +144,8 @@ pub fn default_migration_settings(compaction_profile: &CompactionProfile) -> Mig /// Migrations on the consolidated database. fn consolidated_database_migrations(compaction_profile: &CompactionProfile) -> Result { - let manager = MigrationManager::new(default_migration_settings(compaction_profile)); + let mut manager = MigrationManager::new(default_migration_settings(compaction_profile)); + try!(manager.add_migration(migrations::ToV10::new()).map_err(|_| Error::MigrationImpossible)); Ok(manager) } diff --git a/util/src/migration/mod.rs b/util/src/migration/mod.rs index cd2b7fae1..80cfa29b6 100644 --- a/util/src/migration/mod.rs +++ b/util/src/migration/mod.rs @@ -115,6 +115,12 @@ impl From<::std::io::Error> for Error { } } +impl From for Error { + fn from(e: String) -> Self { + Error::Custom(e) + } +} + /// A generalized migration from the given db to a destination db. pub trait Migration: 'static { /// Number of columns in the database before the migration. 
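Taking stock of this commit as a whole: the new `COL_ACCOUNT_BLOOM` column and the `note_account_bloom` / `check_account_bloom` pair let `State` answer "this address has never been written" without descending into the trie, which is what the early returns added in `state/mod.rs` above rely on. A self-contained toy sketch of that access pattern follows; the real filter is the journaling `Bloom` from `ethcore-bloom-journal`, keyed by `sha3(address)`, and everything below is illustration only:

    use std::collections::HashMap;
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    /// Toy bloom filter: k hash functions over a fixed bit array.
    struct ToyBloom { bits: Vec<u64>, k: u32 }

    impl ToyBloom {
        fn new(bit_count: usize, k: u32) -> ToyBloom {
            ToyBloom { bits: vec![0; (bit_count + 63) / 64], k: k }
        }

        fn positions<T: Hash>(&self, item: &T) -> Vec<usize> {
            (0..self.k).map(|seed| {
                let mut h = DefaultHasher::new();
                seed.hash(&mut h);
                item.hash(&mut h);
                h.finish() as usize % (self.bits.len() * 64)
            }).collect()
        }

        fn set<T: Hash>(&mut self, item: &T) {
            for p in self.positions(item) {
                self.bits[p / 64] |= 1 << (p % 64);
            }
        }

        fn check<T: Hash>(&self, item: &T) -> bool {
            self.positions(item).iter().all(|&p| (self.bits[p / 64] & (1 << (p % 64))) != 0)
        }
    }

    fn main() {
        let mut bloom = ToyBloom::new(1 << 20, 3);
        let mut accounts: HashMap<String, String> = HashMap::new();

        // Writing an account also notes it in the bloom (`note_account_bloom`).
        accounts.insert("0xaaaa".to_string(), "account rlp".to_string());
        bloom.set(&"0xaaaa");

        // Reads consult the bloom first (`check_account_bloom`): a negative
        // answer is definitive, so the trie/database lookup is skipped.
        for addr in &["0xaaaa", "0xbbbb"] {
            let value = if bloom.check(addr) { accounts.get(*addr) } else { None };
            println!("{} -> {:?}", addr, value);
        }
    }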
@@ -222,10 +228,12 @@ impl Manager { pub fn execute(&mut self, old_path: &Path, version: u32) -> Result { let config = self.config.clone(); let migrations = self.migrations_from(version); + trace!(target: "migration", "Total migrations to execute for version {}: {}", version, migrations.len()); if migrations.is_empty() { return Err(Error::MigrationImpossible) }; - let columns = migrations.iter().find(|m| m.version() == version).and_then(|m| m.pre_columns()); + let columns = migrations.iter().nth(0).and_then(|m| m.pre_columns()); + trace!(target: "migration", "Expecting database to contain {:?} columns", columns); let mut db_config = DatabaseConfig { max_open_files: 64, cache_sizes: Default::default(), From a0a13600efbb5d07513429450b1d641fece3dfb1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 3 Oct 2016 15:01:10 +0200 Subject: [PATCH 12/23] Fixing Signer token RPC API --- parity/run.rs | 4 +++- parity/signer.rs | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/parity/run.rs b/parity/run.rs index d108ec53c..ef84d75a2 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -241,7 +241,9 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { let signer_path = cmd.signer_conf.signer_path.clone(); let deps_for_rpc_apis = Arc::new(rpc_apis::Dependencies { signer_port: cmd.signer_port, - signer_service: Arc::new(rpc_apis::SignerService::new(move || signer::new_token(signer_path.clone()))), + signer_service: Arc::new(rpc_apis::SignerService::new(move || { + signer::generate_new_token(signer_path.clone()).map_err(|e| format!("{:?}", e)) + })), client: client.clone(), sync: sync_provider.clone(), net: manage_network.clone(), diff --git a/parity/signer.rs b/parity/signer.rs index b60bc7211..869c7fab5 100644 --- a/parity/signer.rs +++ b/parity/signer.rs @@ -74,7 +74,7 @@ pub fn new_token(path: String) -> Result { .map_err(|err| format!("Error generating token: {:?}", err)) } -fn generate_new_token(path: String) -> io::Result { +pub fn generate_new_token(path: String) -> io::Result { let path = codes_path(path); let mut codes = try!(signer::AuthCodes::from_file(&path)); let code = try!(codes.generate_new()); From 10d572e24f08239c0e2947ca2483e0fc7b3fe70a Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Mon, 3 Oct 2016 19:31:50 +0200 Subject: [PATCH 13/23] Fixed FatDB check (#2443) --- parity/params.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/parity/params.rs b/parity/params.rs index df0730b59..a1b28406a 100644 --- a/parity/params.rs +++ b/parity/params.rs @@ -253,16 +253,17 @@ pub fn tracing_switch_to_bool(switch: Switch, user_defaults: &UserDefaults) -> R } pub fn fatdb_switch_to_bool(switch: Switch, user_defaults: &UserDefaults, algorithm: Algorithm) -> Result { - if algorithm != Algorithm::Archive { - return Err("Fat DB is not supported with the chosen pruning option. Please rerun with `--pruning=archive`".into()); - } - - match (user_defaults.is_first_launch, switch, user_defaults.fat_db) { + let result = match (user_defaults.is_first_launch, switch, user_defaults.fat_db) { (false, Switch::On, false) => Err("FatDB resync required".into()), (_, Switch::On, _) => Ok(true), (_, Switch::Off, _) => Ok(false), (_, Switch::Auto, def) => Ok(def), + }; + + if result.clone().unwrap_or(false) && algorithm != Algorithm::Archive { + return Err("Fat DB is not supported with the chosen pruning option. 
Please rerun with `--pruning=archive`".into()); } + result } #[cfg(test)] From 48bb8900452b39927f401ee44f0e3363dc27ba37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 3 Oct 2016 23:29:46 +0200 Subject: [PATCH 14/23] Better EVM informant & Slow transactions warning (#2436) * EVM informant. Slow transactions tracking * Additional feature for tests --- Cargo.toml | 2 + ethcore/Cargo.toml | 4 +- ethcore/src/block.rs | 118 +++++++++++----- ethcore/src/evm/interpreter/informant.rs | 164 +++++++++++++++++++++++ ethcore/src/evm/interpreter/mod.rs | 52 +++---- ethcore/src/evm/interpreter/stack.rs | 14 +- 6 files changed, 272 insertions(+), 82 deletions(-) create mode 100644 ethcore/src/evm/interpreter/informant.rs diff --git a/Cargo.toml b/Cargo.toml index edcb145af..034b1383b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -70,6 +70,8 @@ stratum = ["ipc"] ethkey-cli = ["ethcore/ethkey-cli"] ethstore-cli = ["ethcore/ethstore-cli"] evm-debug = ["ethcore/evm-debug"] +evm-debug-tests = ["ethcore/evm-debug-tests"] +slow-blocks = ["ethcore/slow-blocks"] [[bin]] path = "parity/main.rs" diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index 96dcba3f8..7bda7e567 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -47,7 +47,9 @@ default-features = false [features] jit = ["evmjit"] -evm-debug = [] +evm-debug = ["slow-blocks"] +evm-debug-tests = ["evm-debug"] +slow-blocks = [] # Use SLOW_TX_DURATION="50" (compile time!) to track transactions over 50ms json-tests = [] test-heavy = [] dev = ["clippy"] diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index dc60f9bbe..80c35d1d0 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -16,14 +16,26 @@ //! Blockchain block. -use common::*; +use std::sync::Arc; +use std::collections::HashSet; + +use rlp::{UntrustedRlp, RlpStream, Encodable, Decodable, Decoder, DecoderError, View, Stream}; +use util::{Bytes, Address, Uint, FixedHash, Hashable, U256, H256, ordered_trie_root, SHA3_NULL_RLP}; +use util::error::{Mismatch, OutOfBounds}; + +use basic_types::{LogBloom, Seal}; +use env_info::{EnvInfo, LastHashes}; use engines::Engine; -use state::*; -use state_db::StateDB; -use verification::PreverifiedBlock; -use trace::FlatTrace; +use error::{Error, BlockError, TransactionError}; use factory::Factories; -use rlp::*; +use header::Header; +use receipt::Receipt; +use state::State; +use state_db::StateDB; +use trace::FlatTrace; +use transaction::SignedTransaction; +use verification::PreverifiedBlock; +use views::BlockView; /// A block, encoded as it is on the block chain. 
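// Editor's note (not part of the patch): `slow-blocks` and `evm-debug-tests`
// added to the manifests above are ordinary cargo features, and the slow
// transaction threshold is fixed at compile time (see the SLOW_TX_DURATION
// comment in ethcore/Cargo.toml). A plausible invocation from the workspace
// root, given those feature names:
//
//     SLOW_TX_DURATION=50 cargo build --release --features slow-blocks
//
// With the feature disabled, push_transactions below compiles to the plain
// untimed loop.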
#[derive(Default, Debug, Clone, PartialEq)] @@ -518,25 +530,38 @@ pub fn enact( b.set_uncles_hash(header.uncles_hash().clone()); b.set_transactions_root(header.transactions_root().clone()); b.set_receipts_root(header.receipts_root().clone()); - for t in transactions { try!(b.push_transaction(t.clone(), None)); } - for u in uncles { try!(b.push_uncle(u.clone())); } + + try!(push_transactions(&mut b, transactions)); + for u in uncles { + try!(b.push_uncle(u.clone())); + } Ok(b.close_and_lock()) } -/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header -#[cfg_attr(feature="dev", allow(too_many_arguments))] -pub fn enact_bytes( - block_bytes: &[u8], - engine: &Engine, - tracing: bool, - db: StateDB, - parent: &Header, - last_hashes: Arc, - factories: Factories, -) -> Result { - let block = BlockView::new(block_bytes); - let header = block.header(); - enact(&header, &block.transactions(), &block.uncles(), engine, tracing, db, parent, last_hashes, factories) +#[inline(always)] +#[cfg(not(feature = "slow-blocks"))] +fn push_transactions(block: &mut OpenBlock, transactions: &[SignedTransaction]) -> Result<(), Error> { + for t in transactions { + try!(block.push_transaction(t.clone(), None)); + } + Ok(()) +} + +#[cfg(feature = "slow-blocks")] +fn push_transactions(block: &mut OpenBlock, transactions: &[SignedTransaction]) -> Result<(), Error> { + use std::time; + + let slow_tx = option_env!("SLOW_TX_DURATION").and_then(|v| v.parse().ok()).unwrap_or(100); + for t in transactions { + let hash = t.hash(); + let start = time::Instant::now(); + try!(block.push_transaction(t.clone(), None)); + let took = start.elapsed(); + if took > time::Duration::from_millis(slow_tx) { + warn!("Heavy transaction in block {:?}: {:?}", block.header().number(), hash); + } + } + Ok(()) } /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header @@ -554,26 +579,45 @@ pub fn enact_verified( enact(&block.header, &block.transactions, &view.uncles(), engine, tracing, db, parent, last_hashes, factories) } -/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. Seal the block aferwards -#[cfg_attr(feature="dev", allow(too_many_arguments))] -pub fn enact_and_seal( - block_bytes: &[u8], - engine: &Engine, - tracing: bool, - db: StateDB, - parent: &Header, - last_hashes: Arc, - factories: Factories, -) -> Result { - let header = BlockView::new(block_bytes).header_view(); - Ok(try!(try!(enact_bytes(block_bytes, engine, tracing, db, parent, last_hashes, factories)).seal(engine, header.seal()))) -} - #[cfg(test)] mod tests { use tests::helpers::*; use super::*; use common::*; + use engines::Engine; + use factory::Factories; + use state_db::StateDB; + + /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header + #[cfg_attr(feature="dev", allow(too_many_arguments))] + fn enact_bytes( + block_bytes: &[u8], + engine: &Engine, + tracing: bool, + db: StateDB, + parent: &Header, + last_hashes: Arc, + factories: Factories, + ) -> Result { + let block = BlockView::new(block_bytes); + let header = block.header(); + enact(&header, &block.transactions(), &block.uncles(), engine, tracing, db, parent, last_hashes, factories) + } + + /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. 
Seal the block aferwards + #[cfg_attr(feature="dev", allow(too_many_arguments))] + fn enact_and_seal( + block_bytes: &[u8], + engine: &Engine, + tracing: bool, + db: StateDB, + parent: &Header, + last_hashes: Arc, + factories: Factories, + ) -> Result { + let header = BlockView::new(block_bytes).header_view(); + Ok(try!(try!(enact_bytes(block_bytes, engine, tracing, db, parent, last_hashes, factories)).seal(engine, header.seal()))) + } #[test] fn open_block() { diff --git a/ethcore/src/evm/interpreter/informant.rs b/ethcore/src/evm/interpreter/informant.rs new file mode 100644 index 000000000..200b01526 --- /dev/null +++ b/ethcore/src/evm/interpreter/informant.rs @@ -0,0 +1,164 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +pub use self::inner::*; + +#[macro_use] +#[cfg(not(feature = "evm-debug"))] +mod inner { + macro_rules! evm_debug { + ($x: expr) => {} + } + + pub struct EvmInformant; + impl EvmInformant { + pub fn new(_depth: usize) -> Self { + EvmInformant {} + } + pub fn done(&mut self) {} + } +} + +#[macro_use] +#[cfg(feature = "evm-debug")] +mod inner { + use std::iter; + use std::collections::HashMap; + use std::time::{Instant, Duration}; + + use evm::interpreter::stack::Stack; + use evm::instructions::{Instruction, InstructionInfo, INSTRUCTIONS}; + use evm::{CostType}; + + use util::U256; + + macro_rules! 
evm_debug { + ($x: expr) => { + $x + } + } + + fn print(data: String) { + if cfg!(feature = "evm-debug-tests") { + println!("{}", data); + } else { + debug!(target: "evm", "{}", data); + } + } + + pub struct EvmInformant { + spacing: String, + last_instruction: Instant, + stats: HashMap, + } + + impl EvmInformant { + + fn color(instruction: Instruction, name: &str) -> String { + let c = instruction as usize % 6; + let colors = [31, 34, 33, 32, 35, 36]; + format!("\x1B[1;{}m{}\x1B[0m", colors[c], name) + } + + fn as_micro(duration: &Duration) -> u64 { + let mut sec = duration.as_secs(); + let subsec = duration.subsec_nanos() as u64; + sec = sec.saturating_mul(1_000_000u64); + sec += subsec / 1_000; + sec + } + + pub fn new(depth: usize) -> Self { + EvmInformant { + spacing: iter::repeat(".").take(depth).collect(), + last_instruction: Instant::now(), + stats: HashMap::new(), + } + } + + pub fn before_instruction(&mut self, pc: usize, instruction: Instruction, info: &InstructionInfo, current_gas: &Cost, stack: &Stack) { + let time = self.last_instruction.elapsed(); + self.last_instruction = Instant::now(); + + print(format!("{}[0x{:<3x}][{:>19}(0x{:<2x}) Gas Left: {:6?} (Previous took: {:10}μs)", + &self.spacing, + pc, + Self::color(instruction, info.name), + instruction, + current_gas, + Self::as_micro(&time), + )); + + if info.args > 0 { + for (idx, item) in stack.peek_top(info.args).iter().enumerate() { + print(format!("{} |{:2}: {:?}", self.spacing, idx, item)); + } + } + } + + pub fn after_instruction(&mut self, instruction: Instruction) { + let mut stats = self.stats.entry(instruction).or_insert_with(|| Stats::default()); + let took = self.last_instruction.elapsed(); + stats.note(took); + } + + pub fn done(&mut self) { + // Print out stats + let infos = &*INSTRUCTIONS; + + let mut stats: Vec<(_,_)> = self.stats.drain().collect(); + stats.sort_by(|ref a, ref b| b.1.avg().cmp(&a.1.avg())); + + print(format!("\n{}-------OPCODE STATS:", self.spacing)); + for (instruction, stats) in stats.into_iter() { + let info = infos[instruction as usize]; + print(format!("{}-------{:>19}(0x{:<2x}) count: {:4}, avg: {:10}μs", + self.spacing, + Self::color(instruction, info.name), + instruction, + stats.count, + stats.avg(), + )); + } + } + + } + + struct Stats { + count: u64, + total_duration: Duration, + } + + impl Default for Stats { + fn default() -> Self { + Stats { + count: 0, + total_duration: Duration::from_secs(0), + } + } + } + + impl Stats { + fn note(&mut self, took: Duration) { + self.count += 1; + self.total_duration += took; + } + + fn avg(&self) -> u64 { + EvmInformant::as_micro(&self.total_duration) / self.count + } + } +} diff --git a/ethcore/src/evm/interpreter/mod.rs b/ethcore/src/evm/interpreter/mod.rs index 2a6ab8460..fdf99876a 100644 --- a/ethcore/src/evm/interpreter/mod.rs +++ b/ethcore/src/evm/interpreter/mod.rs @@ -16,18 +16,8 @@ //! Rust VM implementation -#[cfg(not(feature = "evm-debug"))] -macro_rules! evm_debug { - ($x: expr) => {} -} - -#[cfg(feature = "evm-debug")] -macro_rules! 
evm_debug { - ($x: expr) => { - $x - } -} - +#[macro_use] +mod informant; mod gasometer; mod stack; mod memory; @@ -45,13 +35,6 @@ use super::instructions::{self, Instruction, InstructionInfo}; use evm::{self, MessageCallResult, ContractCreateResult, GasLeft, CostType}; use bit_set::BitSet; -#[cfg(feature = "evm-debug")] -fn color(instruction: Instruction, name: &'static str) -> String { - let c = instruction as usize % 6; - let colors = [31, 34, 33, 32, 35, 36]; - format!("\x1B[1;{}m{}\x1B[0m", colors[c], name) -} - type CodePosition = usize; type ProgramCounter = usize; @@ -74,6 +57,15 @@ struct CodeReader<'a> { #[cfg_attr(feature="dev", allow(len_without_is_empty))] impl<'a> CodeReader<'a> { + + /// Create new code reader - starting at position 0. + fn new(code: &'a Bytes) -> Self { + CodeReader { + position: 0, + code: code, + } + } + /// Get `no_of_bytes` from code and convert to U256. Move PC fn read(&mut self, no_of_bytes: usize) -> U256 { let pos = self.position; @@ -110,15 +102,14 @@ impl evm::Evm for Interpreter { fn exec(&mut self, params: ActionParams, ext: &mut evm::Ext) -> evm::Result { self.mem.clear(); + let mut informant = informant::EvmInformant::new(ext.depth()); + let code = ¶ms.code.as_ref().unwrap(); let valid_jump_destinations = self.cache.jump_destinations(¶ms.code_hash, code); let mut gasometer = Gasometer::::new(try!(Cost::from_u256(params.gas))); let mut stack = VecStack::with_capacity(ext.schedule().stack_limit, U256::zero()); - let mut reader = CodeReader { - position: 0, - code: code - }; + let mut reader = CodeReader::new(code); let infos = &*instructions::INSTRUCTIONS; while reader.position < code.len() { @@ -138,15 +129,7 @@ impl evm::Evm for Interpreter { gasometer.current_mem_gas = mem_gas; gasometer.current_gas = gasometer.current_gas - gas_cost; - evm_debug!({ - println!("[0x{:x}][{}(0x{:x}) Gas: {:?}\n Gas Before: {:?}", - reader.position, - color(instruction, info.name), - instruction, - gas_cost, - gasometer.current_gas + gas_cost - ); - }); + evm_debug!({ informant.before_instruction(reader.position, instruction, &info, &gasometer.current_gas, &stack) }); let (mem_written, store_written) = match trace_executed { true => (Self::mem_written(instruction, &stack), Self::store_written(instruction, &stack)), @@ -158,6 +141,8 @@ impl evm::Evm for Interpreter { gasometer.current_gas, ¶ms, ext, instruction, &mut reader, &mut stack )); + evm_debug!({ informant.after_instruction(instruction) }); + if trace_executed { ext.trace_executed(gasometer.current_gas.as_u256(), stack.peek_top(info.ret), mem_written.map(|(o, s)| (o, &(self.mem[o..(o + s)]))), store_written); } @@ -179,12 +164,13 @@ impl evm::Evm for Interpreter { reader.position = pos; }, InstructionResult::StopExecutionNeedsReturn(gas, off, size) => { + informant.done(); return Ok(GasLeft::NeedsReturn(gas.as_u256(), self.mem.read_slice(off, size))); }, InstructionResult::StopExecution => break, } } - + informant.done(); Ok(GasLeft::Known(gasometer.current_gas.as_u256())) } } diff --git a/ethcore/src/evm/interpreter/stack.rs b/ethcore/src/evm/interpreter/stack.rs index 98adf8539..0d7ef4dbb 100644 --- a/ethcore/src/evm/interpreter/stack.rs +++ b/ethcore/src/evm/interpreter/stack.rs @@ -34,7 +34,7 @@ pub trait Stack { /// Get number of elements on Stack fn size(&self) -> usize; /// Returns all data on stack. 
- fn peek_top(&mut self, no_of_elems: usize) -> &[T]; + fn peek_top(&self, no_of_elems: usize) -> &[T]; } pub struct VecStack { @@ -68,12 +68,7 @@ impl Stack for VecStack { fn pop_back(&mut self) -> S { let val = self.stack.pop(); match val { - Some(x) => { - evm_debug!({ - println!(" POP: {}", x) - }); - x - }, + Some(x) => x, None => panic!("Tried to pop from empty stack.") } } @@ -88,9 +83,6 @@ impl Stack for VecStack { } fn push(&mut self, elem: S) { - evm_debug!({ - println!(" PUSH: {}", elem) - }); self.stack.push(elem); } @@ -98,7 +90,7 @@ impl Stack for VecStack { self.stack.len() } - fn peek_top(&mut self, no_from_top: usize) -> &[S] { + fn peek_top(&self, no_from_top: usize) -> &[S] { assert!(self.stack.len() >= no_from_top, "peek_top asked for more items than exist."); &self.stack[self.stack.len() - no_from_top .. self.stack.len()] } From cb0e0abc4a89429148f0f58f124ef6690cbb7e81 Mon Sep 17 00:00:00 2001 From: svyatonik Date: Tue, 4 Oct 2016 11:44:47 +0300 Subject: [PATCH 15/23] closes ethcore/parity#1918 --- parity/account.rs | 24 +++++++++++++++++++++++- parity/configuration.rs | 11 +++++++++-- parity/params.rs | 2 -- parity/run.rs | 22 ++-------------------- 4 files changed, 34 insertions(+), 25 deletions(-) diff --git a/parity/account.rs b/parity/account.rs index 26a974090..72a932315 100644 --- a/parity/account.rs +++ b/parity/account.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use ethcore::ethstore::{EthStore, import_accounts}; +use ethcore::ethstore::{EthStore, SecretStore, import_accounts, read_geth_accounts}; use ethcore::ethstore::dir::DiskDirectory; use ethcore::account_provider::AccountProvider; use helpers::{password_prompt, password_from_file}; @@ -24,6 +24,7 @@ pub enum AccountCmd { New(NewAccount), List(String), Import(ImportAccounts), + ImportFromGeth(ImportFromGethAccounts) } #[derive(Debug, PartialEq)] @@ -39,11 +40,18 @@ pub struct ImportAccounts { pub to: String, } +#[derive(Debug, PartialEq)] +pub struct ImportFromGethAccounts { + pub testnet: bool, + pub to: String, +} + pub fn execute(cmd: AccountCmd) -> Result { match cmd { AccountCmd::New(new_cmd) => new(new_cmd), AccountCmd::List(path) => list(path), AccountCmd::Import(import_cmd) => import(import_cmd), + AccountCmd::ImportFromGeth(import_geth_cmd) => import_geth(import_geth_cmd) } } @@ -86,3 +94,17 @@ fn import(i: ImportAccounts) -> Result { } Ok(format!("{}", imported)) } + +fn import_geth(i: ImportFromGethAccounts) -> Result { + use std::io::ErrorKind; + use ethcore::ethstore::Error; + + let dir = Box::new(try!(keys_dir(i.to))); + let secret_store = Box::new(EthStore::open(dir).unwrap()); + let geth_accounts = read_geth_accounts(i.testnet); + match secret_store.import_geth_accounts(geth_accounts, i.testnet) { + Ok(v) => Ok(format!("Successfully imported {} account(s) from geth.", v.len())), + Err(Error::Io(ref io_err)) if io_err.kind() == ErrorKind::NotFound => Err("Failed to find geth keys folder.".into()), + Err(err) => Err(format!("Import geth accounts failed. 
{}", err)) + } +} diff --git a/parity/configuration.rs b/parity/configuration.rs index 8eb617d2b..811ba6097 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -39,7 +39,7 @@ use signer::Configuration as SignerConfiguration; use run::RunCmd; use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, DataFormat}; use presale::ImportWallet; -use account::{AccountCmd, NewAccount, ImportAccounts}; +use account::{AccountCmd, NewAccount, ImportAccounts, ImportFromGethAccounts}; use snapshot::{self, SnapshotCommand}; #[derive(Debug, PartialEq)] @@ -120,6 +120,14 @@ impl Configuration { unreachable!(); }; Cmd::Account(account_cmd) + } else if self.args.flag_import_geth_keys { + let account_cmd = AccountCmd::ImportFromGeth( + ImportFromGethAccounts { + to: dirs.keys, + testnet: self.args.flag_testnet + } + ); + Cmd::Account(account_cmd) } else if self.args.cmd_wallet { let presale_cmd = ImportWallet { iterations: self.args.flag_keys_iterations, @@ -319,7 +327,6 @@ impl Configuration { fn accounts_config(&self) -> Result { let cfg = AccountsConfig { iterations: self.args.flag_keys_iterations, - import_keys: self.args.flag_import_geth_keys, testnet: self.args.flag_testnet, password_files: self.args.flag_password.clone(), unlocked_accounts: try!(to_addresses(&self.args.flag_unlock)), diff --git a/parity/params.rs b/parity/params.rs index a1b28406a..faba029b2 100644 --- a/parity/params.rs +++ b/parity/params.rs @@ -142,7 +142,6 @@ impl str::FromStr for ResealPolicy { #[derive(Debug, PartialEq)] pub struct AccountsConfig { pub iterations: u32, - pub import_keys: bool, pub testnet: bool, pub password_files: Vec, pub unlocked_accounts: Vec
, @@ -152,7 +151,6 @@ impl Default for AccountsConfig { fn default() -> Self { AccountsConfig { iterations: 10240, - import_keys: false, testnet: false, password_files: Vec::new(), unlocked_accounts: Vec::new(), diff --git a/parity/run.rs b/parity/run.rs index ef84d75a2..528b207c6 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -15,7 +15,6 @@ // along with Parity. If not, see . use std::sync::{Arc, Mutex, Condvar}; -use std::io::ErrorKind; use ctrlc::CtrlC; use fdlimit::raise_fd_limit; use ethcore_logger::{Config as LogConfig, setup_log}; @@ -360,28 +359,11 @@ fn daemonize(_pid_file: String) -> Result<(), String> { } fn prepare_account_provider(dirs: &Directories, cfg: AccountsConfig) -> Result { - use ethcore::ethstore::{import_accounts, EthStore}; - use ethcore::ethstore::dir::{GethDirectory, DirectoryType, DiskDirectory}; - use ethcore::ethstore::Error; + use ethcore::ethstore::EthStore; + use ethcore::ethstore::dir::DiskDirectory; let passwords = try!(passwords_from_files(cfg.password_files)); - if cfg.import_keys { - let t = if cfg.testnet { - DirectoryType::Testnet - } else { - DirectoryType::Main - }; - - let from = GethDirectory::open(t); - let to = try!(DiskDirectory::create(dirs.keys.clone()).map_err(|e| format!("Could not open keys directory: {}", e))); - match import_accounts(&from, &to) { - Ok(_) => {} - Err(Error::Io(ref io_err)) if io_err.kind() == ErrorKind::NotFound => {} - Err(err) => warn!("Import geth accounts failed. {}", err) - } - } - let dir = Box::new(try!(DiskDirectory::create(dirs.keys.clone()).map_err(|e| format!("Could not open keys directory: {}", e)))); let account_service = AccountProvider::new(Box::new( try!(EthStore::open_with_iterations(dir, cfg.iterations).map_err(|e| format!("Could not open keys directory: {}", e))) From abc5db0f80b84e8c20d4649ce475f6b601a00c42 Mon Sep 17 00:00:00 2001 From: svyatonik Date: Tue, 4 Oct 2016 16:01:39 +0300 Subject: [PATCH 16/23] Remove redundant Box-ing. 
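Editor's note: a hedged usage sketch for the geth key import reworked in the
two patches above. The command-line flag is inferred from
`flag_import_geth_keys` in the argument struct and is an assumption, not taken
from this diff:

    parity --import-geth-keys            # import mainnet geth keys
    parity --import-geth-keys --testnet  # import testnet geth keys

Keys land in Parity's own keys directory (`dirs.keys`), and a missing geth
keystore now yields the explicit "Failed to find geth keys folder." error
instead of being silently ignored during node startup as before.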
--- parity/account.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity/account.rs b/parity/account.rs index 72a932315..6a05e945d 100644 --- a/parity/account.rs +++ b/parity/account.rs @@ -100,7 +100,7 @@ fn import_geth(i: ImportFromGethAccounts) -> Result { use ethcore::ethstore::Error; let dir = Box::new(try!(keys_dir(i.to))); - let secret_store = Box::new(EthStore::open(dir).unwrap()); + let secret_store = EthStore::open(dir).unwrap(); let geth_accounts = read_geth_accounts(i.testnet); match secret_store.import_geth_accounts(geth_accounts, i.testnet) { Ok(v) => Ok(format!("Successfully imported {} account(s) from geth.", v.len())), From 6e477951baa72f5a0c308d2bf59a814e7b54e2a7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 4 Oct 2016 17:32:26 +0200 Subject: [PATCH 17/23] update rustc for appveyor to 1.12.0 (#2423) * update rustc for appveyor to 1.12.0 * turn off MIR for windows builds --- appveyor.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index 3ffaa961e..3f6bb85ef 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -6,6 +6,7 @@ environment: certpass: secure: 0BgXJqxq9Ei34/hZ7121FQ== keyfile: C:\users\appveyor\Certificates.p12 + RUSTFLAGS: -Zorbit=off branches: only: @@ -18,10 +19,10 @@ branches: install: - git submodule update --init --recursive - ps: Install-Product node 6 - - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-1.10.0-x86_64-pc-windows-msvc.exe" + - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-1.12.0-x86_64-pc-windows-msvc.exe" - ps: Start-FileDownload "https://github.com/ethcore/win-build/raw/master/SimpleFC.dll" -FileName nsis\SimpleFC.dll - ps: Start-FileDownload "https://github.com/ethcore/win-build/raw/master/vc_redist.x64.exe" -FileName nsis\vc_redist.x64.exe - - rust-1.10.0-x86_64-pc-windows-msvc.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" + - rust-1.12.0-x86_64-pc-windows-msvc.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" - SET PATH=%PATH%;C:\Program Files (x86)\Rust\bin;C:\Program Files (x86)\NSIS;C:\Program Files (x86)\Microsoft SDKs\Windows\v7.1A\Bin - rustc -V - cargo -V From 2b147616fdf0b47136afe09ff836a26c86eeac48 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 4 Oct 2016 19:20:16 +0300 Subject: [PATCH 18/23] derive -> ipc --- Cargo.lock | 16 ---------------- Cargo.toml | 9 +-------- db/src/database.rs | 2 +- ethcore/src/client/chain_notify.rs | 2 +- ethcore/src/client/traits.rs | 1 - ethcore/src/snapshot/snapshot_service_trait.rs | 1 - ipc/codegen/src/codegen.rs | 4 ++-- ipc/codegen/src/lib.rs | 4 ++-- ipc/hypervisor/src/service.rs.in | 4 ++-- ipc/tests/nested.rs.in | 4 ++-- ipc/tests/service.rs.in | 2 +- ipc/tests/with_attrs.rs.in | 1 - stratum/src/traits.rs | 2 -- sync/Cargo.toml | 3 ++- sync/src/api.rs | 2 -- 15 files changed, 14 insertions(+), 43 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bb91080c2..821933d2e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -276,7 +276,6 @@ dependencies = [ "bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", - "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 1.4.0", @@ -290,7 +289,6 @@ dependencies = 
[ "ethjson 0.1.0", "ethkey 0.2.0", "ethstore 0.1.0", - "evmjit 1.4.0", "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.9.4 (git+https://github.com/ethcore/hyper)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -324,7 +322,6 @@ version = "0.1.0" name = "ethcore-dapps" version = "1.4.0" dependencies = [ - "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "ethabi 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-devtools 1.4.0", "ethcore-rpc 1.4.0", @@ -466,7 +463,6 @@ dependencies = [ name = "ethcore-rpc" version = "1.4.0" dependencies = [ - "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 1.4.0", "ethcore 1.4.0", "ethcore-devtools 1.4.0", @@ -496,7 +492,6 @@ dependencies = [ name = "ethcore-signer" version = "1.4.0" dependencies = [ - "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-devtools 1.4.0", "ethcore-io 1.4.0", @@ -534,7 +529,6 @@ version = "1.4.0" dependencies = [ "ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", - "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "elastic-array 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)", @@ -588,7 +582,6 @@ dependencies = [ name = "ethkey" version = "0.2.0" dependencies = [ - "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)", "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)", "ethcore-bigint 0.1.0", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -601,7 +594,6 @@ dependencies = [ name = "ethstore" version = "0.1.0" dependencies = [ - "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)", "ethcrypto 0.1.0", "ethkey 0.2.0", "itertools 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", @@ -621,7 +613,6 @@ dependencies = [ name = "ethsync" version = "1.4.0" dependencies = [ - "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.4.0", "ethcore-io 1.4.0", @@ -639,13 +630,6 @@ dependencies = [ "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "evmjit" -version = "1.4.0" -dependencies = [ - "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "fdlimit" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 034b1383b..da7e252f4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,14 +62,7 @@ default = ["ui", "use-precompiled-js", "ipc"] ui = ["dapps", "ethcore-signer/ui"] use-precompiled-js = ["ethcore-dapps/use-precompiled-js", "ethcore-signer/use-precompiled-js"] dapps = ["ethcore-dapps"] -ipc = ["ethcore/ipc"] -jit = ["ethcore/jit"] -dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethcore-dapps/dev", "ethcore-signer/dev"] -json-tests = ["ethcore/json-tests"] -stratum = ["ipc"] -ethkey-cli = ["ethcore/ethkey-cli"] -ethstore-cli = ["ethcore/ethstore-cli"] -evm-debug = ["ethcore/evm-debug"] +ipc = ["ethcore/ipc", "ethsync/ipc"] evm-debug-tests = ["ethcore/evm-debug-tests"] 
slow-blocks = ["ethcore/slow-blocks"] diff --git a/db/src/database.rs b/db/src/database.rs index 9a52822f6..e1774159b 100644 --- a/db/src/database.rs +++ b/db/src/database.rs @@ -157,7 +157,7 @@ impl Drop for Database { } } -#[derive(Ipc)] +#[ipc] impl DatabaseService for Database { fn open(&self, config: DatabaseConfig, path: String) -> Result<(), Error> { let mut db = self.db.write(); diff --git a/ethcore/src/client/chain_notify.rs b/ethcore/src/client/chain_notify.rs index 0c34382a0..e0282d460 100644 --- a/ethcore/src/client/chain_notify.rs +++ b/ethcore/src/client/chain_notify.rs @@ -18,7 +18,7 @@ use ipc::IpcConfig; use util::H256; /// Represents what has to be handled by actor listening to chain events -#[derive(Ipc)] +#[ipc] pub trait ChainNotify : Send + Sync { /// fires when chain has new blocks. fn new_blocks(&self, diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index c2af744dd..5f7b62ee2 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -38,7 +38,6 @@ use ipc::IpcConfig; use types::blockchain_info::BlockChainInfo; use types::block_status::BlockStatus; -#[derive(Ipc)] #[ipc(client_ident="RemoteClient")] /// Blockchain database client. Owns and manages a blockchain and a block queue. pub trait BlockChainClient : Sync + Send { diff --git a/ethcore/src/snapshot/snapshot_service_trait.rs b/ethcore/src/snapshot/snapshot_service_trait.rs index 7df90c943..65448090f 100644 --- a/ethcore/src/snapshot/snapshot_service_trait.rs +++ b/ethcore/src/snapshot/snapshot_service_trait.rs @@ -22,7 +22,6 @@ use ipc::IpcConfig; /// This handles: /// - restoration of snapshots to temporary databases. /// - responding to queries for snapshot manifests and chunks -#[derive(Ipc)] #[ipc(client_ident="RemoteSnapshotService")] pub trait SnapshotService : Sync + Send { /// Query the most recent manifest data. 
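// Editor's note: a small sketch (not taken from this diff) of the annotation
// change the commits above apply across the codebase; `Thing` and
// `RemoteThing` are made-up names.
//
// Before, IPC endpoints combined a custom derive with an optional attribute:
//
//     #[derive(Ipc)]
//     #[ipc(client_ident="RemoteThing")]
//     pub trait Thing: Send + Sync { fn ping(&self) -> bool; }
//
// After this change, the single #[ipc] attribute drives the codegen directly:
//
//     #[ipc(client_ident="RemoteThing")]
//     pub trait Thing: Send + Sync { fn ping(&self) -> bool; }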
diff --git a/ipc/codegen/src/codegen.rs b/ipc/codegen/src/codegen.rs index 89bd9548c..9c7b33be8 100644 --- a/ipc/codegen/src/codegen.rs +++ b/ipc/codegen/src/codegen.rs @@ -49,7 +49,7 @@ pub fn expand_ipc_implementation( let item = match *annotatable { Annotatable::Item(ref item) => item, _ => { - cx.span_err(meta_item.span, "`#[derive(Ipc)]` may only be applied to struct implementations"); + cx.span_err(meta_item.span, "`#[ipc]` may only be applied to struct implementations"); return; }, }; @@ -832,7 +832,7 @@ fn implement_interface( _ => { cx.span_err( item.span, - "`#[derive(Ipc)]` may only be applied to implementations and traits"); + "`#[ipc]` may only be applied to implementations and traits"); return Err(Error); }, }; diff --git a/ipc/codegen/src/lib.rs b/ipc/codegen/src/lib.rs index ce1ca8592..94959b058 100644 --- a/ipc/codegen/src/lib.rs +++ b/ipc/codegen/src/lib.rs @@ -83,7 +83,7 @@ pub fn register(reg: &mut syntex::Registry) { reg.add_attr("feature(custom_derive)"); reg.add_attr("feature(custom_attribute)"); - reg.add_decorator("derive_Ipc", codegen::expand_ipc_implementation); + reg.add_decorator("ipc", codegen::expand_ipc_implementation); reg.add_decorator("derive_Binary", serialization::expand_serialization_implementation); reg.add_post_expansion_pass(strip_attributes); @@ -92,7 +92,7 @@ pub fn register(reg: &mut syntex::Registry) { #[cfg(not(feature = "with-syntex"))] pub fn register(reg: &mut rustc_plugin::Registry) { reg.register_syntax_extension( - syntax::parse::token::intern("derive_Ipc"), + syntax::parse::token::intern("ipc"), syntax::ext::base::MultiDecorator( Box::new(codegen::expand_ipc_implementation))); reg.register_syntax_extension( diff --git a/ipc/hypervisor/src/service.rs.in b/ipc/hypervisor/src/service.rs.in index 74d289f50..6996765ec 100644 --- a/ipc/hypervisor/src/service.rs.in +++ b/ipc/hypervisor/src/service.rs.in @@ -40,12 +40,12 @@ pub struct ModuleState { } -#[derive(Ipc)] +#[ipc] pub trait ControlService { fn shutdown(&self) -> bool; } -#[derive(Ipc)] +#[ipc] impl HypervisorService { // return type for making method synchronous fn module_ready(&self, module_id: u64, control_url: String) -> bool { diff --git a/ipc/tests/nested.rs.in b/ipc/tests/nested.rs.in index 4f0ac4a8a..df0c9bde3 100644 --- a/ipc/tests/nested.rs.in +++ b/ipc/tests/nested.rs.in @@ -33,7 +33,7 @@ impl IpcConfig for DBWriter {} #[derive(Binary)] pub enum DBError { Write, Read } -#[derive(Ipc)] +#[ipc] impl DBWriter for DB { fn write(&self, data: Vec) -> Result<(), DBError> { let mut writes = self.writes.write().unwrap(); @@ -48,7 +48,7 @@ impl DBWriter for DB { } } -#[derive(Ipc)] +#[ipc] trait DBNotify { fn notify(&self, a: u64, b: u64) -> bool; } diff --git a/ipc/tests/service.rs.in b/ipc/tests/service.rs.in index 9c221d481..cd9a5a6b2 100644 --- a/ipc/tests/service.rs.in +++ b/ipc/tests/service.rs.in @@ -28,7 +28,7 @@ pub struct CustomData { pub b: u64, } -#[derive(Ipc)] +#[ipc] impl Service { fn commit(&self, f: u32) -> u32 { let mut lock = self.commits.write().unwrap(); diff --git a/ipc/tests/with_attrs.rs.in b/ipc/tests/with_attrs.rs.in index bbf5b894a..f65627fce 100644 --- a/ipc/tests/with_attrs.rs.in +++ b/ipc/tests/with_attrs.rs.in @@ -18,7 +18,6 @@ use ipc::IpcConfig; pub struct BadlyNamedService; -#[derive(Ipc)] #[ipc(client_ident="PrettyNamedClient")] impl BadlyNamedService { fn is_zero(&self, x: u64) -> bool { diff --git a/stratum/src/traits.rs b/stratum/src/traits.rs index 339f753b5..5e93a9484 100644 --- a/stratum/src/traits.rs +++ b/stratum/src/traits.rs @@ -32,7 +32,6 
@@ impl From for Error { } } -#[derive(Ipc)] #[ipc(client_ident="RemoteJobDispatcher")] /// Interface that can provide pow/blockchain-specific responses for the clients pub trait JobDispatcher: Send + Sync { @@ -44,7 +43,6 @@ pub trait JobDispatcher: Send + Sync { fn job(&self, _worker_id: String) -> Option { None } } -#[derive(Ipc)] #[ipc(client_ident="RemoteWorkHandler")] /// Interface that can handle requests to push job for workers pub trait PushWorkHandler: Send + Sync { diff --git a/sync/Cargo.toml b/sync/Cargo.toml index 99c522075..3b5fc3c9c 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -29,5 +29,6 @@ ethcore-ipc-nano = { path = "../ipc/nano" } parking_lot = "0.2.6" [features] -default = [] +default = ["ipc"] dev = ["clippy", "ethcore/dev", "ethcore-util/dev"] +ipc = [] diff --git a/sync/src/api.rs b/sync/src/api.rs index 961a70cd3..c09157e3b 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -88,7 +88,6 @@ impl EthSync { } } -#[derive(Ipc)] #[ipc(client_ident="SyncClient")] impl SyncProvider for EthSync { /// Get sync status @@ -184,7 +183,6 @@ pub trait ManageNetwork : Send + Sync { } -#[derive(Ipc)] #[ipc(client_ident="NetworkManagerClient")] impl ManageNetwork for EthSync { fn accept_unreserved_peers(&self) { From 0002bfadabefe68eb3334a5b9b39014b8549001e Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 4 Oct 2016 19:22:26 +0300 Subject: [PATCH 19/23] accident repair --- Cargo.lock | 16 ++++++++++++++++ Cargo.toml | 7 +++++++ 2 files changed, 23 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 821933d2e..bb91080c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -276,6 +276,7 @@ dependencies = [ "bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 1.4.0", @@ -289,6 +290,7 @@ dependencies = [ "ethjson 0.1.0", "ethkey 0.2.0", "ethstore 0.1.0", + "evmjit 1.4.0", "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.9.4 (git+https://github.com/ethcore/hyper)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -322,6 +324,7 @@ version = "0.1.0" name = "ethcore-dapps" version = "1.4.0" dependencies = [ + "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "ethabi 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-devtools 1.4.0", "ethcore-rpc 1.4.0", @@ -463,6 +466,7 @@ dependencies = [ name = "ethcore-rpc" version = "1.4.0" dependencies = [ + "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 1.4.0", "ethcore 1.4.0", "ethcore-devtools 1.4.0", @@ -492,6 +496,7 @@ dependencies = [ name = "ethcore-signer" version = "1.4.0" dependencies = [ + "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-devtools 1.4.0", "ethcore-io 1.4.0", @@ -529,6 +534,7 @@ version = "1.4.0" dependencies = [ "ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "elastic-array 0.5.0 
(registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)", @@ -582,6 +588,7 @@ dependencies = [ name = "ethkey" version = "0.2.0" dependencies = [ + "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)", "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)", "ethcore-bigint 0.1.0", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -594,6 +601,7 @@ dependencies = [ name = "ethstore" version = "0.1.0" dependencies = [ + "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)", "ethcrypto 0.1.0", "ethkey 0.2.0", "itertools 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", @@ -613,6 +621,7 @@ dependencies = [ name = "ethsync" version = "1.4.0" dependencies = [ + "clippy 0.0.90 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.4.0", "ethcore-io 1.4.0", @@ -630,6 +639,13 @@ dependencies = [ "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "evmjit" +version = "1.4.0" +dependencies = [ + "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "fdlimit" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index da7e252f4..38003e7a1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -63,6 +63,13 @@ ui = ["dapps", "ethcore-signer/ui"] use-precompiled-js = ["ethcore-dapps/use-precompiled-js", "ethcore-signer/use-precompiled-js"] dapps = ["ethcore-dapps"] ipc = ["ethcore/ipc", "ethsync/ipc"] +jit = ["ethcore/jit"] +dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethcore-dapps/dev", "ethcore-signer/dev"] +json-tests = ["ethcore/json-tests"] +stratum = ["ipc"] +ethkey-cli = ["ethcore/ethkey-cli"] +ethstore-cli = ["ethcore/ethstore-cli"] +evm-debug = ["ethcore/evm-debug"] evm-debug-tests = ["ethcore/evm-debug-tests"] slow-blocks = ["ethcore/slow-blocks"] From b7814fa65c7a5ae681cbe6866c9f6c0d3d861e4e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 4 Oct 2016 19:05:46 +0200 Subject: [PATCH 20/23] Port a couple more RPC APIs to the new auto args (#2325) * add auto-args deserialization for RPC * make block param member public * change BlockParam to a more generic Trailing mechanism * define work type * build_rpc_trait macro, implement eth protocol * fix up tests * move eth_filter API to new macro * port ethcore module to new rpc macro * port ethcore_set to auto_args * port net RPC to auto_args * port rpc meta api to new * skeleton for async RPC auto_args * macro implementations for strongly-typed async RPC wrapper * clarify docs * reflect new required Rust version in README [ci skip] --- README.md | 14 +- rpc/src/v1/helpers/auto_args.rs | 159 ++++++++++++++++++++-- rpc/src/v1/impls/ethcore.rs | 176 ++++++++++++------------- rpc/src/v1/impls/ethcore_set.rs | 112 +++++++--------- rpc/src/v1/impls/net.rs | 20 ++- rpc/src/v1/impls/rpc.rs | 19 ++- rpc/src/v1/tests/mocked/ethcore_set.rs | 2 +- rpc/src/v1/traits/eth.rs | 83 ++++++------ rpc/src/v1/traits/ethcore.rs | 164 +++++++++++------------ rpc/src/v1/traits/ethcore_set.rs | 94 +++++++------ rpc/src/v1/traits/net.rs | 35 +++-- rpc/src/v1/traits/rpc.rs | 31 ++--- rpc/src/v1/types/mod.rs.in | 2 + rpc/src/v1/types/rpc_settings.rs | 28 ++++ 14 files changed, 534 insertions(+), 405 deletions(-) create 
mode 100644 rpc/src/v1/types/rpc_settings.rs diff --git a/README.md b/README.md index d5fb5f044..a6c987a69 100644 --- a/README.md +++ b/README.md @@ -5,6 +5,7 @@ [Internal Documentation][doc-url] + Be sure to check out [our wiki][wiki-url] for more information. [travis-image]: https://travis-ci.org/ethcore/parity.svg?branch=master @@ -18,8 +19,11 @@ Be sure to check out [our wiki][wiki-url] for more information. [doc-url]: https://ethcore.github.io/parity/ethcore/index.html [wiki-url]: https://github.com/ethcore/parity/wiki +**Requires Rust version 1.12.0 to build** + ---- + ## About Parity Parity's goal is to be the fastest, lightest, and most secure Ethereum client. We are developing Parity using the sophisticated and @@ -96,9 +100,9 @@ and Parity will begin syncing the Ethereum blockchain. ### Using systemd service file To start Parity as a regular user using systemd init: -1. Copy ```parity/scripts/parity.service``` to your -systemd user directory (usually ```~/.config/systemd/user```). -2. To pass any argument to Parity, write a ```~/.parity/parity.conf``` file this way: -```ARGS="ARG1 ARG2 ARG3"```. +1. Copy `parity/scripts/parity.service` to your +systemd user directory (usually `~/.config/systemd/user`). +2. To pass any argument to Parity, write a `~/.parity/parity.conf` file this way: +`ARGS="ARG1 ARG2 ARG3"`. - Example: ```ARGS="ui --geth --identity MyMachine"```. + Example: `ARGS="ui --geth --identity MyMachine"`. diff --git a/rpc/src/v1/helpers/auto_args.rs b/rpc/src/v1/helpers/auto_args.rs index c7deb0436..ce1e6854a 100644 --- a/rpc/src/v1/helpers/auto_args.rs +++ b/rpc/src/v1/helpers/auto_args.rs @@ -31,16 +31,32 @@ use serde::{Serialize, Deserialize}; /// function `to_delegate` which will automatically wrap each strongly-typed /// function in a wrapper which handles parameter and output type serialization. /// -/// Every function must have a `#[name("rpc_nameHere")]` attribute after -/// its documentation, and no other attributes. All function names are -/// allowed except for `to_delegate`, which is auto-generated. +/// RPC functions may come in a couple forms: async and synchronous. +/// These are parsed with the custom `#[rpc]` attribute, which must follow +/// documentation. +/// +/// ## The #[rpc] attribute +/// +/// Valid forms: +/// - `#[rpc(name = "name_here")]` (a synchronous rpc function which should be bound to the given name) +/// - `#[rpc(async, name = "name_here")]` (an async rpc function which should be bound to the given name) +/// +/// Synchronous function format: +/// `fn foo(&self, Param1, Param2, Param3) -> Out`. +/// +/// Asynchronous RPC functions must come in this form: +/// `fn foo(&self, Param1, Param2, Param3, Ready); +/// +/// Anything else will be rejected by the code generator. macro_rules! build_rpc_trait { + // entry-point. todo: make another for traits w/ bounds. ( $(#[$t_attr: meta])* pub trait $name: ident { $( - $(#[doc=$m_doc: expr])* #[name($rpc_name: expr)] - fn $method: ident (&self $(, $param: ty)*) -> $out: ty; + $( #[doc=$m_doc:expr] )* + #[ rpc( $($t:tt)* ) ] + fn $m_name: ident ( $($p: tt)* ) $( -> Result<$out: ty, Error> )* ; )* } ) => { @@ -48,7 +64,7 @@ macro_rules! build_rpc_trait { pub trait $name: Sized + Send + Sync + 'static { $( $(#[doc=$m_doc])* - fn $method(&self $(, $param)*) -> $out; + fn $m_name ( $($p)* ) $( -> Result<$out, Error> )* ; )* /// Transform this into an `IoDelegate`, automatically wrapping @@ -56,14 +72,33 @@ macro_rules! 
build_rpc_trait { fn to_delegate(self) -> ::jsonrpc_core::IoDelegate { let mut del = ::jsonrpc_core::IoDelegate::new(self.into()); $( - del.add_method($rpc_name, move |base, params| { - ($name::$method as fn(&_ $(, $param)*) -> $out).wrap_rpc(base, params) - }); + build_rpc_trait!(WRAP del => + ( $($t)* ) + fn $m_name ( $($p)* ) $( -> Result<$out, Error> )* + ); )* del } } - } + }; + + ( WRAP $del: expr => + (name = $name: expr) + fn $method: ident (&self $(, $param: ty)*) -> Result<$out: ty, Error> + ) => { + $del.add_method($name, move |base, params| { + (Self::$method as fn(&_ $(, $param)*) -> Result<$out, Error>).wrap_rpc(base, params) + }) + }; + + ( WRAP $del: expr => + (async, name = $name: expr) + fn $method: ident (&self, Ready<$out: ty> $(, $param: ty)*) + ) => { + $del.add_async_method($name, move |base, params, ready| { + (Self::$method as fn(&_, Ready<$out> $(, $param)*)).wrap_rpc(base, params, ready) + }) + }; } /// A wrapper type without an implementation of `Deserialize` @@ -71,11 +106,35 @@ macro_rules! build_rpc_trait { /// that take a trailing default parameter. pub struct Trailing(pub T); +/// A wrapper type for `jsonrpc_core`'s weakly-typed `Ready` struct. +pub struct Ready { + inner: ::jsonrpc_core::Ready, + _marker: ::std::marker::PhantomData, +} + +impl From<::jsonrpc_core::Ready> for Ready { + fn from(ready: ::jsonrpc_core::Ready) -> Self { + Ready { inner: ready, _marker: ::std::marker::PhantomData } + } +} + +impl Ready { + /// Respond withthe asynchronous result. + pub fn ready(self, result: Result) { + self.inner.ready(result.map(to_value)) + } +} + /// Wrapper trait for synchronous RPC functions. pub trait Wrap { fn wrap_rpc(&self, base: &B, params: Params) -> Result; } +/// Wrapper trait for asynchronous RPC functions. +pub trait WrapAsync { + fn wrap_rpc(&self, base: &B, params: Params, ready: ::jsonrpc_core::Ready); +} + // special impl for no parameters. impl Wrap for fn(&B) -> Result where B: Send + Sync + 'static, OUT: Serialize @@ -87,10 +146,23 @@ impl Wrap for fn(&B) -> Result } } +impl WrapAsync for fn(&B, Ready) + where B: Send + Sync + 'static, OUT: Serialize +{ + fn wrap_rpc(&self, base: &B, params: Params, ready: ::jsonrpc_core::Ready) { + match ::v1::helpers::params::expect_no_params(params) { + Ok(()) => (self)(base, ready.into()), + Err(e) => ready.ready(Err(e)), + } + } +} + // creates a wrapper implementation which deserializes the parameters, // calls the function with concrete type, and serializes the output. macro_rules! wrap { ($($x: ident),+) => { + + // synchronous implementation impl < BASE: Send + Sync + 'static, OUT: Serialize, @@ -102,6 +174,20 @@ macro_rules! 
wrap { }).map(to_value) } } + + // asynchronous implementation + impl < + BASE: Send + Sync + 'static, + OUT: Serialize, + $($x: Deserialize,)+ + > WrapAsync for fn(&BASE, Ready, $($x,)+ ) { + fn wrap_rpc(&self, base: &BASE, params: Params, ready: ::jsonrpc_core::Ready) { + match from_params::<($($x,)+)>(params) { + Ok(($($x,)+)) => (self)(base, ready.into(), $($x,)+), + Err(e) => ready.ready(Err(e)), + } + } + } } } @@ -126,10 +212,34 @@ impl Wrap for fn(&B, Trailing) -> Result } } +impl WrapAsync for fn(&B, Ready, Trailing) + where B: Send + Sync + 'static, OUT: Serialize, T: Default + Deserialize +{ + fn wrap_rpc(&self, base: &B, params: Params, ready: ::jsonrpc_core::Ready) { + let len = match params { + Params::Array(ref v) => v.len(), + Params::None => 0, + _ => return ready.ready(Err(errors::invalid_params("not an array", ""))), + }; + + let id = match len { + 0 => Ok((T::default(),)), + 1 => from_params::<(T,)>(params), + _ => Err(Error::invalid_params()), + }; + + match id { + Ok((id,)) => (self)(base, ready.into(), Trailing(id)), + Err(e) => ready.ready(Err(e)), + } + } +} + // similar to `wrap!`, but handles a single default trailing parameter // accepts an additional argument indicating the number of non-trailing parameters. macro_rules! wrap_with_trailing { ($num: expr, $($x: ident),+) => { + // synchronous implementation impl < BASE: Send + Sync + 'static, OUT: Serialize, @@ -155,6 +265,35 @@ macro_rules! wrap_with_trailing { (self)(base, $($x,)+ Trailing(id)).map(to_value) } } + + // asynchronous implementation + impl < + BASE: Send + Sync + 'static, + OUT: Serialize, + $($x: Deserialize,)+ + TRAILING: Default + Deserialize, + > WrapAsync for fn(&BASE, Ready, $($x,)+ Trailing) { + fn wrap_rpc(&self, base: &BASE, params: Params, ready: ::jsonrpc_core::Ready) { + let len = match params { + Params::Array(ref v) => v.len(), + Params::None => 0, + _ => return ready.ready(Err(errors::invalid_params("not an array", ""))), + }; + + let params = match len - $num { + 0 => from_params::<($($x,)+)>(params) + .map(|($($x,)+)| ($($x,)+ TRAILING::default())), + 1 => from_params::<($($x,)+ TRAILING)>(params) + .map(|($($x,)+ id)| ($($x,)+ id)), + _ => Err(Error::invalid_params()), + }; + + match params { + Ok(($($x,)+ id)) => (self)(base, ready.into(), $($x,)+ Trailing(id)), + Err(e) => ready.ready(Err(e)) + } + } + } } } diff --git a/rpc/src/v1/impls/ethcore.rs b/rpc/src/v1/impls/ethcore.rs index 56f9f6fc4..a63d33f52 100644 --- a/rpc/src/v1/impls/ethcore.rs +++ b/rpc/src/v1/impls/ethcore.rs @@ -18,7 +18,7 @@ use std::{fs, io}; use std::sync::{mpsc, Arc, Weak}; use std::str::FromStr; -use std::collections::{BTreeMap}; + use util::{RotatingLogger, Address, Mutex, sha3}; use util::misc::version_data; @@ -31,11 +31,11 @@ use ethcore::miner::MinerService; use ethcore::client::{MiningBlockChainClient}; use ethcore::ids::BlockID; -use jsonrpc_core::{from_params, to_value, Value, Error, Params, Ready}; +use jsonrpc_core::Error; use v1::traits::Ethcore; -use v1::types::{Bytes, U256, H160, H256, H512, Peers, Transaction}; +use v1::types::{Bytes, U256, H160, H256, H512, Peers, Transaction, RpcSettings}; use v1::helpers::{errors, SigningQueue, SignerService, NetworkSettings}; -use v1::helpers::params::expect_no_params; +use v1::helpers::auto_args::Ready; /// Ethcore implementation. 
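// Editor's note: an illustrative declaration (not part of this patch) showing
// the two #[rpc] forms accepted by the reworked build_rpc_trait! macro. The
// trait, methods and RPC names are made up; Error, Ready and Trailing are
// assumed to be the jsonrpc / auto_args types used throughout this module.
build_rpc_trait! {
	pub trait Example {
		/// Synchronous method bound to the given RPC name; parameters are
		/// written as bare types and deserialized by the generated wrapper.
		#[rpc(name = "example_add")]
		fn add(&self, u64, u64) -> Result<u64, Error>;

		/// Synchronous method with a defaultable trailing parameter.
		#[rpc(name = "example_addOptional")]
		fn add_optional(&self, u64, Trailing<u64>) -> Result<u64, Error>;

		/// Asynchronous method: the result is delivered through Ready<T>
		/// rather than returned.
		#[rpc(async, name = "example_fetch")]
		fn fetch(&self, Ready<u64>, String);
	}
}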
pub struct EthcoreClient where @@ -113,180 +113,168 @@ impl Ethcore for EthcoreClient where S: SyncProvider + 'static, F: Fetch + 'static { - fn transactions_limit(&self, params: Params) -> Result { + fn transactions_limit(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&take_weak!(self.miner).transactions_limit())) + + Ok(take_weak!(self.miner).transactions_limit()) } - fn min_gas_price(&self, params: Params) -> Result { + fn min_gas_price(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&U256::from(take_weak!(self.miner).minimal_gas_price()))) + + Ok(U256::from(take_weak!(self.miner).minimal_gas_price())) } - fn extra_data(&self, params: Params) -> Result { + fn extra_data(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&Bytes::new(take_weak!(self.miner).extra_data()))) + + Ok(Bytes::new(take_weak!(self.miner).extra_data())) } - fn gas_floor_target(&self, params: Params) -> Result { + fn gas_floor_target(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&U256::from(take_weak!(self.miner).gas_floor_target()))) + + Ok(U256::from(take_weak!(self.miner).gas_floor_target())) } - fn gas_ceil_target(&self, params: Params) -> Result { + fn gas_ceil_target(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&U256::from(take_weak!(self.miner).gas_ceil_target()))) + + Ok(U256::from(take_weak!(self.miner).gas_ceil_target())) } - fn dev_logs(&self, params: Params) -> Result { + fn dev_logs(&self) -> Result, Error> { try!(self.active()); - try!(expect_no_params(params)); + let logs = self.logger.logs(); - Ok(to_value(&logs.as_slice())) + Ok(logs.as_slice().to_owned()) } - fn dev_logs_levels(&self, params: Params) -> Result { + fn dev_logs_levels(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&self.logger.levels())) + + Ok(self.logger.levels().to_owned()) } - fn net_chain(&self, params: Params) -> Result { + fn net_chain(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&self.settings.chain)) + + Ok(self.settings.chain.clone()) } - fn net_peers(&self, params: Params) -> Result { + fn net_peers(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); let sync_status = take_weak!(self.sync).status(); let net_config = take_weak!(self.net).network_config(); - Ok(to_value(&Peers { + Ok(Peers { active: sync_status.num_active_peers, connected: sync_status.num_peers, max: sync_status.current_max_peers(net_config.min_peers, net_config.max_peers), - })) + }) } - fn net_port(&self, params: Params) -> Result { + fn net_port(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&self.settings.network_port)) + + Ok(self.settings.network_port) } - fn node_name(&self, params: Params) -> Result { + fn node_name(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&self.settings.name)) + + Ok(self.settings.name.clone()) } - fn registry_address(&self, params: Params) -> Result { + fn registry_address(&self) -> Result, Error> { try!(self.active()); - try!(expect_no_params(params)); - let r = take_weak!(self.client) - .additional_params() - .get("registrar") - .and_then(|s| Address::from_str(s).ok()) - .map(|s| H160::from(s)); - Ok(to_value(&r)) + + Ok( + take_weak!(self.client) + .additional_params() + .get("registrar") + .and_then(|s| Address::from_str(s).ok()) + 
.map(|s| H160::from(s)) + ) } - fn rpc_settings(&self, params: Params) -> Result { + fn rpc_settings(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - let mut map = BTreeMap::new(); - map.insert("enabled".to_owned(), Value::Bool(self.settings.rpc_enabled)); - map.insert("interface".to_owned(), Value::String(self.settings.rpc_interface.clone())); - map.insert("port".to_owned(), Value::U64(self.settings.rpc_port as u64)); - Ok(Value::Object(map)) + Ok(RpcSettings { + enabled: self.settings.rpc_enabled, + interface: self.settings.rpc_interface.clone(), + port: self.settings.rpc_port as u64, + }) } - fn default_extra_data(&self, params: Params) -> Result { + fn default_extra_data(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&Bytes::new(version_data()))) + + Ok(Bytes::new(version_data())) } - fn gas_price_statistics(&self, params: Params) -> Result { + fn gas_price_statistics(&self) -> Result, Error> { try!(self.active()); - try!(expect_no_params(params)); match take_weak!(self.client).gas_price_statistics(100, 8) { - Ok(stats) => Ok(to_value(&stats - .into_iter() - .map(|x| to_value(&U256::from(x))) - .collect::>())), + Ok(stats) => Ok(stats.into_iter().map(Into::into).collect()), _ => Err(Error::internal_error()), } } - fn unsigned_transactions_count(&self, params: Params) -> Result { + fn unsigned_transactions_count(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); match self.signer { None => Err(errors::signer_disabled()), - Some(ref signer) => Ok(to_value(&signer.len())), + Some(ref signer) => Ok(signer.len()), } } - fn generate_secret_phrase(&self, params: Params) -> Result { + fn generate_secret_phrase(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&random_phrase(12))) + Ok(random_phrase(12)) } - fn phrase_to_address(&self, params: Params) -> Result { + fn phrase_to_address(&self, phrase: String) -> Result { try!(self.active()); - from_params::<(String,)>(params).map(|(phrase,)| - to_value(&H160::from(Brain::new(phrase).generate().unwrap().address())) - ) + + Ok(Brain::new(phrase).generate().unwrap().address().into()) } - fn list_accounts(&self, params: Params) -> Result { + fn list_accounts(&self) -> Result>, Error> { try!(self.active()); - try!(expect_no_params(params)); - take_weak!(self.client) + Ok(take_weak!(self.client) .list_accounts(BlockID::Latest) - .map(|a| Ok(to_value(&a.into_iter().map(Into::into).collect::>()))) - .unwrap_or(Ok(Value::Null)) + .map(|a| a.into_iter().map(Into::into).collect())) } - fn list_storage_keys(&self, params: Params) -> Result { + fn list_storage_keys(&self, _address: H160) -> Result>, Error> { try!(self.active()); - from_params::<(H160,)>(params).and_then(|(_addr,)| - Ok(Value::Null) - ) + // TODO: implement this + Ok(None) } - fn encrypt_message(&self, params: Params) -> Result { + fn encrypt_message(&self, key: H512, phrase: Bytes) -> Result { try!(self.active()); - from_params::<(H512, Bytes)>(params).and_then(|(key, phrase)| { - let s = try!(ecies::encrypt(&key.into(), &[0; 0], &phrase.0).map_err(|_| Error::internal_error())); - Ok(to_value(&Bytes::from(s))) - }) + + ecies::encrypt(&key.into(), &[0; 0], &phrase.0) + .map_err(|_| Error::internal_error()) + .map(Into::into) } - fn pending_transactions(&self, params: Params) -> Result { + fn pending_transactions(&self) -> Result, Error> { try!(self.active()); - try!(expect_no_params(params)); - 
Ok(to_value(&take_weak!(self.miner).all_transactions().into_iter().map(Into::into).collect::>())) + Ok(take_weak!(self.miner).all_transactions().into_iter().map(Into::into).collect::>()) } - fn hash_content(&self, params: Params, ready: Ready) { - let res = self.active().and_then(|_| from_params::<(String,)>(params)); + fn hash_content(&self, ready: Ready, url: String) { + let res = self.active(); let hash_content = |result| { let path = try!(result); @@ -301,15 +289,15 @@ impl Ethcore for EthcoreClient where match res { Err(e) => ready.ready(Err(e)), - Ok((url, )) => { + Ok(()) => { let (tx, rx) = mpsc::channel(); let res = self.fetch.lock().request_async(&url, Default::default(), Box::new(move |result| { let result = hash_content(result) .map_err(errors::from_fetch_error) - .map(|hash| to_value(H256::from(hash))); + .map(Into::into); // Receive ready and invoke with result. - let ready: Ready = rx.try_recv().expect("When on_done is invoked ready object is always sent."); + let ready: Ready = rx.try_recv().expect("When on_done is invoked ready object is always sent."); ready.ready(result); })); diff --git a/rpc/src/v1/impls/ethcore_set.rs b/rpc/src/v1/impls/ethcore_set.rs index 35b97f785..7bebf9bbb 100644 --- a/rpc/src/v1/impls/ethcore_set.rs +++ b/rpc/src/v1/impls/ethcore_set.rs @@ -21,7 +21,6 @@ use ethcore::miner::MinerService; use ethcore::client::MiningBlockChainClient; use ethsync::ManageNetwork; use v1::helpers::errors; -use v1::helpers::params::expect_no_params; use v1::traits::EthcoreSet; use v1::types::{Bytes, H160, U256}; @@ -58,105 +57,94 @@ impl EthcoreSet for EthcoreSetClient where C: MiningBlockChainClient + 'static, M: MinerService + 'static { - fn set_min_gas_price(&self, params: Params) -> Result { + fn set_min_gas_price(&self, gas_price: U256) -> Result { try!(self.active()); - from_params::<(U256,)>(params).and_then(|(gas_price,)| { - take_weak!(self.miner).set_minimal_gas_price(gas_price.into()); - Ok(to_value(&true)) - }) + + take_weak!(self.miner).set_minimal_gas_price(gas_price.into()); + Ok(true) } - fn set_gas_floor_target(&self, params: Params) -> Result { + fn set_gas_floor_target(&self, target: U256) -> Result { try!(self.active()); - from_params::<(U256,)>(params).and_then(|(target,)| { - take_weak!(self.miner).set_gas_floor_target(target.into()); - Ok(to_value(&true)) - }) + + take_weak!(self.miner).set_gas_floor_target(target.into()); + Ok(true) } - fn set_gas_ceil_target(&self, params: Params) -> Result { + fn set_gas_ceil_target(&self, target: U256) -> Result { try!(self.active()); - from_params::<(U256,)>(params).and_then(|(target,)| { - take_weak!(self.miner).set_gas_ceil_target(target.into()); - Ok(to_value(&true)) - }) + + take_weak!(self.miner).set_gas_ceil_target(target.into()); + Ok(true) } - fn set_extra_data(&self, params: Params) -> Result { + fn set_extra_data(&self, extra_data: Bytes) -> Result { try!(self.active()); - from_params::<(Bytes,)>(params).and_then(|(extra_data,)| { - take_weak!(self.miner).set_extra_data(extra_data.to_vec()); - Ok(to_value(&true)) - }) + + take_weak!(self.miner).set_extra_data(extra_data.to_vec()); + Ok(true) } - fn set_author(&self, params: Params) -> Result { + fn set_author(&self, author: H160) -> Result { try!(self.active()); - from_params::<(H160,)>(params).and_then(|(author,)| { - take_weak!(self.miner).set_author(author.into()); - Ok(to_value(&true)) - }) + + take_weak!(self.miner).set_author(author.into()); + Ok(true) } - fn set_transactions_limit(&self, params: Params) -> Result { + fn 
set_transactions_limit(&self, limit: usize) -> Result { try!(self.active()); - from_params::<(usize,)>(params).and_then(|(limit,)| { - take_weak!(self.miner).set_transactions_limit(limit); - Ok(to_value(&true)) - }) + + take_weak!(self.miner).set_transactions_limit(limit); + Ok(true) } - fn set_tx_gas_limit(&self, params: Params) -> Result { + fn set_tx_gas_limit(&self, limit: U256) -> Result { try!(self.active()); - from_params::<(U256,)>(params).and_then(|(limit,)| { - take_weak!(self.miner).set_tx_gas_limit(limit.into()); - Ok(to_value(&true)) - }) + + take_weak!(self.miner).set_tx_gas_limit(limit.into()); + Ok(true) } - fn add_reserved_peer(&self, params: Params) -> Result { + fn add_reserved_peer(&self, peer: String) -> Result { try!(self.active()); - from_params::<(String,)>(params).and_then(|(peer,)| { - match take_weak!(self.net).add_reserved_peer(peer) { - Ok(()) => Ok(to_value(&true)), - Err(e) => Err(errors::invalid_params("Peer address", e)), - } - }) + + match take_weak!(self.net).add_reserved_peer(peer) { + Ok(()) => Ok(true), + Err(e) => Err(errors::invalid_params("Peer address", e)), + } } - fn remove_reserved_peer(&self, params: Params) -> Result { + fn remove_reserved_peer(&self, peer: String) -> Result { try!(self.active()); - from_params::<(String,)>(params).and_then(|(peer,)| { - match take_weak!(self.net).remove_reserved_peer(peer) { - Ok(()) => Ok(to_value(&true)), - Err(e) => Err(errors::invalid_params("Peer address", e)), - } - }) + + match take_weak!(self.net).remove_reserved_peer(peer) { + Ok(()) => Ok(true), + Err(e) => Err(errors::invalid_params("Peer address", e)), + } } - fn drop_non_reserved_peers(&self, params: Params) -> Result { + fn drop_non_reserved_peers(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); + take_weak!(self.net).deny_unreserved_peers(); - Ok(to_value(&true)) + Ok(true) } - fn accept_non_reserved_peers(&self, params: Params) -> Result { + fn accept_non_reserved_peers(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); + take_weak!(self.net).accept_unreserved_peers(); - Ok(to_value(&true)) + Ok(true) } - fn start_network(&self, params: Params) -> Result { - try!(expect_no_params(params)); + fn start_network(&self) -> Result { take_weak!(self.net).start_network(); - Ok(Value::Bool(true)) + Ok(true) } - fn stop_network(&self, params: Params) -> Result { - try!(expect_no_params(params)); + fn stop_network(&self) -> Result { take_weak!(self.net).stop_network(); - Ok(Value::Bool(true)) + Ok(true) } } diff --git a/rpc/src/v1/impls/net.rs b/rpc/src/v1/impls/net.rs index 9c22a3638..f0e836fb7 100644 --- a/rpc/src/v1/impls/net.rs +++ b/rpc/src/v1/impls/net.rs @@ -16,10 +16,9 @@ //! Net rpc implementation. use std::sync::{Arc, Weak}; -use jsonrpc_core::*; +use jsonrpc_core::Error; use ethsync::SyncProvider; use v1::traits::Net; -use v1::helpers::params::expect_no_params; /// Net rpc implementation. 
pub struct NetClient where S: SyncProvider { @@ -36,20 +35,19 @@ impl NetClient where S: SyncProvider { } impl Net for NetClient where S: SyncProvider + 'static { - fn version(&self, params: Params) -> Result { - try!(expect_no_params(params)); - Ok(Value::String(format!("{}", take_weak!(self.sync).status().network_id).to_owned())) + fn version(&self) -> Result { + Ok(format!("{}", take_weak!(self.sync).status().network_id).to_owned()) } - fn peer_count(&self, params: Params) -> Result { - try!(expect_no_params(params)); - Ok(Value::String(format!("0x{:x}", take_weak!(self.sync).status().num_peers as u64).to_owned())) + fn peer_count(&self) -> Result { + Ok(format!("0x{:x}", take_weak!(self.sync).status().num_peers as u64).to_owned()) } - fn is_listening(&self, params: Params) -> Result { - try!(expect_no_params(params)); + fn is_listening(&self) -> Result { // right now (11 march 2016), we are always listening for incoming connections - Ok(Value::Bool(true)) + // + // (this may not be true now -- 26 september 2016) + Ok(true) } } diff --git a/rpc/src/v1/impls/rpc.rs b/rpc/src/v1/impls/rpc.rs index fafc92fe5..7f92c1ed9 100644 --- a/rpc/src/v1/impls/rpc.rs +++ b/rpc/src/v1/impls/rpc.rs @@ -16,9 +16,8 @@ //! RPC generic methods implementation. use std::collections::BTreeMap; -use jsonrpc_core::*; +use jsonrpc_core::Error; use v1::traits::Rpc; -use v1::helpers::params::expect_no_params; /// RPC generic methods implementation. pub struct RpcClient { @@ -40,26 +39,26 @@ impl RpcClient { } impl Rpc for RpcClient { - fn rpc_modules(&self, params: Params) -> Result { - try!(expect_no_params(params)); + fn rpc_modules(&self) -> Result, Error> { let modules = self.modules.iter() .fold(BTreeMap::new(), |mut map, (k, v)| { - map.insert(k.to_owned(), Value::String(v.to_owned())); + map.insert(k.to_owned(), v.to_owned()); map }); - Ok(Value::Object(modules)) + + Ok(modules) } - fn modules(&self, params: Params) -> Result { - try!(expect_no_params(params)); + fn modules(&self) -> Result, Error> { let modules = self.modules.iter() .filter(|&(k, _v)| { self.valid_apis.contains(k) }) .fold(BTreeMap::new(), |mut map, (k, v)| { - map.insert(k.to_owned(), Value::String(v.to_owned())); + map.insert(k.to_owned(), v.to_owned()); map }); - Ok(Value::Object(modules)) + + Ok(modules) } } diff --git a/rpc/src/v1/tests/mocked/ethcore_set.rs b/rpc/src/v1/tests/mocked/ethcore_set.rs index eaa4cb0cb..e87d49b8c 100644 --- a/rpc/src/v1/tests/mocked/ethcore_set.rs +++ b/rpc/src/v1/tests/mocked/ethcore_set.rs @@ -115,4 +115,4 @@ fn rpc_ethcore_set_transactions_limit() { assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); assert_eq!(miner.transactions_limit(), 10_240_240); -} +} \ No newline at end of file diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index 80789fd0e..62301e21f 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -28,174 +28,173 @@ build_rpc_trait! { /// Eth rpc interface. pub trait Eth { /// Returns protocol version encoded as a string (quotes are necessary). - #[name("eth_protocolVersion")] + #[rpc(name = "eth_protocolVersion")] fn protocol_version(&self) -> Result; /// Returns an object with data about the sync status or false. (wtf?) - #[name("eth_syncing")] + #[rpc(name = "eth_syncing")] fn syncing(&self) -> Result; /// Returns the number of hashes per second that the node is mining with. - #[name("eth_hashrate")] + #[rpc(name = "eth_hashrate")] fn hashrate(&self) -> Result; /// Returns block author. 
- #[name("eth_coinbase")] + #[rpc(name = "eth_coinbase")] fn author(&self) -> Result; /// Returns true if client is actively mining new blocks. - #[name("eth_mining")] + #[rpc(name = "eth_mining")] fn is_mining(&self) -> Result; /// Returns current gas_price. - #[name("eth_gasPrice")] + #[rpc(name = "eth_gasPrice")] fn gas_price(&self) -> Result; /// Returns accounts list. - #[name("eth_accounts")] + #[rpc(name = "eth_accounts")] fn accounts(&self) -> Result, Error>; /// Returns highest block number. - #[name("eth_blockNumber")] + #[rpc(name = "eth_blockNumber")] fn block_number(&self) -> Result; /// Returns balance of the given account. - #[name("eth_getBalance")] + #[rpc(name = "eth_getBalance")] fn balance(&self, H160, Trailing) -> Result; /// Returns content of the storage at given address. - #[name("eth_getStorageAt")] + #[rpc(name = "eth_getStorageAt")] fn storage_at(&self, H160, U256, Trailing) -> Result; /// Returns block with given hash. - #[name("eth_getBlockByHash")] + #[rpc(name = "eth_getBlockByHash")] fn block_by_hash(&self, H256, bool) -> Result, Error>; /// Returns block with given number. - #[name("eth_getBlockByNumber")] + #[rpc(name = "eth_getBlockByNumber")] fn block_by_number(&self, BlockNumber, bool) -> Result, Error>; /// Returns the number of transactions sent from given address at given time (block number). - #[name("eth_getTransactionCount")] + #[rpc(name = "eth_getTransactionCount")] fn transaction_count(&self, H160, Trailing) -> Result; /// Returns the number of transactions in a block with given hash. - #[name("eth_getBlockTransactionCountByHash")] + #[rpc(name = "eth_getBlockTransactionCountByHash")] fn block_transaction_count_by_hash(&self, H256) -> Result, Error>; /// Returns the number of transactions in a block with given block number. - #[name("eth_getBlockTransactionCountByNumber")] + #[rpc(name = "eth_getBlockTransactionCountByNumber")] fn block_transaction_count_by_number(&self, BlockNumber) -> Result, Error>; /// Returns the number of uncles in a block with given hash. - #[name("eth_getUncleCountByBlockHash")] + #[rpc(name = "eth_getUncleCountByBlockHash")] fn block_uncles_count_by_hash(&self, H256) -> Result, Error>; /// Returns the number of uncles in a block with given block number. - #[name("eth_getUncleCountByBlockNumber")] + #[rpc(name = "eth_getUncleCountByBlockNumber")] fn block_uncles_count_by_number(&self, BlockNumber) -> Result, Error>; /// Returns the code at given address at given time (block number). - #[name("eth_getCode")] + #[rpc(name = "eth_getCode")] fn code_at(&self, H160, Trailing) -> Result; /// Sends signed transaction, returning its hash. - #[name("eth_sendRawTransaction")] + #[rpc(name = "eth_sendRawTransaction")] fn send_raw_transaction(&self, Bytes) -> Result; /// Call contract, returning the output data. - #[name("eth_call")] + #[rpc(name = "eth_call")] fn call(&self, CallRequest, Trailing) -> Result; /// Estimate gas needed for execution of given contract. - #[name("eth_estimateGas")] + #[rpc(name = "eth_estimateGas")] fn estimate_gas(&self, CallRequest, Trailing) -> Result; /// Get transaction by its hash. - #[name("eth_getTransactionByHash")] + #[rpc(name = "eth_getTransactionByHash")] fn transaction_by_hash(&self, H256) -> Result, Error>; /// Returns transaction at given block hash and index. 
- #[name("eth_getTransactionByBlockHashAndIndex")] + #[rpc(name = "eth_getTransactionByBlockHashAndIndex")] fn transaction_by_block_hash_and_index(&self, H256, Index) -> Result, Error>; /// Returns transaction by given block number and index. - #[name("eth_getTransactionByBlockNumberAndIndex")] + #[rpc(name = "eth_getTransactionByBlockNumberAndIndex")] fn transaction_by_block_number_and_index(&self, BlockNumber, Index) -> Result, Error>; /// Returns transaction receipt. - #[name("eth_getTransactionReceipt")] + #[rpc(name = "eth_getTransactionReceipt")] fn transaction_receipt(&self, H256) -> Result, Error>; /// Returns an uncles at given block and index. - #[name("eth_getUncleByBlockHashAndIndex")] + #[rpc(name = "eth_getUncleByBlockHashAndIndex")] fn uncle_by_block_hash_and_index(&self, H256, Index) -> Result, Error>; /// Returns an uncles at given block and index. - #[name("eth_getUncleByBlockNumberAndIndex")] + #[rpc(name = "eth_getUncleByBlockNumberAndIndex")] fn uncle_by_block_number_and_index(&self, BlockNumber, Index) -> Result, Error>; /// Returns available compilers. - #[name("eth_getCompilers")] + #[rpc(name = "eth_getCompilers")] fn compilers(&self) -> Result, Error>; /// Compiles lll code. - #[name("eth_compileLLL")] + #[rpc(name = "eth_compileLLL")] fn compile_lll(&self, String) -> Result; /// Compiles solidity. - #[name("eth_compileSolidity")] + #[rpc(name = "eth_compileSolidity")] fn compile_solidity(&self, String) -> Result; /// Compiles serpent. - #[name("eth_compileSerpent")] + #[rpc(name = "eth_compileSerpent")] fn compile_serpent(&self, String) -> Result; /// Returns logs matching given filter object. - #[name("eth_getLogs")] + #[rpc(name = "eth_getLogs")] fn logs(&self, Filter) -> Result, Error>; /// Returns the hash of the current block, the seedHash, and the boundary condition to be met. - #[name("eth_getWork")] + #[rpc(name = "eth_getWork")] fn work(&self, Trailing) -> Result; /// Used for submitting a proof-of-work solution. - #[name("eth_submitWork")] + #[rpc(name = "eth_submitWork")] fn submit_work(&self, H64, H256, H256) -> Result; /// Used for submitting mining hashrate. - #[name("eth_submitHashrate")] + #[rpc(name = "eth_submitHashrate")] fn submit_hashrate(&self, U256, H256) -> Result; } } build_rpc_trait! { - /// Eth filters rpc api (polling). // TODO: do filters api properly pub trait EthFilter { /// Returns id of new filter. - #[name("eth_newFilter")] + #[rpc(name = "eth_newFilter")] fn new_filter(&self, Filter) -> Result; /// Returns id of new block filter. - #[name("eth_newBlockFilter")] + #[rpc(name = "eth_newBlockFilter")] fn new_block_filter(&self) -> Result; /// Returns id of new block filter. - #[name("eth_newPendingTransactionFilter")] + #[rpc(name = "eth_newPendingTransactionFilter")] fn new_pending_transaction_filter(&self) -> Result; /// Returns filter changes since last poll. - #[name("eth_getFilterChanges")] + #[rpc(name = "eth_getFilterChanges")] fn filter_changes(&self, Index) -> Result; /// Returns all logs matching given filter (in a range 'from' - 'to'). - #[name("eth_getFilterLogs")] + #[rpc(name = "eth_getFilterLogs")] fn filter_logs(&self, Index) -> Result, Error>; /// Uninstalls filter. - #[name("eth_uninstallFilter")] + #[rpc(name = "eth_uninstallFilter")] fn uninstall_filter(&self, Index) -> Result; } } diff --git a/rpc/src/v1/traits/ethcore.rs b/rpc/src/v1/traits/ethcore.rs index 011b78c8b..25bb210fd 100644 --- a/rpc/src/v1/traits/ethcore.rs +++ b/rpc/src/v1/traits/ethcore.rs @@ -15,113 +15,107 @@ // along with Parity. If not, see . 
//! Ethcore-specific rpc interface. -use std::sync::Arc; -use jsonrpc_core::*; +use jsonrpc_core::Error; -/// Ethcore-specific rpc interface. -pub trait Ethcore: Sized + Send + Sync + 'static { +use v1::helpers::auto_args::{Wrap, WrapAsync, Ready}; +use v1::types::{H160, H256, H512, U256, Bytes, Peers, Transaction, RpcSettings}; - /// Returns current transactions limit. - fn transactions_limit(&self, _: Params) -> Result; +build_rpc_trait! { + /// Ethcore-specific rpc interface. + pub trait Ethcore { + /// Returns current transactions limit. + #[rpc(name = "ethcore_transactionsLimit")] + fn transactions_limit(&self) -> Result; - /// Returns mining extra data. - fn extra_data(&self, _: Params) -> Result; + /// Returns mining extra data. + #[rpc(name = "ethcore_extraData")] + fn extra_data(&self) -> Result; - /// Returns mining gas floor target. - fn gas_floor_target(&self, _: Params) -> Result; + /// Returns mining gas floor target. + #[rpc(name = "ethcore_gasFloorTarget")] + fn gas_floor_target(&self) -> Result; - /// Returns mining gas floor cap. - fn gas_ceil_target(&self, _: Params) -> Result; + /// Returns mining gas floor cap. + #[rpc(name = "ethcore_gasCeilTarget")] + fn gas_ceil_target(&self) -> Result; - /// Returns minimal gas price for transaction to be included in queue. - fn min_gas_price(&self, _: Params) -> Result; + /// Returns minimal gas price for transaction to be included in queue. + #[rpc(name = "ethcore_minGasPrice")] + fn min_gas_price(&self) -> Result; - /// Returns latest logs - fn dev_logs(&self, _: Params) -> Result; + /// Returns latest logs + #[rpc(name = "ethcore_devLogs")] + fn dev_logs(&self) -> Result, Error>; - /// Returns logs levels - fn dev_logs_levels(&self, _: Params) -> Result; + /// Returns logs levels + #[rpc(name = "ethcore_devLogsLevels")] + fn dev_logs_levels(&self) -> Result; - /// Returns chain name - fn net_chain(&self, _: Params) -> Result; + /// Returns chain name + #[rpc(name = "ethcore_netChain")] + fn net_chain(&self) -> Result; - /// Returns peers details - fn net_peers(&self, _: Params) -> Result; + /// Returns peers details + #[rpc(name = "ethcore_netPeers")] + fn net_peers(&self) -> Result; - /// Returns network port - fn net_port(&self, _: Params) -> Result; + /// Returns network port + #[rpc(name = "ethcore_netPort")] + fn net_port(&self) -> Result; - /// Returns rpc settings - fn rpc_settings(&self, _: Params) -> Result; + /// Returns rpc settings + #[rpc(name = "ethcore_rpcSettings")] + fn rpc_settings(&self) -> Result; - /// Returns node name - fn node_name(&self, _: Params) -> Result; + /// Returns node name + #[rpc(name = "ethcore_nodeName")] + fn node_name(&self) -> Result; - /// Returns default extra data - fn default_extra_data(&self, _: Params) -> Result; + /// Returns default extra data + #[rpc(name = "ethcore_defaultExtraData")] + fn default_extra_data(&self) -> Result; - /// Returns distribution of gas price in latest blocks. - fn gas_price_statistics(&self, _: Params) -> Result; + /// Returns distribution of gas price in latest blocks. 
+ #[rpc(name = "ethcore_gasPriceStatistics")] + fn gas_price_statistics(&self) -> Result, Error>; - /// Returns number of unsigned transactions waiting in the signer queue (if signer enabled) - /// Returns error when signer is disabled - fn unsigned_transactions_count(&self, _: Params) -> Result; + /// Returns number of unsigned transactions waiting in the signer queue (if signer enabled) + /// Returns error when signer is disabled + #[rpc(name = "ethcore_unsignedTransactionsCount")] + fn unsigned_transactions_count(&self) -> Result; - /// Returns a cryptographically random phrase sufficient for securely seeding a secret key. - fn generate_secret_phrase(&self, _: Params) -> Result; + /// Returns a cryptographically random phrase sufficient for securely seeding a secret key. + #[rpc(name = "ethcore_generateSecretPhrase")] + fn generate_secret_phrase(&self) -> Result; - /// Returns whatever address would be derived from the given phrase if it were to seed a brainwallet. - fn phrase_to_address(&self, _: Params) -> Result; + /// Returns whatever address would be derived from the given phrase if it were to seed a brainwallet. + #[rpc(name = "ethcore_phraseToAddress")] + fn phrase_to_address(&self, String) -> Result; - /// Returns the value of the registrar for this network. - fn registry_address(&self, _: Params) -> Result; + /// Returns the value of the registrar for this network. + #[rpc(name = "ethcore_registryAddress")] + fn registry_address(&self) -> Result, Error>; - /// Returns all addresses if Fat DB is enabled (`--fat-db`), or null if not. - /// Takes no parameters. - fn list_accounts(&self, _: Params) -> Result; + /// Returns all addresses if Fat DB is enabled (`--fat-db`), or null if not. + #[rpc(name = "ethcore_listAccounts")] + fn list_accounts(&self) -> Result>, Error>; - /// Returns all storage keys of the given address (first parameter) if Fat DB is enabled (`--fat-db`), - /// or null if not. - fn list_storage_keys(&self, _: Params) -> Result; + /// Returns all storage keys of the given address (first parameter) if Fat DB is enabled (`--fat-db`), + /// or null if not. + #[rpc(name = "ethcore_listStorageKeys")] + fn list_storage_keys(&self, H160) -> Result>, Error>; - /// Encrypt some data with a public key under ECIES. - /// First parameter is the 512-byte destination public key, second is the message. - fn encrypt_message(&self, _: Params) -> Result; + /// Encrypt some data with a public key under ECIES. + /// First parameter is the 512-byte destination public key, second is the message. + #[rpc(name = "ethcore_encryptMessage")] + fn encrypt_message(&self, H512, Bytes) -> Result; - /// Returns all pending (current) transactions from transaction queue. - fn pending_transactions(&self, _: Params) -> Result; + /// Returns all pending transactions from transaction queue. + #[rpc(name = "ethcore_pendingTransactions")] + fn pending_transactions(&self) -> Result, Error>; - /// Hash a file content under given URL. - fn hash_content(&self, _: Params, _: Ready); - - /// Should be used to convert object to io delegate. 
- fn to_delegate(self) -> IoDelegate { - let mut delegate = IoDelegate::new(Arc::new(self)); - - delegate.add_method("ethcore_extraData", Ethcore::extra_data); - delegate.add_method("ethcore_gasFloorTarget", Ethcore::gas_floor_target); - delegate.add_method("ethcore_gasCeilTarget", Ethcore::gas_ceil_target); - delegate.add_method("ethcore_minGasPrice", Ethcore::min_gas_price); - delegate.add_method("ethcore_transactionsLimit", Ethcore::transactions_limit); - delegate.add_method("ethcore_devLogs", Ethcore::dev_logs); - delegate.add_method("ethcore_devLogsLevels", Ethcore::dev_logs_levels); - delegate.add_method("ethcore_netChain", Ethcore::net_chain); - delegate.add_method("ethcore_netPeers", Ethcore::net_peers); - delegate.add_method("ethcore_netPort", Ethcore::net_port); - delegate.add_method("ethcore_rpcSettings", Ethcore::rpc_settings); - delegate.add_method("ethcore_nodeName", Ethcore::node_name); - delegate.add_method("ethcore_defaultExtraData", Ethcore::default_extra_data); - delegate.add_method("ethcore_gasPriceStatistics", Ethcore::gas_price_statistics); - delegate.add_method("ethcore_unsignedTransactionsCount", Ethcore::unsigned_transactions_count); - delegate.add_method("ethcore_generateSecretPhrase", Ethcore::generate_secret_phrase); - delegate.add_method("ethcore_phraseToAddress", Ethcore::phrase_to_address); - delegate.add_method("ethcore_registryAddress", Ethcore::registry_address); - delegate.add_method("ethcore_listAccounts", Ethcore::list_accounts); - delegate.add_method("ethcore_listStorageKeys", Ethcore::list_storage_keys); - delegate.add_method("ethcore_encryptMessage", Ethcore::encrypt_message); - delegate.add_method("ethcore_pendingTransactions", Ethcore::pending_transactions); - delegate.add_async_method("ethcore_hashContent", Ethcore::hash_content); - - delegate + /// Hash a file content under given URL. + #[rpc(async, name = "ethcore_hashContent")] + fn hash_content(&self, Ready, String); } -} +} \ No newline at end of file diff --git a/rpc/src/v1/traits/ethcore_set.rs b/rpc/src/v1/traits/ethcore_set.rs index bd1f6bf7c..9946314d6 100644 --- a/rpc/src/v1/traits/ethcore_set.rs +++ b/rpc/src/v1/traits/ethcore_set.rs @@ -16,66 +16,64 @@ //! Ethcore-specific rpc interface for operations altering the settings. -use std::sync::Arc; -use jsonrpc_core::*; +use jsonrpc_core::Error; -/// Ethcore-specific rpc interface for operations altering the settings. -pub trait EthcoreSet: Sized + Send + Sync + 'static { +use v1::helpers::auto_args::Wrap; +use v1::types::{Bytes, H160, U256}; - /// Sets new minimal gas price for mined blocks. - fn set_min_gas_price(&self, _: Params) -> Result; +build_rpc_trait! { + /// Ethcore-specific rpc interface for operations altering the settings. + pub trait EthcoreSet { + /// Sets new minimal gas price for mined blocks. + #[rpc(name = "ethcore_setMinGasPrice")] + fn set_min_gas_price(&self, U256) -> Result; - /// Sets new gas floor target for mined blocks. - fn set_gas_floor_target(&self, _: Params) -> Result; + /// Sets new gas floor target for mined blocks. + #[rpc(name = "ethcore_setGasFloorTarget")] + fn set_gas_floor_target(&self, U256) -> Result; - /// Sets new gas ceiling target for mined blocks. - fn set_gas_ceil_target(&self, _: Params) -> Result; + /// Sets new gas ceiling target for mined blocks. + #[rpc(name = "ethcore_setGasCeilTarget")] + fn set_gas_ceil_target(&self, U256) -> Result; - /// Sets new extra data for mined blocks. - fn set_extra_data(&self, _: Params) -> Result; + /// Sets new extra data for mined blocks. 
+ #[rpc(name = "ethcore_setExtraData")] + fn set_extra_data(&self, Bytes) -> Result; - /// Sets new author for mined block. - fn set_author(&self, _: Params) -> Result; + /// Sets new author for mined block. + #[rpc(name = "ethcore_setAuthor")] + fn set_author(&self, H160) -> Result; - /// Sets the limits for transaction queue. - fn set_transactions_limit(&self, _: Params) -> Result; + /// Sets the limits for transaction queue. + #[rpc(name = "ethcore_setTransactionsLimit")] + fn set_transactions_limit(&self, usize) -> Result; - /// Sets the maximum amount of gas a single transaction may consume. - fn set_tx_gas_limit(&self, _: Params) -> Result; + /// Sets the maximum amount of gas a single transaction may consume. + #[rpc(name = "ethcore_setMaxTransactionGas")] + fn set_tx_gas_limit(&self, U256) -> Result; - /// Add a reserved peer. - fn add_reserved_peer(&self, _: Params) -> Result; + /// Add a reserved peer. + #[rpc(name = "ethcore_addReservedPeer")] + fn add_reserved_peer(&self, String) -> Result; - /// Remove a reserved peer. - fn remove_reserved_peer(&self, _: Params) -> Result; + /// Remove a reserved peer. + #[rpc(name = "ethcore_removeReservedPeer")] + fn remove_reserved_peer(&self, String) -> Result; - /// Drop all non-reserved peers. - fn drop_non_reserved_peers(&self, _: Params) -> Result; + /// Drop all non-reserved peers. + #[rpc(name = "ethcore_dropNonReservedPeers")] + fn drop_non_reserved_peers(&self) -> Result; - /// Accept non-reserved peers (default behavior) - fn accept_non_reserved_peers(&self, _: Params) -> Result; + /// Accept non-reserved peers (default behavior) + #[rpc(name = "ethcore_acceptNonReservedPeers")] + fn accept_non_reserved_peers(&self) -> Result; - /// Start the network. - fn start_network(&self, _: Params) -> Result; + /// Start the network. + #[rpc(name = "ethcore_startNetwork")] + fn start_network(&self) -> Result; - /// Stop the network. - fn stop_network(&self, _: Params) -> Result; - - /// Should be used to convert object to io delegate. - fn to_delegate(self) -> IoDelegate { - let mut delegate = IoDelegate::new(Arc::new(self)); - delegate.add_method("ethcore_setMinGasPrice", EthcoreSet::set_min_gas_price); - delegate.add_method("ethcore_setGasFloorTarget", EthcoreSet::set_gas_floor_target); - delegate.add_method("ethcore_setGasCeilTarget", EthcoreSet::set_gas_ceil_target); - delegate.add_method("ethcore_setExtraData", EthcoreSet::set_extra_data); - delegate.add_method("ethcore_setAuthor", EthcoreSet::set_author); - delegate.add_method("ethcore_setMaxTransactionGas", EthcoreSet::set_tx_gas_limit); - delegate.add_method("ethcore_setTransactionsLimit", EthcoreSet::set_transactions_limit); - delegate.add_method("ethcore_addReservedPeer", EthcoreSet::add_reserved_peer); - delegate.add_method("ethcore_removeReservedPeer", EthcoreSet::remove_reserved_peer); - delegate.add_method("ethcore_dropNonReservedPeers", EthcoreSet::drop_non_reserved_peers); - delegate.add_method("ethcore_acceptNonReservedPeers", EthcoreSet::accept_non_reserved_peers); - - delegate + /// Stop the network. + #[rpc(name = "ethcore_stopNetwork")] + fn stop_network(&self) -> Result; } -} +} \ No newline at end of file diff --git a/rpc/src/v1/traits/net.rs b/rpc/src/v1/traits/net.rs index 56fba3e32..36bd8be70 100644 --- a/rpc/src/v1/traits/net.rs +++ b/rpc/src/v1/traits/net.rs @@ -15,27 +15,24 @@ // along with Parity. If not, see . //! Net rpc interface. -use std::sync::Arc; -use jsonrpc_core::*; +use jsonrpc_core::Error; -/// Net rpc interface. 
-pub trait Net: Sized + Send + Sync + 'static { - /// Returns protocol version. - fn version(&self, _: Params) -> Result; +use v1::helpers::auto_args::Wrap; - /// Returns number of peers connected to node. - fn peer_count(&self, _: Params) -> Result; +build_rpc_trait! { + /// Net rpc interface. + pub trait Net { + /// Returns protocol version. + #[rpc(name = "net_version")] + fn version(&self) -> Result; - /// Returns true if client is actively listening for network connections. - /// Otherwise false. - fn is_listening(&self, _: Params) -> Result; + /// Returns number of peers connected to node. + #[rpc(name = "net_peerCount")] + fn peer_count(&self) -> Result; - /// Should be used to convert object to io delegate. - fn to_delegate(self) -> IoDelegate { - let mut delegate = IoDelegate::new(Arc::new(self)); - delegate.add_method("net_version", Net::version); - delegate.add_method("net_peerCount", Net::peer_count); - delegate.add_method("net_listening", Net::is_listening); - delegate + /// Returns true if client is actively listening for network connections. + /// Otherwise false. + #[rpc(name = "net_listening")] + fn is_listening(&self) -> Result; } -} +} \ No newline at end of file diff --git a/rpc/src/v1/traits/rpc.rs b/rpc/src/v1/traits/rpc.rs index 669d0d8c6..2109442a7 100644 --- a/rpc/src/v1/traits/rpc.rs +++ b/rpc/src/v1/traits/rpc.rs @@ -16,26 +16,21 @@ //! RPC interface. -use std::sync::Arc; -use jsonrpc_core::*; +use jsonrpc_core::Error; -/// RPC Interface. -pub trait Rpc: Sized + Send + Sync + 'static { +use v1::helpers::auto_args::Wrap; - /// Returns supported modules for Geth 1.3.6 - fn modules(&self, _: Params) -> Result; +use std::collections::BTreeMap; - /// Returns supported modules for Geth 1.4.0 - fn rpc_modules(&self, _: Params) -> Result; +build_rpc_trait! { + /// RPC Interface. + pub trait Rpc { + /// Returns supported modules for Geth 1.3.6 + #[rpc(name = "modules")] + fn modules(&self) -> Result, Error>; - /// Should be used to convert object to io delegate. - fn to_delegate(self) -> IoDelegate { - let mut delegate = IoDelegate::new(Arc::new(self)); - // Geth 1.3.6 compatibility - delegate.add_method("modules", Rpc::modules); - // Geth 1.4.0 compatibility - delegate.add_method("rpc_modules", Rpc::rpc_modules); - delegate + /// Returns supported modules for Geth 1.4.0 + #[rpc(name = "rpc_modules")] + fn rpc_modules(&self) -> Result, Error>; } -} - +} \ No newline at end of file diff --git a/rpc/src/v1/types/mod.rs.in b/rpc/src/v1/types/mod.rs.in index 1369037ed..8aaf90eab 100644 --- a/rpc/src/v1/types/mod.rs.in +++ b/rpc/src/v1/types/mod.rs.in @@ -27,6 +27,7 @@ mod sync; mod transaction; mod transaction_request; mod receipt; +mod rpc_settings; mod trace; mod trace_filter; mod uint; @@ -45,6 +46,7 @@ pub use self::sync::{SyncStatus, SyncInfo, Peers}; pub use self::transaction::Transaction; pub use self::transaction_request::TransactionRequest; pub use self::receipt::Receipt; +pub use self::rpc_settings::RpcSettings; pub use self::trace::{LocalizedTrace, TraceResults}; pub use self::trace_filter::TraceFilter; pub use self::uint::U256; diff --git a/rpc/src/v1/types/rpc_settings.rs b/rpc/src/v1/types/rpc_settings.rs new file mode 100644 index 000000000..9a20afa7a --- /dev/null +++ b/rpc/src/v1/types/rpc_settings.rs @@ -0,0 +1,28 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! RPC Settings data. + +/// Values of RPC settings. +#[derive(Serialize, Deserialize)] +pub struct RpcSettings { + /// Whether RPC is enabled. + pub enabled: bool, + /// The interface being listened on. + pub interface: String, + /// The port being listened on. + pub port: u64, +} \ No newline at end of file From 0e8dda740fcda5c4bd37a23f32a8386a86e8fd0a Mon Sep 17 00:00:00 2001 From: svyatonik Date: Wed, 5 Oct 2016 00:13:07 +0300 Subject: [PATCH 21/23] * PR 2464: human-readable error message + struct documentation --- parity/account.rs | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/parity/account.rs b/parity/account.rs index 6a05e945d..9d400cab5 100644 --- a/parity/account.rs +++ b/parity/account.rs @@ -40,9 +40,12 @@ pub struct ImportAccounts { pub to: String, } +/// Parameters for geth accounts' import #[derive(Debug, PartialEq)] pub struct ImportFromGethAccounts { + /// import mainnet (false) or testnet (true) accounts pub testnet: bool, + /// directory to import accounts to pub to: String, } @@ -59,6 +62,13 @@ fn keys_dir(path: String) -> Result { DiskDirectory::create(path).map_err(|e| format!("Could not open keys directory: {}", e)) } +fn secret_store(dir: Box, iterations: Option) -> Result { + match iterations { + Some(i) => EthStore::open_with_iterations(dir, i), + _ => EthStore::open(dir) + }.map_err(|e| format!("Could not open keys store: {}", e)) +} + fn new(n: NewAccount) -> Result { let password: String = match n.password_file { Some(file) => try!(password_from_file(file)), @@ -66,7 +76,7 @@ fn new(n: NewAccount) -> Result { }; let dir = Box::new(try!(keys_dir(n.path))); - let secret_store = Box::new(EthStore::open_with_iterations(dir, n.iterations).unwrap()); + let secret_store = Box::new(try!(secret_store(dir, Some(n.iterations)))); let acc_provider = AccountProvider::new(secret_store); let new_account = try!(acc_provider.new_account(&password).map_err(|e| format!("Could not create new account: {}", e))); Ok(format!("{:?}", new_account)) @@ -74,7 +84,7 @@ fn new(n: NewAccount) -> Result { fn list(path: String) -> Result { let dir = Box::new(try!(keys_dir(path))); - let secret_store = Box::new(EthStore::open(dir).unwrap()); + let secret_store = Box::new(try!(secret_store(dir, None))); let acc_provider = AccountProvider::new(secret_store); let accounts = acc_provider.accounts(); let result = accounts.into_iter() @@ -100,7 +110,7 @@ fn import_geth(i: ImportFromGethAccounts) -> Result { use ethcore::ethstore::Error; let dir = Box::new(try!(keys_dir(i.to))); - let secret_store = EthStore::open(dir).unwrap(); + let secret_store = Box::new(try!(secret_store(dir, None))); let geth_accounts = read_geth_accounts(i.testnet); match secret_store.import_geth_accounts(geth_accounts, i.testnet) { Ok(v) => Ok(format!("Successfully imported {} account(s) from geth.", v.len())), From ced597e28204cd876a56cf0d870fd87202fa9ff6 Mon Sep 17 00:00:00 2001 From: 
"Denis S. Soldatov aka General-Beck" Date: Wed, 5 Oct 2016 10:53:19 +0700 Subject: [PATCH 22/23] Update gitlab-ci fix windows checksum --- .gitlab-ci.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 0ead0a91a..32e8d953f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -264,13 +264,14 @@ windows: - set INCLUDE=C:\Program Files (x86)\Microsoft SDKs\Windows\v7.1A\Include;C:\vs2015\VC\include;C:\Program Files (x86)\Windows Kits\10\Include\10.0.10240.0\ucrt - set LIB=C:\vs2015\VC\lib;C:\Program Files (x86)\Windows Kits\10\Lib\10.0.10240.0\ucrt\x64 - set RUST_BACKTRACE=1 + - set RUSTFLAGS=-Zorbit=off - rustup default stable-x86_64-pc-windows-msvc - cargo build --release --verbose - - md5sum target/release/parity >> checksum + - cmd md5sum target\release\parity >> checksum - aws configure set aws_access_key_id %s3_key% - aws configure set aws_secret_access_key %s3_secret% - - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity --body target/release/parity.exe - - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity --body target/release/parity.pdb + - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity --body target\release\parity.exe + - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity --body target\release\parity.pdb - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/checksum --body checksum tags: - rust-windows From a7e09d88427e64611e0f8f0562c43fc2e9fcb905 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 5 Oct 2016 10:36:09 +0300 Subject: [PATCH 23/23] Update codegen.rs --- ipc/codegen/src/codegen.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipc/codegen/src/codegen.rs b/ipc/codegen/src/codegen.rs index 9c7b33be8..9caa436bc 100644 --- a/ipc/codegen/src/codegen.rs +++ b/ipc/codegen/src/codegen.rs @@ -49,7 +49,7 @@ pub fn expand_ipc_implementation( let item = match *annotatable { Annotatable::Item(ref item) => item, _ => { - cx.span_err(meta_item.span, "`#[ipc]` may only be applied to struct implementations"); + cx.span_err(meta_item.span, "`#[ipc]` may only be applied to implementations and traits"); return; }, };