Merge branch 'master' into dev-chain

keorn 2016-11-14 10:05:34 +00:00
commit d8e6dbd981
30 changed files with 292 additions and 120 deletions

Cargo.lock (generated)
View File

@@ -1249,7 +1249,7 @@ dependencies = [
 [[package]]
 name = "parity-ui-precompiled"
 version = "1.4.0"
-source = "git+https://github.com/ethcore/js-precompiled.git#afaeb08a0f41ed41add35d86db4c751c5593e292"
+source = "git+https://github.com/ethcore/js-precompiled.git#bf33dd4aabd2adb2178576db5a4d23b8902d39b8"
 dependencies = [
  "parity-dapps-glue 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

View File

@@ -89,7 +89,7 @@ impl From<ethjson::blockchain::Account> for PodAccount {
 				let key: U256 = key.into();
 				let value: U256 = value.into();
 				(H256::from(key), H256::from(value))
-			}).collect()
+			}).collect(),
 		}
 	}
 }
@@ -99,8 +99,12 @@ impl From<ethjson::spec::Account> for PodAccount {
 		PodAccount {
 			balance: a.balance.map_or_else(U256::zero, Into::into),
 			nonce: a.nonce.map_or_else(U256::zero, Into::into),
-			code: a.code.map(Into::into).or_else(|| Some(Vec::new())),
-			storage: BTreeMap::new()
+			code: Some(a.code.map_or_else(Vec::new, Into::into)),
+			storage: a.storage.map_or_else(BTreeMap::new, |s| s.into_iter().map(|(key, value)| {
+				let key: U256 = key.into();
+				let value: U256 = value.into();
+				(H256::from(key), H256::from(value))
+			}).collect()),
 		}
 	}
 }
@@ -112,7 +116,7 @@ impl fmt::Display for PodAccount {
 			self.nonce,
 			self.code.as_ref().map_or(0, |c| c.len()),
 			self.code.as_ref().map_or_else(H256::new, |c| c.sha3()),
-			self.storage.len()
+			self.storage.len(),
 		)
 	}
 }
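
The conversion above leans on `Option::map_or_else` to default missing spec fields instead of dropping them. A minimal std-only sketch of that idiom; `JsonAccount` below is a hypothetical stand-in for the ethjson type, not the real one:

use std::collections::BTreeMap;

// Hypothetical stand-in for the ethjson spec account (illustration only).
struct JsonAccount {
    code: Option<Vec<u8>>,
    storage: Option<BTreeMap<u64, u64>>,
}

fn main() {
    let a = JsonAccount { code: None, storage: None };

    // `map_or_else` substitutes a default when a field is absent:
    // missing code becomes empty code, missing storage an empty map.
    let code: Vec<u8> = a.code.map_or_else(Vec::new, |c| c);
    let storage: BTreeMap<u64, u64> = a.storage.map_or_else(BTreeMap::new, |s| s);

    assert!(code.is_empty());
    assert!(storage.is_empty());
}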

View File

@@ -45,6 +45,8 @@ pub enum Error {
 	MissingCode(Vec<H256>),
 	/// Unrecognized code encoding.
 	UnrecognizedCodeState(u8),
+	/// Restoration aborted.
+	RestorationAborted,
 	/// Trie error.
 	Trie(TrieError),
 	/// Decoder error.
@@ -67,6 +69,7 @@ impl fmt::Display for Error {
 				a pruned database. Please re-run with the --pruning archive flag."),
 			Error::MissingCode(ref missing) => write!(f, "Incomplete snapshot: {} contract codes not found.", missing.len()),
 			Error::UnrecognizedCodeState(state) => write!(f, "Unrecognized code encoding ({})", state),
+			Error::RestorationAborted => write!(f, "Snapshot restoration aborted."),
 			Error::Io(ref err) => err.fmt(f),
 			Error::Decoder(ref err) => err.fmt(f),
 			Error::Trie(ref err) => err.fmt(f),

View File

@@ -407,30 +407,28 @@ impl StateRebuilder {
 	}
 
 	/// Feed an uncompressed state chunk into the rebuilder.
-	pub fn feed(&mut self, chunk: &[u8]) -> Result<(), ::error::Error> {
+	pub fn feed(&mut self, chunk: &[u8], flag: &AtomicBool) -> Result<(), ::error::Error> {
 		let rlp = UntrustedRlp::new(chunk);
 		let empty_rlp = StateAccount::new_basic(U256::zero(), U256::zero()).rlp();
-		let account_fat_rlps: Vec<_> = rlp.iter().map(|r| r.as_raw()).collect();
 		let mut pairs = Vec::with_capacity(rlp.item_count());
 
 		// initialize the pairs vector with empty values so we have slots to write into.
 		pairs.resize(rlp.item_count(), (H256::new(), Vec::new()));
 
-		let chunk_size = account_fat_rlps.len() / ::num_cpus::get() + 1;
+		let status = try!(rebuild_accounts(
+			self.db.as_hashdb_mut(),
+			rlp,
+			&mut pairs,
+			&self.code_map,
+			flag
+		));
 
-		// new code contained within this chunk.
-		let mut chunk_code = HashMap::new();
-
-		for (account_chunk, out_pairs_chunk) in account_fat_rlps.chunks(chunk_size).zip(pairs.chunks_mut(chunk_size)) {
-			let code_map = &self.code_map;
-			let status = try!(rebuild_accounts(self.db.as_hashdb_mut(), account_chunk, out_pairs_chunk, code_map));
-			chunk_code.extend(status.new_code);
-			for (addr_hash, code_hash) in status.missing_code {
-				self.missing_code.entry(code_hash).or_insert_with(Vec::new).push(addr_hash);
-			}
+		for (addr_hash, code_hash) in status.missing_code {
+			self.missing_code.entry(code_hash).or_insert_with(Vec::new).push(addr_hash);
 		}
 
 		// patch up all missing code. must be done after collecting all new missing code entries.
-		for (code_hash, code) in chunk_code {
+		for (code_hash, code) in status.new_code {
 			for addr_hash in self.missing_code.remove(&code_hash).unwrap_or_else(Vec::new) {
 				let mut db = AccountDBMut::from_hash(self.db.as_hashdb_mut(), addr_hash);
 				db.emplace(code_hash, DBValue::from_slice(&code));
@@ -450,6 +448,8 @@ impl StateRebuilder {
 		};
 
 		for (hash, thin_rlp) in pairs {
+			if !flag.load(Ordering::SeqCst) { return Err(Error::RestorationAborted.into()) }
+
 			if &thin_rlp[..] != &empty_rlp[..] {
 				self.bloom.set(&*hash);
 			}
@@ -487,17 +487,18 @@ struct RebuiltStatus {
 }
 
 // rebuild a set of accounts and their storage.
-// returns
+// returns a status detailing newly-loaded code and accounts missing code.
 fn rebuild_accounts(
 	db: &mut HashDB,
-	account_chunk: &[&[u8]],
+	account_fat_rlps: UntrustedRlp,
 	out_chunk: &mut [(H256, Bytes)],
-	code_map: &HashMap<H256, Bytes>
+	code_map: &HashMap<H256, Bytes>,
+	abort_flag: &AtomicBool
 ) -> Result<RebuiltStatus, ::error::Error>
 {
 	let mut status = RebuiltStatus::default();
-	for (account_pair, out) in account_chunk.into_iter().zip(out_chunk) {
-		let account_rlp = UntrustedRlp::new(account_pair);
+	for (account_rlp, out) in account_fat_rlps.into_iter().zip(out_chunk) {
+		if !abort_flag.load(Ordering::SeqCst) { return Err(Error::RestorationAborted.into()) }
+
 		let hash: H256 = try!(account_rlp.val_at(0));
 		let fat_rlp = try!(account_rlp.at(1));
@@ -580,7 +581,7 @@ impl BlockRebuilder {
 	/// Feed the rebuilder an uncompressed block chunk.
 	/// Returns the number of blocks fed or any errors.
-	pub fn feed(&mut self, chunk: &[u8], engine: &Engine) -> Result<u64, ::error::Error> {
+	pub fn feed(&mut self, chunk: &[u8], engine: &Engine, abort_flag: &AtomicBool) -> Result<u64, ::error::Error> {
 		use basic_types::Seal::With;
 		use util::U256;
 		use util::triehash::ordered_trie_root;
@@ -601,6 +602,8 @@ impl BlockRebuilder {
 		let parent_total_difficulty = try!(rlp.val_at::<U256>(2));
 
 		for idx in 3..item_count {
+			if !abort_flag.load(Ordering::SeqCst) { return Err(Error::RestorationAborted.into()) }
+
 			let pair = try!(rlp.at(idx));
 			let abridged_rlp = try!(pair.at(0)).as_raw().to_owned();
 			let abridged_block = AbridgedBlock::from_raw(abridged_rlp);
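
Both `feed` methods above poll a shared `AtomicBool` once per item, so another thread can cancel a long restoration cooperatively. A self-contained sketch of the pattern, with illustrative names rather than the real snapshot API:

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;

fn feed_items(items: &[u32], flag: &AtomicBool) -> Result<(), &'static str> {
    for item in items {
        // poll the flag on every item so an abort takes effect promptly
        if !flag.load(Ordering::SeqCst) { return Err("restoration aborted") }
        let _ = item; // real code would write the item into the database here
    }
    Ok(())
}

fn main() {
    let flag = Arc::new(AtomicBool::new(true));
    let worker_flag = flag.clone();
    let worker = thread::spawn(move || feed_items(&[1, 2, 3], &worker_flag));

    flag.store(false, Ordering::SeqCst); // any other thread can abort

    // depending on timing, the worker either finished or saw the abort
    let _ = worker.join().unwrap();
}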

View File

@@ -118,12 +118,12 @@ impl Restoration {
 		})
 	}
 
-	// feeds a state chunk
-	fn feed_state(&mut self, hash: H256, chunk: &[u8]) -> Result<(), Error> {
+	// feeds a state chunk, aborts early if `flag` becomes false.
+	fn feed_state(&mut self, hash: H256, chunk: &[u8], flag: &AtomicBool) -> Result<(), Error> {
 		if self.state_chunks_left.remove(&hash) {
 			let len = try!(snappy::decompress_into(chunk, &mut self.snappy_buffer));
 
-			try!(self.state.feed(&self.snappy_buffer[..len]));
+			try!(self.state.feed(&self.snappy_buffer[..len], flag));
 
 			if let Some(ref mut writer) = self.writer.as_mut() {
 				try!(writer.write_state_chunk(hash, chunk));
@@ -134,11 +134,11 @@ impl Restoration {
 	}
 
 	// feeds a block chunk
-	fn feed_blocks(&mut self, hash: H256, chunk: &[u8], engine: &Engine) -> Result<(), Error> {
+	fn feed_blocks(&mut self, hash: H256, chunk: &[u8], engine: &Engine, flag: &AtomicBool) -> Result<(), Error> {
 		if self.block_chunks_left.remove(&hash) {
 			let len = try!(snappy::decompress_into(chunk, &mut self.snappy_buffer));
 
-			try!(self.blocks.feed(&self.snappy_buffer[..len], engine));
+			try!(self.blocks.feed(&self.snappy_buffer[..len], engine, flag));
 			if let Some(ref mut writer) = self.writer.as_mut() {
 				try!(writer.write_block_chunk(hash, chunk));
 			}
@@ -224,6 +224,7 @@ pub struct Service {
 	db_restore: Arc<DatabaseRestore>,
 	progress: super::Progress,
 	taking_snapshot: AtomicBool,
+	restoring_snapshot: AtomicBool,
 }
 
 impl Service {
@@ -244,6 +245,7 @@ impl Service {
 			db_restore: params.db_restore,
 			progress: Default::default(),
 			taking_snapshot: AtomicBool::new(false),
+			restoring_snapshot: AtomicBool::new(false),
 		};
 
 		// create the root snapshot dir if it doesn't exist.
@@ -436,6 +438,8 @@ impl Service {
 			state_chunks_done: self.state_chunks.load(Ordering::SeqCst) as u32,
 			block_chunks_done: self.block_chunks.load(Ordering::SeqCst) as u32,
 		};
 
+		self.restoring_snapshot.store(true, Ordering::SeqCst);
+
 		Ok(())
 	}
@@ -490,8 +494,8 @@ impl Service {
 		};
 
 		(match is_state {
-			true => rest.feed_state(hash, chunk),
-			false => rest.feed_blocks(hash, chunk, &*self.engine),
+			true => rest.feed_state(hash, chunk, &self.restoring_snapshot),
+			false => rest.feed_blocks(hash, chunk, &*self.engine, &self.restoring_snapshot),
 		}.map(|_| rest.is_done()), rest.db.clone())
 	};
@@ -573,6 +577,7 @@ impl SnapshotService for Service {
 	}
 
 	fn abort_restore(&self) {
+		self.restoring_snapshot.store(false, Ordering::SeqCst);
 		*self.restoration.lock() = None;
 		*self.status.lock() = RestorationStatus::Inactive;
 	}

View File

@@ -17,10 +17,11 @@
 //! Block chunker and rebuilder tests.
 
 use devtools::RandomTempPath;
+use error::Error;
 
 use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer};
 use blockchain::BlockChain;
-use snapshot::{chunk_blocks, BlockRebuilder, Progress};
+use snapshot::{chunk_blocks, BlockRebuilder, Error as SnapshotError, Progress};
 use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter};
 
 use util::{Mutex, snappy};
@@ -28,6 +29,7 @@ use util::kvdb::{Database, DatabaseConfig};
 
 use std::collections::HashMap;
 use std::sync::Arc;
+use std::sync::atomic::AtomicBool;
 
 fn chunk_and_restore(amount: u64) {
 	let mut canon_chain = ChainGenerator::default();
@@ -75,10 +77,11 @@ fn chunk_and_restore(amount: u64) {
 	let mut rebuilder = BlockRebuilder::new(new_chain, new_db.clone(), &manifest).unwrap();
 	let reader = PackedReader::new(&snapshot_path).unwrap().unwrap();
 	let engine = ::engines::NullEngine::new(Default::default(), Default::default());
+	let flag = AtomicBool::new(true);
 	for chunk_hash in &reader.manifest().block_hashes {
 		let compressed = reader.chunk(*chunk_hash).unwrap();
 		let chunk = snappy::decompress(&compressed).unwrap();
-		rebuilder.feed(&chunk, &engine).unwrap();
+		rebuilder.feed(&chunk, &engine, &flag).unwrap();
 	}
 
 	rebuilder.finalize(HashMap::new()).unwrap();
@@ -93,3 +96,46 @@ fn chunk_and_restore_500() { chunk_and_restore(500) }
 
 #[test]
 fn chunk_and_restore_40k() { chunk_and_restore(40000) }
+
+#[test]
+fn checks_flag() {
+	use ::rlp::{RlpStream, Stream};
+	use util::H256;
+
+	let mut stream = RlpStream::new_list(5);
+
+	stream.append(&100u64)
+		.append(&H256::default())
+		.append(&(!0u64));
+
+	stream.append_empty_data().append_empty_data();
+
+	let genesis = {
+		let mut canon_chain = ChainGenerator::default();
+		let mut finalizer = BlockFinalizer::default();
+		canon_chain.generate(&mut finalizer).unwrap()
+	};
+
+	let chunk = stream.out();
+	let path = RandomTempPath::create_dir();
+
+	let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
+	let db = Arc::new(Database::open(&db_cfg, path.as_str()).unwrap());
+	let chain = BlockChain::new(Default::default(), &genesis, db.clone());
+	let engine = ::engines::NullEngine::new(Default::default(), Default::default());
+
+	let manifest = ::snapshot::ManifestData {
+		state_hashes: Vec::new(),
+		block_hashes: Vec::new(),
+		state_root: ::util::sha3::SHA3_NULL_RLP,
+		block_number: 102,
+		block_hash: H256::default(),
+	};
+
+	let mut rebuilder = BlockRebuilder::new(chain, db.clone(), &manifest).unwrap();
+
+	match rebuilder.feed(&chunk, &engine, &AtomicBool::new(false)) {
+		Err(Error::Snapshot(SnapshotError::RestorationAborted)) => {}
+		_ => panic!("Wrong result on abort flag set")
+	}
+}

View File

@@ -16,10 +16,12 @@
 
 //! State snapshotting tests.
 
-use snapshot::{chunk_state, Progress, StateRebuilder};
+use snapshot::{chunk_state, Error as SnapshotError, Progress, StateRebuilder};
 use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter};
 use super::helpers::{compare_dbs, StateProducer};
 
+use error::Error;
+
 use rand::{XorShiftRng, SeedableRng};
 use util::hash::H256;
 use util::journaldb::{self, Algorithm};
@@ -29,6 +31,7 @@ use util::Mutex;
 use devtools::RandomTempPath;
 
 use std::sync::Arc;
+use std::sync::atomic::AtomicBool;
 
 #[test]
 fn snap_and_restore() {
@@ -65,11 +68,13 @@ fn snap_and_restore() {
 	let mut rebuilder = StateRebuilder::new(new_db.clone(), Algorithm::Archive);
 	let reader = PackedReader::new(&snap_file).unwrap().unwrap();
 
+	let flag = AtomicBool::new(true);
+
 	for chunk_hash in &reader.manifest().state_hashes {
 		let raw = reader.chunk(*chunk_hash).unwrap();
 		let chunk = ::util::snappy::decompress(&raw).unwrap();
-		rebuilder.feed(&chunk).unwrap();
+		rebuilder.feed(&chunk, &flag).unwrap();
 	}
 
 	assert_eq!(rebuilder.state_root(), state_root);
@@ -82,3 +87,52 @@ fn snap_and_restore() {
 
 	compare_dbs(&old_db, new_db.as_hashdb());
 }
+
+#[test]
+fn checks_flag() {
+	let mut producer = StateProducer::new();
+	let mut rng = XorShiftRng::from_seed([5, 6, 7, 8]);
+	let mut old_db = MemoryDB::new();
+	let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
+
+	for _ in 0..10 {
+		producer.tick(&mut rng, &mut old_db);
+	}
+
+	let snap_dir = RandomTempPath::create_dir();
+	let mut snap_file = snap_dir.as_path().to_owned();
+	snap_file.push("SNAP");
+
+	let state_root = producer.state_root();
+	let writer = Mutex::new(PackedWriter::new(&snap_file).unwrap());
+
+	let state_hashes = chunk_state(&old_db, &state_root, &writer, &Progress::default()).unwrap();
+
+	writer.into_inner().finish(::snapshot::ManifestData {
+		state_hashes: state_hashes,
+		block_hashes: Vec::new(),
+		state_root: state_root,
+		block_number: 0,
+		block_hash: H256::default(),
+	}).unwrap();
+
+	let mut db_path = snap_dir.as_path().to_owned();
+	db_path.push("db");
+
+	{
+		let new_db = Arc::new(Database::open(&db_cfg, &db_path.to_string_lossy()).unwrap());
+		let mut rebuilder = StateRebuilder::new(new_db.clone(), Algorithm::Archive);
+		let reader = PackedReader::new(&snap_file).unwrap().unwrap();
+
+		let flag = AtomicBool::new(false);
+
+		for chunk_hash in &reader.manifest().state_hashes {
+			let raw = reader.chunk(*chunk_hash).unwrap();
+			let chunk = ::util::snappy::decompress(&raw).unwrap();
+
+			match rebuilder.feed(&chunk, &flag) {
+				Err(Error::Snapshot(SnapshotError::RestorationAborted)) => {},
+				_ => panic!("unexpected result when feeding with flag off"),
+			}
+		}
+	}
+}

View File

@@ -168,7 +168,7 @@ impl Spec {
 	/// Get the configured Network ID.
 	pub fn network_id(&self) -> usize { self.params.network_id }
 
-	/// Get the configured Network ID.
+	/// Get the configured subprotocol name.
 	pub fn subprotocol_name(&self) -> String { self.params.subprotocol_name.clone() }
 
 	/// Get the configured network fork block.

View File

@@ -7,6 +7,6 @@ JavaScript APIs and UIs for Parity.
 0. Install [Node](https://nodejs.org/) if not already available
 0. Change to the `js` directory inside `parity/`
 0. Install the npm modules via `npm install`
-0. Parity should be run with `parity --signer-no-validation [...options]` (where `options` can be `--chain testnet`)
+0. Parity should be run with `parity --ui-no-validation [...options]` (where `options` can be `--chain testnet`)
 0. Start the development environment via `npm start`
 0. Connect to the [UI](http://localhost:3000)

View File

@@ -1,6 +1,6 @@
 {
   "name": "parity.js",
-  "version": "0.2.33",
+  "version": "0.2.37",
   "main": "release/index.js",
   "jsnext:main": "src/index.js",
   "author": "Parity Team <admin@parity.io>",

View File

@@ -309,7 +309,6 @@ export default class Contract {
       try {
         subscriptions[idx].callback(null, this.parseEventLogs(logs));
       } catch (error) {
-        this.unsubscribe(idx);
         console.error('_sendSubscriptionChanges', error);
       }
     });

View File

@@ -107,7 +107,6 @@ export default class Manager {
         callback(error, data);
       } catch (error) {
         console.error(`Unable to update callback for subscriptionId ${subscriptionId}`, error);
-        this.unsubscribe(subscriptionId);
       }
     }

View File

@@ -28,26 +28,26 @@ export function attachInterface () {
     return Promise
       .all([
         registry.getAddress.call({}, [api.util.sha3('githubhint'), 'A']),
-        api.eth.accounts(),
         api.parity.accounts()
       ]);
   })
-  .then(([address, addresses, accountsInfo]) => {
-    accountsInfo = accountsInfo || {};
-
+  .then(([address, accountsInfo]) => {
     console.log(`githubhint was found at ${address}`);
 
     const contract = api.newContract(abis.githubhint, address);
-    const accounts = addresses.reduce((obj, address) => {
-      const info = accountsInfo[address] || {};
-
+    const accounts = Object
+      .keys(accountsInfo)
+      .filter((address) => accountsInfo[address].uuid)
+      .reduce((obj, address) => {
+        const account = accountsInfo[address];
+
         return Object.assign(obj, {
           [address]: {
             address,
-            name: info.name,
-            uuid: info.uuid
-          }
-        });
-      }, {});
+            name: account.name
+          }
+        });
+      }, {});
 
     const fromAddress = Object.keys(accounts)[0];
 
     return {

View File

@@ -49,3 +49,15 @@
     padding-bottom: 0 !important;
   }
 }
+
+.warning {
+  background: #f80;
+  bottom: 0;
+  color: #fff;
+  left: 0;
+  opacity: 1;
+  padding: 1.5em;
+  position: fixed;
+  right: 50%;
+  z-index: 100;
+}

View File

@@ -53,6 +53,7 @@ export default class Application extends Component {
   };
 
   render () {
+    const { api } = window.parity;
     const {
       actions,
       accounts, contacts,
@@ -60,9 +61,11 @@ export default class Application extends Component {
      lookup,
       events
     } = this.props;
+    let warning = null;
 
     return (
       <div>
+        { warning }
         <div className={ styles.header }>
           <h1>RΞgistry</h1>
           <Accounts { ...accounts } actions={ actions.accounts } />
@@ -70,13 +73,11 @@ export default class Application extends Component {
         { contract && fee ? (
           <div>
             <Lookup { ...lookup } accounts={ accounts.all } contacts={ contacts } actions={ actions.lookup } />
-
             { this.renderActions() }
-
             <Events { ...events } accounts={ accounts.all } contacts={ contacts } actions={ actions.events } />
-            <p className={ styles.address }>
-              The Registry is provided by the contract at <code>{ contract.address }.</code>
-            </p>
+            <div className={ styles.warning }>
+              WARNING: The name registry is experimental. Please ensure that you understand the risks, benefits & consequences of registering a name before doing so. A non-refundable fee of { api.util.fromWei(fee).toFormat(3) }<small>ETH</small> is required for all registrations.
+            </div>
           </div>
         ) : (
           <CircularProgress size={ 60 } />

View File

@@ -19,18 +19,16 @@ import { api } from '../parity';
 export const set = (addresses) => ({ type: 'addresses set', addresses });
 
 export const fetch = () => (dispatch) => {
-  return Promise
-    .all([
-      api.eth.accounts(),
-      api.parity.accounts()
-    ])
-    .then(([ accounts, data ]) => {
-      data = data || {};
-
-      const addresses = Object.keys(data)
-        .filter((address) => data[address] && !data[address].meta.deleted)
+  return api.parity
+    .accounts()
+    .then((accountsInfo) => {
+      const addresses = Object
+        .keys(accountsInfo)
+        .filter((address) => accountsInfo[address] && !accountsInfo[address].meta.deleted)
         .map((address) => ({
-          ...data[address], address,
-          isAccount: accounts.includes(address)
+          ...accountsInfo[address],
+          address,
+          isAccount: !!accountsInfo[address].uuid
         }));
 
       dispatch(set(addresses));
    })

View File

@@ -146,7 +146,7 @@ export default class Import extends Component {
   }
 
   sortFunctions = (a, b) => {
-    return a.name.localeCompare(b.name);
+    return (a.name || '').localeCompare(b.name || '');
   }
 
   countFunctions () {

View File

@@ -49,26 +49,26 @@ export function attachInterface (callback) {
     return Promise
       .all([
         registry.getAddress.call({}, [api.util.sha3('signaturereg'), 'A']),
-        api.eth.accounts(),
         api.parity.accounts()
       ]);
   })
-  .then(([address, addresses, accountsInfo]) => {
-    accountsInfo = accountsInfo || {};
-
+  .then(([address, accountsInfo]) => {
     console.log(`signaturereg was found at ${address}`);
 
     const contract = api.newContract(abis.signaturereg, address);
-    const accounts = addresses.reduce((obj, address) => {
-      const info = accountsInfo[address] || {};
-
+    const accounts = Object
+      .keys(accountsInfo)
+      .filter((address) => accountsInfo[address].uuid)
+      .reduce((obj, address) => {
+        const info = accountsInfo[address] || {};
+
         return Object.assign(obj, {
           [address]: {
             address,
-            name: info.name || 'Unnamed',
-            uuid: info.uuid
-          }
-        });
-      }, {});
+            name: info.name || 'Unnamed'
+          }
+        });
+      }, {});
 
     const fromAddress = Object.keys(accounts)[0];
 
     return {

View File

@@ -35,16 +35,13 @@ export const setSelectedAccount = (address) => ({
 });
 
 export const loadAccounts = () => (dispatch) => {
-  Promise
-    .all([
-      api.eth.accounts(),
-      api.parity.accounts()
-    ])
-    .then(([ accounts, accountsInfo ]) => {
-      accountsInfo = accountsInfo || {};
-
-      const accountsList = accounts
-        .map(address => ({
+  api.parity
+    .accounts()
+    .then((accountsInfo) => {
+      const accountsList = Object
+        .keys(accountsInfo)
+        .filter((address) => accountsInfo[address].uuid)
+        .map((address) => ({
           ...accountsInfo[address],
           address
         }));

View File

@@ -20,3 +20,15 @@
   flex-direction: column;
   align-items: center;
 }
+
+.warning {
+  background: #f80;
+  bottom: 0;
+  color: #fff;
+  left: 0;
+  opacity: 1;
+  padding: 1.5em;
+  position: fixed;
+  right: 50%;
+  z-index: 100;
+}

View File

@@ -17,6 +17,8 @@
 import React, { Component, PropTypes } from 'react';
 import getMuiTheme from 'material-ui/styles/getMuiTheme';
 
+import { api } from '../parity';
+
 import Loading from '../Loading';
 import Status from '../Status';
 import Tokens from '../Tokens';
@@ -59,6 +61,9 @@ export default class Application extends Component {
         <Actions />
 
         <Tokens />
+        <div className={ styles.warning }>
+          WARNING: The token registry is experimental. Please ensure that you understand the steps, risks, benefits & consequences of registering a token before doing so. A non-refundable fee of { api.util.fromWei(contract.fee).toFormat(3) }<small>ETH</small> is required for all registrations.
+        </div>
       </div>
     );
   }

View File

@@ -31,6 +31,12 @@
 .title {
   font-size: 3rem;
   font-weight: 300;
-  margin-top: 0;
+  margin: 0;
   text-transform: uppercase;
 }
+
+.byline {
+  font-size: 1.25em;
+  opacity: 0.75;
+  margin: 0 0 1.75em 0;
+}

View File

@@ -29,17 +29,12 @@ export default class Status extends Component {
   };
 
   render () {
-    const { address, fee } = this.props;
+    const { fee } = this.props;
 
     return (
       <div className={ styles.status }>
         <h1 className={ styles.title }>Token Registry</h1>
+        <h3 className={ styles.byline }>A global registry of all recognised tokens on the network</h3>
 
-        <Chip
-          isAddress
-          value={ address }
-          label='Address' />
-
         <Chip
           isAddress={ false }
           value={ api.util.fromWei(fee).toFixed(3) + 'ETH' }

View File

@@ -48,7 +48,7 @@ mod tests {
 			"0x01" : "0x9a10c2b5bb8f3c602e674006d9b21f09167df57c87a78a5ce96d4159ecb76520"
 		}
 	}"#;
-		let _deserialized: Account= serde_json::from_str(s).unwrap();
+		let _deserialized: Account = serde_json::from_str(s).unwrap();
 		// TODO: validate all fields
 	}
 }

View File

@@ -16,6 +16,7 @@
 
 //! Spec account deserialization.
 
+use std::collections::BTreeMap;
 use uint::Uint;
 use bytes::Bytes;
 use spec::builtin::Builtin;
@@ -30,18 +31,21 @@ pub struct Account {
 	/// Nonce.
 	pub nonce: Option<Uint>,
 	/// Code.
-	pub code: Option<Bytes>
+	pub code: Option<Bytes>,
+	/// Storage
+	pub storage: Option<BTreeMap<Uint, Uint>>,
 }
 
 impl Account {
 	/// Returns true if account does not have nonce and balance.
 	pub fn is_empty(&self) -> bool {
-		self.balance.is_none() && self.nonce.is_none()
+		self.balance.is_none() && self.nonce.is_none() && self.code.is_none() && self.storage.is_none()
 	}
 }
 
 #[cfg(test)]
 mod tests {
+	use std::collections::BTreeMap;
 	use serde_json;
 	use spec::account::Account;
 	use util::U256;
@@ -62,4 +66,21 @@ mod tests {
 		assert_eq!(deserialized.code.unwrap(), Bytes::new(vec![0x12, 0x34]));
 		assert!(deserialized.builtin.is_some()); // Further tested in builtin.rs
 	}
+
+	#[test]
+	fn account_storage_deserialization() {
+		let s = r#"{
+			"balance": "1",
+			"nonce": "0",
+			"code": "1234",
+			"storage": { "0x7fffffffffffffff7fffffffffffffff": "0x1" }
+		}"#;
+		let deserialized: Account = serde_json::from_str(s).unwrap();
+		assert_eq!(deserialized.balance.unwrap(), Uint(U256::from(1)));
+		assert_eq!(deserialized.nonce.unwrap(), Uint(U256::from(0)));
+		assert_eq!(deserialized.code.unwrap(), Bytes::new(vec![0x12, 0x34]));
+		let mut storage = BTreeMap::new();
+		storage.insert(Uint(U256::from("7fffffffffffffff7fffffffffffffff")), Uint(U256::from(1)));
+		assert_eq!(deserialized.storage.unwrap(), storage);
+	}
 }

View File

@@ -15,9 +15,8 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
 use std::{io, env};
-use std::io::{Write, Read, BufReader, BufRead};
+use std::io::{Write, BufReader, BufRead};
 use std::time::Duration;
-use std::path::Path;
 use std::fs::File;
 use util::{clean_0x, U256, Uint, Address, path, CompactionProfile};
 use util::journaldb::Algorithm;
@@ -300,13 +299,11 @@ pub fn password_prompt() -> Result<String, String> {
 }
 
 /// Read a password from password file.
-pub fn password_from_file<P>(path: P) -> Result<String, String> where P: AsRef<Path> {
-	let mut file = try!(File::open(path).map_err(|_| "Unable to open password file."));
-	let mut file_content = String::new();
-	match file.read_to_string(&mut file_content) {
-		Ok(_) => Ok(file_content.trim().into()),
-		Err(_) => Err("Unable to read password file.".into()),
-	}
+pub fn password_from_file(path: String) -> Result<String, String> {
+	let passwords = try!(passwords_from_files(vec![path]));
+	// use only first password from the file
+	passwords.get(0).map(String::to_owned)
+		.ok_or_else(|| "Password file seems to be empty.".to_owned())
 }
 
 /// Reads passwords from files. Treats each line as a separate password.
@@ -316,9 +313,10 @@ pub fn passwords_from_files(files: Vec<String>) -> Result<Vec<String>, String> {
 		let reader = BufReader::new(&file);
 		let lines = reader.lines()
 			.filter_map(|l| l.ok())
+			.map(|pwd| pwd.trim().to_owned())
 			.collect::<Vec<String>>();
 		Ok(lines)
 	}).collect::<Result<Vec<Vec<String>>, String>>();
 	Ok(try!(passwords).into_iter().flat_map(|x| x).collect())
 }
@@ -419,7 +417,20 @@ mod tests {
 		let path = RandomTempPath::new();
 		let mut file = File::create(path.as_path()).unwrap();
 		file.write_all(b"a bc ").unwrap();
-		assert_eq!(password_from_file(path).unwrap().as_bytes(), b"a bc");
+		assert_eq!(password_from_file(path.as_str().into()).unwrap().as_bytes(), b"a bc");
+	}
+
+	#[test]
+	fn test_password_multiline() {
+		let path = RandomTempPath::new();
+		let mut file = File::create(path.as_path()).unwrap();
+		file.write_all(br#"	password with trailing whitespace
+those passwords should be
+ignored
+but the first password is trimmed
+"#).unwrap();
+		assert_eq!(&password_from_file(path.as_str().into()).unwrap(), "password with trailing whitespace");
 	}
 
 	#[test]
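
After this change every password source funnels through `passwords_from_files`: one password per line, each trimmed of surrounding whitespace, and `password_from_file` simply takes the first. A std-only sketch of the intended semantics (not the Parity helpers themselves):

fn passwords_from_lines(content: &str) -> Vec<String> {
    // each line is a separate candidate password, trimmed of surrounding whitespace
    content.lines().map(|pwd| pwd.trim().to_owned()).collect()
}

fn first_password(content: &str) -> Result<String, String> {
    passwords_from_lines(content).into_iter().next()
        .ok_or_else(|| "Password file seems to be empty.".to_owned())
}

fn main() {
    let file = "  password with trailing whitespace \nignored\nalso ignored\n";
    assert_eq!(first_password(file).unwrap(), "password with trailing whitespace");
    assert!(first_password("").is_err());
}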

View File

@@ -15,7 +15,6 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
 use std::ops::{Deref, DerefMut};
-use std::thread;
 use std::time;
 use std::sync::Arc;
 use devtools::{http_client, RandomTempPath};

View File

@@ -73,7 +73,7 @@ pub trait SyncProvider: Send + Sync {
 	/// Get peers information
 	fn peers(&self) -> Vec<PeerInfo>;
 
 	/// Get the enode if available.
 	fn enode(&self) -> Option<String>;
 }
@@ -231,6 +231,7 @@ impl ChainNotify for EthSync {
 	}
 
 	fn stop(&self) {
+		self.handler.snapshot_service.abort_restore();
 		self.network.stop().unwrap_or_else(|e| warn!("Error stopping network: {:?}", e));
 	}
 }

View File

@@ -17,10 +17,10 @@
 //! Database of byte-slices keyed to their Keccak hash.
 
 use hash::*;
 use std::collections::HashMap;
-use elastic_array::ElasticArray256;
+use elastic_array::ElasticArray128;
 
 /// `HashDB` value type.
-pub type DBValue = ElasticArray256<u8>;
+pub type DBValue = ElasticArray128<u8>;
 
 /// Trait modelling datastore keyed by a 32-byte Keccak hash.
 pub trait HashDB: AsHashDB + Send + Sync {
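
`ElasticArray128` (from the `elastic_array` crate) keeps payloads up to 128 bytes inline and heap-allocates only beyond that, so halving the inline buffer from 256 bytes shrinks every `DBValue`. A hedged sketch, assuming the crate's `from_slice` constructor seen elsewhere in this diff and its deref-to-slice behavior:

extern crate elastic_array;
use elastic_array::ElasticArray128;

type DBValue = ElasticArray128<u8>;

fn main() {
    // values up to 128 bytes stay in the inline buffer
    let small = DBValue::from_slice(&[0u8; 32]);
    // longer values transparently spill to the heap
    let large = DBValue::from_slice(&[0u8; 512]);
    assert_eq!(small.len(), 32);
    assert_eq!(large.len(), 512);
}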

View File

@@ -464,6 +464,7 @@ impl Database {
 				try!(db.write_opt(batch, &self.write_opts));
 				for column in self.flushing.write().iter_mut() {
 					column.clear();
+					column.shrink_to_fit();
 				}
 				Ok(())
 			},
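
`clear()` keeps a buffer's capacity, so a flushing column that once grew large would pin that memory indefinitely; the added `shrink_to_fit()` releases it. A std-only illustration with `Vec` standing in for the column buffer:

fn main() {
    // a flushing buffer that ballooned to 1 MiB during a write burst
    let mut buf: Vec<u8> = Vec::with_capacity(1 << 20);
    buf.extend(std::iter::repeat(0u8).take(1 << 20));

    buf.clear(); // len is now 0, but the 1 MiB allocation is still held
    assert!(buf.capacity() >= 1 << 20);

    buf.shrink_to_fit(); // hand the memory back to the allocator
    assert!(buf.capacity() < 1 << 20);
}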