Delete crates from parity-ethereum and fetch them from parity-common instead (#9083)

Use crates from parity-common: hashdb, keccak-hash, kvdb, kvdb-memorydb, kvdb-rocksdb, memorydb, parity-bytes, parity-crypto, path, patricia_trie, plain_hasher, rlp, target, test-support, trie-standardmap, triehash
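The pattern is the same throughout the diff: each workspace crate's Cargo.toml swaps an in-tree path dependency for a git dependency on the parity-common repository, crates renamed in the move (ethcore-bytes becomes parity-bytes, ethcore-crypto becomes parity-crypto) keep their old local names via extern crate aliases, and code that used triehash directly now goes through a new in-tree workspace member, util/triehash-ethereum. An illustrative before/after, taken from the hunks below:

Before:
ethcore-bytes = { path = "util/bytes" }
extern crate ethcore_bytes as bytes;

After:
parity-bytes = { git = "https://github.com/paritytech/parity-common" }
extern crate parity_bytes as bytes;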
Authored by David on 2018-07-10 14:59:19 +02:00, committed by GitHub
parent 6816f8b489
commit c7f608ec74
134 changed files with 439 additions and 10041 deletions

Cargo.lock (generated): 411 changes. File diff suppressed because it is too large.

View File

@ -33,7 +33,7 @@ fdlimit = "0.1"
ctrlc = { git = "https://github.com/paritytech/rust-ctrlc.git" }
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
ethcore = { path = "ethcore", features = ["work-notify", "price-info", "stratum"] }
-ethcore-bytes = { path = "util/bytes" }
+parity-bytes = { git = "https://github.com/paritytech/parity-common" }
ethcore-io = { path = "util/io" }
ethcore-light = { path = "ethcore/light" }
ethcore-logger = { path = "logger" }
@ -47,7 +47,7 @@ ethereum-types = "0.3"
node-filter = { path = "ethcore/node_filter" }
ethkey = { path = "ethkey" }
node-health = { path = "dapps/node-health" }
-rlp = { path = "util/rlp" }
+rlp = { git = "https://github.com/paritytech/parity-common" }
rpc-cli = { path = "rpc_cli" }
parity-hash-fetch = { path = "hash-fetch" }
parity-ipfs-api = { path = "ipfs" }
@ -58,13 +58,13 @@ parity-rpc-client = { path = "rpc_client" }
parity-updater = { path = "updater" }
parity-version = { path = "util/version" }
parity-whisper = { path = "whisper" }
-path = { path = "util/path" }
+path = { git = "https://github.com/paritytech/parity-common" }
dir = { path = "util/dir" }
panic_hook = { path = "util/panic_hook" }
-keccak-hash = { path = "util/hash" }
+keccak-hash = { git = "https://github.com/paritytech/parity-common" }
migration-rocksdb = { path = "util/migration-rocksdb" }
-kvdb = { path = "util/kvdb" }
+kvdb = { git = "https://github.com/paritytech/parity-common" }
-kvdb-rocksdb = { path = "util/kvdb-rocksdb" }
+kvdb-rocksdb = { git = "https://github.com/paritytech/parity-common" }
journaldb = { path = "util/journaldb" }
mem = { path = "util/mem" }
@ -137,6 +137,7 @@ members = [
"transaction-pool",
"whisper",
"whisper/cli",
+"util/triehash-ethereum",
"util/keccak-hasher",
"util/patricia-trie-ethereum",
]

View File

@ -27,14 +27,14 @@ itertools = "0.5"
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
-ethcore-bytes = { path = "../util/bytes" }
+parity-bytes = { git = "https://github.com/paritytech/parity-common" }
ethereum-types = "0.3"
fetch = { path = "../util/fetch" }
node-health = { path = "./node-health" }
parity-dapps-glue = { path = "./js-glue" }
parity-hash-fetch = { path = "../hash-fetch" }
parity-reactor = { path = "../util/reactor" }
-keccak-hash = { path = "../util/hash" }
+keccak-hash = { git = "https://github.com/paritytech/parity-common" }
parity-version = { path = "../util/version" }
registrar = { path = "../registrar" }

View File

@ -32,7 +32,7 @@ extern crate zip;
extern crate jsonrpc_http_server;
-extern crate ethcore_bytes as bytes;
+extern crate parity_bytes as bytes;
extern crate ethereum_types;
extern crate fetch;
extern crate node_health;

View File

@ -7,7 +7,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
log = "0.3"
-keccak-hash = { path = "../util/hash" }
+keccak-hash = { git = "https://github.com/paritytech/parity-common" }
primal = "0.2.3"
parking_lot = "0.6"
crunchy = "0.1.0"

View File

@ -15,12 +15,12 @@ common-types = { path = "types" }
crossbeam = "0.3"
ethash = { path = "../ethash" }
ethcore-bloom-journal = { path = "../util/bloom" }
-ethcore-bytes = { path = "../util/bytes" }
+parity-bytes = { git = "https://github.com/paritytech/parity-common" }
-hashdb = { path = "../util/hashdb" }
+hashdb = { git = "https://github.com/paritytech/parity-common" }
-memorydb = { path = "../util/memorydb" }
+memorydb = { git = "https://github.com/paritytech/parity-common" }
-patricia-trie = { path = "../util/patricia_trie" }
+patricia-trie = { git = "https://github.com/paritytech/parity-common" }
patricia-trie-ethereum = { path = "../util/patricia-trie-ethereum" }
-ethcore-crypto = { path = "crypto" }
+parity-crypto = { git = "https://github.com/paritytech/parity-common" }
error-chain = { version = "0.12", default-features = false }
ethcore-io = { path = "../util/io" }
ethcore-logger = { path = "../logger" }
@ -47,11 +47,11 @@ parity-machine = { path = "../machine" }
parking_lot = "0.6"
rayon = "1.0"
rand = "0.4"
-rlp = { path = "../util/rlp" }
+rlp = { git = "https://github.com/paritytech/parity-common" }
rlp_compress = { path = "../util/rlp_compress" }
rlp_derive = { path = "../util/rlp_derive" }
-kvdb = { path = "../util/kvdb" }
+kvdb = { git = "https://github.com/paritytech/parity-common" }
-kvdb-memorydb = { path = "../util/kvdb-memorydb" }
+kvdb-memorydb = { git = "https://github.com/paritytech/parity-common" }
snappy = { git = "https://github.com/paritytech/rust-snappy" }
stop-guard = { path = "../util/stop-guard" }
macros = { path = "../util/macros" }
@ -61,12 +61,12 @@ trace-time = { path = "../util/trace-time" }
using_queue = { path = "../util/using_queue" }
vm = { path = "vm" }
wasm = { path = "wasm" }
-keccak-hash = { path = "../util/hash" }
+keccak-hash = { git = "https://github.com/paritytech/parity-common" }
-triehash = { path = "../util/triehash" }
+triehash-ethereum = { version = "0.2", path = "../util/triehash-ethereum" }
unexpected = { path = "../util/unexpected" }
journaldb = { path = "../util/journaldb" }
keccak-hasher = { path = "../util/keccak-hasher" }
-kvdb-rocksdb = { path = "../util/kvdb-rocksdb" }
+kvdb-rocksdb = { git = "https://github.com/paritytech/parity-common" }
tempdir = {version="0.3", optional = true}
[target.'cfg(any(target_os = "linux", target_os = "macos", target_os = "windows", target_os = "android"))'.dependencies]
@ -77,7 +77,7 @@ fake-hardware-wallet = { path = "../util/fake-hardware-wallet" }
[dev-dependencies]
tempdir = "0.3"
-trie-standardmap = { path = "../util/trie-standardmap" }
+trie-standardmap = { git = "https://github.com/paritytech/parity-common" }
[features]
# Display EVM debug traces.

View File

@ -20,7 +20,7 @@ extern crate test;
extern crate ethcore_util as util;
extern crate rand;
extern crate bn;
-extern crate ethcore_crypto;
+extern crate parity_crypto;
extern crate ethkey;
extern crate rustc_hex;
extern crate ethcore_bigint;
@ -60,7 +60,7 @@ fn bn_128_mul(b: &mut Bencher) {
#[bench]
fn sha256(b: &mut Bencher) {
-use ethcore_crypto::digest::sha256;
+use parity_crypto::digest::sha256;
let mut input: [u8; 256] = [0; 256];
let mut out = [0; 32];

View File

@ -1,12 +0,0 @@
[package]
name = "ethcore-crypto"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
ethereum-types = "0.3"
quick-error = "1.2.2"
ring = "0.12"
rust-crypto = "0.2.36"
tiny-keccak = "1.4"

View File

@ -1,5 +0,0 @@
# Ethcrypto
General cryptographic utilities for Ethereum.
By default, this library is compiled with the `secp256k1` feature, which provides ECDH and ECIES capability on that curve. It can be compiled without to avoid a dependency on the `libsecp256k1` library.

View File

@ -1,53 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use error::SymmError;
use rcrypto::blockmodes::{CtrMode, CbcDecryptor, PkcsPadding};
use rcrypto::aessafe::{AesSafe128Encryptor, AesSafe128Decryptor};
use rcrypto::symmetriccipher::{Encryptor, Decryptor};
use rcrypto::buffer::{RefReadBuffer, RefWriteBuffer, WriteBuffer};
/// Encrypt a message (CTR mode).
///
/// Key (`k`) length and initialisation vector (`iv`) length have to be 16 bytes each.
/// An error is returned if the input lengths are invalid.
pub fn encrypt_128_ctr(k: &[u8], iv: &[u8], plain: &[u8], dest: &mut [u8]) -> Result<(), SymmError> {
let mut encryptor = CtrMode::new(AesSafe128Encryptor::new(k), iv.to_vec());
encryptor.encrypt(&mut RefReadBuffer::new(plain), &mut RefWriteBuffer::new(dest), true)?;
Ok(())
}
/// Decrypt a message (CTR mode).
///
/// Key (`k`) length and initialisation vector (`iv`) length have to be 16 bytes each.
/// An error is returned if the input lengths are invalid.
pub fn decrypt_128_ctr(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) -> Result<(), SymmError> {
let mut encryptor = CtrMode::new(AesSafe128Encryptor::new(k), iv.to_vec());
encryptor.decrypt(&mut RefReadBuffer::new(encrypted), &mut RefWriteBuffer::new(dest), true)?;
Ok(())
}
/// Decrypt a message (CBC mode).
///
/// Key (`k`) length and initialisation vector (`iv`) length have to be 16 bytes each.
/// An error is returned if the input lengths are invalid.
pub fn decrypt_128_cbc(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) -> Result<usize, SymmError> {
let mut encryptor = CbcDecryptor::new(AesSafe128Decryptor::new(k), PkcsPadding, iv.to_vec());
let len = dest.len();
let mut buffer = RefWriteBuffer::new(dest);
encryptor.decrypt(&mut RefReadBuffer::new(encrypted), &mut buffer, true)?;
Ok(len - buffer.remaining())
}

View File

@ -1,198 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use error::SymmError;
use ring;
enum Mode { Aes128Gcm, Aes256Gcm }
/// AES GCM encryptor.
pub struct Encryptor<'a> {
mode: Mode,
key: ring::aead::SealingKey,
ad: &'a [u8],
offset: usize,
}
impl<'a> Encryptor<'a> {
pub fn aes_128_gcm(key: &[u8; 16]) -> Result<Encryptor<'a>, SymmError> {
let sk = ring::aead::SealingKey::new(&ring::aead::AES_128_GCM, key)?;
Ok(Encryptor {
mode: Mode::Aes128Gcm,
key: sk,
ad: &[],
offset: 0,
})
}
pub fn aes_256_gcm(key: &[u8; 32]) -> Result<Encryptor<'a>, SymmError> {
let sk = ring::aead::SealingKey::new(&ring::aead::AES_256_GCM, key)?;
Ok(Encryptor {
mode: Mode::Aes256Gcm,
key: sk,
ad: &[],
offset: 0,
})
}
/// Optional associated data which is not encrypted but authenticated.
pub fn associate(&mut self, data: &'a [u8]) -> &mut Self {
self.ad = data;
self
}
/// Optional offset value. Only the slice `[offset..]` will be encrypted.
pub fn offset(&mut self, off: usize) -> &mut Self {
self.offset = off;
self
}
/// Please note that the pair (key, nonce) must never be reused. Using random nonces
/// limits the number of messages encrypted with the same key to 2^32 (cf. [[1]])
///
/// [1]: https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38d.pdf
pub fn encrypt(&self, nonce: &[u8; 12], mut data: Vec<u8>) -> Result<Vec<u8>, SymmError> {
if self.offset > data.len() {
return Err(SymmError::offset_error(self.offset))
}
let tag_len = match self.mode {
Mode::Aes128Gcm => ring::aead::AES_128_GCM.tag_len(),
Mode::Aes256Gcm => ring::aead::AES_256_GCM.tag_len(),
};
data.extend(::std::iter::repeat(0).take(tag_len));
let len = ring::aead::seal_in_place(&self.key, nonce, self.ad, &mut data[self.offset ..], tag_len)?;
data.truncate(self.offset + len);
Ok(data)
}
}
/// AES GCM decryptor.
pub struct Decryptor<'a> {
key: ring::aead::OpeningKey,
ad: &'a [u8],
offset: usize,
}
impl<'a> Decryptor<'a> {
pub fn aes_128_gcm(key: &[u8; 16]) -> Result<Decryptor<'a>, SymmError> {
let ok = ring::aead::OpeningKey::new(&ring::aead::AES_128_GCM, key)?;
Ok(Decryptor {
key: ok,
ad: &[],
offset: 0,
})
}
pub fn aes_256_gcm(key: &[u8; 32]) -> Result<Decryptor<'a>, SymmError> {
let ok = ring::aead::OpeningKey::new(&ring::aead::AES_256_GCM, key)?;
Ok(Decryptor {
key: ok,
ad: &[],
offset: 0,
})
}
/// Optional associated data which is not encrypted but authenticated.
pub fn associate(&mut self, data: &'a [u8]) -> &mut Self {
self.ad = data;
self
}
/// Optional offset value. Only the slice `[offset..]` will be decrypted.
pub fn offset(&mut self, off: usize) -> &mut Self {
self.offset = off;
self
}
pub fn decrypt(&self, nonce: &[u8; 12], mut data: Vec<u8>) -> Result<Vec<u8>, SymmError> {
if self.offset > data.len() {
return Err(SymmError::offset_error(self.offset))
}
let len = ring::aead::open_in_place(&self.key, nonce, self.ad, 0, &mut data[self.offset ..])?.len();
data.truncate(self.offset + len);
Ok(data)
}
}
#[cfg(test)]
mod tests {
use super::{Encryptor, Decryptor};
#[test]
fn aes_gcm_128() {
let secret = b"1234567890123456";
let nonce = b"123456789012";
let message = b"So many books, so little time";
let ciphertext = Encryptor::aes_128_gcm(secret)
.unwrap()
.encrypt(nonce, message.to_vec())
.unwrap();
assert!(ciphertext != message);
let plaintext = Decryptor::aes_128_gcm(secret)
.unwrap()
.decrypt(nonce, ciphertext)
.unwrap();
assert_eq!(plaintext, message)
}
#[test]
fn aes_gcm_256() {
let secret = b"12345678901234567890123456789012";
let nonce = b"123456789012";
let message = b"So many books, so little time";
let ciphertext = Encryptor::aes_256_gcm(secret)
.unwrap()
.encrypt(nonce, message.to_vec())
.unwrap();
assert!(ciphertext != message);
let plaintext = Decryptor::aes_256_gcm(secret)
.unwrap()
.decrypt(nonce, ciphertext)
.unwrap();
assert_eq!(plaintext, message)
}
#[test]
fn aes_gcm_256_offset() {
let secret = b"12345678901234567890123456789012";
let nonce = b"123456789012";
let message = b"prefix data; So many books, so little time";
let ciphertext = Encryptor::aes_256_gcm(secret)
.unwrap()
.offset(13) // length of "prefix data; "
.encrypt(nonce, message.to_vec())
.unwrap();
assert!(ciphertext != &message[..]);
let plaintext = Decryptor::aes_256_gcm(secret)
.unwrap()
.offset(13) // length of "prefix data; "
.decrypt(nonce, ciphertext)
.unwrap();
assert_eq!(plaintext, &message[..])
}
}

View File

@ -1,109 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use rcrypto::ripemd160;
use ring::digest::{self, Context, SHA256, SHA512};
use std::marker::PhantomData;
use std::ops::Deref;
/// The message digest.
pub struct Digest<T>(InnerDigest, PhantomData<T>);
enum InnerDigest {
Ring(digest::Digest),
Ripemd160([u8; 20]),
}
impl<T> Deref for Digest<T> {
type Target = [u8];
fn deref(&self) -> &Self::Target {
match self.0 {
InnerDigest::Ring(ref d) => d.as_ref(),
InnerDigest::Ripemd160(ref d) => &d[..]
}
}
}
/// Single-step sha256 digest computation.
pub fn sha256(data: &[u8]) -> Digest<Sha256> {
Digest(InnerDigest::Ring(digest::digest(&SHA256, data)), PhantomData)
}
/// Single-step sha512 digest computation.
pub fn sha512(data: &[u8]) -> Digest<Sha512> {
Digest(InnerDigest::Ring(digest::digest(&SHA512, data)), PhantomData)
}
/// Single-step ripemd160 digest computation.
pub fn ripemd160(data: &[u8]) -> Digest<Ripemd160> {
let mut hasher = Hasher::ripemd160();
hasher.update(data);
hasher.finish()
}
pub enum Sha256 {}
pub enum Sha512 {}
pub enum Ripemd160 {}
/// Stateful digest computation.
pub struct Hasher<T>(Inner, PhantomData<T>);
enum Inner {
Ring(Context),
Ripemd160(ripemd160::Ripemd160)
}
impl Hasher<Sha256> {
pub fn sha256() -> Hasher<Sha256> {
Hasher(Inner::Ring(Context::new(&SHA256)), PhantomData)
}
}
impl Hasher<Sha512> {
pub fn sha512() -> Hasher<Sha512> {
Hasher(Inner::Ring(Context::new(&SHA512)), PhantomData)
}
}
impl Hasher<Ripemd160> {
pub fn ripemd160() -> Hasher<Ripemd160> {
Hasher(Inner::Ripemd160(ripemd160::Ripemd160::new()), PhantomData)
}
}
impl<T> Hasher<T> {
pub fn update(&mut self, data: &[u8]) {
match self.0 {
Inner::Ring(ref mut ctx) => ctx.update(data),
Inner::Ripemd160(ref mut ctx) => {
use rcrypto::digest::Digest;
ctx.input(data)
}
}
}
pub fn finish(self) -> Digest<T> {
match self.0 {
Inner::Ring(ctx) => Digest(InnerDigest::Ring(ctx.finish()), PhantomData),
Inner::Ripemd160(mut ctx) => {
use rcrypto::digest::Digest;
let mut d = [0; 20];
ctx.result(&mut d);
Digest(InnerDigest::Ripemd160(d), PhantomData)
}
}
}
}

View File

@ -1,82 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use rcrypto;
use ring;
quick_error! {
#[derive(Debug)]
pub enum Error {
Scrypt(e: ScryptError) {
cause(e)
from()
}
Symm(e: SymmError) {
cause(e)
from()
}
}
}
quick_error! {
#[derive(Debug)]
pub enum ScryptError {
// log(N) < r / 16
InvalidN {
display("Invalid N argument of the scrypt encryption")
}
// p <= (2^31-1 * 32)/(128 * r)
InvalidP {
display("Invalid p argument of the scrypt encryption")
}
}
}
quick_error! {
#[derive(Debug)]
pub enum SymmError wraps PrivSymmErr {
RustCrypto(e: rcrypto::symmetriccipher::SymmetricCipherError) {
display("symmetric crypto error")
from()
}
Ring(e: ring::error::Unspecified) {
display("symmetric crypto error")
cause(e)
from()
}
Offset(x: usize) {
display("offset {} greater than slice length", x)
}
}
}
impl SymmError {
pub(crate) fn offset_error(x: usize) -> SymmError {
SymmError(PrivSymmErr::Offset(x))
}
}
impl From<ring::error::Unspecified> for SymmError {
fn from(e: ring::error::Unspecified) -> SymmError {
SymmError(PrivSymmErr::Ring(e))
}
}
impl From<rcrypto::symmetriccipher::SymmetricCipherError> for SymmError {
fn from(e: rcrypto::symmetriccipher::SymmetricCipherError) -> SymmError {
SymmError(PrivSymmErr::RustCrypto(e))
}
}

View File

@ -1,88 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use digest;
use ring::digest::{SHA256, SHA512};
use ring::hmac::{self, SigningContext};
use std::marker::PhantomData;
use std::ops::Deref;
/// HMAC signature.
pub struct Signature<T>(hmac::Signature, PhantomData<T>);
impl<T> Deref for Signature<T> {
type Target = [u8];
fn deref(&self) -> &Self::Target {
self.0.as_ref()
}
}
/// HMAC signing key.
pub struct SigKey<T>(hmac::SigningKey, PhantomData<T>);
impl SigKey<digest::Sha256> {
pub fn sha256(key: &[u8]) -> SigKey<digest::Sha256> {
SigKey(hmac::SigningKey::new(&SHA256, key), PhantomData)
}
}
impl SigKey<digest::Sha512> {
pub fn sha512(key: &[u8]) -> SigKey<digest::Sha512> {
SigKey(hmac::SigningKey::new(&SHA512, key), PhantomData)
}
}
/// Compute HMAC signature of `data`.
pub fn sign<T>(k: &SigKey<T>, data: &[u8]) -> Signature<T> {
Signature(hmac::sign(&k.0, data), PhantomData)
}
/// Stateful HMAC computation.
pub struct Signer<T>(SigningContext, PhantomData<T>);
impl<T> Signer<T> {
pub fn with(key: &SigKey<T>) -> Signer<T> {
Signer(hmac::SigningContext::with_key(&key.0), PhantomData)
}
pub fn update(&mut self, data: &[u8]) {
self.0.update(data)
}
pub fn sign(self) -> Signature<T> {
Signature(self.0.sign(), PhantomData)
}
}
/// HMAC signature verification key.
pub struct VerifyKey<T>(hmac::VerificationKey, PhantomData<T>);
impl VerifyKey<digest::Sha256> {
pub fn sha256(key: &[u8]) -> VerifyKey<digest::Sha256> {
VerifyKey(hmac::VerificationKey::new(&SHA256, key), PhantomData)
}
}
impl VerifyKey<digest::Sha512> {
pub fn sha512(key: &[u8]) -> VerifyKey<digest::Sha512> {
VerifyKey(hmac::VerificationKey::new(&SHA512, key), PhantomData)
}
}
/// Verify HMAC signature of `data`.
pub fn verify<T>(k: &VerifyKey<T>, data: &[u8], sig: &[u8]) -> bool {
hmac::verify(&k.0, data, sig).is_ok()
}

View File

@ -1,76 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Crypto utils used ethstore and network.
extern crate crypto as rcrypto;
extern crate ethereum_types;
#[macro_use]
extern crate quick_error;
extern crate ring;
extern crate tiny_keccak;
pub mod aes;
pub mod aes_gcm;
pub mod error;
pub mod scrypt;
pub mod digest;
pub mod hmac;
pub mod pbkdf2;
pub use error::Error;
use tiny_keccak::Keccak;
pub const KEY_LENGTH: usize = 32;
pub const KEY_ITERATIONS: usize = 10240;
pub const KEY_LENGTH_AES: usize = KEY_LENGTH / 2;
/// Default authenticated data to use (in RPC).
pub const DEFAULT_MAC: [u8; 2] = [0, 0];
pub trait Keccak256<T> {
fn keccak256(&self) -> T where T: Sized;
}
impl<T> Keccak256<[u8; 32]> for T where T: AsRef<[u8]> {
fn keccak256(&self) -> [u8; 32] {
let mut keccak = Keccak::new_keccak256();
let mut result = [0u8; 32];
keccak.update(self.as_ref());
keccak.finalize(&mut result);
result
}
}
pub fn derive_key_iterations(password: &[u8], salt: &[u8; 32], c: u32) -> (Vec<u8>, Vec<u8>) {
let mut derived_key = [0u8; KEY_LENGTH];
pbkdf2::sha256(c, pbkdf2::Salt(salt), pbkdf2::Secret(password), &mut derived_key);
let derived_right_bits = &derived_key[0..KEY_LENGTH_AES];
let derived_left_bits = &derived_key[KEY_LENGTH_AES..KEY_LENGTH];
(derived_right_bits.to_vec(), derived_left_bits.to_vec())
}
pub fn derive_mac(derived_left_bits: &[u8], cipher_text: &[u8]) -> Vec<u8> {
let mut mac = vec![0u8; KEY_LENGTH_AES + cipher_text.len()];
mac[0..KEY_LENGTH_AES].copy_from_slice(derived_left_bits);
mac[KEY_LENGTH_AES..cipher_text.len() + KEY_LENGTH_AES].copy_from_slice(cipher_text);
mac
}
pub fn is_equal(a: &[u8], b: &[u8]) -> bool {
ring::constant_time::verify_slices_are_equal(a, b).is_ok()
}

View File

@ -1,28 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use ring;
pub struct Salt<'a>(pub &'a [u8]);
pub struct Secret<'a>(pub &'a [u8]);
pub fn sha256(iter: u32, salt: Salt, sec: Secret, out: &mut [u8; 32]) {
ring::pbkdf2::derive(&ring::digest::SHA256, iter, salt.0, sec.0, &mut out[..])
}
pub fn sha512(iter: u32, salt: Salt, sec: Secret, out: &mut [u8; 64]) {
ring::pbkdf2::derive(&ring::digest::SHA512, iter, salt.0, sec.0, &mut out[..])
}

View File

@ -1,38 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use error::ScryptError;
use rcrypto::scrypt::{scrypt, ScryptParams};
use super::{KEY_LENGTH_AES, KEY_LENGTH};
pub fn derive_key(pass: &[u8], salt: &[u8; 32], n: u32, p: u32, r: u32) -> Result<(Vec<u8>, Vec<u8>), ScryptError> {
// sanity checks
let log_n = (32 - n.leading_zeros() - 1) as u8;
if log_n as u32 >= r * 16 {
return Err(ScryptError::InvalidN);
}
if p as u64 > ((u32::max_value() as u64 - 1) * 32)/(128 * (r as u64)) {
return Err(ScryptError::InvalidP);
}
let mut derived_key = vec![0u8; KEY_LENGTH];
let scrypt_params = ScryptParams::new(log_n, r, p);
scrypt(pass, salt, &scrypt_params, &mut derived_key);
let derived_right_bits = &derived_key[0..KEY_LENGTH_AES];
let derived_left_bits = &derived_key[KEY_LENGTH_AES..KEY_LENGTH];
Ok((derived_right_bits.to_vec(), derived_left_bits.to_vec()))
}

View File

@ -10,7 +10,7 @@ heapsize = "0.4"
lazy_static = "1.0"
log = "0.3"
vm = { path = "../vm" }
-keccak-hash = { path = "../../util/hash" }
+keccak-hash = { git = "https://github.com/paritytech/parity-common" }
parking_lot = "0.6"
memory-cache = { path = "../../util/memory_cache" }

View File

@ -9,19 +9,19 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
log = "0.3"
ethcore = { path = ".."}
-ethcore-bytes = { path = "../../util/bytes" }
+parity-bytes = { git = "https://github.com/paritytech/parity-common" }
ethcore-transaction = { path = "../transaction" }
ethereum-types = "0.3"
-memorydb = { path = "../../util/memorydb" }
+memorydb = { git = "https://github.com/paritytech/parity-common" }
-patricia-trie = { path = "../../util/patricia_trie" }
+patricia-trie = { git = "https://github.com/paritytech/parity-common" }
patricia-trie-ethereum = { path = "../../util/patricia-trie-ethereum" }
ethcore-network = { path = "../../util/network" }
ethcore-io = { path = "../../util/io" }
-hashdb = { path = "../../util/hashdb" }
+hashdb = { git = "https://github.com/paritytech/parity-common" }
heapsize = "0.4"
vm = { path = "../vm" }
-plain_hasher = { path = "../../util/plain_hasher" }
+plain_hasher = { git = "https://github.com/paritytech/parity-common" }
-rlp = { path = "../../util/rlp" }
+rlp = { git = "https://github.com/paritytech/parity-common" }
rlp_derive = { path = "../../util/rlp_derive" }
smallvec = "0.4"
futures = "0.1"
@ -32,16 +32,16 @@ serde = "1.0"
serde_derive = "1.0"
parking_lot = "0.6"
stats = { path = "../../util/stats" }
-keccak-hash = { path = "../../util/hash" }
+keccak-hash = { git = "https://github.com/paritytech/parity-common" }
keccak-hasher = { path = "../../util/keccak-hasher" }
-triehash = { path = "../../util/triehash" }
+triehash-ethereum = { version = "0.2", path = "../../util/triehash-ethereum" }
-kvdb = { path = "../../util/kvdb" }
+kvdb = { git = "https://github.com/paritytech/parity-common" }
memory-cache = { path = "../../util/memory_cache" }
error-chain = { version = "0.12", default-features = false }
[dev-dependencies]
ethcore = { path = "..", features = ["test-helpers"] }
-kvdb-memorydb = { path = "../../util/kvdb-memorydb" }
+kvdb-memorydb = { git = "https://github.com/paritytech/parity-common" }
tempdir = "0.3"
[features]

View File

@ -56,7 +56,7 @@ extern crate log;
extern crate bincode;
extern crate ethcore_io as io;
extern crate ethcore_network as network;
-extern crate ethcore_bytes as bytes;
+extern crate parity_bytes as bytes;
extern crate ethcore_transaction as transaction;
extern crate ethereum_types;
extern crate ethcore;
@ -79,7 +79,7 @@ extern crate smallvec;
extern crate stats;
extern crate vm;
extern crate keccak_hash as hash;
-extern crate triehash;
+extern crate triehash_ethereum as triehash;
extern crate kvdb;
extern crate memory_cache;
#[macro_use]

View File

@ -20,6 +20,6 @@ lru-cache = "0.1"
[dev-dependencies]
ethcore = { path = "..", features = ["test-helpers"] }
-kvdb-memorydb = { path = "../../util/kvdb-memorydb" }
+kvdb-memorydb = { git = "https://github.com/paritytech/parity-common" }
ethcore-io = { path = "../../util/io" }
tempdir = "0.3"

View File

@ -11,8 +11,8 @@ ethabi = "5.1"
ethabi-contract = "5.0"
ethabi-derive = "5.0"
ethcore = { path = ".." }
-ethcore-bytes = { path = "../../util/bytes" }
+parity-bytes = { git = "https://github.com/paritytech/parity-common" }
-ethcore-crypto = { path = "../crypto" }
+parity-crypto = { git = "https://github.com/paritytech/parity-common" }
ethcore-io = { path = "../../util/io" }
ethcore-logger = { path = "../../logger" }
ethcore-miner = { path = "../../miner" }
@ -22,13 +22,13 @@ ethjson = { path = "../../json" }
ethkey = { path = "../../ethkey" }
fetch = { path = "../../util/fetch" }
futures = "0.1"
-keccak-hash = { path = "../../util/hash" }
+keccak-hash = { git = "https://github.com/paritytech/parity-common" }
log = "0.3"
parking_lot = "0.6"
-patricia-trie = { path = "../../util/patricia_trie" }
+patricia-trie = { git = "https://github.com/paritytech/parity-common" }
patricia-trie-ethereum = { path = "../../util/patricia-trie-ethereum" }
rand = "0.3"
-rlp = { path = "../../util/rlp" }
+rlp = { git = "https://github.com/paritytech/parity-common" }
rlp_derive = { path = "../../util/rlp_derive" }
rustc-hex = "1.0"
serde = "1.0"

View File

@ -26,8 +26,8 @@ mod messages;
mod error;
extern crate ethcore;
-extern crate ethcore_bytes as bytes;
+extern crate parity_bytes as bytes;
-extern crate ethcore_crypto as crypto;
+extern crate parity_crypto as crypto;
extern crate ethcore_io as io;
extern crate ethcore_miner;
extern crate ethcore_transaction as transaction;

View File

@ -10,7 +10,7 @@ ethcore = { path = ".." }
ethcore-io = { path = "../../util/io" }
ethcore-private-tx = { path = "../private-tx" }
ethcore-sync = { path = "../sync" }
-kvdb = { path = "../../util/kvdb" }
+kvdb = { git = "https://github.com/paritytech/parity-common" }
log = "0.3"
stop-guard = { path = "../../util/stop-guard" }
trace-time = { path = "../../util/trace-time" }
@ -18,4 +18,4 @@ trace-time = { path = "../../util/trace-time" }
[dev-dependencies]
ethcore = { path = "..", features = ["test-helpers"] }
tempdir = "0.3"
-kvdb-rocksdb = { path = "../../util/kvdb-rocksdb" }
+kvdb-rocksdb = { git = "https://github.com/paritytech/parity-common" }

View File

@ -18,7 +18,7 @@ use std::cmp::{max, min};
use std::io::{self, Read};
use byteorder::{ByteOrder, BigEndian};
-use ethcore_crypto::digest;
+use parity_crypto::digest;
use num::{BigUint, Zero, One};
use hash::keccak;

View File

@ -65,9 +65,9 @@ extern crate crossbeam;
extern crate common_types as types;
extern crate ethash;
extern crate ethcore_bloom_journal as bloom_journal;
-extern crate ethcore_crypto;
+extern crate parity_crypto;
extern crate ethcore_io as io;
-extern crate ethcore_bytes as bytes;
+extern crate parity_bytes as bytes;
extern crate ethcore_logger;
extern crate ethcore_miner;
#[cfg(feature = "stratum")]
@ -97,7 +97,7 @@ extern crate heapsize;
extern crate memorydb;
extern crate patricia_trie as trie;
extern crate patricia_trie_ethereum as ethtrie;
-extern crate triehash;
+extern crate triehash_ethereum as triehash;
extern crate ansi_term;
extern crate unexpected;
extern crate snappy;

View File

@ -7,7 +7,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
ethereum-types = "0.3"
-keccak-hash = { path = "../../util/hash" }
+keccak-hash = { git = "https://github.com/paritytech/parity-common" }
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-tcp-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }

View File

@ -8,7 +8,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
[lib]
[dependencies]
-ethcore-bytes = { path = "../../util/bytes" }
+parity-bytes = { git = "https://github.com/paritytech/parity-common" }
ethcore-network = { path = "../../util/network" }
ethcore-network-devp2p = { path = "../../util/network-devp2p" }
ethcore-io = { path = "../../util/io" }
@ -16,14 +16,14 @@ ethcore-light = { path = "../light" }
ethcore-transaction = { path = "../transaction" }
ethcore = { path = ".." }
ethereum-types = "0.3"
-hashdb = { version = "0.2", path = "../../util/hashdb" }
+hashdb = { git = "https://github.com/paritytech/parity-common" }
-plain_hasher = { version = "0.2", path = "../../util/plain_hasher" }
+plain_hasher = { git = "https://github.com/paritytech/parity-common" }
-rlp = { path = "../../util/rlp" }
+rlp = { git = "https://github.com/paritytech/parity-common" }
rustc-hex = "1.0"
-keccak-hash = { path = "../../util/hash" }
+keccak-hash = { git = "https://github.com/paritytech/parity-common" }
keccak-hasher = { path = "../../util/keccak-hasher" }
-triehash = { path = "../../util/triehash" }
+triehash-ethereum = {version = "0.2", path = "../../util/triehash-ethereum" }
-kvdb = { path = "../../util/kvdb" }
+kvdb = { git = "https://github.com/paritytech/parity-common" }
macros = { path = "../../util/macros" }
log = "0.3"
env_logger = "0.4"
@ -38,6 +38,6 @@ ipnetwork = "0.12.6"
[dev-dependencies]
ethcore-io = { path = "../../util/io", features = ["mio"] }
ethkey = { path = "../../ethkey" }
-kvdb-memorydb = { path = "../../util/kvdb-memorydb" }
+kvdb-memorydb = { git = "https://github.com/paritytech/parity-common" }
ethcore-private-tx = { path = "../private-tx" }
ethcore = { path = "..", features = ["test-helpers"] }

View File

@ -20,7 +20,7 @@ use smallvec::SmallVec;
use hash::{keccak, KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP};
use heapsize::HeapSizeOf;
use ethereum_types::H256;
-use triehash::ordered_trie_root;
+use triehash_ethereum::ordered_trie_root;
use bytes::Bytes;
use rlp::{Rlp, RlpStream, DecoderError};
use network;

View File

@ -23,7 +23,7 @@
extern crate ethcore_network as network;
extern crate ethcore_network_devp2p as devp2p;
-extern crate ethcore_bytes as bytes;
+extern crate parity_bytes as bytes;
extern crate ethcore_io as io;
extern crate ethcore_transaction as transaction;
#[macro_use]
@ -40,7 +40,7 @@ extern crate rlp;
extern crate ipnetwork;
extern crate keccak_hash as hash;
extern crate keccak_hasher;
-extern crate triehash;
+extern crate triehash_ethereum;
extern crate kvdb;
extern crate ethcore_light as light;

View File

@ -9,8 +9,8 @@ ethjson = { path = "../../json" }
ethkey = { path = "../../ethkey" }
evm = { path = "../evm" }
heapsize = "0.4"
-keccak-hash = { path = "../../util/hash" }
+keccak-hash = { git = "https://github.com/paritytech/parity-common" }
-rlp = { path = "../../util/rlp" }
+rlp = { git = "https://github.com/paritytech/parity-common" }
unexpected = { path = "../../util/unexpected" }
ethereum-types = "0.3"

View File

@ -5,12 +5,12 @@ version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
-rlp = { path = "../../util/rlp" }
+rlp = { git = "https://github.com/paritytech/parity-common" }
rlp_derive = { path = "../../util/rlp_derive" }
-ethcore-bytes = { path = "../../util/bytes" }
+parity-bytes = { git = "https://github.com/paritytech/parity-common" }
ethereum-types = "0.3"
ethjson = { path = "../../json" }
-keccak-hash = { path = "../../util/hash" }
+keccak-hash = { git = "https://github.com/paritytech/parity-common" }
heapsize = "0.4"
[dev-dependencies]

View File

@ -17,7 +17,7 @@
//! Types used in the public API
extern crate ethereum_types;
-extern crate ethcore_bytes as bytes;
+extern crate parity_bytes as bytes;
extern crate ethjson;
extern crate rlp;
#[macro_use]

View File

@ -5,12 +5,12 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
byteorder = "1.0"
-ethcore-bytes = { path = "../../util/bytes" }
+parity-bytes = { git = "https://github.com/paritytech/parity-common" }
ethereum-types = "0.3"
-patricia-trie = { path = "../../util/patricia_trie" }
+patricia-trie = { git = "https://github.com/paritytech/parity-common" }
patricia-trie-ethereum = { path = "../../util/patricia-trie-ethereum" }
log = "0.3"
common-types = { path = "../types" }
ethjson = { path = "../../json" }
-rlp = { path = "../../util/rlp" }
+rlp = { git = "https://github.com/paritytech/parity-common" }
-keccak-hash = { path = "../../util/hash" }
+keccak-hash = { git = "https://github.com/paritytech/parity-common" }

View File

@ -17,7 +17,7 @@
//! Virtual machines support library
extern crate ethereum_types;
-extern crate ethcore_bytes as bytes;
+extern crate parity_bytes as bytes;
extern crate common_types as types;
extern crate ethjson;
extern crate rlp;

View File

@ -6,7 +6,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
byteorder = "1.0"
edit-distance = "2.0"
-ethcore-crypto = { path = "../ethcore/crypto" }
+parity-crypto = { git = "https://github.com/paritytech/parity-common" }
eth-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1" }
ethereum-types = "0.3"
lazy_static = "1.0"

View File

@ -16,7 +16,7 @@
use secp256k1;
use std::io;
-use ethcore_crypto::error::SymmError;
+use parity_crypto::error::SymmError;
quick_error! {
#[derive(Debug)]
@ -67,7 +67,7 @@ pub mod ecdh {
/// ECIES function
pub mod ecies {
-use ethcore_crypto::{aes, digest, hmac, is_equal};
+use parity_crypto::{aes, digest, hmac, is_equal};
use ethereum_types::H128;
use super::{ecdh, Error};
use {Random, Generator, Public, Secret};

View File

@ -207,7 +207,7 @@ impl ExtendedKeyPair {
// Work is based on BIP0032
// https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki
mod derivation {
-use ethcore_crypto::hmac;
+use parity_crypto::hmac;
use ethereum_types::{U256, U512, H512, H256};
use secp256k1::key::{SecretKey, PublicKey};
use SECP256K1;

View File

@ -18,7 +18,7 @@
extern crate byteorder;
extern crate edit_distance;
-extern crate ethcore_crypto;
+extern crate parity_crypto;
extern crate ethereum_types;
extern crate mem;
extern crate parity_wordlist;

View File

@ -16,7 +16,7 @@ tiny-keccak = "1.4"
time = "0.1.34"
itertools = "0.5"
parking_lot = "0.6"
-ethcore-crypto = { path = "../ethcore/crypto" }
+parity-crypto = { git = "https://github.com/paritytech/parity-common" }
ethereum-types = "0.3"
dir = { path = "../util/dir" }
smallvec = "0.4"

View File

@ -31,7 +31,7 @@ extern crate time;
extern crate tiny_keccak;
extern crate tempdir;
-extern crate ethcore_crypto as crypto;
+extern crate parity_crypto as crypto;
extern crate ethereum_types;
extern crate ethkey as _ethkey;
extern crate parity_wordlist;

View File

@ -12,7 +12,7 @@ path = "./src/main.rs"
docopt = "0.8"
ethcore = { path = "../ethcore", features = ["test-helpers", "json-tests"] }
ethjson = { path = "../json" }
-ethcore-bytes = { path = "../util/bytes" }
+parity-bytes = { git = "https://github.com/paritytech/parity-common" }
ethcore-transaction = { path = "../ethcore/transaction" }
ethereum-types = "0.3"
evm = { path = "../ethcore/evm" }

View File

@ -26,7 +26,7 @@ extern crate serde;
extern crate serde_derive;
extern crate docopt;
extern crate ethcore_transaction as transaction;
-extern crate ethcore_bytes as bytes;
+extern crate parity_bytes as bytes;
extern crate ethereum_types;
extern crate vm;
extern crate evm;

View File

@ -15,10 +15,10 @@ mime_guess = "2.0.0-alpha.2"
rand = "0.4"
rustc-hex = "1.0"
fetch = { path = "../util/fetch" }
-ethcore-bytes = { path = "../util/bytes" }
+parity-bytes = { git = "https://github.com/paritytech/parity-common" }
ethereum-types = "0.3"
parity-reactor = { path = "../util/reactor" }
-keccak-hash = { path = "../util/hash" }
+keccak-hash = { git = "https://github.com/paritytech/parity-common" }
registrar = { path = "../registrar" }
ethabi = "5.1"

View File

@ -22,7 +22,7 @@
extern crate log;
extern crate ethabi;
-extern crate ethcore_bytes as bytes;
+extern crate parity_bytes as bytes;
extern crate ethereum_types;
extern crate futures;
extern crate futures_cpupool;

View File

@ -7,11 +7,11 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
ethcore = { path = "../ethcore" }
-ethcore-bytes = { path = "../util/bytes" }
+parity-bytes = { git = "https://github.com/paritytech/parity-common" }
ethereum-types = "0.3"
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
-rlp = { path = "../util/rlp" }
+rlp = { git = "https://github.com/paritytech/parity-common" }
cid = "0.2"
multihash = "0.7"
unicase = "2.0"

View File

@ -20,7 +20,7 @@ extern crate unicase;
extern crate rlp;
extern crate ethcore;
-extern crate ethcore_bytes as bytes;
+extern crate parity_bytes as bytes;
extern crate ethereum_types;
extern crate jsonrpc_core as core;
extern crate jsonrpc_http_server as http;

View File

@ -8,9 +8,9 @@ authors = ["Parity Technologies <admin@parity.io>"]
ethcore = { path = "../ethcore" }
ethcore-io = { path = "../util/io" }
ethcore-transaction = { path = "../ethcore/transaction" }
-kvdb = { path = "../util/kvdb" }
+kvdb = { git = "https://github.com/paritytech/parity-common" }
log = "0.3"
-rlp = { path = "../util/rlp" }
+rlp = { git = "https://github.com/paritytech/parity-common" }
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
@ -18,4 +18,4 @@ serde_json = "1.0"
[dev-dependencies]
ethcore = { path = "../ethcore", features = ["test-helpers"] }
ethkey = { path = "../ethkey" }
-kvdb-memorydb = { path = "../util/kvdb-memorydb" }
+kvdb-memorydb = { git = "https://github.com/paritytech/parity-common" }

View File

@ -22,12 +22,12 @@ ethereum-types = "0.3"
futures = "0.1"
futures-cpupool = "0.1"
heapsize = "0.4"
-keccak-hash = { path = "../util/hash" }
+keccak-hash = { git = "https://github.com/paritytech/parity-common" }
linked-hash-map = "0.5"
log = "0.3"
parking_lot = "0.6"
price-info = { path = "../price-info", optional = true }
-rlp = { path = "../util/rlp" }
+rlp = { git = "https://github.com/paritytech/parity-common" }
trace-time = { path = "../util/trace-time" }
transaction-pool = { path = "../transaction-pool" }

View File

@ -45,7 +45,7 @@ extern crate toml;
extern crate blooms_db;
extern crate ethcore;
-extern crate ethcore_bytes as bytes;
+extern crate parity_bytes as bytes;
extern crate ethcore_io as io;
extern crate ethcore_light as light;
extern crate ethcore_logger;

View File

@ -10,4 +10,4 @@ futures = "0.1"
ethabi = "5.1.0"
ethabi-derive = "5.0.5"
ethabi-contract = "5.0.3"
-keccak-hash = { path = "../util/hash" }
+keccak-hash = { git = "https://github.com/paritytech/parity-common" }

View File

@ -37,8 +37,8 @@ jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = "
ethash = { path = "../ethash" }
ethcore = { path = "../ethcore", features = ["test-helpers"] }
-ethcore-bytes = { path = "../util/bytes" }
+parity-bytes = { git = "https://github.com/paritytech/parity-common" }
-ethcore-crypto = { path = "../ethcore/crypto" }
+parity-crypto = { git = "https://github.com/paritytech/parity-common" }
ethcore-devtools = { path = "../devtools" }
ethcore-io = { path = "../util/io" }
ethcore-light = { path = "../ethcore/light" }
@ -53,13 +53,13 @@ ethjson = { path = "../json" }
ethkey = { path = "../ethkey" }
ethstore = { path = "../ethstore" }
fetch = { path = "../util/fetch" }
-keccak-hash = { path = "../util/hash" }
+keccak-hash = { git = "https://github.com/paritytech/parity-common" }
node-health = { path = "../dapps/node-health" }
parity-reactor = { path = "../util/reactor" }
parity-updater = { path = "../updater" }
parity-version = { path = "../util/version" }
-patricia-trie = { path = "../util/patricia_trie" }
+patricia-trie = { git = "https://github.com/paritytech/parity-common" }
-rlp = { path = "../util/rlp" }
+rlp = { git = "https://github.com/paritytech/parity-common" }
stats = { path = "../util/stats" }
vm = { path = "../ethcore/vm" }
@ -73,7 +73,7 @@ fake-hardware-wallet = { path = "../util/fake-hardware-wallet" }
ethcore = { path = "../ethcore", features = ["test-helpers"] }
ethcore-network = { path = "../util/network" }
fake-fetch = { path = "../util/fake-fetch" }
-kvdb-memorydb = { path = "../util/kvdb-memorydb" }
+kvdb-memorydb = { git = "https://github.com/paritytech/parity-common" }
macros = { path = "../util/macros" }
pretty_assertions = "0.1"
transaction-pool = { path = "../transaction-pool" }

View File

@ -45,8 +45,8 @@ extern crate jsonrpc_pubsub;
extern crate ethash; extern crate ethash;
#[cfg_attr(test, macro_use)] #[cfg_attr(test, macro_use)]
extern crate ethcore; extern crate ethcore;
extern crate ethcore_bytes as bytes; extern crate parity_bytes as bytes;
extern crate ethcore_crypto as crypto; extern crate parity_crypto as crypto;
extern crate ethcore_devtools as devtools; extern crate ethcore_devtools as devtools;
extern crate ethcore_io as io; extern crate ethcore_io as io;
extern crate ethcore_light as light; extern crate ethcore_light as light;

View File

@ -17,4 +17,4 @@ parking_lot = "0.6"
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" } jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
jsonrpc-ws-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" } jsonrpc-ws-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
parity-rpc = { path = "../rpc" } parity-rpc = { path = "../rpc" }
keccak-hash = { path = "../util/hash" } keccak-hash = { git = "https://github.com/paritytech/parity-common" }

View File

@ -24,14 +24,14 @@ tokio-service = "0.1"
tokio-proto = "0.1" tokio-proto = "0.1"
url = "1.0" url = "1.0"
ethcore = { path = "../ethcore" } ethcore = { path = "../ethcore" }
ethcore-bytes = { path = "../util/bytes" } parity-bytes = { git = "https://github.com/paritytech/parity-common" }
ethcore-crypto = { path = "../ethcore/crypto" } parity-crypto = { git = "https://github.com/paritytech/parity-common" }
ethcore-logger = { path = "../logger" } ethcore-logger = { path = "../logger" }
ethcore-sync = { path = "../ethcore/sync" } ethcore-sync = { path = "../ethcore/sync" }
ethcore-transaction = { path = "../ethcore/transaction" } ethcore-transaction = { path = "../ethcore/transaction" }
ethereum-types = "0.3" ethereum-types = "0.3"
kvdb = { path = "../util/kvdb" } kvdb = { git = "https://github.com/paritytech/parity-common" }
keccak-hash = { path = "../util/hash" } keccak-hash = { git = "https://github.com/paritytech/parity-common" }
ethkey = { path = "../ethkey" } ethkey = { path = "../ethkey" }
lazy_static = "1.0" lazy_static = "1.0"
ethabi = "5.1" ethabi = "5.1"
@ -41,4 +41,4 @@ ethabi-contract = "5.0"
[dev-dependencies] [dev-dependencies]
ethcore = { path = "../ethcore", features = ["test-helpers"] } ethcore = { path = "../ethcore", features = ["test-helpers"] }
tempdir = "0.3" tempdir = "0.3"
kvdb-rocksdb = { path = "../util/kvdb-rocksdb" } kvdb-rocksdb = { git = "https://github.com/paritytech/parity-common" }

View File

@ -17,8 +17,8 @@
extern crate byteorder; extern crate byteorder;
extern crate ethabi; extern crate ethabi;
extern crate ethcore; extern crate ethcore;
extern crate ethcore_bytes as bytes; extern crate parity_bytes as bytes;
extern crate ethcore_crypto as crypto; extern crate parity_crypto as crypto;
extern crate ethcore_logger as logger; extern crate ethcore_logger as logger;
extern crate ethcore_sync as sync; extern crate ethcore_sync as sync;
extern crate ethcore_transaction as transaction; extern crate ethcore_transaction as transaction;

View File

@ -6,7 +6,7 @@ license = "GPL-3.0"
authors = ["Parity Technologies <admin@parity.io>"] authors = ["Parity Technologies <admin@parity.io>"]
[dependencies] [dependencies]
keccak-hash = { path = "../util/hash" } keccak-hash = { git = "https://github.com/paritytech/parity-common" }
lazy_static = "1.0" lazy_static = "1.0"
log = "0.3" log = "0.3"
ethabi = "5.1" ethabi = "5.1"
@ -15,13 +15,13 @@ ethabi-contract = "5.0"
target_info = "0.1" target_info = "0.1"
semver = "0.9" semver = "0.9"
ethcore = { path = "../ethcore" } ethcore = { path = "../ethcore" }
ethcore-bytes = { path = "../util/bytes" } parity-bytes = { git = "https://github.com/paritytech/parity-common" }
ethcore-sync = { path = "../ethcore/sync" } ethcore-sync = { path = "../ethcore/sync" }
ethereum-types = "0.3" ethereum-types = "0.3"
parking_lot = "0.6" parking_lot = "0.6"
parity-hash-fetch = { path = "../hash-fetch" } parity-hash-fetch = { path = "../hash-fetch" }
parity-version = { path = "../util/version" } parity-version = { path = "../util/version" }
path = { path = "../util/path" } path = { git = "https://github.com/paritytech/parity-common" }
rand = "0.4" rand = "0.4"
[dev-dependencies] [dev-dependencies]

View File

@ -20,7 +20,7 @@
extern crate ethabi; extern crate ethabi;
extern crate ethcore; extern crate ethcore;
extern crate ethcore_bytes as bytes; extern crate parity_bytes as bytes;
extern crate ethcore_sync as sync; extern crate ethcore_sync as sync;
extern crate ethereum_types; extern crate ethereum_types;
extern crate keccak_hash as hash; extern crate keccak_hash as hash;

View File

@ -1,8 +0,0 @@
[package]
name = "ethcore-bytes"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
description = "byte utilities for Parity"
license = "GPL-3.0"
[dependencies]

View File

@ -1,179 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! General bytes-related utilities.
//!
//! Includes a pretty-printer for bytes, in the form of `ToPretty` and `PrettySlice`.
use std::fmt;
use std::cmp::min;
use std::ops::{Deref, DerefMut};
/// Slice pretty print helper
pub struct PrettySlice<'a> (&'a [u8]);
impl<'a> fmt::Debug for PrettySlice<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for i in 0..self.0.len() {
match i > 0 {
true => { write!(f, "·{:02x}", self.0[i])?; },
false => { write!(f, "{:02x}", self.0[i])?; },
}
}
Ok(())
}
}
impl<'a> fmt::Display for PrettySlice<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for i in 0..self.0.len() {
write!(f, "{:02x}", self.0[i])?;
}
Ok(())
}
}
/// Trait to allow a type to be pretty-printed in `format!`, where unoverridable
/// defaults cannot otherwise be avoided.
pub trait ToPretty {
/// Convert a type into a derivative form in order to make `format!` print it prettily.
fn pretty(&self) -> PrettySlice;
/// Express the object as a hex string.
fn to_hex(&self) -> String {
format!("{}", self.pretty())
}
}
impl<T: AsRef<[u8]>> ToPretty for T {
fn pretty(&self) -> PrettySlice {
PrettySlice(self.as_ref())
}
}
/// A byte collection reference that can either be a slice or a vector
pub enum BytesRef<'a> {
/// This is a reference to a vector
Flexible(&'a mut Bytes),
/// This is a reference to a slice
Fixed(&'a mut [u8])
}
impl<'a> BytesRef<'a> {
/// Writes given `input` to this `BytesRef` starting at `offset`.
/// Returns number of bytes written to the ref.
/// NOTE: can return a number greater than `input.len()` in case the flexible vector had to be extended.
pub fn write(&mut self, offset: usize, input: &[u8]) -> usize {
match *self {
BytesRef::Flexible(ref mut data) => {
let data_len = data.len();
let wrote = input.len() + if data_len > offset { 0 } else { offset - data_len };
data.resize(offset, 0);
data.extend_from_slice(input);
wrote
},
BytesRef::Fixed(ref mut data) if offset < data.len() => {
let max = min(data.len() - offset, input.len());
for i in 0..max {
data[offset + i] = input[i];
}
max
},
_ => 0
}
}
}
impl<'a> Deref for BytesRef<'a> {
type Target = [u8];
fn deref(&self) -> &[u8] {
match *self {
BytesRef::Flexible(ref bytes) => bytes,
BytesRef::Fixed(ref bytes) => bytes,
}
}
}
impl <'a> DerefMut for BytesRef<'a> {
fn deref_mut(&mut self) -> &mut [u8] {
match *self {
BytesRef::Flexible(ref mut bytes) => bytes,
BytesRef::Fixed(ref mut bytes) => bytes,
}
}
}
/// Vector of bytes.
pub type Bytes = Vec<u8>;
#[cfg(test)]
mod tests {
use super::BytesRef;
#[test]
fn should_write_bytes_to_fixed_bytesref() {
// given
let mut data1 = vec![0, 0, 0];
let mut data2 = vec![0, 0, 0];
let (res1, res2) = {
let mut bytes1 = BytesRef::Fixed(&mut data1[..]);
let mut bytes2 = BytesRef::Fixed(&mut data2[1..2]);
// when
let res1 = bytes1.write(1, &[1, 1, 1]);
let res2 = bytes2.write(3, &[1, 1, 1]);
(res1, res2)
};
// then
assert_eq!(&data1, &[0, 1, 1]);
assert_eq!(res1, 2);
assert_eq!(&data2, &[0, 0, 0]);
assert_eq!(res2, 0);
}
#[test]
fn should_write_bytes_to_flexible_bytesref() {
// given
let mut data1 = vec![0, 0, 0];
let mut data2 = vec![0, 0, 0];
let mut data3 = vec![0, 0, 0];
let (res1, res2, res3) = {
let mut bytes1 = BytesRef::Flexible(&mut data1);
let mut bytes2 = BytesRef::Flexible(&mut data2);
let mut bytes3 = BytesRef::Flexible(&mut data3);
// when
let res1 = bytes1.write(1, &[1, 1, 1]);
let res2 = bytes2.write(3, &[1, 1, 1]);
let res3 = bytes3.write(5, &[1, 1, 1]);
(res1, res2, res3)
};
// then
assert_eq!(&data1, &[0, 1, 1, 1]);
assert_eq!(res1, 3);
assert_eq!(&data2, &[0, 0, 0, 1, 1, 1]);
assert_eq!(res2, 3);
assert_eq!(&data3, &[0, 0, 0, 0, 0, 1, 1, 1]);
assert_eq!(res3, 5);
}
}
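For reference, a minimal usage sketch of the API deleted above, assuming the parity-common crate exposes the same `ToPretty` and `BytesRef` items under the `parity_bytes` name used by the `extern crate` renames in this diff:

extern crate parity_bytes;

use parity_bytes::{BytesRef, ToPretty};

fn main() {
    // Hex-format anything that is `AsRef<[u8]>` via the blanket `ToPretty` impl.
    let bytes = vec![0xde, 0xad, 0xbe, 0xef];
    assert_eq!(bytes.to_hex(), "deadbeef");

    // Writing past the end of a fixed-size target silently truncates
    // and reports only the bytes actually written.
    let mut out = [0u8; 4];
    let written = BytesRef::Fixed(&mut out[..]).write(2, &[0xff, 0xff, 0xff]);
    assert_eq!(written, 2);
    assert_eq!(out, [0x00, 0x00, 0xff, 0xff]);
}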

View File

@ -1,15 +0,0 @@
[package]
description = "Rust bindings for tinykeccak C library"
homepage = "https://github.com/paritytech/keccak-hash"
readme = "README.md"
license = "GPL-3.0"
name = "keccak-hash"
version = "0.1.2"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
ethereum-types = "0.3"
tiny-keccak = "1.4"
[dev-dependencies]
tempdir = "0.3"

View File

@ -1,52 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![feature(test)]
extern crate test;
extern crate ethereum_types;
extern crate keccak_hash;
use keccak_hash::{keccak, write_keccak};
use test::Bencher;
#[bench]
fn bench_keccak_256_with_empty_input(b: &mut Bencher) {
let empty = [0u8;0];
b.bytes = empty.len() as u64;
b.iter(|| {
let _out = keccak(empty);
})
}
#[bench]
fn bench_keccak_256_with_typical_input(b: &mut Bencher) {
let data: Vec<u8> = From::from("some medum length string with important information");
b.bytes = data.len() as u64;
b.iter(|| {
let _out = keccak(&data);
})
}
#[bench]
fn bench_keccak_256_with_large_input(b: &mut Bencher) {
// 4096 chars
let data: Vec<u8> = From::from("IGxcKBr1Qp7tuqtpSVhAbvt7UgWLEi7mCA6Wa185seLSIJLFS8K1aAFO9AwtO9b3n9SM3Qg136JMmy9Mj9gZ84IaUm8XioPtloabFDU5ZR1wvauJT6jNTkvBVBpUigIsyU7C1u3s99vKP64LpXqvo1hwItZKtISxmUAgzzjv5q14V4G9bkKAnmc4M5xixgLsDGZmnj6HcOMY3XRkWtxN3RscSKwPA0bfpgtz27ZVHplbXwloYRgRLpjRhZJc7sqO8RFnTHKasVkxVRcUoDBvWNJK27TbLvQQcfxETI2Q1H6c2cBAchi8unSiuxqy5rIvVxcl9rsmmRY4IXLEG9qKntUGbiIRLjEffIP9ODoWog0GbWLmMtfvtf24hWVwXz6Ap5oUAR0kLgb7HYIYrOwKjvfV25iEF7GW8cjhl8yowXx1zcgW4t6NJNqJlGzRKx8MvRWQXvHz8h8JxcHl7S64i6PAkxI9eCLXLvs8cpbEQQHt05Zu6GKm6IInjc9mSh52WFuGhgjbno69XzfkBufJs6c9tZuBf6ErVPj4UxmT82ajCruDusk79Tlvb8oQMLjoplQc1alQaLQwSsMac9iVp9MiE3PeYnTTepJ1V10tp79fciDAnNPJgPcRfDYv0REcSFgR9Q7yWhbpPpyBjO7HwOykDQVGtV0ZbDFrFRygLAXagAIkOPc9HDfcBNID1Q2MGk8ijVWMyvmGz1wzbpNfFcQaSOm8olhwoLyHUGvkyXegh44iNsPBUvSicNxTTDowtMqO5azleuWEjzxCobYbASDopvl6JeJjRtEBBO5YCQJiHsYjlXh9QR5Q543GsqhzRLgcHNRSZYLMZqDmIABXZi8VRNJMZyWXDRKHOGDmcHWe55uZomW6FnyU0uSRKxxz66K0JWfxuFzzxAR0vR4ZZCTemgDRQuDwL1loC3KUMjDpU13jUgoPc4UJUVfwQ4f4BUY3X51Cfw9FLw4oX39KoFoiCP2Z6z27gZUY1IlE59WoXGLj4KjTp4C16ZihG080gfDIWlXnDEk3VwBuBFyKWARB63sGLrGnn27b1gHWMaop6sPvkQgWxkEKIqsxDIvXLZJg2s23V8Gqtt0FeA7R3RCvBysF4jNjQ7NiQTIQWQZ8G9gO4mEsftolSZv6FlSpNeBKIIwYWSO2R6vkgeiz06euE9bwwnenOjwPNGTGk8WHIOZBJ1hIP0ejVU2i2ca9ON0phSAnewqjo5W3PtZf2Q7mDvp9imuVWoy4t8XcZq8I2Un9jVjes9Xi0FLN2t71vLFWLWZmGDzwXxpqEgkARS1WjtJoYXCBmRnXEPj6jQfwMZWKPYSIrmOogxMVoWvA8wrof6utfJna9JezyTnrBJSCuGTSNmwwAXRLoFYxF1RITyN8mI2KmHSfvLXBrbE6kmAkjsm4XJb6kria7oUQQ1gzJuCyB7oNHjZTBFNhNa7VeQ1s1xLOwZXLOAjZ4MDTYKnF7giGJGyswb5KQxkOV9orbuAu6pJsjtql6h1UD3BcNUkG3oz8kJNepbuCN3vNCJcZOX1VrQi0PWkDwyvECrQ2E1CgbU6GpWatpg2sCTpo9W62pCcWBK2FKUFWqU3qo2T7T1Mk2ZtM6hE9I8op0M7xlGE91Mn7ea6aq93MWp7nvFlBvbaMIoeU4MpDx0BeOSkROY03ZBJ0x7K8nJrNUhAtvxp17c9oFk0VxLiuRbAAcwDUormOmpVXZNIcqnap4twEVYaSIowfcNojyUSrFL5nPc8ZG93WgNNl9rpUPZhssVml3DvXghI80A9SW3QauzohTQAX2bkWelFBHnuG2LKrsJ8en51N6CkjcS5b87y1DVMZELcZ1n5s8PCAA1wyn7OSZlgw00GRzch1YwMoHzBBgIUtMO9HrMyuhgqIPJP7KcKbQkKhtvBXKplX8SCfSlOwUkLwHNKm3HYVE0uVfJ91NAsUrGoCOjYiXYpoRT8bjAPWTm6fDlTq2sbPOyTMoc4xRasmiOJ7B0PT6UxPzCPImM4100sPFxp7Kofv4okKZWTPKTefeYiPefI3jRgfDtEIP9E6a35LZD75lBNMXYlAqL3qlnheUQD1WQimFTHiDsW6bmURptNvtkMjEXzXzpWbnyxBskUGTvP2YQjtSAhWliDXkv6t1x71cYav7TQbqvbIzMRQQsguSGYMbs8YIC4DC9ep5reWAfanlTxcxksbEhQ7FGzXOvcufeGnDl2C85gWfryVzwN7kOZiSEktFMOQ1ngRC23y1fCOiHQVQJ2nLnaW7GILb9wkN1mBTRuHsOefRJST0TnRxcn4bBq4MIibIitVyjPRy7G5XvPEcL4pFaW1HCPGm6pUOEEwTer32JObNGCyTFB1BI2cRLJu5BHPjgG3mmb0gGkGlIfh8D2b2amogpivqEn2r9Y1KOKQ8ufJvG2mYfkevco9DuEZ9Nmzkm6XkCTZaFMNHqbfQaKqsEYK7i2N1KfkBct1leW2H9MQ9QO7AHCqXHK47b1kWVIm6pSJA1yV4funzCqXnIJCEURQgHiKf38YpN7ylLhe1J4UvSG3KeesZNeFFIZOEP9HZUSFMpnN1MOrwejojK0D4qzwucYWtXrTQ8I7UP5QhlijIsCKckUa9C1Osjrq8cgSclYNGt19wpy0onUbX1rOQBUlAAUJs4CyXNU0wmVUjw7tG1LUC8my4s9KZDUj4R5UcPz3VaZRrx1RqYu6YxjroJW70I1LyG4WEiQbOkCoLmaiWo9WzbUS2cErlOo2RPymlkWHxbNnZawX2Bc872ivRHSWqNpRHyuR5QewXmcyghH3EhESBAxTel5E2xuQXfLCEVK0kEk0Mj22KPsckKKyH7sVYC1F4YItQh5hj9Titb7KflQb9vnXQ44UHxY3zBhTQT5PSYv1Kv8HxXCsnpmhZCiBru16iX9oEB33icBVB2KKcZZEEKnCGPVxJlM9RTlyNyQmjHf7z4GeTDuMAUrsMO31WvgZBnWcAOtn6ulBTUCAaqxJiWqzlMx2FSANAlyAjAxqzmQjzPLvQRjskUnBFN3woKB1m2bSo2c5thwA1fKiPvN5LW8tl1rnfNy3rJ0GJpK8nZjkzHMztYrKYAe56pX4SvplpTyibTIiRXLyEVsmuByTHCZhO3fvGoFsav3ZuRhe9eAAWeqAh13eKDTcA0ufME3ZnmJheXEZ3OwrxnFjSf3U0clkWYVont3neh77ODKHhYnX0bOmnJJlr4RqFoLBitskY0kcGMKcZlaej21SENjDcFgaka3CfHbAH5vIFqnoX1JZrZPkQ65PZqQWImP79U3gXWKvz96lElyJZAFqn0Mbltllqw4MhlI766AvHraOmMsJoNvjv1QR7pCSnC0iX6nbqW1eVPaUSZDuZRtRIxfLA8HC9VbxufT2KZV3qG0l7wrZna5Di2MNcBE9uthuVLZcqp8vCmEhINDhRRlipR7tC2iRBHecS5WtxBCpbEm1y1kgNG5o60UKgAswxxuJ3RQ9Y49mPIApBMmp4LFpuKRfcrZb4UJnCfR3pNb
Q70nnZ6Be2M7tuJUCoFfHrhqHXNz5A0uWMgxUS50c60zLl6QAELxHaCGba4WCMOHIo5nSKcUuYtDyDoDlrezALW5mZR4PRPRxnjrXxbJI14qrpymRReC3QgFDJp6sT5TLwvSHaavPlEbt2Eu0Kh5SXklGHXP9YuF3glGuJzSob3NakW1RXF5786U1MHhtJby64LyGWvNn4QXie3VjeL3QQu4C9crEAxSSiOJOfnL3DYIVOY4ipUkKFlF7Rp2q6gZazDvcUCp1cbcr7T7B4s22rXzjN7mHYWOyWuZGwlImeorY3aVKi7BaXbhgOFw6BUmIc1HeGFELHIEnPE9MwOjZam3LOm0rhBHlvJJZkXvJKmDUJrGlyqC5GtC5lDWLfXewyDWDqq7PY0atVQily5GWqib6wub6u6LZ3HZDNP8gK64Nf4kC259AE4V2hCohDnSsXAIoOkehwXyp6CkDT42NJb6sXHUv2N6cm292MiKA22PKWrwUGsan599KI2V67YRDfcfiB4ZHRDiSe62MBE0fGLIgXLIWw1xTWYbPQ9YAj3xovBvmewbJ1De4k6uS");
b.bytes = data.len() as u64;
b.iter(|| {
let _out = keccak(&data);
})
}

View File

@ -1,141 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
extern crate ethereum_types;
extern crate tiny_keccak;
use std::io;
use std::slice;
use tiny_keccak::Keccak;
pub use ethereum_types::H256;
/// The KECCAK (i.e. Keccak-256) hash of the empty bytes string.
pub const KECCAK_EMPTY: H256 = H256( [0xc5, 0xd2, 0x46, 0x01, 0x86, 0xf7, 0x23, 0x3c, 0x92, 0x7e, 0x7d, 0xb2, 0xdc, 0xc7, 0x03, 0xc0, 0xe5, 0x00, 0xb6, 0x53, 0xca, 0x82, 0x27, 0x3b, 0x7b, 0xfa, 0xd8, 0x04, 0x5d, 0x85, 0xa4, 0x70] );
/// The KECCAK of the RLP encoding of empty data.
pub const KECCAK_NULL_RLP: H256 = H256( [0x56, 0xe8, 0x1f, 0x17, 0x1b, 0xcc, 0x55, 0xa6, 0xff, 0x83, 0x45, 0xe6, 0x92, 0xc0, 0xf8, 0x6e, 0x5b, 0x48, 0xe0, 0x1b, 0x99, 0x6c, 0xad, 0xc0, 0x01, 0x62, 0x2f, 0xb5, 0xe3, 0x63, 0xb4, 0x21] );
/// The KECCAK of the RLP encoding of empty list.
pub const KECCAK_EMPTY_LIST_RLP: H256 = H256( [0x1d, 0xcc, 0x4d, 0xe8, 0xde, 0xc7, 0x5d, 0x7a, 0xab, 0x85, 0xb5, 0x67, 0xb6, 0xcc, 0xd4, 0x1a, 0xd3, 0x12, 0x45, 0x1b, 0x94, 0x8a, 0x74, 0x13, 0xf0, 0xa1, 0x42, 0xfd, 0x40, 0xd4, 0x93, 0x47] );
pub fn keccak<T: AsRef<[u8]>>(s: T) -> H256 {
let mut result = [0u8; 32];
write_keccak(s, &mut result);
H256(result)
}
pub unsafe fn keccak_256_unchecked(out: *mut u8, outlen: usize, input: *const u8, inputlen: usize) {
// This is safe since `keccak_*` uses an internal buffer and copies the result to the output. This
// means that we can reuse the input buffer for both input and output.
Keccak::keccak256(
slice::from_raw_parts(input, inputlen),
slice::from_raw_parts_mut(out, outlen)
);
}
pub unsafe fn keccak_512_unchecked(out: *mut u8, outlen: usize, input: *const u8, inputlen: usize) {
// This is safe since `keccak_*` uses an internal buffer and copies the result to the output. This
// means that we can reuse the input buffer for both input and output.
Keccak::keccak512(
slice::from_raw_parts(input, inputlen),
slice::from_raw_parts_mut(out, outlen)
);
}
pub fn keccak_256(input: &[u8], mut output: &mut [u8]) { Keccak::keccak256(input, &mut output); }
pub fn keccak_512(input: &[u8], mut output: &mut [u8]) { Keccak::keccak512(input, &mut output); }
pub fn write_keccak<T: AsRef<[u8]>>(s: T, dest: &mut [u8]) { Keccak::keccak256(s.as_ref(), dest); }
pub fn keccak_pipe(r: &mut io::BufRead, w: &mut io::Write) -> Result<H256, io::Error> {
let mut output = [0u8; 32];
let mut input = [0u8; 1024];
let mut keccak = Keccak::new_keccak256();
// read file
loop {
let some = r.read(&mut input)?;
if some == 0 {
break;
}
keccak.update(&input[0..some]);
w.write_all(&input[0..some])?;
}
keccak.finalize(&mut output);
Ok(output.into())
}
pub fn keccak_buffer(r: &mut io::BufRead) -> Result<H256, io::Error> {
keccak_pipe(r, &mut io::sink())
}
#[cfg(test)]
mod tests {
extern crate tempdir;
use std::fs;
use std::io::{Write, BufReader};
use self::tempdir::TempDir;
use super::{keccak, write_keccak, keccak_buffer, KECCAK_EMPTY};
#[test]
fn keccak_empty() {
assert_eq!(keccak([0u8; 0]), KECCAK_EMPTY);
}
#[test]
fn keccak_as() {
assert_eq!(keccak([0x41u8; 32]), From::from("59cad5948673622c1d64e2322488bf01619f7ff45789741b15a9f782ce9290a8"));
}
#[test]
fn write_keccak_with_content() {
let data: Vec<u8> = From::from("hello world");
let expected = vec![
0x47, 0x17, 0x32, 0x85, 0xa8, 0xd7, 0x34, 0x1e,
0x5e, 0x97, 0x2f, 0xc6, 0x77, 0x28, 0x63, 0x84,
0xf8, 0x02, 0xf8, 0xef, 0x42, 0xa5, 0xec, 0x5f,
0x03, 0xbb, 0xfa, 0x25, 0x4c, 0xb0, 0x1f, 0xad
];
let mut dest = [0u8;32];
write_keccak(data, &mut dest);
assert_eq!(dest, expected.as_ref());
}
#[test]
fn should_keccak_a_file() {
// given
let tempdir = TempDir::new("keccak").unwrap();
let mut path = tempdir.path().to_owned();
path.push("should_keccak_a_file");
// Prepare file
{
let mut file = fs::File::create(&path).unwrap();
file.write_all(b"something").unwrap();
}
let mut file = BufReader::new(fs::File::open(&path).unwrap());
// when
let hash = keccak_buffer(&mut file).unwrap();
// then
assert_eq!(format!("{:x}", hash), "68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87");
}
}
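A minimal usage sketch of the keccak-hash API deleted above, assuming the parity-common crate keeps the same public functions and the re-exported `H256`:

extern crate keccak_hash;

use keccak_hash::{keccak, keccak_buffer, write_keccak, H256, KECCAK_EMPTY};
use std::io::BufReader;

fn main() -> std::io::Result<()> {
    // One-shot hashing of anything that is `AsRef<[u8]>`.
    assert_eq!(keccak([0u8; 0]), KECCAK_EMPTY);

    // Hash into a caller-provided 32-byte buffer; both paths agree.
    let mut out = [0u8; 32];
    write_keccak(b"hello world", &mut out);
    assert_eq!(keccak(b"hello world"), H256(out));

    // Stream a reader through the hasher without buffering the whole input.
    let hash = keccak_buffer(&mut BufReader::new(&b"something"[..]))?;
    println!("{:x}", hash);
    Ok(())
}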

View File

@ -1,10 +0,0 @@
[package]
name = "hashdb"
version = "0.2.0"
authors = ["Parity Technologies <admin@parity.io>"]
description = "trait for hash-keyed databases."
license = "GPL-3.0"
[dependencies]
elastic-array = "0.10"
heapsize = "0.4"

View File

@ -1,83 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Database of byte-slices keyed to their hash.
extern crate elastic_array;
extern crate heapsize;
use elastic_array::ElasticArray128;
use heapsize::HeapSizeOf;
use std::collections::HashMap;
use std::{fmt::Debug, hash::Hash};
/// Trait describing an object that can hash a slice of bytes. Used to abstract
/// other types over the hashing algorithm. Defines a single `hash` method and an
/// `Out` associated type with the necessary bounds.
pub trait Hasher: Sync + Send {
/// The output type of the `Hasher`
type Out: AsRef<[u8]> + AsMut<[u8]> + Default + HeapSizeOf + Debug + PartialEq + Eq + Hash + Send + Sync + Clone + Copy;
/// What to use to build `HashMap`s with this `Hasher`
type StdHasher: Sync + Send + Default + std::hash::Hasher;
/// The length in bytes of the `Hasher` output
const LENGTH: usize;
/// Compute the hash of the provided slice of bytes returning the `Out` type of the `Hasher`
fn hash(x: &[u8]) -> Self::Out;
}
/// `HashDB` value type.
pub type DBValue = ElasticArray128<u8>;
/// Trait modelling a datastore keyed by a hash defined by the `Hasher`.
pub trait HashDB<H: Hasher>: Send + Sync + AsHashDB<H> {
/// Get the keys in the database together with number of underlying references.
fn keys(&self) -> HashMap<H::Out, i32>;
/// Look up a given hash into the bytes that hash to it, returning None if the
/// hash is not known.
fn get(&self, key: &H::Out) -> Option<DBValue>;
/// Check for the existence of a hash-key.
fn contains(&self, key: &H::Out) -> bool;
/// Insert a datum item into the DB and return the datum's hash for a later lookup. Insertions
/// are counted and the equivalent number of `remove()`s must be performed before the data
/// is considered dead.
fn insert(&mut self, value: &[u8]) -> H::Out;
/// Like `insert()`, except you provide the key and the data is all moved.
fn emplace(&mut self, key: H::Out, value: DBValue);
/// Remove a datum previously inserted. Insertions can be "owed" such that the same number of `insert()`s may
/// happen without the data eventually being inserted into the DB. It can be "owed" more than once.
fn remove(&mut self, key: &H::Out);
}
/// Upcast trait.
pub trait AsHashDB<H: Hasher> {
/// Perform upcast to HashDB for anything that derives from HashDB.
fn as_hashdb(&self) -> &HashDB<H>;
/// Perform mutable upcast to HashDB for anything that derives from HashDB.
fn as_hashdb_mut(&mut self) -> &mut HashDB<H>;
}
// NOTE: There used to be a `impl<T> AsHashDB for T` but that does not work with generics. See https://stackoverflow.com/questions/48432842/implementing-a-trait-for-reference-and-non-reference-types-causes-conflicting-im
// This means we need concrete impls of AsHashDB in several places, which somewhat defeats the point of the trait.
impl<'a, H: Hasher> AsHashDB<H> for &'a mut HashDB<H> {
fn as_hashdb(&self) -> &HashDB<H> { &**self }
fn as_hashdb_mut(&mut self) -> &mut HashDB<H> { &mut **self }
}

View File

@ -6,19 +6,19 @@ description = "A `HashDB` which can manage a short-term journal potentially cont
license = "GPL3" license = "GPL3"
[dependencies] [dependencies]
ethcore-bytes = { path = "../bytes" } parity-bytes = { git = "https://github.com/paritytech/parity-common" }
ethereum-types = "0.3" ethereum-types = "0.3"
hashdb = { version = "0.2.0", path = "../hashdb" } hashdb = { git = "https://github.com/paritytech/parity-common" }
heapsize = "0.4" heapsize = "0.4"
keccak-hasher = { path = "../keccak-hasher" } keccak-hasher = { path = "../keccak-hasher" }
kvdb = { path = "../kvdb" } kvdb = { git = "https://github.com/paritytech/parity-common" }
log = "0.3" log = "0.3"
memorydb = { version = "0.2.0", path = "../memorydb" } memorydb = { git = "https://github.com/paritytech/parity-common" }
parking_lot = "0.6" parking_lot = "0.6"
plain_hasher = { path = "../plain_hasher" } plain_hasher = { git = "https://github.com/paritytech/parity-common" }
rlp = { path = "../rlp" } rlp = { git = "https://github.com/paritytech/parity-common" }
[dev-dependencies] [dev-dependencies]
ethcore-logger = { path = "../../logger" } ethcore-logger = { path = "../../logger" }
keccak-hash = { path = "../hash" } keccak-hash = { git = "https://github.com/paritytech/parity-common" }
kvdb-memorydb = { path = "../kvdb-memorydb" } kvdb-memorydb = { git = "https://github.com/paritytech/parity-common" }

View File

@ -21,7 +21,7 @@ extern crate heapsize;
extern crate log; extern crate log;
extern crate ethereum_types; extern crate ethereum_types;
extern crate ethcore_bytes as bytes; extern crate parity_bytes as bytes;
extern crate hashdb; extern crate hashdb;
extern crate keccak_hasher; extern crate keccak_hasher;
extern crate kvdb; extern crate kvdb;

View File

@ -8,5 +8,5 @@ license = "GPL-3.0"
[dependencies] [dependencies]
ethereum-types = "0.3" ethereum-types = "0.3"
tiny-keccak = "1.4.2" tiny-keccak = "1.4.2"
hashdb = { path = "../hashdb" } hashdb = { git = "https://github.com/paritytech/parity-common" }
plain_hasher = { path = "../plain_hasher" } plain_hasher = { git = "https://github.com/paritytech/parity-common" }

View File

@ -1,8 +0,0 @@
[package]
name = "kvdb-memorydb"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
parking_lot = "0.6"
kvdb = { path = "../kvdb" }

View File

@ -1,118 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
extern crate parking_lot;
extern crate kvdb;
use std::collections::{BTreeMap, HashMap};
use std::io;
use parking_lot::RwLock;
use kvdb::{DBValue, DBTransaction, KeyValueDB, DBOp};
/// A key-value database fulfilling the `KeyValueDB` trait, living in memory.
/// This is generally intended for tests and is not particularly optimized.
#[derive(Default)]
pub struct InMemory {
columns: RwLock<HashMap<Option<u32>, BTreeMap<Vec<u8>, DBValue>>>,
}
/// Create an in-memory database with the given number of columns.
/// Columns will be indexable by 0..`num_cols`
pub fn create(num_cols: u32) -> InMemory {
let mut cols = HashMap::new();
cols.insert(None, BTreeMap::new());
for idx in 0..num_cols {
cols.insert(Some(idx), BTreeMap::new());
}
InMemory {
columns: RwLock::new(cols)
}
}
impl KeyValueDB for InMemory {
fn get(&self, col: Option<u32>, key: &[u8]) -> io::Result<Option<DBValue>> {
let columns = self.columns.read();
match columns.get(&col) {
None => Err(io::Error::new(io::ErrorKind::Other, format!("No such column family: {:?}", col))),
Some(map) => Ok(map.get(key).cloned()),
}
}
fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
let columns = self.columns.read();
match columns.get(&col) {
None => None,
Some(map) =>
map.iter()
.find(|&(ref k ,_)| k.starts_with(prefix))
.map(|(_, v)| v.to_vec().into_boxed_slice())
}
}
fn write_buffered(&self, transaction: DBTransaction) {
let mut columns = self.columns.write();
let ops = transaction.ops;
for op in ops {
match op {
DBOp::Insert { col, key, value } => {
if let Some(col) = columns.get_mut(&col) {
col.insert(key.into_vec(), value);
}
},
DBOp::Delete { col, key } => {
if let Some(col) = columns.get_mut(&col) {
col.remove(&*key);
}
},
}
}
}
fn flush(&self) -> io::Result<()> {
Ok(())
}
fn iter<'a>(&'a self, col: Option<u32>) -> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a> {
match self.columns.read().get(&col) {
Some(map) => Box::new( // TODO: worth optimizing at all?
map.clone()
.into_iter()
.map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice()))
),
None => Box::new(None.into_iter()),
}
}
fn iter_from_prefix<'a>(&'a self, col: Option<u32>, prefix: &'a [u8])
-> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>
{
match self.columns.read().get(&col) {
Some(map) => Box::new(
map.clone()
.into_iter()
.skip_while(move |&(ref k, _)| !k.starts_with(prefix))
.map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice()))
),
None => Box::new(None.into_iter()),
}
}
fn restore(&self, _new_db: &str) -> io::Result<()> {
Err(io::Error::new(io::ErrorKind::Other, "Attempted to restore in-memory database"))
}
}
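A minimal usage sketch of the in-memory backend deleted above, assuming the parity-common crate keeps the `create` entry point; it sticks to the `KeyValueDB` methods shown in this file:

extern crate kvdb;
extern crate kvdb_memorydb;

use kvdb::KeyValueDB;

fn main() -> std::io::Result<()> {
    // Two indexable columns (Some(0) and Some(1)) plus the default `None` column.
    let db = kvdb_memorydb::create(2);

    // Batch a write and apply it; the in-memory backend applies buffered ops directly.
    let mut tx = db.transaction();
    tx.put(Some(0), b"key", b"value");
    db.write_buffered(tx);

    assert_eq!(&*db.get(Some(0), b"key")?.unwrap(), b"value");
    assert!(db.get(Some(1), b"key")?.is_none());
    Ok(())
}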

View File

@ -1,19 +0,0 @@
[package]
name = "kvdb-rocksdb"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
elastic-array = "0.10"
ethereum-types = "0.3"
fs-swap = "0.2.1"
interleaved-ordered = "0.1.0"
kvdb = { path = "../kvdb" }
log = "0.3"
num_cpus = "1.0"
parking_lot = "0.6"
regex = "0.2"
rocksdb = { git = "https://github.com/paritytech/rust-rocksdb" }
[dev-dependencies]
tempdir = "0.3"

View File

@ -1,864 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#[macro_use]
extern crate log;
extern crate elastic_array;
extern crate fs_swap;
extern crate interleaved_ordered;
extern crate num_cpus;
extern crate parking_lot;
extern crate regex;
extern crate rocksdb;
extern crate ethereum_types;
extern crate kvdb;
use std::collections::HashMap;
use std::marker::PhantomData;
use std::{cmp, fs, io, mem, result, error};
use std::path::Path;
use parking_lot::{Mutex, MutexGuard, RwLock};
use rocksdb::{
DB, Writable, WriteBatch, WriteOptions, IteratorMode, DBIterator,
Options, BlockBasedOptions, Direction, Cache, Column, ReadOptions
};
use interleaved_ordered::{interleave_ordered, InterleaveOrdered};
use elastic_array::ElasticArray32;
use fs_swap::{swap, swap_nonatomic};
use kvdb::{KeyValueDB, DBTransaction, DBValue, DBOp};
#[cfg(target_os = "linux")]
use regex::Regex;
#[cfg(target_os = "linux")]
use std::process::Command;
#[cfg(target_os = "linux")]
use std::fs::File;
#[cfg(target_os = "linux")]
use std::path::PathBuf;
fn other_io_err<E>(e: E) -> io::Error where E: Into<Box<error::Error + Send + Sync>> {
io::Error::new(io::ErrorKind::Other, e)
}
const DB_DEFAULT_MEMORY_BUDGET_MB: usize = 128;
enum KeyState {
Insert(DBValue),
Delete,
}
/// Compaction profile for the database settings
#[derive(Clone, Copy, PartialEq, Debug)]
pub struct CompactionProfile {
/// L0-L1 target file size
pub initial_file_size: u64,
/// block size
pub block_size: usize,
/// rate limiter for background flushes and compactions, bytes/sec, if any
pub write_rate_limit: Option<u64>,
}
impl Default for CompactionProfile {
/// Default profile suitable for most storage
fn default() -> CompactionProfile {
CompactionProfile::ssd()
}
}
/// Given the output of the `df` command, return the path of the Linux rotational flag file.
#[cfg(target_os = "linux")]
pub fn rotational_from_df_output(df_out: Vec<u8>) -> Option<PathBuf> {
use std::str;
str::from_utf8(df_out.as_slice())
.ok()
// Get the drive name.
.and_then(|df_str| Regex::new(r"/dev/(sd[:alpha:]{1,2})")
.ok()
.and_then(|re| re.captures(df_str))
.and_then(|captures| captures.get(1)))
// Generate path e.g. /sys/block/sda/queue/rotational
.map(|drive_path| {
let mut p = PathBuf::from("/sys/block");
p.push(drive_path.as_str());
p.push("queue/rotational");
p
})
}
impl CompactionProfile {
/// Attempt to determine the best profile automatically, only Linux for now.
#[cfg(target_os = "linux")]
pub fn auto(db_path: &Path) -> CompactionProfile {
use std::io::Read;
let hdd_check_file = db_path
.to_str()
.and_then(|path_str| Command::new("df").arg(path_str).output().ok())
.and_then(|df_res| match df_res.status.success() {
true => Some(df_res.stdout),
false => None,
})
.and_then(rotational_from_df_output);
// Read out the file and match compaction profile.
if let Some(hdd_check) = hdd_check_file {
if let Ok(mut file) = File::open(hdd_check.as_path()) {
let mut buffer = [0; 1];
if file.read_exact(&mut buffer).is_ok() {
// 0 means not rotational.
if buffer == [48] { return Self::ssd(); }
// 1 means rotational.
if buffer == [49] { return Self::hdd(); }
}
}
}
// Fallback if drive type was not determined.
Self::default()
}
/// Just default for other platforms.
#[cfg(not(target_os = "linux"))]
pub fn auto(_db_path: &Path) -> CompactionProfile {
Self::default()
}
/// Default profile suitable for SSD storage
pub fn ssd() -> CompactionProfile {
CompactionProfile {
initial_file_size: 64 * 1024 * 1024,
block_size: 16 * 1024,
write_rate_limit: None,
}
}
/// Slow HDD compaction profile
pub fn hdd() -> CompactionProfile {
CompactionProfile {
initial_file_size: 256 * 1024 * 1024,
block_size: 64 * 1024,
write_rate_limit: Some(16 * 1024 * 1024),
}
}
}
/// Database configuration
#[derive(Clone)]
pub struct DatabaseConfig {
/// Max number of open files.
pub max_open_files: i32,
/// Memory budget (in MiB) used for setting block cache size, write buffer size.
pub memory_budget: Option<usize>,
/// Compaction profile
pub compaction: CompactionProfile,
/// Set number of columns
pub columns: Option<u32>,
/// Should we keep WAL enabled?
pub wal: bool,
}
impl DatabaseConfig {
/// Create new `DatabaseConfig` with default parameters and specified set of columns.
/// Note that cache sizes must be explicitly set.
pub fn with_columns(columns: Option<u32>) -> Self {
let mut config = Self::default();
config.columns = columns;
config
}
pub fn memory_budget(&self) -> usize {
self.memory_budget.unwrap_or(DB_DEFAULT_MEMORY_BUDGET_MB) * 1024 * 1024
}
pub fn memory_budget_per_col(&self) -> usize {
self.memory_budget() / self.columns.unwrap_or(1) as usize
}
}
impl Default for DatabaseConfig {
fn default() -> DatabaseConfig {
DatabaseConfig {
max_open_files: 512,
memory_budget: None,
compaction: CompactionProfile::default(),
columns: None,
wal: true,
}
}
}
/// Database iterator (for flushed data only)
// The compromise of holding only a virtual borrow vs. holding a lock on the
// inner DB (to prevent closing via restoration) may be re-evaluated in the future.
//
pub struct DatabaseIterator<'a> {
iter: InterleaveOrdered<::std::vec::IntoIter<(Box<[u8]>, Box<[u8]>)>, DBIterator>,
_marker: PhantomData<&'a Database>,
}
impl<'a> Iterator for DatabaseIterator<'a> {
type Item = (Box<[u8]>, Box<[u8]>);
fn next(&mut self) -> Option<Self::Item> {
self.iter.next()
}
}
struct DBAndColumns {
db: DB,
cfs: Vec<Column>,
}
// get column family configuration from database config.
fn col_config(config: &DatabaseConfig, block_opts: &BlockBasedOptions) -> io::Result<Options> {
let mut opts = Options::new();
opts.set_parsed_options("level_compaction_dynamic_level_bytes=true").map_err(other_io_err)?;
opts.set_block_based_table_factory(block_opts);
opts.set_parsed_options(
&format!("block_based_table_factory={{{};{}}}",
"cache_index_and_filter_blocks=true",
"pin_l0_filter_and_index_blocks_in_cache=true")).map_err(other_io_err)?;
opts.optimize_level_style_compaction(config.memory_budget_per_col() as i32);
opts.set_target_file_size_base(config.compaction.initial_file_size);
opts.set_parsed_options("compression_per_level=").map_err(other_io_err)?;
Ok(opts)
}
/// Key-Value database.
pub struct Database {
db: RwLock<Option<DBAndColumns>>,
config: DatabaseConfig,
write_opts: WriteOptions,
read_opts: ReadOptions,
block_opts: BlockBasedOptions,
path: String,
// Dirty values added with `write_buffered`. Cleaned on `flush`.
overlay: RwLock<Vec<HashMap<ElasticArray32<u8>, KeyState>>>,
// Values currently being flushed. Cleared when `flush` completes.
flushing: RwLock<Vec<HashMap<ElasticArray32<u8>, KeyState>>>,
// Prevents concurrent flushes.
// Value indicates if a flush is in progress.
flushing_lock: Mutex<bool>,
}
#[inline]
fn check_for_corruption<T, P: AsRef<Path>>(path: P, res: result::Result<T, String>) -> io::Result<T> {
if let Err(ref s) = res {
if s.starts_with("Corruption:") {
warn!("DB corrupted: {}. Repair will be triggered on next restart", s);
let _ = fs::File::create(path.as_ref().join(Database::CORRUPTION_FILE_NAME));
}
}
res.map_err(other_io_err)
}
fn is_corrupted(s: &str) -> bool {
s.starts_with("Corruption:") || s.starts_with("Invalid argument: You have to open all column families")
}
impl Database {
const CORRUPTION_FILE_NAME: &'static str = "CORRUPTED";
/// Open database with default settings.
pub fn open_default(path: &str) -> io::Result<Database> {
Database::open(&DatabaseConfig::default(), path)
}
/// Open database file. Creates if it does not exist.
pub fn open(config: &DatabaseConfig, path: &str) -> io::Result<Database> {
let mut opts = Options::new();
if let Some(rate_limit) = config.compaction.write_rate_limit {
opts.set_parsed_options(&format!("rate_limiter_bytes_per_sec={}", rate_limit)).map_err(other_io_err)?;
}
opts.set_use_fsync(false);
opts.create_if_missing(true);
opts.set_max_open_files(config.max_open_files);
opts.set_parsed_options("keep_log_file_num=1").map_err(other_io_err)?;
opts.set_parsed_options("bytes_per_sync=1048576").map_err(other_io_err)?;
opts.set_db_write_buffer_size(config.memory_budget_per_col() / 2);
opts.increase_parallelism(cmp::max(1, ::num_cpus::get() as i32 / 2));
let mut block_opts = BlockBasedOptions::new();
{
block_opts.set_block_size(config.compaction.block_size);
let cache_size = cmp::max(8, config.memory_budget() / 3);
let cache = Cache::new(cache_size);
block_opts.set_cache(cache);
}
// attempt database repair if it has been previously marked as corrupted
let db_corrupted = Path::new(path).join(Database::CORRUPTION_FILE_NAME);
if db_corrupted.exists() {
warn!("DB has been previously marked as corrupted, attempting repair");
DB::repair(&opts, path).map_err(other_io_err)?;
fs::remove_file(db_corrupted)?;
}
let columns = config.columns.unwrap_or(0) as usize;
let mut cf_options = Vec::with_capacity(columns);
let cfnames: Vec<_> = (0..columns).map(|c| format!("col{}", c)).collect();
let cfnames: Vec<&str> = cfnames.iter().map(|n| n as &str).collect();
for _ in 0 .. config.columns.unwrap_or(0) {
cf_options.push(col_config(&config, &block_opts)?);
}
let mut write_opts = WriteOptions::new();
if !config.wal {
write_opts.disable_wal(true);
}
let mut read_opts = ReadOptions::new();
read_opts.set_verify_checksums(false);
let mut cfs: Vec<Column> = Vec::new();
let db = match config.columns {
Some(_) => {
match DB::open_cf(&opts, path, &cfnames, &cf_options) {
Ok(db) => {
cfs = cfnames.iter().map(|n| db.cf_handle(n)
.expect("rocksdb opens a cf_handle for each cfname; qed")).collect();
Ok(db)
}
Err(_) => {
// retry and create CFs
match DB::open_cf(&opts, path, &[], &[]) {
Ok(mut db) => {
cfs = cfnames.iter()
.enumerate()
.map(|(i, n)| db.create_cf(n, &cf_options[i]))
.collect::<::std::result::Result<_, _>>()
.map_err(other_io_err)?;
Ok(db)
},
err => err,
}
}
}
},
None => DB::open(&opts, path)
};
let db = match db {
Ok(db) => db,
Err(ref s) if is_corrupted(s) => {
warn!("DB corrupted: {}, attempting repair", s);
DB::repair(&opts, path).map_err(other_io_err)?;
match cfnames.is_empty() {
true => DB::open(&opts, path).map_err(other_io_err)?,
false => {
let db = DB::open_cf(&opts, path, &cfnames, &cf_options).map_err(other_io_err)?;
cfs = cfnames.iter().map(|n| db.cf_handle(n)
.expect("rocksdb opens a cf_handle for each cfname; qed")).collect();
db
},
}
},
Err(s) => {
return Err(other_io_err(s))
}
};
let num_cols = cfs.len();
Ok(Database {
db: RwLock::new(Some(DBAndColumns{ db: db, cfs: cfs })),
config: config.clone(),
write_opts: write_opts,
overlay: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()),
flushing: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()),
flushing_lock: Mutex::new(false),
path: path.to_owned(),
read_opts: read_opts,
block_opts: block_opts,
})
}
/// Helper to create new transaction for this database.
pub fn transaction(&self) -> DBTransaction {
DBTransaction::new()
}
fn to_overlay_column(col: Option<u32>) -> usize {
col.map_or(0, |c| (c + 1) as usize)
}
/// Buffer a transaction of changes; they are committed to the backing store on `flush`.
pub fn write_buffered(&self, tr: DBTransaction) {
let mut overlay = self.overlay.write();
let ops = tr.ops;
for op in ops {
match op {
DBOp::Insert { col, key, value } => {
let c = Self::to_overlay_column(col);
overlay[c].insert(key, KeyState::Insert(value));
},
DBOp::Delete { col, key } => {
let c = Self::to_overlay_column(col);
overlay[c].insert(key, KeyState::Delete);
},
}
};
}
/// Commit buffered changes to the database. Must be called while holding `flushing_lock`.
fn write_flushing_with_lock(&self, _lock: &mut MutexGuard<bool>) -> io::Result<()> {
match *self.db.read() {
Some(DBAndColumns { ref db, ref cfs }) => {
let batch = WriteBatch::new();
mem::swap(&mut *self.overlay.write(), &mut *self.flushing.write());
{
for (c, column) in self.flushing.read().iter().enumerate() {
for (key, state) in column.iter() {
match *state {
KeyState::Delete => {
if c > 0 {
batch.delete_cf(cfs[c - 1], key).map_err(other_io_err)?;
} else {
batch.delete(key).map_err(other_io_err)?;
}
},
KeyState::Insert(ref value) => {
if c > 0 {
batch.put_cf(cfs[c - 1], key, value).map_err(other_io_err)?;
} else {
batch.put(key, value).map_err(other_io_err)?;
}
},
}
}
}
}
check_for_corruption(
&self.path,
db.write_opt(batch, &self.write_opts))?;
for column in self.flushing.write().iter_mut() {
column.clear();
column.shrink_to_fit();
}
Ok(())
},
None => Err(other_io_err("Database is closed"))
}
}
/// Commit buffered changes to database.
pub fn flush(&self) -> io::Result<()> {
let mut lock = self.flushing_lock.lock();
// If RocksDB batch allocation fails the thread gets terminated and the lock is released.
// The value inside the lock is used to detect that.
if *lock {
// This can only happen if another flushing thread is terminated unexpectedly.
return Err(other_io_err("Database write failure. Running low on memory perhaps?"))
}
*lock = true;
let result = self.write_flushing_with_lock(&mut lock);
*lock = false;
result
}
/// Commit transaction to database.
pub fn write(&self, tr: DBTransaction) -> io::Result<()> {
match *self.db.read() {
Some(DBAndColumns { ref db, ref cfs }) => {
let batch = WriteBatch::new();
let ops = tr.ops;
for op in ops {
// remove any buffered operation for this key
self.overlay.write()[Self::to_overlay_column(op.col())].remove(op.key());
match op {
DBOp::Insert { col, key, value } => match col {
None => batch.put(&key, &value).map_err(other_io_err)?,
Some(c) => batch.put_cf(cfs[c as usize], &key, &value).map_err(other_io_err)?,
},
DBOp::Delete { col, key } => match col {
None => batch.delete(&key).map_err(other_io_err)?,
Some(c) => batch.delete_cf(cfs[c as usize], &key).map_err(other_io_err)?,
}
}
}
check_for_corruption(&self.path, db.write_opt(batch, &self.write_opts))
},
None => Err(other_io_err("Database is closed")),
}
}
/// Get value by key.
pub fn get(&self, col: Option<u32>, key: &[u8]) -> io::Result<Option<DBValue>> {
match *self.db.read() {
Some(DBAndColumns { ref db, ref cfs }) => {
let overlay = &self.overlay.read()[Self::to_overlay_column(col)];
match overlay.get(key) {
Some(&KeyState::Insert(ref value)) => Ok(Some(value.clone())),
Some(&KeyState::Delete) => Ok(None),
None => {
let flushing = &self.flushing.read()[Self::to_overlay_column(col)];
match flushing.get(key) {
Some(&KeyState::Insert(ref value)) => Ok(Some(value.clone())),
Some(&KeyState::Delete) => Ok(None),
None => {
col.map_or_else(
|| db.get_opt(key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v))),
|c| db.get_cf_opt(cfs[c as usize], key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v))))
.map_err(other_io_err)
},
}
},
}
},
None => Ok(None),
}
}
/// Get value by partial key. Prefix size should match configured prefix size. Only searches flushed values.
// TODO: support prefix seek for unflushed data
pub fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
self.iter_from_prefix(col, prefix).and_then(|mut iter| {
match iter.next() {
// TODO: use prefix_same_as_start read option (not available in C API currently)
Some((k, v)) => if k[0 .. prefix.len()] == prefix[..] { Some(v) } else { None },
_ => None
}
})
}
/// Get database iterator for flushed data.
pub fn iter(&self, col: Option<u32>) -> Option<DatabaseIterator> {
match *self.db.read() {
Some(DBAndColumns { ref db, ref cfs }) => {
let overlay = &self.overlay.read()[Self::to_overlay_column(col)];
let mut overlay_data = overlay.iter()
.filter_map(|(k, v)| match *v {
KeyState::Insert(ref value) =>
Some((k.clone().into_vec().into_boxed_slice(), value.clone().into_vec().into_boxed_slice())),
KeyState::Delete => None,
}).collect::<Vec<_>>();
overlay_data.sort();
let iter = col.map_or_else(
|| db.iterator_opt(IteratorMode::Start, &self.read_opts),
|c| db.iterator_cf_opt(cfs[c as usize], IteratorMode::Start, &self.read_opts)
.expect("iterator params are valid; qed")
);
Some(DatabaseIterator {
iter: interleave_ordered(overlay_data, iter),
_marker: PhantomData,
})
},
None => None,
}
}
fn iter_from_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<DatabaseIterator> {
match *self.db.read() {
Some(DBAndColumns { ref db, ref cfs }) => {
let iter = col.map_or_else(|| db.iterator_opt(IteratorMode::From(prefix, Direction::Forward), &self.read_opts),
|c| db.iterator_cf_opt(cfs[c as usize], IteratorMode::From(prefix, Direction::Forward), &self.read_opts)
.expect("iterator params are valid; qed"));
Some(DatabaseIterator {
iter: interleave_ordered(Vec::new(), iter),
_marker: PhantomData,
})
},
None => None,
}
}
/// Close the database
fn close(&self) {
*self.db.write() = None;
self.overlay.write().clear();
self.flushing.write().clear();
}
/// Restore the database from a copy at given path.
pub fn restore(&self, new_db: &str) -> io::Result<()> {
self.close();
// swap is guaranteed to be atomic
match swap(new_db, &self.path) {
Ok(_) => {
// ignore errors
let _ = fs::remove_dir_all(new_db);
},
Err(err) => {
warn!("DB atomic swap failed: {}", err);
match swap_nonatomic(new_db, &self.path) {
Ok(_) => {
// ignore errors
let _ = fs::remove_dir_all(new_db);
},
Err(err) => {
warn!("DB nonatomic atomic swap failed: {}", err);
return Err(err.into());
}
}
}
}
// reopen the database and steal handles into self
let db = Self::open(&self.config, &self.path)?;
*self.db.write() = mem::replace(&mut *db.db.write(), None);
*self.overlay.write() = mem::replace(&mut *db.overlay.write(), Vec::new());
*self.flushing.write() = mem::replace(&mut *db.flushing.write(), Vec::new());
Ok(())
}
/// The number of non-default column families.
pub fn num_columns(&self) -> u32 {
self.db.read().as_ref()
.and_then(|db| if db.cfs.is_empty() { None } else { Some(db.cfs.len()) } )
.map(|n| n as u32)
.unwrap_or(0)
}
/// Drop a column family.
pub fn drop_column(&self) -> io::Result<()> {
match *self.db.write() {
Some(DBAndColumns { ref mut db, ref mut cfs }) => {
if let Some(col) = cfs.pop() {
let name = format!("col{}", cfs.len());
drop(col);
db.drop_cf(&name).map_err(other_io_err)?;
}
Ok(())
},
None => Ok(()),
}
}
/// Add a column family.
pub fn add_column(&self) -> io::Result<()> {
match *self.db.write() {
Some(DBAndColumns { ref mut db, ref mut cfs }) => {
let col = cfs.len() as u32;
let name = format!("col{}", col);
cfs.push(db.create_cf(&name, &col_config(&self.config, &self.block_opts)?).map_err(other_io_err)?);
Ok(())
},
None => Ok(()),
}
}
}
// Duplicate declaration of methods here to avoid a trait import in certain existing cases
// at the time of addition.
impl KeyValueDB for Database {
fn get(&self, col: Option<u32>, key: &[u8]) -> io::Result<Option<DBValue>> {
Database::get(self, col, key)
}
fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
Database::get_by_prefix(self, col, prefix)
}
fn write_buffered(&self, transaction: DBTransaction) {
Database::write_buffered(self, transaction)
}
fn write(&self, transaction: DBTransaction) -> io::Result<()> {
Database::write(self, transaction)
}
fn flush(&self) -> io::Result<()> {
Database::flush(self)
}
fn iter<'a>(&'a self, col: Option<u32>) -> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a> {
let unboxed = Database::iter(self, col);
Box::new(unboxed.into_iter().flat_map(|inner| inner))
}
fn iter_from_prefix<'a>(&'a self, col: Option<u32>, prefix: &'a [u8])
-> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>
{
let unboxed = Database::iter_from_prefix(self, col, prefix);
Box::new(unboxed.into_iter().flat_map(|inner| inner))
}
fn restore(&self, new_db: &str) -> io::Result<()> {
Database::restore(self, new_db)
}
}
impl Drop for Database {
fn drop(&mut self) {
// write all buffered changes if we can.
let _ = self.flush();
}
}
#[cfg(test)]
mod tests {
extern crate tempdir;
use std::str::FromStr;
use self::tempdir::TempDir;
use ethereum_types::H256;
use super::*;
fn test_db(config: &DatabaseConfig) {
let tempdir = TempDir::new("").unwrap();
let db = Database::open(config, tempdir.path().to_str().unwrap()).unwrap();
let key1 = H256::from_str("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();
let key2 = H256::from_str("03c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();
let key3 = H256::from_str("01c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();
let mut batch = db.transaction();
batch.put(None, &key1, b"cat");
batch.put(None, &key2, b"dog");
db.write(batch).unwrap();
assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"cat");
let contents: Vec<_> = db.iter(None).into_iter().flat_map(|inner| inner).collect();
assert_eq!(contents.len(), 2);
assert_eq!(&*contents[0].0, &*key1);
assert_eq!(&*contents[0].1, b"cat");
assert_eq!(&*contents[1].0, &*key2);
assert_eq!(&*contents[1].1, b"dog");
let mut batch = db.transaction();
batch.delete(None, &key1);
db.write(batch).unwrap();
assert!(db.get(None, &key1).unwrap().is_none());
let mut batch = db.transaction();
batch.put(None, &key1, b"cat");
db.write(batch).unwrap();
let mut transaction = db.transaction();
transaction.put(None, &key3, b"elephant");
transaction.delete(None, &key1);
db.write(transaction).unwrap();
assert!(db.get(None, &key1).unwrap().is_none());
assert_eq!(&*db.get(None, &key3).unwrap().unwrap(), b"elephant");
assert_eq!(&*db.get_by_prefix(None, &key3).unwrap(), b"elephant");
assert_eq!(&*db.get_by_prefix(None, &key2).unwrap(), b"dog");
let mut transaction = db.transaction();
transaction.put(None, &key1, b"horse");
transaction.delete(None, &key3);
db.write_buffered(transaction);
assert!(db.get(None, &key3).unwrap().is_none());
assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"horse");
db.flush().unwrap();
assert!(db.get(None, &key3).unwrap().is_none());
assert_eq!(&*db.get(None, &key1).unwrap().unwrap(), b"horse");
}
#[test]
fn kvdb() {
let tempdir = TempDir::new("").unwrap();
let _ = Database::open_default(tempdir.path().to_str().unwrap()).unwrap();
test_db(&DatabaseConfig::default());
}
#[test]
#[cfg(target_os = "linux")]
fn df_to_rotational() {
use std::path::PathBuf;
// Example df output.
let example_df = vec![70, 105, 108, 101, 115, 121, 115, 116, 101, 109, 32, 32, 32, 32, 32, 49, 75, 45, 98, 108, 111, 99, 107, 115, 32, 32, 32, 32, 32, 85, 115, 101, 100, 32, 65, 118, 97, 105, 108, 97, 98, 108, 101, 32, 85, 115, 101, 37, 32, 77, 111, 117, 110, 116, 101, 100, 32, 111, 110, 10, 47, 100, 101, 118, 47, 115, 100, 97, 49, 32, 32, 32, 32, 32, 32, 32, 54, 49, 52, 48, 57, 51, 48, 48, 32, 51, 56, 56, 50, 50, 50, 51, 54, 32, 32, 49, 57, 52, 52, 52, 54, 49, 54, 32, 32, 54, 55, 37, 32, 47, 10];
let expected_output = Some(PathBuf::from("/sys/block/sda/queue/rotational"));
assert_eq!(rotational_from_df_output(example_df), expected_output);
}
#[test]
fn add_columns() {
let config = DatabaseConfig::default();
let config_5 = DatabaseConfig::with_columns(Some(5));
let tempdir = TempDir::new("").unwrap();
// open empty, add 5.
{
let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap();
assert_eq!(db.num_columns(), 0);
for i in 0..5 {
db.add_column().unwrap();
assert_eq!(db.num_columns(), i + 1);
}
}
// reopen as 5.
{
let db = Database::open(&config_5, tempdir.path().to_str().unwrap()).unwrap();
assert_eq!(db.num_columns(), 5);
}
}
#[test]
fn drop_columns() {
let config = DatabaseConfig::default();
let config_5 = DatabaseConfig::with_columns(Some(5));
let tempdir = TempDir::new("").unwrap();
// open 5, remove all.
{
let db = Database::open(&config_5, tempdir.path().to_str().unwrap()).unwrap();
assert_eq!(db.num_columns(), 5);
for i in (0..5).rev() {
db.drop_column().unwrap();
assert_eq!(db.num_columns(), i);
}
}
// reopen as 0.
{
let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap();
assert_eq!(db.num_columns(), 0);
}
}
#[test]
fn write_clears_buffered_ops() {
let tempdir = TempDir::new("").unwrap();
let config = DatabaseConfig::default();
let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap();
let mut batch = db.transaction();
batch.put(None, b"foo", b"bar");
db.write_buffered(batch);
let mut batch = db.transaction();
batch.put(None, b"foo", b"baz");
db.write(batch).unwrap();
assert_eq!(db.get(None, b"foo").unwrap().unwrap().as_ref(), b"baz");
}
}
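A minimal usage sketch of the RocksDB backend deleted above, mirroring its own tests and assuming the parity-common crate keeps the same `Database`, `DatabaseConfig` and `CompactionProfile` API (`tempdir` only provides a scratch path, as in the tests):

extern crate kvdb_rocksdb;
extern crate tempdir;

use kvdb_rocksdb::{CompactionProfile, Database, DatabaseConfig};
use tempdir::TempDir;

fn main() -> std::io::Result<()> {
    let tempdir = TempDir::new("kvdb-rocksdb-example")?;

    // One non-default column; pick the compaction profile from the drive type (Linux only).
    let mut config = DatabaseConfig::with_columns(Some(1));
    config.compaction = CompactionProfile::auto(tempdir.path());

    let db = Database::open(&config, tempdir.path().to_str().unwrap())?;

    // Batch a write, commit it, and read the value back from column 0.
    let mut tx = db.transaction();
    tx.put(Some(0), b"key", b"value");
    db.write(tx)?;

    assert_eq!(&*db.get(Some(0), b"key")?.unwrap(), b"value");
    Ok(())
}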

View File

@ -1,8 +0,0 @@
[package]
name = "kvdb"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
elastic-array = "0.10"
ethcore-bytes = { path = "../bytes" }

View File

@ -1,175 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Key-Value store abstraction with `RocksDB` backend.
extern crate elastic_array;
extern crate ethcore_bytes as bytes;
use std::io;
use std::path::Path;
use std::sync::Arc;
use elastic_array::{ElasticArray128, ElasticArray32};
use bytes::Bytes;
/// Required length of prefixes.
pub const PREFIX_LEN: usize = 12;
/// Database value.
pub type DBValue = ElasticArray128<u8>;
/// Write transaction. Batches a sequence of put/delete operations for efficiency.
#[derive(Default, Clone, PartialEq)]
pub struct DBTransaction {
/// Database operations.
pub ops: Vec<DBOp>,
}
/// Database operation.
#[derive(Clone, PartialEq)]
pub enum DBOp {
Insert {
col: Option<u32>,
key: ElasticArray32<u8>,
value: DBValue,
},
Delete {
col: Option<u32>,
key: ElasticArray32<u8>,
}
}
impl DBOp {
/// Returns the key associated with this operation.
pub fn key(&self) -> &[u8] {
match *self {
DBOp::Insert { ref key, .. } => key,
DBOp::Delete { ref key, .. } => key,
}
}
/// Returns the column associated with this operation.
pub fn col(&self) -> Option<u32> {
match *self {
DBOp::Insert { col, .. } => col,
DBOp::Delete { col, .. } => col,
}
}
}
impl DBTransaction {
/// Create new transaction.
pub fn new() -> DBTransaction {
DBTransaction::with_capacity(256)
}
/// Create new transaction with capacity.
pub fn with_capacity(cap: usize) -> DBTransaction {
DBTransaction {
ops: Vec::with_capacity(cap)
}
}
/// Insert a key-value pair in the transaction. Any existing value will be overwritten upon write.
pub fn put(&mut self, col: Option<u32>, key: &[u8], value: &[u8]) {
let mut ekey = ElasticArray32::new();
ekey.append_slice(key);
self.ops.push(DBOp::Insert {
col: col,
key: ekey,
value: DBValue::from_slice(value),
});
}
/// Insert a key-value pair in the transaction. Any existing value will be overwritten upon write.
pub fn put_vec(&mut self, col: Option<u32>, key: &[u8], value: Bytes) {
let mut ekey = ElasticArray32::new();
ekey.append_slice(key);
self.ops.push(DBOp::Insert {
col: col,
key: ekey,
value: DBValue::from_vec(value),
});
}
/// Delete value by key.
pub fn delete(&mut self, col: Option<u32>, key: &[u8]) {
let mut ekey = ElasticArray32::new();
ekey.append_slice(key);
self.ops.push(DBOp::Delete {
col: col,
key: ekey,
});
}
}
/// Generic key-value database.
///
/// This makes a distinction between "buffered" and "flushed" values. Values which have been
/// written can always be read, but may be present in an in-memory buffer. Values which have
/// been flushed have been moved to backing storage, like a RocksDB instance. There are certain
/// operations which are only guaranteed to operate on flushed data and not buffered,
/// although implementations may differ in this regard.
///
/// The contents of an interior buffer may be explicitly flushed using the `flush` method.
///
/// The `KeyValueDB` also deals in "column families", which can be thought of as distinct
/// stores within a database. Keys written in one column family will not be accessible from
/// any other. The number of column families must be specified at initialization, with a
/// differing interface for each database. The `None` argument in place of a column index
/// is always supported.
///
/// The API laid out here, along with the `Sync` bound implies interior synchronization for
/// implementation.
pub trait KeyValueDB: Sync + Send {
/// Helper to create a new transaction.
fn transaction(&self) -> DBTransaction { DBTransaction::new() }
/// Get a value by key.
fn get(&self, col: Option<u32>, key: &[u8]) -> io::Result<Option<DBValue>>;
/// Get a value by partial key. Only works for flushed data.
fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>>;
/// Write a transaction of changes to the buffer.
fn write_buffered(&self, transaction: DBTransaction);
/// Write a transaction of changes to the backing store.
fn write(&self, transaction: DBTransaction) -> io::Result<()> {
self.write_buffered(transaction);
self.flush()
}
/// Flush all buffered data.
fn flush(&self) -> io::Result<()>;
/// Iterate over flushed data for a given column.
fn iter<'a>(&'a self, col: Option<u32>) -> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>;
/// Iterate over flushed data for a given column, starting from a given prefix.
fn iter_from_prefix<'a>(&'a self, col: Option<u32>, prefix: &'a [u8])
-> Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>;
/// Attempt to replace this database with a new one located at the given path.
fn restore(&self, new_db: &str) -> io::Result<()>;
}
/// Generic key-value database handler. This trait contains one function, `open`. When called, it opens the database
/// with a predefined config.
pub trait KeyValueDBHandler: Send + Sync {
/// Open the predefined key-value database.
fn open(&self, path: &Path) -> io::Result<Arc<KeyValueDB>>;
}
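A minimal usage sketch of the `KeyValueDB` trait above (not part of the deleted file; the function and key/value names are illustrative only). It shows the buffered-write path: operations are batched in a `DBTransaction`, `write` pushes them to the backing store, and `get` reads the value back.
extern crate kvdb;
use std::io;
use kvdb::KeyValueDB;
/// Write a key through a transaction, flush it, then read it back.
/// Works with any `KeyValueDB` implementation, e.g. the RocksDB-backed one.
fn store_and_read(db: &KeyValueDB) -> io::Result<()> {
    let mut tx = db.transaction();      // empty batch from the trait helper
    tx.put(None, b"key", b"value");     // buffered inside the transaction
    db.write(tx)?;                      // write_buffered + flush
    assert_eq!(&*db.get(None, b"key")?.expect("value was written above"), b"value");
    Ok(())
}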


@ -1,19 +0,0 @@
[package]
name = "memorydb"
version = "0.2.0"
authors = ["Parity Technologies <admin@parity.io>"]
description = "in-memory implementation of hashdb"
license = "GPL-3.0"
[dependencies]
elastic-array = "0.10"
heapsize = "0.4"
hashdb = { version = "0.2.0", path = "../hashdb" }
plain_hasher = { path = "../plain_hasher" }
rlp = { version = "0.2.1", path = "../rlp" }
[dev-dependencies]
tiny-keccak = "1.4.2"
ethereum-types = "0.3"
keccak-hasher = { path = "../keccak-hasher" }
keccak-hash = { path = "../hash" }


@ -1,79 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![feature(test)]
extern crate hashdb;
extern crate memorydb;
extern crate keccak_hasher;
extern crate keccak_hash;
extern crate rlp;
extern crate test;
use memorydb::MemoryDB;
use keccak_hasher::KeccakHasher;
use hashdb::{HashDB, Hasher};
use keccak_hash::KECCAK_NULL_RLP;
use rlp::NULL_RLP;
use test::{Bencher, black_box};
#[bench]
fn instantiation(b: &mut Bencher) {
b.iter(|| {
MemoryDB::<KeccakHasher>::new();
})
}
#[bench]
fn compare_to_null_embedded_in_struct(b: &mut Bencher) {
struct X {a_hash: <KeccakHasher as Hasher>::Out};
let x = X {a_hash: KeccakHasher::hash(&NULL_RLP)};
let key = KeccakHasher::hash(b"abc");
b.iter(|| {
black_box(key == x.a_hash);
})
}
#[bench]
fn compare_to_null_in_const(b: &mut Bencher) {
let key = KeccakHasher::hash(b"abc");
b.iter(|| {
black_box(key == KECCAK_NULL_RLP);
})
}
#[bench]
fn contains_with_non_null_key(b: &mut Bencher) {
let mut m = MemoryDB::<KeccakHasher>::new();
let key = KeccakHasher::hash(b"abc");
m.insert(b"abcefghijklmnopqrstuvxyz");
b.iter(|| {
m.contains(&key);
})
}
#[bench]
fn contains_with_null_key(b: &mut Bencher) {
let mut m = MemoryDB::<KeccakHasher>::new();
let null_key = KeccakHasher::hash(&NULL_RLP);
m.insert(b"abcefghijklmnopqrstuvxyz");
b.iter(|| {
m.contains(&null_key);
})
}


@ -1,344 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Reference-counted memory-based `HashDB` implementation.
extern crate elastic_array;
extern crate hashdb;
extern crate heapsize;
extern crate rlp;
#[cfg(test)] extern crate keccak_hasher;
#[cfg(test)] extern crate tiny_keccak;
#[cfg(test)] extern crate ethereum_types;
use hashdb::{HashDB, Hasher as KeyHasher, DBValue, AsHashDB};
use heapsize::HeapSizeOf;
use rlp::NULL_RLP;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::hash;
use std::mem;
// Backing `HashMap` keyed by `Hasher::Out`, using `Hasher::StdHasher` as the hash-map builder.
type FastMap<H, T> = HashMap<<H as KeyHasher>::Out, T, hash::BuildHasherDefault<<H as KeyHasher>::StdHasher>>;
/// Reference-counted memory-based `HashDB` implementation.
///
/// Use `new()` to create a new database. Insert items with `insert()`, remove items
/// with `remove()`, check for existence with `contains()` and lookup a hash to derive
/// the data with `get()`. Clear with `clear()` and purge the portions of the data
/// that have no references with `purge()`.
///
/// # Example
/// ```rust
/// extern crate hashdb;
/// extern crate keccak_hasher;
/// extern crate memorydb;
///
/// use hashdb::*;
/// use keccak_hasher::KeccakHasher;
/// use memorydb::*;
/// fn main() {
/// let mut m = MemoryDB::<KeccakHasher>::new();
/// let d = "Hello world!".as_bytes();
///
/// let k = m.insert(d);
/// assert!(m.contains(&k));
/// assert_eq!(m.get(&k).unwrap(), d);
///
/// m.insert(d);
/// assert!(m.contains(&k));
///
/// m.remove(&k);
/// assert!(m.contains(&k));
///
/// m.remove(&k);
/// assert!(!m.contains(&k));
///
/// m.remove(&k);
/// assert!(!m.contains(&k));
///
/// m.insert(d);
/// assert!(!m.contains(&k));
/// m.insert(d);
/// assert!(m.contains(&k));
/// assert_eq!(m.get(&k).unwrap(), d);
///
/// m.remove(&k);
/// assert!(!m.contains(&k));
/// }
/// ```
#[derive(Default, Clone, PartialEq)]
pub struct MemoryDB<H: KeyHasher> {
data: FastMap<H, (DBValue, i32)>,
hashed_null_node: H::Out,
}
impl<H: KeyHasher> MemoryDB<H> {
/// Create a new instance of the memory DB.
pub fn new() -> MemoryDB<H> {
MemoryDB {
data: FastMap::<H,_>::default(),
hashed_null_node: H::hash(&NULL_RLP)
}
}
/// Clear all data from the database.
///
/// # Examples
/// ```rust
/// extern crate hashdb;
/// extern crate keccak_hasher;
/// extern crate memorydb;
///
/// use hashdb::*;
/// use keccak_hasher::KeccakHasher;
/// use memorydb::*;
///
/// fn main() {
/// let mut m = MemoryDB::<KeccakHasher>::new();
/// let hello_bytes = "Hello world!".as_bytes();
/// let hash = m.insert(hello_bytes);
/// assert!(m.contains(&hash));
/// m.clear();
/// assert!(!m.contains(&hash));
/// }
/// ```
pub fn clear(&mut self) {
self.data.clear();
}
/// Purge all zero-referenced data from the database.
pub fn purge(&mut self) {
self.data.retain(|_, &mut (_, rc)| rc != 0);
}
/// Return the internal map of hashes to data, clearing the current state.
pub fn drain(&mut self) -> FastMap<H, (DBValue, i32)> {
mem::replace(&mut self.data, FastMap::<H,_>::default())
}
/// Grab the raw information associated with a key. Returns None if the key
/// doesn't exist.
///
/// Even when Some is returned, the data is only guaranteed to be useful
/// when the refs > 0.
pub fn raw(&self, key: &<H as KeyHasher>::Out) -> Option<(DBValue, i32)> {
if key == &self.hashed_null_node {
return Some((DBValue::from_slice(&NULL_RLP), 1));
}
self.data.get(key).cloned()
}
/// Returns the size of allocated heap memory
pub fn mem_used(&self) -> usize {
self.data.heap_size_of_children()
}
/// Remove an element and delete it from storage if reference count reaches zero.
/// If the value was purged, return the old value.
pub fn remove_and_purge(&mut self, key: &<H as KeyHasher>::Out) -> Option<DBValue> {
if key == &self.hashed_null_node {
return None;
}
match self.data.entry(key.clone()) {
Entry::Occupied(mut entry) =>
if entry.get().1 == 1 {
Some(entry.remove().0)
} else {
entry.get_mut().1 -= 1;
None
},
Entry::Vacant(entry) => {
entry.insert((DBValue::new(), -1));
None
}
}
}
/// Consolidate all the entries of `other` into `self`.
pub fn consolidate(&mut self, mut other: Self) {
for (key, (value, rc)) in other.drain() {
match self.data.entry(key) {
Entry::Occupied(mut entry) => {
if entry.get().1 < 0 {
entry.get_mut().0 = value;
}
entry.get_mut().1 += rc;
}
Entry::Vacant(entry) => {
entry.insert((value, rc));
}
}
}
}
}
impl<H: KeyHasher> HashDB<H> for MemoryDB<H> {
fn keys(&self) -> HashMap<H::Out, i32> {
self.data.iter()
.filter_map(|(k, v)| if v.1 != 0 {
Some((*k, v.1))
} else {
None
})
.collect()
}
fn get(&self, key: &H::Out) -> Option<DBValue> {
if key == &self.hashed_null_node {
return Some(DBValue::from_slice(&NULL_RLP));
}
match self.data.get(key) {
Some(&(ref d, rc)) if rc > 0 => Some(d.clone()),
_ => None
}
}
fn contains(&self, key: &H::Out) -> bool {
if key == &self.hashed_null_node {
return true;
}
match self.data.get(key) {
Some(&(_, x)) if x > 0 => true,
_ => false
}
}
fn insert(&mut self, value: &[u8]) -> H::Out {
if value == &NULL_RLP {
return self.hashed_null_node.clone();
}
let key = H::hash(value);
match self.data.entry(key) {
Entry::Occupied(mut entry) => {
let &mut (ref mut old_value, ref mut rc) = entry.get_mut();
if *rc <= 0 {
*old_value = DBValue::from_slice(value);
}
*rc += 1;
},
Entry::Vacant(entry) => {
entry.insert((DBValue::from_slice(value), 1));
},
}
key
}
fn emplace(&mut self, key:H::Out, value: DBValue) {
if &*value == &NULL_RLP {
return;
}
match self.data.entry(key) {
Entry::Occupied(mut entry) => {
let &mut (ref mut old_value, ref mut rc) = entry.get_mut();
if *rc <= 0 {
*old_value = value;
}
*rc += 1;
},
Entry::Vacant(entry) => {
entry.insert((value, 1));
},
}
}
fn remove(&mut self, key: &H::Out) {
if key == &self.hashed_null_node {
return;
}
match self.data.entry(*key) {
Entry::Occupied(mut entry) => {
let &mut (_, ref mut rc) = entry.get_mut();
*rc -= 1;
},
Entry::Vacant(entry) => {
entry.insert((DBValue::new(), -1));
},
}
}
}
impl<H: KeyHasher> AsHashDB<H> for MemoryDB<H> {
fn as_hashdb(&self) -> &HashDB<H> { self }
fn as_hashdb_mut(&mut self) -> &mut HashDB<H> { self }
}
#[cfg(test)]
mod tests {
use super::*;
use tiny_keccak::Keccak;
use ethereum_types::H256;
use keccak_hasher::KeccakHasher;
#[test]
fn memorydb_remove_and_purge() {
let hello_bytes = b"Hello world!";
let mut hello_key = [0;32];
Keccak::keccak256(hello_bytes, &mut hello_key);
let hello_key = H256(hello_key);
let mut m = MemoryDB::<KeccakHasher>::new();
m.remove(&hello_key);
assert_eq!(m.raw(&hello_key).unwrap().1, -1);
m.purge();
assert_eq!(m.raw(&hello_key).unwrap().1, -1);
m.insert(hello_bytes);
assert_eq!(m.raw(&hello_key).unwrap().1, 0);
m.purge();
assert_eq!(m.raw(&hello_key), None);
let mut m = MemoryDB::<KeccakHasher>::new();
assert!(m.remove_and_purge(&hello_key).is_none());
assert_eq!(m.raw(&hello_key).unwrap().1, -1);
m.insert(hello_bytes);
m.insert(hello_bytes);
assert_eq!(m.raw(&hello_key).unwrap().1, 1);
assert_eq!(&*m.remove_and_purge(&hello_key).unwrap(), hello_bytes);
assert_eq!(m.raw(&hello_key), None);
assert!(m.remove_and_purge(&hello_key).is_none());
}
#[test]
fn consolidate() {
let mut main = MemoryDB::<KeccakHasher>::new();
let mut other = MemoryDB::<KeccakHasher>::new();
let remove_key = other.insert(b"doggo");
main.remove(&remove_key);
let insert_key = other.insert(b"arf");
main.emplace(insert_key, DBValue::from_slice(b"arf"));
let negative_remove_key = other.insert(b"negative");
other.remove(&negative_remove_key); // ref cnt: 0
other.remove(&negative_remove_key); // ref cnt: -1
main.remove(&negative_remove_key); // ref cnt: -1
main.consolidate(other);
let overlay = main.drain();
assert_eq!(overlay.get(&remove_key).unwrap(), &(DBValue::from_slice(b"doggo"), 0));
assert_eq!(overlay.get(&insert_key).unwrap(), &(DBValue::from_slice(b"arf"), 2));
assert_eq!(overlay.get(&negative_remove_key).unwrap(), &(DBValue::from_slice(b"negative"), -2));
}
}
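A small sketch of the reference-counting behaviour documented above (not part of the deleted file): `insert` increments an entry's count, `remove` decrements it, and `purge` drops entries whose count has returned to zero.
extern crate hashdb;
extern crate keccak_hasher;
extern crate memorydb;
use hashdb::HashDB;
use keccak_hasher::KeccakHasher;
use memorydb::MemoryDB;
fn main() {
    let mut m = MemoryDB::<KeccakHasher>::new();
    let k = m.insert(b"dog");   // rc = 1
    m.insert(b"dog");           // rc = 2
    m.remove(&k);               // rc = 1, still readable
    assert!(m.contains(&k));
    m.remove(&k);               // rc = 0, no longer visible to get/contains
    assert!(!m.contains(&k));
    m.purge();                  // zero-referenced entries are dropped for good
    assert!(m.raw(&k).is_none());
}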


@ -6,8 +6,8 @@ authors = ["Parity Technologies <admin@parity.io>"]
 [dependencies]
 log = "0.3"
 macros = { path = "../macros" }
-kvdb = { path = "../kvdb" }
-kvdb-rocksdb = { path = "../kvdb-rocksdb" }
+kvdb = { git = "https://github.com/paritytech/parity-common" }
+kvdb-rocksdb = { git = "https://github.com/paritytech/parity-common" }
 [dev-dependencies]
 tempdir = "0.3"


@ -20,16 +20,16 @@ parking_lot = "0.6"
 ansi_term = "0.10"
 rustc-hex = "1.0"
 ethcore-io = { path = "../io", features = ["mio"] }
-ethcore-bytes = { path = "../bytes" }
-ethcore-crypto = { path = "../../ethcore/crypto" }
+parity-bytes = { git = "https://github.com/paritytech/parity-common" }
+parity-crypto = { git = "https://github.com/paritytech/parity-common" }
 ethcore-logger = { path ="../../logger" }
 ethcore-network = { path = "../network" }
 ethereum-types = "0.3"
 ethkey = { path = "../../ethkey" }
-rlp = { path = "../rlp" }
-path = { path = "../path" }
+rlp = { git = "https://github.com/paritytech/parity-common" }
+path = { git = "https://github.com/paritytech/parity-common" }
 ipnetwork = "0.12.6"
-keccak-hash = { path = "../hash" }
+keccak-hash = { git = "https://github.com/paritytech/parity-common" }
 snappy = { git = "https://github.com/paritytech/rust-snappy" }
 serde = "1.0"
 serde_json = "1.0"


@ -23,7 +23,7 @@ use mio::{Token, Ready, PollOpt};
 use mio::deprecated::{Handler, EventLoop, TryRead, TryWrite};
 use mio::tcp::*;
 use ethereum_types::{H128, H256, H512};
-use ethcore_bytes::*;
+use parity_bytes::*;
 use rlp::{Rlp, RlpStream};
 use std::io::{self, Cursor, Read, Write};
 use io::{IoContext, StreamToken};
@ -502,7 +502,7 @@ mod tests {
 use std::sync::atomic::AtomicBool;
 use mio::{Ready};
-use ethcore_bytes::Bytes;
+use parity_bytes::Bytes;
 use io::*;
 use super::*;


@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
-use ethcore_bytes::Bytes;
+use parity_bytes::Bytes;
 use std::net::SocketAddr;
 use std::collections::{HashSet, HashMap, VecDeque};
 use std::default::Default;


@ -19,7 +19,7 @@ use rand::random;
 use hash::write_keccak;
 use mio::tcp::*;
 use ethereum_types::{H256, H520};
-use ethcore_bytes::Bytes;
+use parity_bytes::Bytes;
 use rlp::{Rlp, RlpStream};
 use connection::{Connection};
 use node_table::NodeId;


@ -61,8 +61,8 @@
 #![allow(deprecated)]
 extern crate ethcore_io as io;
-extern crate ethcore_bytes;
-extern crate ethcore_crypto as crypto;
+extern crate parity_bytes;
+extern crate parity_crypto as crypto;
 extern crate ethereum_types;
 extern crate parking_lot;
 extern crate mio;


@ -15,7 +15,7 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 extern crate parking_lot;
-extern crate ethcore_bytes;
+extern crate parity_bytes;
 extern crate ethcore_io as io;
 extern crate ethcore_logger;
 extern crate ethcore_network;
@ -27,7 +27,7 @@ use std::sync::Arc;
 use std::thread;
 use std::time::*;
 use parking_lot::Mutex;
-use ethcore_bytes::Bytes;
+use parity_bytes::Bytes;
 use ethcore_network::*;
 use ethcore_network_devp2p::NetworkService;
 use ethkey::{Random, Generator};


@ -8,12 +8,12 @@ authors = ["Parity Technologies <admin@parity.io>"]
 [dependencies]
 error-chain = { version = "0.12", default-features = false }
-ethcore-crypto = { path = "../../ethcore/crypto" }
+parity-crypto = { git = "https://github.com/paritytech/parity-common" }
 ethcore-io = { path = "../io" }
 ethereum-types = "0.3"
 ethkey = { path = "../../ethkey" }
 ipnetwork = "0.12.6"
-rlp = { path = "../rlp" }
+rlp = { git = "https://github.com/paritytech/parity-common" }
 libc = "0.2"
 snappy = { git = "https://github.com/paritytech/rust-snappy" }


@ -16,7 +16,7 @@
 #![recursion_limit="128"]
-extern crate ethcore_crypto as crypto;
+extern crate parity_crypto as crypto;
 extern crate ethcore_io as io;
 extern crate ethereum_types;
 extern crate ethkey;


@ -1,8 +0,0 @@
[package]
name = "path"
version = "0.1.1"
authors = ["Parity Technologies <admin@parity.io>"]
license = "GPL3"
[dependencies]
dirs = "1.0.2"


@ -1,102 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Path utilities
extern crate dirs;
use std::path::Path;
use std::path::PathBuf;
#[cfg(target_os = "macos")]
/// Get the config path for application `name`.
/// `name` should be capitalized, e.g. `"Ethereum"`, `"Parity"`.
pub fn config_path(name: &str) -> PathBuf {
let mut home = dirs::home_dir().expect("Failed to get home dir");
home.push("Library");
home.push(name);
home
}
#[cfg(windows)]
/// Get the config path for application `name`.
/// `name` should be capitalized, e.g. `"Ethereum"`, `"Parity"`.
pub fn config_path(name: &str) -> PathBuf {
let mut home = dirs::home_dir().expect("Failed to get home dir");
home.push("AppData");
home.push("Roaming");
home.push(name);
home
}
#[cfg(not(any(target_os = "macos", windows)))]
/// Get the config path for application `name`.
/// `name` should be capitalized, e.g. `"Ethereum"`, `"Parity"`.
pub fn config_path(name: &str) -> PathBuf {
let mut home = dirs::home_dir().expect("Failed to get home dir");
home.push(format!(".{}", name.to_lowercase()));
home
}
/// Get the specific folder inside a config path.
pub fn config_path_with(name: &str, then: &str) -> PathBuf {
let mut path = config_path(name);
path.push(then);
path
}
/// Default ethereum paths
pub mod ethereum {
use std::path::PathBuf;
/// Default path for ethereum installation on Mac Os
pub fn default() -> PathBuf { super::config_path("Ethereum") }
/// Default path for ethereum installation (testnet)
pub fn test() -> PathBuf {
let mut path = default();
path.push("testnet");
path
}
/// Get the specific folder inside default ethereum installation
pub fn with_default(s: &str) -> PathBuf {
let mut path = default();
path.push(s);
path
}
/// Get the specific folder inside default ethereum installation configured for testnet
pub fn with_testnet(s: &str) -> PathBuf {
let mut path = default();
path.push("testnet");
path.push(s);
path
}
}
/// Restricts the permissions of given path only to the owner.
#[cfg(unix)]
pub fn restrict_permissions_owner(file_path: &Path, write: bool, executable: bool) -> Result<(), String> {
let perms = ::std::os::unix::fs::PermissionsExt::from_mode(0o400 + write as u32 * 0o200 + executable as u32 * 0o100);
::std::fs::set_permissions(file_path, perms).map_err(|e| format!("{:?}", e))
}
/// Restricts the permissions of given path only to the owner.
#[cfg(not(unix))]
pub fn restrict_permissions_owner(_file_path: &Path, _write: bool, _executable: bool) -> Result<(), String> {
//TODO: implement me
Ok(())
}
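A short usage sketch of these helpers (not part of the deleted file; the application and folder names are only examples). On Linux the result is `~/.parity/chains`, on macOS `~/Library/Parity/chains`, on Windows `%USERPROFILE%\AppData\Roaming\Parity\chains`.
extern crate path;
fn main() {
    // Platform-specific application directory, plus a subdirectory inside it.
    let base = path::config_path("Parity");
    let chains = path::config_path_with("Parity", "chains");
    println!("config dir: {}", base.display());
    println!("chains dir: {}", chains.display());
    // Conventional Ethereum keystore location, derived the same way.
    let keys = path::ethereum::with_default("keys");
    println!("keys dir:   {}", keys.display());
}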


@ -6,10 +6,10 @@ description = "Merkle-Patricia Trie (Ethereum Style)"
 license = "GPL-3.0"
 [dependencies]
-patricia-trie = { path = "../patricia_trie" }
+patricia-trie = { git = "https://github.com/paritytech/parity-common" }
 keccak-hasher = { path = "../keccak-hasher" }
-hashdb = { path = "../hashdb" }
-rlp = { path = "../rlp" }
-ethcore-bytes = { path = "../bytes" }
+hashdb = { git = "https://github.com/paritytech/parity-common" }
+rlp = { git = "https://github.com/paritytech/parity-common" }
+parity-bytes = { git = "https://github.com/paritytech/parity-common" }
 ethereum-types = "0.3"
 elastic-array = "0.10"


@ -18,7 +18,7 @@
 pub extern crate patricia_trie as trie; // `pub` because we need to import this crate for the tests in `patricia_trie` and there were issues: https://gist.github.com/dvdplm/869251ee557a1b4bd53adc7c971979aa
 extern crate elastic_array;
-extern crate ethcore_bytes;
+extern crate parity_bytes;
 extern crate ethereum_types;
 extern crate hashdb;
 extern crate keccak_hasher;


@ -1,25 +0,0 @@
[package]
name = "patricia-trie"
version = "0.2.0"
authors = ["Parity Technologies <admin@parity.io>"]
description = "Merkle-Patricia Trie generic over key hasher and node encoding"
license = "GPL-3.0"
[dependencies]
elastic-array = "0.10"
ethcore-bytes = { version = "0.1.0", path = "../bytes" }
hashdb = { version = "0.2", path = "../hashdb" }
heapsize = "0.4"
log = "0.3"
rand = "0.4"
[dev-dependencies]
env_logger = "0.5"
ethereum-types = "0.3"
keccak-hash = { version = "0.1.0", path = "../hash" }
keccak-hasher = { path = "../keccak-hasher" }
memorydb = { version = "0.2", path = "../memorydb" }
patricia-trie-ethereum = { path = "../patricia-trie-ethereum" }
rlp = { version = "0.2.1", path = "../rlp" }
trie-standardmap = { path = "../trie-standardmap" }
triehash = { version = "0.1.0", path = "../triehash" }


@ -1,214 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![feature(test)]
extern crate test;
extern crate ethcore_bytes;
extern crate ethereum_types;
extern crate memorydb;
extern crate patricia_trie as trie;
extern crate patricia_trie_ethereum as ethtrie;
extern crate keccak_hasher;
extern crate keccak_hash;
extern crate trie_standardmap;
extern crate hashdb;
use ethcore_bytes::Bytes;
use ethereum_types::H256;
use keccak_hash::keccak;
use memorydb::MemoryDB;
use test::{Bencher, black_box};
use trie::{TrieMut, Trie};
use trie_standardmap::{Alphabet, ValueMode, StandardMap};
use keccak_hasher::KeccakHasher;
use ethtrie::{TrieDB, TrieDBMut};
fn random_word(alphabet: &[u8], min_count: usize, diff_count: usize, seed: &mut H256) -> Vec<u8> {
assert!(min_count + diff_count <= 32);
*seed = keccak(&seed);
let r = min_count + (seed[31] as usize % (diff_count + 1));
let mut ret: Vec<u8> = Vec::with_capacity(r);
for i in 0..r {
ret.push(alphabet[seed[i] as usize % alphabet.len()]);
}
ret
}
fn random_bytes(min_count: usize, diff_count: usize, seed: &mut H256) -> Vec<u8> {
assert!(min_count + diff_count <= 32);
*seed = keccak(&seed);
let r = min_count + (seed[31] as usize % (diff_count + 1));
seed[0..r].to_vec()
}
fn random_value(seed: &mut H256) -> Bytes {
*seed = keccak(&seed);
match seed[0] % 2 {
1 => vec![seed[31];1],
_ => seed.to_vec(),
}
}
#[bench]
fn trie_insertions_32_mir_1k(b: &mut Bencher) {
let st = StandardMap {
alphabet: Alphabet::All,
min_key: 32,
journal_key: 0,
value_mode: ValueMode::Mirror,
count: 1000,
};
let d = st.make();
b.iter(&mut ||{
let mut memdb = MemoryDB::<KeccakHasher>::new();
let mut root = H256::new();
let mut t = TrieDBMut::new(&mut memdb, &mut root);
for i in d.iter() {
t.insert(&i.0, &i.1).unwrap();
}
});
}
#[bench]
fn trie_iter(b: &mut Bencher) {
let st = StandardMap {
alphabet: Alphabet::All,
min_key: 32,
journal_key: 0,
value_mode: ValueMode::Mirror,
count: 1000,
};
let d = st.make();
let mut memdb = MemoryDB::<KeccakHasher>::new();
let mut root = H256::new();
{
let mut t = TrieDBMut::new(&mut memdb, &mut root);
for i in d.iter() {
t.insert(&i.0, &i.1).unwrap();
}
}
b.iter(&mut ||{
let t = TrieDB::new(&memdb, &root).unwrap();
for n in t.iter().unwrap() {
black_box(n).unwrap();
}
});
}
#[bench]
fn trie_insertions_32_ran_1k(b: &mut Bencher) {
let st = StandardMap {
alphabet: Alphabet::All,
min_key: 32,
journal_key: 0,
value_mode: ValueMode::Random,
count: 1000,
};
let d = st.make();
let mut r = H256::new();
b.iter(&mut ||{
let mut memdb = MemoryDB::<KeccakHasher>::new();
let mut root = H256::new();
let mut t = TrieDBMut::new(&mut memdb, &mut root);
for i in d.iter() {
t.insert(&i.0, &i.1).unwrap();
}
r = t.root().clone();
});
}
#[bench]
fn trie_insertions_six_high(b: &mut Bencher) {
let mut d: Vec<(Bytes, Bytes)> = Vec::new();
let mut seed = H256::new();
for _ in 0..1000 {
let k = random_bytes(6, 0, &mut seed);
let v = random_value(&mut seed);
d.push((k, v))
}
b.iter(||{
let mut memdb = MemoryDB::<KeccakHasher>::new();
let mut root = H256::new();
let mut t = TrieDBMut::new(&mut memdb, &mut root);
for i in d.iter() {
t.insert(&i.0, &i.1).unwrap();
}
})
}
#[bench]
fn trie_insertions_six_mid(b: &mut Bencher) {
let alphabet = b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_";
let mut d: Vec<(Bytes, Bytes)> = Vec::new();
let mut seed = H256::new();
for _ in 0..1000 {
let k = random_word(alphabet, 6, 0, &mut seed);
let v = random_value(&mut seed);
d.push((k, v))
}
b.iter(||{
let mut memdb = MemoryDB::<KeccakHasher>::new();
let mut root = H256::new();
let mut t = TrieDBMut::new(&mut memdb, &mut root);
for i in d.iter() {
t.insert(&i.0, &i.1).unwrap();
}
})
}
#[bench]
fn trie_insertions_random_mid(b: &mut Bencher) {
let alphabet = b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_";
let mut d: Vec<(Bytes, Bytes)> = Vec::new();
let mut seed = H256::new();
for _ in 0..1000 {
let k = random_word(alphabet, 1, 5, &mut seed);
let v = random_value(&mut seed);
d.push((k, v))
}
b.iter(||{
let mut memdb = MemoryDB::<KeccakHasher>::new();
let mut root = H256::new();
let mut t = TrieDBMut::new(&mut memdb, &mut root);
for i in d.iter() {
t.insert(&i.0, &i.1).unwrap();
}
})
}
#[bench]
fn trie_insertions_six_low(b: &mut Bencher) {
let alphabet = b"abcdef";
let mut d: Vec<(Bytes, Bytes)> = Vec::new();
let mut seed = H256::new();
for _ in 0..1000 {
let k = random_word(alphabet, 6, 0, &mut seed);
let v = random_value(&mut seed);
d.push((k, v))
}
b.iter(||{
let mut memdb = MemoryDB::<KeccakHasher>::new();
let mut root = H256::new();
let mut t = TrieDBMut::new(&mut memdb, &mut root);
for i in d.iter() {
t.insert(&i.0, &i.1).unwrap();
}
})
}


@ -1,147 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use hashdb::{HashDB, Hasher};
use super::{Result, TrieDB, Trie, TrieDBIterator, TrieItem, TrieIterator, Query};
use node_codec::NodeCodec;
/// A `Trie` implementation which hashes keys and uses a generic `HashDB` backing database.
/// Additionally it stores inserted hash-key mappings for later retrieval.
///
/// Use it as a `Trie` or `TrieMut` trait object.
pub struct FatDB<'db, H, C>
where
H: Hasher + 'db,
C: NodeCodec<H>
{
raw: TrieDB<'db, H, C>,
}
impl<'db, H, C> FatDB<'db, H, C>
where
H: Hasher,
C: NodeCodec<H>
{
/// Create a new fat trie view over the backing database `db` and `root`.
/// Returns an error if `root` is not present in `db`.
pub fn new(db: &'db HashDB<H>, root: &'db H::Out) -> Result<Self, H::Out, C::Error> {
Ok(FatDB { raw: TrieDB::new(db, root)? })
}
/// Get the backing database.
pub fn db(&self) -> &HashDB<H> { self.raw.db() }
}
impl<'db, H, C> Trie<H, C> for FatDB<'db, H, C>
where
H: Hasher,
C: NodeCodec<H>
{
fn root(&self) -> &H::Out { self.raw.root() }
fn contains(&self, key: &[u8]) -> Result<bool, H::Out, C::Error> {
self.raw.contains(H::hash(key).as_ref())
}
fn get_with<'a, 'key, Q: Query<H>>(&'a self, key: &'key [u8], query: Q) -> Result<Option<Q::Item>, H::Out, C::Error>
where 'a: 'key
{
self.raw.get_with(H::hash(key).as_ref(), query)
}
fn iter<'a>(&'a self) -> Result<Box<TrieIterator<H, C, Item = TrieItem<H::Out, C::Error>> + 'a>, <H as Hasher>::Out, C::Error> {
FatDBIterator::<H, C>::new(&self.raw).map(|iter| Box::new(iter) as Box<_>)
}
}
/// Iterator over inserted key-value pairs.
pub struct FatDBIterator<'db, H, C>
where
H: Hasher + 'db,
C: NodeCodec<H> + 'db
{
trie_iterator: TrieDBIterator<'db, H, C>,
trie: &'db TrieDB<'db, H, C>,
}
impl<'db, H, C> FatDBIterator<'db, H, C>
where
H: Hasher,
C: NodeCodec<H>
{
/// Creates new iterator.
pub fn new(trie: &'db TrieDB<H, C>) -> Result<Self, H::Out, C::Error> {
Ok(FatDBIterator {
trie_iterator: TrieDBIterator::new(trie)?,
trie: trie,
})
}
}
impl<'db, H, C> TrieIterator<H, C> for FatDBIterator<'db, H, C>
where
H: Hasher,
C: NodeCodec<H>
{
fn seek(&mut self, key: &[u8]) -> Result<(), H::Out, C::Error> {
let hashed_key = H::hash(key);
self.trie_iterator.seek(hashed_key.as_ref())
}
}
impl<'db, H, C> Iterator for FatDBIterator<'db, H, C>
where
H: Hasher,
C: NodeCodec<H>
{
type Item = TrieItem<'db, H::Out, C::Error>;
fn next(&mut self) -> Option<Self::Item> {
self.trie_iterator.next()
.map(|res| {
res.map(|(hash, value)| {
let aux_hash = H::hash(&hash);
(self.trie.db().get(&aux_hash).expect("Missing fatdb hash").into_vec(), value)
})
})
}
}
#[cfg(test)]
mod test {
use memorydb::MemoryDB;
use hashdb::DBValue;
use keccak_hasher::KeccakHasher;
use ethtrie::trie::{Trie, TrieMut};
use ethtrie::{FatDB, FatDBMut};
use ethereum_types::H256;
#[test]
fn fatdb_to_trie() {
let mut memdb = MemoryDB::<KeccakHasher>::new();
let mut root = H256::new();
{
let mut t = FatDBMut::new(&mut memdb, &mut root);
t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]).unwrap();
}
let t = FatDB::new(&memdb, &root).unwrap();
assert_eq!(t.get(&[0x01u8, 0x23]).unwrap().unwrap(), DBValue::from_slice(&[0x01u8, 0x23]));
assert_eq!(
t.iter().unwrap().map(Result::unwrap).collect::<Vec<_>>(),
vec![(vec![0x01u8, 0x23], DBValue::from_slice(&[0x01u8, 0x23] as &[u8]))]);
}
}


@ -1,129 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use hashdb::{HashDB, DBValue, Hasher};
use super::{Result, TrieDBMut, TrieMut};
use node_codec::NodeCodec;
/// A mutable `Trie` implementation which hashes keys and uses a generic `HashDB` backing database.
/// Additionally it stores inserted hash-key mappings for later retrieval.
///
/// Use it as a `Trie` or `TrieMut` trait object.
pub struct FatDBMut<'db, H, C>
where
H: Hasher + 'db,
C: NodeCodec<H>
{
raw: TrieDBMut<'db, H, C>,
}
impl<'db, H, C> FatDBMut<'db, H, C>
where
H: Hasher,
C: NodeCodec<H>
{
/// Create a new trie with the backing database `db` and an empty `root`.
/// The root is initialised to the empty-trie hash, so the new trie starts out
/// in a consistent state.
pub fn new(db: &'db mut HashDB<H>, root: &'db mut H::Out) -> Self {
FatDBMut { raw: TrieDBMut::new(db, root) }
}
/// Create a new trie with the backing database `db` and `root`.
///
/// Returns an error if root does not exist.
pub fn from_existing(db: &'db mut HashDB<H>, root: &'db mut H::Out) -> Result<Self, H::Out, C::Error> {
Ok(FatDBMut { raw: TrieDBMut::from_existing(db, root)? })
}
/// Get the backing database.
pub fn db(&self) -> &HashDB<H> {
self.raw.db()
}
/// Get the backing database.
pub fn db_mut(&mut self) -> &mut HashDB<H> {
self.raw.db_mut()
}
}
impl<'db, H, C> TrieMut<H, C> for FatDBMut<'db, H, C>
where
H: Hasher,
C: NodeCodec<H>
{
fn root(&mut self) -> &H::Out { self.raw.root() }
fn is_empty(&self) -> bool { self.raw.is_empty() }
fn contains(&self, key: &[u8]) -> Result<bool, H::Out, C::Error> {
self.raw.contains(H::hash(key).as_ref())
}
fn get<'a, 'key>(&'a self, key: &'key [u8]) -> Result<Option<DBValue>, H::Out, C::Error>
where 'a: 'key
{
self.raw.get(H::hash(key).as_ref())
}
fn insert(&mut self, key: &[u8], value: &[u8]) -> Result<Option<DBValue>, H::Out, C::Error> {
let hash = H::hash(key);
let out = self.raw.insert(hash.as_ref(), value)?;
let db = self.raw.db_mut();
// only record the aux hash -> key mapping when the key is newly inserted.
if out.is_none() {
let aux_hash = H::hash(hash.as_ref());
db.emplace(aux_hash, DBValue::from_slice(key));
}
Ok(out)
}
fn remove(&mut self, key: &[u8]) -> Result<Option<DBValue>, H::Out, C::Error> {
let hash = H::hash(key);
let out = self.raw.remove(hash.as_ref())?;
// only drop the stored key mapping if the key actually existed.
if out.is_some() {
self.raw.db_mut().remove(&hash);
}
Ok(out)
}
}
#[cfg(test)]
mod test {
use hashdb::DBValue;
use memorydb::MemoryDB;
use ethtrie::trie::{Trie, TrieMut};
use ethtrie::{TrieDB, FatDBMut};
use keccak_hasher::KeccakHasher;
use keccak;
use ethereum_types::H256;
#[test]
fn fatdbmut_to_trie() {
let mut memdb = MemoryDB::<KeccakHasher>::new();
let mut root = H256::new();
{
let mut t = FatDBMut::new(&mut memdb, &mut root);
t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]).unwrap();
}
let t = TrieDB::new(&memdb, &root).unwrap();
assert_eq!(t.get(&keccak::keccak(&[0x01u8, 0x23])).unwrap().unwrap(), DBValue::from_slice(&[0x01u8, 0x23]));
}
}


@ -1,319 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Trie interface and implementation.
extern crate elastic_array;
extern crate ethcore_bytes as bytes;
extern crate hashdb;
extern crate heapsize;
extern crate rand;
#[macro_use]
extern crate log;
#[cfg(test)]
extern crate env_logger;
#[cfg(test)]
extern crate ethereum_types;
#[cfg(test)]
extern crate trie_standardmap as standardmap;
#[cfg(test)]
extern crate patricia_trie_ethereum as ethtrie;
#[cfg(test)]
extern crate memorydb;
#[cfg(test)]
extern crate rlp;
#[cfg(test)]
extern crate keccak_hash as keccak;
#[cfg(test)]
extern crate keccak_hasher;
#[cfg(test)]
extern crate triehash;
use std::{fmt, error};
use hashdb::{HashDB, DBValue, Hasher};
use std::marker::PhantomData;
pub mod node;
pub mod triedb;
pub mod triedbmut;
pub mod sectriedb;
pub mod sectriedbmut;
pub mod recorder;
mod fatdb;
mod fatdbmut;
mod lookup;
mod nibblevec;
mod nibbleslice;
mod node_codec;
pub use self::triedb::{TrieDB, TrieDBIterator};
pub use self::triedbmut::{TrieDBMut, ChildReference};
pub use self::sectriedbmut::SecTrieDBMut;
pub use self::sectriedb::SecTrieDB;
pub use self::fatdb::{FatDB, FatDBIterator};
pub use self::fatdbmut::FatDBMut;
pub use self::recorder::Recorder;
pub use self::lookup::Lookup;
pub use self::nibbleslice::NibbleSlice;
pub use node_codec::NodeCodec;
/// Trie Errors.
///
/// These borrow the data within them to avoid excessive copying on every
/// trie operation.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum TrieError<T, E> {
/// Attempted to create a trie with a state root not in the DB.
InvalidStateRoot(T),
/// Trie item not found in the database,
IncompleteDatabase(T),
/// Corrupt Trie item
DecoderError(T, E),
}
impl<T, E> fmt::Display for TrieError<T, E> where T: std::fmt::Debug, E: std::fmt::Debug {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
TrieError::InvalidStateRoot(ref root) => write!(f, "Invalid state root: {:?}", root),
TrieError::IncompleteDatabase(ref missing) => write!(f, "Database missing expected key: {:?}", missing),
TrieError::DecoderError(ref hash, ref decoder_err) => write!(f, "Decoding failed for hash {:?}; err: {:?}", hash, decoder_err),
}
}
}
impl<T, E> error::Error for TrieError<T, E> where T: std::fmt::Debug, E: std::error::Error {
fn description(&self) -> &str {
match *self {
TrieError::InvalidStateRoot(_) => "Invalid state root",
TrieError::IncompleteDatabase(_) => "Incomplete database",
TrieError::DecoderError(_, ref err) => err.description(),
}
}
}
/// Trie result type. Boxed to avoid copying around extra space for the `Hasher`s `Out` on successful queries.
pub type Result<T, H, E> = ::std::result::Result<T, Box<TrieError<H, E>>>;
/// Trie-Item type used for iterators over trie data.
pub type TrieItem<'a, U, E> = Result<(Vec<u8>, DBValue), U, E>;
/// Description of what kind of query will be made to the trie.
///
/// This is implemented for any &mut recorder (where the query will return
/// a DBValue), any function taking raw bytes (where no recording will be made),
/// or any tuple of (&mut Recorder, FnOnce(&[u8]))
pub trait Query<H: Hasher> {
/// Output item.
type Item;
/// Decode a byte-slice into the desired item.
fn decode(self, data: &[u8]) -> Self::Item;
/// Record that a node has been passed through.
fn record(&mut self, _hash: &H::Out, _data: &[u8], _depth: u32) {}
}
impl<'a, H: Hasher> Query<H> for &'a mut Recorder<H::Out> {
type Item = DBValue;
fn decode(self, value: &[u8]) -> DBValue { DBValue::from_slice(value) }
fn record(&mut self, hash: &H::Out, data: &[u8], depth: u32) {
(&mut **self).record(hash, data, depth);
}
}
impl<F, T, H: Hasher> Query<H> for F where F: for<'a> FnOnce(&'a [u8]) -> T {
type Item = T;
fn decode(self, value: &[u8]) -> T { (self)(value) }
}
impl<'a, F, T, H: Hasher> Query<H> for (&'a mut Recorder<H::Out>, F) where F: FnOnce(&[u8]) -> T {
type Item = T;
fn decode(self, value: &[u8]) -> T { (self.1)(value) }
fn record(&mut self, hash: &H::Out, data: &[u8], depth: u32) {
self.0.record(hash, data, depth)
}
}
/// A key-value datastore implemented as a database-backed modified Merkle tree.
pub trait Trie<H: Hasher, C: NodeCodec<H>> {
/// Return the root of the trie.
fn root(&self) -> &H::Out;
/// Is the trie empty?
fn is_empty(&self) -> bool { *self.root() == C::HASHED_NULL_NODE }
/// Does the trie contain a given key?
fn contains(&self, key: &[u8]) -> Result<bool, H::Out, C::Error> {
self.get(key).map(|x|x.is_some() )
}
/// What is the value of the given key in this trie?
fn get<'a, 'key>(&'a self, key: &'key [u8]) -> Result<Option<DBValue>, H::Out, C::Error> where 'a: 'key {
self.get_with(key, DBValue::from_slice)
}
/// Search for the key with the given query parameter. See the docs of the `Query`
/// trait for more details.
fn get_with<'a, 'key, Q: Query<H>>(&'a self, key: &'key [u8], query: Q) -> Result<Option<Q::Item>, H::Out, C::Error> where 'a: 'key;
/// Returns a depth-first iterator over the elements of trie.
fn iter<'a>(&'a self) -> Result<Box<TrieIterator<H, C, Item = TrieItem<H::Out, C::Error >> + 'a>, H::Out, C::Error>;
}
/// A key-value datastore implemented as a database-backed modified Merkle tree.
pub trait TrieMut<H: Hasher, C: NodeCodec<H>> {
/// Return the root of the trie.
fn root(&mut self) -> &H::Out;
/// Is the trie empty?
fn is_empty(&self) -> bool;
/// Does the trie contain a given key?
fn contains(&self, key: &[u8]) -> Result<bool, H::Out, C::Error> {
self.get(key).map(|x| x.is_some())
}
/// What is the value of the given key in this trie?
fn get<'a, 'key>(&'a self, key: &'key [u8]) -> Result<Option<DBValue>, H::Out, C::Error> where 'a: 'key;
/// Insert a `key`/`value` pair into the trie. An empty value is equivalent to removing
/// `key` from the trie. Returns the old value associated with this key, if it existed.
fn insert(&mut self, key: &[u8], value: &[u8]) -> Result<Option<DBValue>, H::Out, C::Error>;
/// Remove a `key` from the trie. Equivalent to making it equal to the empty
/// value. Returns the old value associated with this key, if it existed.
fn remove(&mut self, key: &[u8]) -> Result<Option<DBValue>, H::Out, C::Error>;
}
/// A trie iterator that also supports random access (`seek()`).
pub trait TrieIterator<H: Hasher, C: NodeCodec<H>>: Iterator {
/// Position the iterator on the first element with key > `key`
fn seek(&mut self, key: &[u8]) -> Result<(), H::Out, <C as NodeCodec<H>>::Error>;
}
/// Trie types
#[derive(Debug, PartialEq, Clone)]
pub enum TrieSpec {
/// Generic trie.
Generic,
/// Secure trie.
Secure,
/// Secure trie with fat database.
Fat,
}
impl Default for TrieSpec {
fn default() -> TrieSpec {
TrieSpec::Secure
}
}
/// Trie factory.
#[derive(Default, Clone)]
pub struct TrieFactory<H: Hasher, C: NodeCodec<H>> {
spec: TrieSpec,
mark_hash: PhantomData<H>,
mark_codec: PhantomData<C>,
}
/// All different kinds of tries.
/// This is used to prevent a heap allocation for every created trie.
pub enum TrieKinds<'db, H: Hasher + 'db, C: NodeCodec<H>> {
/// A generic trie db.
Generic(TrieDB<'db, H, C>),
/// A secure trie db.
Secure(SecTrieDB<'db, H, C>),
/// A fat trie db.
Fat(FatDB<'db, H, C>),
}
// wrapper macro for making the match easier to deal with.
macro_rules! wrapper {
($me: ident, $f_name: ident, $($param: ident),*) => {
match *$me {
TrieKinds::Generic(ref t) => t.$f_name($($param),*),
TrieKinds::Secure(ref t) => t.$f_name($($param),*),
TrieKinds::Fat(ref t) => t.$f_name($($param),*),
}
}
}
impl<'db, H: Hasher, C: NodeCodec<H>> Trie<H, C> for TrieKinds<'db, H, C> {
fn root(&self) -> &H::Out {
wrapper!(self, root,)
}
fn is_empty(&self) -> bool {
wrapper!(self, is_empty,)
}
fn contains(&self, key: &[u8]) -> Result<bool, H::Out, C::Error> {
wrapper!(self, contains, key)
}
fn get_with<'a, 'key, Q: Query<H>>(&'a self, key: &'key [u8], query: Q) -> Result<Option<Q::Item>, H::Out, C::Error>
where 'a: 'key
{
wrapper!(self, get_with, key, query)
}
fn iter<'a>(&'a self) -> Result<Box<TrieIterator<H, C, Item = TrieItem<H::Out, C::Error>> + 'a>, H::Out, C::Error> {
wrapper!(self, iter,)
}
}
impl<'db, H, C> TrieFactory<H, C>
where
H: Hasher,
C: NodeCodec<H> + 'db
{
/// Creates new factory.
pub fn new(spec: TrieSpec) -> Self {
TrieFactory { spec, mark_hash: PhantomData, mark_codec: PhantomData }
}
/// Create new immutable instance of Trie.
pub fn readonly(&self, db: &'db HashDB<H>, root: &'db H::Out) -> Result<TrieKinds<'db, H, C>, H::Out, <C as NodeCodec<H>>::Error> {
match self.spec {
TrieSpec::Generic => Ok(TrieKinds::Generic(TrieDB::new(db, root)?)),
TrieSpec::Secure => Ok(TrieKinds::Secure(SecTrieDB::new(db, root)?)),
TrieSpec::Fat => Ok(TrieKinds::Fat(FatDB::new(db, root)?)),
}
}
/// Create new mutable instance of Trie.
pub fn create(&self, db: &'db mut HashDB<H>, root: &'db mut H::Out) -> Box<TrieMut<H, C> + 'db> {
match self.spec {
TrieSpec::Generic => Box::new(TrieDBMut::<_, C>::new(db, root)),
TrieSpec::Secure => Box::new(SecTrieDBMut::<_, C>::new(db, root)),
TrieSpec::Fat => Box::new(FatDBMut::<_, C>::new(db, root)),
}
}
/// Create new mutable instance of trie and check for errors.
pub fn from_existing(&self, db: &'db mut HashDB<H>, root: &'db mut H::Out) -> Result<Box<TrieMut<H,C> + 'db>, H::Out, <C as NodeCodec<H>>::Error> {
match self.spec {
TrieSpec::Generic => Ok(Box::new(TrieDBMut::<_, C>::from_existing(db, root)?)),
TrieSpec::Secure => Ok(Box::new(SecTrieDBMut::<_, C>::from_existing(db, root)?)),
TrieSpec::Fat => Ok(Box::new(FatDBMut::<_, C>::from_existing(db, root)?)),
}
}
/// Returns true iff the trie DB is a fat DB (allows enumeration of keys).
pub fn is_fat(&self) -> bool { self.spec == TrieSpec::Fat }
}
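A hedged sketch of the factory API above (not part of the deleted file). It assumes the Keccak hasher and the RLP node codec from the `keccak-hasher` and `patricia-trie-ethereum` crates referenced elsewhere in this commit; the codec type name `RlpNodeCodec` is taken from that crate and is an assumption here.
extern crate ethereum_types;
extern crate keccak_hasher;
extern crate memorydb;
extern crate patricia_trie as trie;
extern crate patricia_trie_ethereum as ethtrie;
use ethereum_types::H256;
use keccak_hasher::KeccakHasher;
use memorydb::MemoryDB;
use trie::{Trie, TrieMut, TrieFactory, TrieSpec};
fn main() {
    // A factory producing secure (key-hashing) tries over the Keccak/RLP codec.
    let factory = TrieFactory::<KeccakHasher, ethtrie::RlpNodeCodec<KeccakHasher>>::new(TrieSpec::Secure);
    let mut memdb = MemoryDB::<KeccakHasher>::new();
    let mut root = H256::new();
    {
        // `create` returns a boxed `TrieMut`; the concrete kind follows the spec.
        let mut t = factory.create(&mut memdb, &mut root);
        t.insert(b"key", b"value").unwrap();
    }
    // `readonly` returns a `TrieKinds` wrapper that implements `Trie`.
    let t = factory.readonly(&memdb, &root).unwrap();
    assert_eq!(&*t.get(b"key").unwrap().unwrap(), b"value");
}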


@ -1,104 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Trie lookup via HashDB.
use hashdb::{HashDB, Hasher};
use nibbleslice::NibbleSlice;
use node::Node;
use node_codec::NodeCodec;
use super::{Result, TrieError, Query};
use std::marker::PhantomData;
/// Trie lookup helper object.
pub struct Lookup<'a, H: Hasher + 'a, C: NodeCodec<H>, Q: Query<H>> {
/// database to query from.
pub db: &'a HashDB<H>,
/// Query object to record nodes and transform data.
pub query: Q,
/// Hash to start at
pub hash: H::Out,
pub marker: PhantomData<C>, // TODO: probably not needed when all is said and done? When Query is made generic?
}
impl<'a, H, C, Q> Lookup<'a, H, C, Q>
where
H: Hasher + 'a,
C: NodeCodec<H> + 'a,
Q: Query<H>,
{
/// Look up the given key. If the value is found, it will be passed to the given
/// function to decode or copy.
pub fn look_up(mut self, mut key: NibbleSlice) -> Result<Option<Q::Item>, H::Out, C::Error> {
let mut hash = self.hash;
// this loop iterates through non-inline nodes.
for depth in 0.. {
let node_data = match self.db.get(&hash) {
Some(value) => value,
None => return Err(Box::new(match depth {
0 => TrieError::InvalidStateRoot(hash),
_ => TrieError::IncompleteDatabase(hash),
})),
};
self.query.record(&hash, &node_data, depth);
// this loop iterates through all inline children (usually max 1)
// without incrementing the depth.
let mut node_data = &node_data[..];
loop {
let decoded = match C::decode(node_data) {
Ok(node) => node,
Err(e) => {
return Err(Box::new(TrieError::DecoderError(hash, e)))
}
};
match decoded {
Node::Leaf(slice, value) => {
return Ok(match slice == key {
true => Some(self.query.decode(value)),
false => None,
})
}
Node::Extension(slice, item) => {
if key.starts_with(&slice) {
node_data = item;
key = key.mid(slice.len());
} else {
return Ok(None)
}
}
Node::Branch(children, value) => match key.is_empty() {
true => return Ok(value.map(move |val| self.query.decode(val))),
false => {
node_data = children[key.at(0) as usize];
key = key.mid(1);
}
},
_ => return Ok(None),
}
// check if new node data is inline or hash.
if let Some(h) = C::try_decode_hash(&node_data) {
hash = h;
break
}
}
}
Ok(None)
}
}


@ -1,311 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Nibble-orientated view onto byte-slice, allowing nibble-precision offsets.
use std::cmp::*;
use std::fmt;
use elastic_array::ElasticArray36;
/// Nibble-orientated view onto byte-slice, allowing nibble-precision offsets.
///
/// This is an immutable struct. No operations actually change it.
///
/// # Example
/// ```snippet
/// use patricia_trie::nibbleslice::NibbleSlice;
/// fn main() {
/// let d1 = &[0x01u8, 0x23, 0x45];
/// let d2 = &[0x34u8, 0x50, 0x12];
/// let d3 = &[0x00u8, 0x12];
/// let n1 = NibbleSlice::new(d1); // 0,1,2,3,4,5
/// let n2 = NibbleSlice::new(d2); // 3,4,5,0,1,2
/// let n3 = NibbleSlice::new_offset(d3, 1); // 0,1,2
/// assert!(n1 > n3); // 0,1,2,... > 0,1,2
/// assert!(n1 < n2); // 0,... < 3,...
/// assert!(n2.mid(3) == n3); // 0,1,2 == 0,1,2
/// assert!(n1.starts_with(&n3));
/// assert_eq!(n1.common_prefix(&n3), 3);
/// assert_eq!(n2.mid(3).common_prefix(&n1), 3);
/// }
/// ```
#[derive(Copy, Clone, Eq, Ord)]
pub struct NibbleSlice<'a> {
data: &'a [u8],
offset: usize,
data_encode_suffix: &'a [u8],
offset_encode_suffix: usize,
}
/// Iterator type for a nibble slice.
pub struct NibbleSliceIterator<'a> {
p: &'a NibbleSlice<'a>,
i: usize,
}
impl<'a> Iterator for NibbleSliceIterator<'a> {
type Item = u8;
fn next(&mut self) -> Option<u8> {
self.i += 1;
match self.i <= self.p.len() {
true => Some(self.p.at(self.i - 1)),
false => None,
}
}
}
impl<'a> NibbleSlice<'a> {
/// Create a new nibble slice with the given byte-slice.
pub fn new(data: &'a [u8]) -> Self { NibbleSlice::new_offset(data, 0) }
/// Create a new nibble slice with the given byte-slice with a nibble offset.
pub fn new_offset(data: &'a [u8], offset: usize) -> Self {
NibbleSlice {
data,
offset,
data_encode_suffix: &b""[..],
offset_encode_suffix: 0
}
}
/// Create a composed nibble slice; one followed by the other.
pub fn new_composed(a: &NibbleSlice<'a>, b: &NibbleSlice<'a>) -> Self {
NibbleSlice {
data: a.data,
offset: a.offset,
data_encode_suffix: b.data,
offset_encode_suffix: b.offset
}
}
/// Get an iterator for the series of nibbles.
pub fn iter(&'a self) -> NibbleSliceIterator<'a> {
NibbleSliceIterator { p: self, i: 0 }
}
/// Create a new nibble slice from the given HPE encoded data (e.g. output of `encoded()`).
pub fn from_encoded(data: &'a [u8]) -> (NibbleSlice, bool) {
(Self::new_offset(data, if data[0] & 16 == 16 {1} else {2}), data[0] & 32 == 32)
}
/// Is this an empty slice?
pub fn is_empty(&self) -> bool { self.len() == 0 }
/// Get the length (in nibbles, naturally) of this slice.
#[inline]
pub fn len(&self) -> usize { (self.data.len() + self.data_encode_suffix.len()) * 2 - self.offset - self.offset_encode_suffix }
/// Get the nibble at position `i`.
#[inline(always)]
pub fn at(&self, i: usize) -> u8 {
let l = self.data.len() * 2 - self.offset;
if i < l {
if (self.offset + i) & 1 == 1 {
self.data[(self.offset + i) / 2] & 15u8
}
else {
self.data[(self.offset + i) / 2] >> 4
}
}
else {
let i = i - l;
if (self.offset_encode_suffix + i) & 1 == 1 {
self.data_encode_suffix[(self.offset_encode_suffix + i) / 2] & 15u8
}
else {
self.data_encode_suffix[(self.offset_encode_suffix + i) / 2] >> 4
}
}
}
/// Return an object which represents a view onto this slice, (further) offset by `i` nibbles.
pub fn mid(&self, i: usize) -> NibbleSlice<'a> {
NibbleSlice {
data: self.data,
offset: self.offset + i,
data_encode_suffix: &b""[..],
offset_encode_suffix: 0
}
}
/// Do we start with the same nibbles as the whole of `them`?
pub fn starts_with(&self, them: &Self) -> bool { self.common_prefix(them) == them.len() }
/// How many of the same nibbles at the beginning do we match with `them`?
pub fn common_prefix(&self, them: &Self) -> usize {
let s = min(self.len(), them.len());
let mut i = 0usize;
while i < s {
if self.at(i) != them.at(i) { break; }
i += 1;
}
i
}
/// Encode the whole nibble slice in prefixed hex notation, noting whether it `is_leaf`.
#[inline]
pub fn encoded(&self, is_leaf: bool) -> ElasticArray36<u8> {
let l = self.len();
let mut r = ElasticArray36::new();
let mut i = l % 2;
r.push(if i == 1 {0x10 + self.at(0)} else {0} + if is_leaf {0x20} else {0});
while i < l {
r.push(self.at(i) * 16 + self.at(i + 1));
i += 2;
}
r
}
/// Encode only the leftmost `n` nibbles of the nibble slice in prefixed hex notation,
/// noting whether it `is_leaf`.
pub fn encoded_leftmost(&self, n: usize, is_leaf: bool) -> ElasticArray36<u8> {
let l = min(self.len(), n);
let mut r = ElasticArray36::new();
let mut i = l % 2;
r.push(if i == 1 {0x10 + self.at(0)} else {0} + if is_leaf {0x20} else {0});
while i < l {
r.push(self.at(i) * 16 + self.at(i + 1));
i += 2;
}
r
}
}
impl<'a> PartialEq for NibbleSlice<'a> {
fn eq(&self, them: &Self) -> bool {
self.len() == them.len() && self.starts_with(them)
}
}
impl<'a> PartialOrd for NibbleSlice<'a> {
fn partial_cmp(&self, them: &Self) -> Option<Ordering> {
let s = min(self.len(), them.len());
let mut i = 0usize;
while i < s {
match self.at(i).partial_cmp(&them.at(i)).unwrap() {
Ordering::Less => return Some(Ordering::Less),
Ordering::Greater => return Some(Ordering::Greater),
_ => i += 1,
}
}
self.len().partial_cmp(&them.len())
}
}
impl<'a> fmt::Debug for NibbleSlice<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for i in 0..self.len() {
match i {
0 => write!(f, "{:01x}", self.at(i))?,
_ => write!(f, "'{:01x}", self.at(i))?,
}
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::NibbleSlice;
use elastic_array::ElasticArray36;
static D: &'static [u8;3] = &[0x01u8, 0x23, 0x45];
#[test]
fn basics() {
let n = NibbleSlice::new(D);
assert_eq!(n.len(), 6);
assert!(!n.is_empty());
let n = NibbleSlice::new_offset(D, 6);
assert!(n.is_empty());
let n = NibbleSlice::new_offset(D, 3);
assert_eq!(n.len(), 3);
for i in 0..3 {
assert_eq!(n.at(i), i as u8 + 3);
}
}
#[test]
fn iterator() {
let n = NibbleSlice::new(D);
let mut nibbles: Vec<u8> = vec![];
nibbles.extend(n.iter());
assert_eq!(nibbles, (0u8..6).collect::<Vec<_>>())
}
#[test]
fn mid() {
let n = NibbleSlice::new(D);
let m = n.mid(2);
for i in 0..4 {
assert_eq!(m.at(i), i as u8 + 2);
}
let m = n.mid(3);
for i in 0..3 {
assert_eq!(m.at(i), i as u8 + 3);
}
}
#[test]
fn encoded() {
let n = NibbleSlice::new(D);
assert_eq!(n.encoded(false), ElasticArray36::from_slice(&[0x00, 0x01, 0x23, 0x45]));
assert_eq!(n.encoded(true), ElasticArray36::from_slice(&[0x20, 0x01, 0x23, 0x45]));
assert_eq!(n.mid(1).encoded(false), ElasticArray36::from_slice(&[0x11, 0x23, 0x45]));
assert_eq!(n.mid(1).encoded(true), ElasticArray36::from_slice(&[0x31, 0x23, 0x45]));
}
#[test]
fn from_encoded() {
let n = NibbleSlice::new(D);
assert_eq!((n, false), NibbleSlice::from_encoded(&[0x00, 0x01, 0x23, 0x45]));
assert_eq!((n, true), NibbleSlice::from_encoded(&[0x20, 0x01, 0x23, 0x45]));
assert_eq!((n.mid(1), false), NibbleSlice::from_encoded(&[0x11, 0x23, 0x45]));
assert_eq!((n.mid(1), true), NibbleSlice::from_encoded(&[0x31, 0x23, 0x45]));
}
#[test]
fn shared() {
let n = NibbleSlice::new(D);
let other = &[0x01u8, 0x23, 0x01, 0x23, 0x45, 0x67];
let m = NibbleSlice::new(other);
assert_eq!(n.common_prefix(&m), 4);
assert_eq!(m.common_prefix(&n), 4);
assert_eq!(n.mid(1).common_prefix(&m.mid(1)), 3);
assert_eq!(n.mid(1).common_prefix(&m.mid(2)), 0);
assert_eq!(n.common_prefix(&m.mid(4)), 6);
assert!(!n.starts_with(&m.mid(4)));
assert!(m.mid(4).starts_with(&n));
}
#[test]
fn compare() {
let other = &[0x01u8, 0x23, 0x01, 0x23, 0x45];
let n = NibbleSlice::new(D);
let m = NibbleSlice::new(other);
assert!(n != m);
assert!(n > m);
assert!(m < n);
assert!(n == m.mid(4));
assert!(n >= m.mid(4));
assert!(n <= m.mid(4));
}
}
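
For reference, here is a short usage sketch (not part of the diff) of the hex-prefix round-trip that `encoded`/`from_encoded` implement. It assumes the pre-split crate layout, where `NibbleSlice` is reachable at `patricia_trie::nibbleslice::NibbleSlice` as in the module example above.

extern crate patricia_trie;

use patricia_trie::nibbleslice::NibbleSlice;

fn main() {
	let data = [0x01u8, 0x23, 0x45];       // nibbles 0,1,2,3,4,5
	let slice = NibbleSlice::new(&data);

	// Even-length leaf: the header byte carries only the 0x20 leaf flag.
	let leaf = slice.encoded(true);
	assert_eq!(&leaf[..], &[0x20u8, 0x01, 0x23, 0x45][..]);

	// Odd-length extension: the first nibble is folded into the 0x1_ header.
	let ext = slice.mid(1).encoded(false);
	assert_eq!(&ext[..], &[0x11u8, 0x23, 0x45][..]);

	// from_encoded recovers both the nibbles and the leaf flag.
	let (decoded, is_leaf) = NibbleSlice::from_encoded(&leaf);
	assert!(is_leaf);
	assert_eq!(decoded, slice);
}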

View File

@ -1,146 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! An owning, nibble-oriented byte vector.
use elastic_array::ElasticArray36;
use nibbleslice::NibbleSlice;
/// Owning, nibble-oriented byte vector. Counterpart to `NibbleSlice`.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct NibbleVec {
inner: ElasticArray36<u8>,
len: usize,
}
impl Default for NibbleVec {
fn default() -> Self {
NibbleVec::new()
}
}
impl NibbleVec {
/// Make a new `NibbleVec`
pub fn new() -> Self {
NibbleVec {
inner: ElasticArray36::new(),
len: 0
}
}
/// Length of the `NibbleVec`
#[inline(always)]
pub fn len(&self) -> usize { self.len }
/// Returns true if the `NibbleVec` has zero length.
pub fn is_empty(&self) -> bool { self.len == 0 }
/// Get the nibble at the given offset.
#[inline]
pub fn at(&self, idx: usize) -> u8 {
if idx % 2 == 0 {
self.inner[idx / 2] >> 4
} else {
self.inner[idx / 2] & 0x0F
}
}
/// Push a nibble onto the `NibbleVec`. Ignores the high 4 bits.
pub fn push(&mut self, nibble: u8) {
let nibble = nibble & 0x0F;
if self.len % 2 == 0 {
self.inner.push(nibble << 4);
} else {
*self.inner.last_mut().expect("len != 0 since len % 2 != 0; inner has a last element; qed") |= nibble;
}
self.len += 1;
}
/// Try to pop a nibble off the `NibbleVec`. Returns `None` if `len == 0`.
pub fn pop(&mut self) -> Option<u8> {
if self.is_empty() {
return None;
}
let byte = self.inner.pop().expect("len != 0; inner has last elem; qed");
let nibble = if self.len % 2 == 0 {
self.inner.push(byte & 0xF0);
byte & 0x0F
} else {
byte >> 4
};
self.len -= 1;
Some(nibble)
}
/// Try to treat this `NibbleVec` as a `NibbleSlice`. Works only if len is even.
pub fn as_nibbleslice(&self) -> Option<NibbleSlice> {
if self.len % 2 == 0 {
Some(NibbleSlice::new(self.inner()))
} else {
None
}
}
/// Get the underlying byte slice.
pub fn inner(&self) -> &[u8] {
&self.inner[..]
}
}
impl<'a> From<NibbleSlice<'a>> for NibbleVec {
fn from(s: NibbleSlice<'a>) -> Self {
let mut v = NibbleVec::new();
for i in 0..s.len() {
v.push(s.at(i));
}
v
}
}
#[cfg(test)]
mod tests {
use super::NibbleVec;
#[test]
fn push_pop() {
let mut v = NibbleVec::new();
for i in 0..16 {
v.push(i);
assert_eq!(v.len() - 1, i as usize);
assert_eq!(v.at(i as usize), i);
}
for i in (0..16).rev() {
assert_eq!(v.pop(), Some(i));
assert_eq!(v.len(), i as usize);
}
}
#[test]
fn nibbleslice_conv() {
let mut v = NibbleVec::new();
for i in 0..10 {
v.push(i);
}
let v2: NibbleVec = v.as_nibbleslice().unwrap().into();
assert_eq!(v, v2);
}
}
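
A brief usage sketch (not part of the diff) for the owning counterpart: pushing nibbles, borrowing an even-length vector as a `NibbleSlice`, and popping. The `patricia_trie::nibblevec` import path is an assumption; the module may only be re-exported at the crate root.

extern crate patricia_trie;

use patricia_trie::nibblevec::NibbleVec;   // assumed path

fn main() {
	let mut v = NibbleVec::new();
	for nibble in 0..6u8 {
		v.push(nibble);                    // only the low 4 bits of each value are kept
	}
	assert_eq!(v.len(), 6);
	assert_eq!(v.at(3), 3);

	{
		// Even length, so a borrowed NibbleSlice view is available.
		let slice = v.as_nibbleslice().expect("length is even; qed");
		assert_eq!(slice.len(), 6);
	}

	// pop() hands nibbles back in reverse order of insertion...
	assert_eq!(v.pop(), Some(5));
	// ...and an odd-length vector has no NibbleSlice view.
	assert!(v.as_nibbleslice().is_none());
}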

View File

@ -1,69 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use elastic_array::ElasticArray36;
use nibbleslice::NibbleSlice;
use nibblevec::NibbleVec;
use hashdb::DBValue;
/// Partial node key type.
pub type NodeKey = ElasticArray36<u8>;
/// Type of node in the trie and essential information thereof.
#[derive(Eq, PartialEq, Debug, Clone)]
pub enum Node<'a> {
/// Null trie node; could be an empty root or an empty branch entry.
Empty,
/// Leaf node; has key slice and value. Value may not be empty.
Leaf(NibbleSlice<'a>, &'a [u8]),
/// Extension node; has key slice and node data. Data may not be null.
Extension(NibbleSlice<'a>, &'a [u8]),
/// Branch node; has array of 16 child nodes (each possibly null) and an optional immediate node data.
Branch([&'a [u8]; 16], Option<&'a [u8]>),
}
/// An owning node type. Useful for trie iterators.
#[derive(Debug, PartialEq, Eq)]
pub enum OwnedNode {
/// Empty trie node.
Empty,
/// Leaf node: partial key and value.
Leaf(NibbleVec, DBValue),
/// Extension node: partial key and child node.
Extension(NibbleVec, DBValue),
/// Branch node: 16 children and an optional value.
Branch([NodeKey; 16], Option<DBValue>),
}
impl<'a> From<Node<'a>> for OwnedNode {
fn from(node: Node<'a>) -> Self {
match node {
Node::Empty => OwnedNode::Empty,
Node::Leaf(k, v) => OwnedNode::Leaf(k.into(), DBValue::from_slice(v)),
Node::Extension(k, child) => OwnedNode::Extension(k.into(), DBValue::from_slice(child)),
Node::Branch(c, val) => {
let children = [
NodeKey::from_slice(c[0]), NodeKey::from_slice(c[1]), NodeKey::from_slice(c[2]), NodeKey::from_slice(c[3]),
NodeKey::from_slice(c[4]), NodeKey::from_slice(c[5]), NodeKey::from_slice(c[6]), NodeKey::from_slice(c[7]),
NodeKey::from_slice(c[8]), NodeKey::from_slice(c[9]), NodeKey::from_slice(c[10]), NodeKey::from_slice(c[11]),
NodeKey::from_slice(c[12]), NodeKey::from_slice(c[13]), NodeKey::from_slice(c[14]), NodeKey::from_slice(c[15]),
];
OwnedNode::Branch(children, val.map(DBValue::from_slice))
}
}
}
}
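
A usage sketch (not part of the diff): converting a borrowed `Node`, whose partial key and value point into encoded data, into a self-contained `OwnedNode` via the `From` impl above. The `patricia_trie::node` and `patricia_trie::nibbleslice` paths are assumed from the pre-split layout.

extern crate patricia_trie;

use patricia_trie::node::{Node, OwnedNode};
use patricia_trie::nibbleslice::NibbleSlice;

fn main() {
	let partial = [0x01u8, 0x23, 0x45];
	let value = [0xdeu8, 0xad, 0xbe, 0xef];

	// A leaf that borrows both its partial key and its (non-empty) value.
	let borrowed = Node::Leaf(NibbleSlice::new(&partial), &value);

	// The From impl copies the key into a NibbleVec and the value into a DBValue.
	let owned: OwnedNode = borrowed.into();
	match owned {
		OwnedNode::Leaf(key, val) => {
			assert_eq!(key.len(), 6);
			assert_eq!(&val[..], &value[..]);
		}
		_ => unreachable!("constructed as a leaf above"),
	}
}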

View File

@ -1,55 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Generic trait for trie node encoding/decoding. Takes a `hashdb::Hasher`
//! to parametrize the hashes used in the codec.
use hashdb::Hasher;
use node::Node;
use ChildReference;
use elastic_array::{ElasticArray1024, ElasticArray128};
/// Trait for trie node encoding/decoding
pub trait NodeCodec<H: Hasher>: Sized {
/// Encoding error type
type Error: ::std::error::Error;
/// Null node type
const HASHED_NULL_NODE: H::Out;
/// Decode bytes to a `Node`. Returns `Self::Error` on failure.
fn decode(data: &[u8]) -> Result<Node, Self::Error>;
/// Decode bytes to the `Hasher`s output type. Returns `None` on failure.
fn try_decode_hash(data: &[u8]) -> Option<H::Out>;
/// Check if the provided bytes correspond to the codec's "empty" node.
fn is_empty_node(data: &[u8]) -> bool;
/// Returns an encoded empty node
fn empty_node() -> ElasticArray1024<u8>;
/// Returns an encoded leaf node
fn leaf_node(partial: &[u8], value: &[u8]) -> ElasticArray1024<u8>;
/// Returns an encoded extension node
fn ext_node(partial: &[u8], child_ref: ChildReference<H::Out>) -> ElasticArray1024<u8>;
/// Returns an encoded branch node. Takes an iterator yielding `Option<ChildReference<H::Out>>` and an optional value.
fn branch_node<I>(children: I, value: Option<ElasticArray128<u8>>) -> ElasticArray1024<u8>
where I: IntoIterator<Item=Option<ChildReference<H::Out>>>;
}
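
To illustrate how the trait is meant to be consumed, here is a small generic helper (not part of the diff, and not a real API of the crate): given raw bytes taken from a trie node, it decodes them as an inline node or falls back to treating them as a child hash. `H` and `C` stand for whatever concrete `Hasher` and `NodeCodec` an implementation supplies (for example, the Keccak/RLP codec kept in util/patricia-trie-ethereum); the import paths are assumptions based on the pre-split layout.

extern crate patricia_trie;
extern crate hashdb;

use patricia_trie::NodeCodec;       // assumed re-export location
use patricia_trie::node::Node;      // assumed module path
use hashdb::Hasher;

/// Interpret `data` either as an inline node or, failing that, as a child hash.
/// `None` means the bytes are the codec's empty node (or neither form applies).
fn decode_or_hash<H, C>(data: &[u8]) -> Option<Result<Node, H::Out>>
	where H: Hasher, C: NodeCodec<H>
{
	if C::is_empty_node(data) {
		return None;
	}
	match C::decode(data) {
		// Small nodes are stored inline: the bytes are the node itself.
		Ok(node) => Some(Ok(node)),
		// Anything else should be the hash of a node stored elsewhere.
		Err(_) => C::try_decode_hash(data).map(Err),
	}
}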

Some files were not shown because too many files have changed in this diff.