From 9caa8686039c5f5c830ac3bf770e9c6ae2a47173 Mon Sep 17 00:00:00 2001 From: David Date: Mon, 2 Jul 2018 18:50:05 +0200 Subject: [PATCH] Make HashDB generic (#8739) The `patricia_trie` crate is generic over the hasher (by way of HashDB) and node encoding scheme. Adds a new `patricia_trie_ethereum` crate with concrete impls for Keccak/RLP. --- Cargo.lock | 161 +++++-- Cargo.toml | 2 + ethcore/Cargo.toml | 6 +- ethcore/light/Cargo.toml | 2 + ethcore/light/src/cht.rs | 16 +- ethcore/light/src/client/header_chain.rs | 19 +- ethcore/light/src/lib.rs | 2 + ethcore/light/src/net/mod.rs | 20 +- ethcore/light/src/net/status.rs | 2 +- ethcore/light/src/net/tests/mod.rs | 12 +- ethcore/light/src/on_demand/request.rs | 26 +- ethcore/private-tx/Cargo.toml | 1 + ethcore/private-tx/src/error.rs | 2 +- ethcore/private-tx/src/lib.rs | 1 + ethcore/src/account_db.rs | 56 ++- ethcore/src/block.rs | 17 +- ethcore/src/blockchain/blockchain.rs | 49 +- ethcore/src/blockchain/extras.rs | 11 +- ethcore/src/client/client.rs | 2 +- ethcore/src/client/error.rs | 2 +- ethcore/src/client/evm_test_client.rs | 3 +- ethcore/src/client/test_client.rs | 11 +- ethcore/src/engines/authority_round/mod.rs | 3 - ethcore/src/engines/tendermint/message.rs | 10 +- ethcore/src/engines/tendermint/mod.rs | 2 +- .../engines/validator_set/safe_contract.rs | 22 +- ethcore/src/error.rs | 2 +- ethcore/src/executed.rs | 13 +- ethcore/src/factory.rs | 4 +- ethcore/src/json_tests/executive.rs | 4 +- ethcore/src/json_tests/trie.rs | 6 +- ethcore/src/lib.rs | 2 + ethcore/src/pod_account.rs | 4 +- ethcore/src/snapshot/account.rs | 14 +- ethcore/src/snapshot/block.rs | 1 - ethcore/src/snapshot/consensus/authority.rs | 1 + ethcore/src/snapshot/error.rs | 2 +- ethcore/src/snapshot/mod.rs | 10 +- ethcore/src/snapshot/tests/helpers.rs | 8 +- ethcore/src/state/account.rs | 20 +- ethcore/src/state/backend.rs | 53 ++- ethcore/src/state/mod.rs | 90 ++-- ethcore/src/state_db.rs | 35 +- ethcore/sync/Cargo.toml | 4 +- ethcore/sync/src/chain/supplier.rs | 1 + ethcore/sync/src/lib.rs | 2 + ethcore/vm/Cargo.toml | 1 + ethcore/vm/src/error.rs | 11 +- ethcore/vm/src/lib.rs | 1 + util/hashdb/Cargo.toml | 4 +- util/hashdb/src/lib.rs | 69 +-- util/journaldb/Cargo.toml | 7 +- util/journaldb/src/archivedb.rs | 24 +- util/journaldb/src/as_hash_db_impls.rs | 49 ++ util/journaldb/src/earlymergedb.rs | 22 +- util/journaldb/src/lib.rs | 2 + util/journaldb/src/overlaydb.rs | 5 +- util/journaldb/src/overlayrecentdb.rs | 32 +- util/journaldb/src/refcounteddb.rs | 20 +- util/journaldb/src/traits.rs | 15 +- util/keccak-hasher/Cargo.toml | 12 + util/keccak-hasher/src/lib.rs | 39 ++ util/memorydb/Cargo.toml | 12 +- util/memorydb/benches/memdb.rs | 79 ++++ util/memorydb/src/lib.rs | 129 +++--- util/patricia-trie-ethereum/Cargo.toml | 15 + util/patricia-trie-ethereum/src/lib.rs | 62 +++ .../src/rlp_node_codec.rs | 124 ++++++ util/patricia_trie/Cargo.toml | 20 +- util/patricia_trie/benches/trie.rs | 21 +- util/patricia_trie/src/fatdb.rs | 135 +++--- util/patricia_trie/src/fatdbmut.rs | 107 +++-- util/patricia_trie/src/lib.rs | 183 ++++---- util/patricia_trie/src/lookup.rs | 36 +- util/patricia_trie/src/nibbleslice.rs | 3 + util/patricia_trie/src/nibblevec.rs | 2 + util/patricia_trie/src/node.rs | 81 +--- util/patricia_trie/src/node_codec.rs | 55 +++ util/patricia_trie/src/recorder.rs | 36 +- util/patricia_trie/src/sectriedb.rs | 82 ++-- util/patricia_trie/src/sectriedbmut.rs | 84 ++-- util/patricia_trie/src/triedb.rs | 388 ++++++++-------- util/patricia_trie/src/triedbmut.rs | 418 
+++++++++--------- util/plain_hasher/Cargo.toml | 3 +- util/plain_hasher/src/lib.rs | 5 +- util/rlp/benches/rlp.rs | 22 +- util/rlp/src/lib.rs | 2 +- util/rlp/src/stream.rs | 90 ++-- util/rlp/tests/tests.rs | 1 - 89 files changed, 1962 insertions(+), 1282 deletions(-) create mode 100644 util/journaldb/src/as_hash_db_impls.rs create mode 100644 util/keccak-hasher/Cargo.toml create mode 100644 util/keccak-hasher/src/lib.rs create mode 100644 util/memorydb/benches/memdb.rs create mode 100644 util/patricia-trie-ethereum/Cargo.toml create mode 100644 util/patricia-trie-ethereum/src/lib.rs create mode 100644 util/patricia-trie-ethereum/src/rlp_node_codec.rs create mode 100644 util/patricia_trie/src/node_codec.rs diff --git a/Cargo.lock b/Cargo.lock index e97180bb2..a54711fba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -388,7 +388,7 @@ version = "0.1.0" dependencies = [ "app_dirs 1.2.1 (git+https://github.com/paritytech/app-dirs-rs)", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "journaldb 0.1.0", + "journaldb 0.2.0", ] [[package]] @@ -435,6 +435,18 @@ dependencies = [ "regex 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "env_logger" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "atty 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "termcolor 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "error-chain" version = "0.11.0" @@ -548,11 +560,12 @@ dependencies = [ "fake-hardware-wallet 0.0.1", "fetch 0.1.0", "hardware-wallet 1.12.0", - "hashdb 0.1.1", + "hashdb 0.2.0", "heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", - "journaldb 0.1.0", + "journaldb 0.2.0", "keccak-hash 0.1.2", + "keccak-hasher 0.1.0", "kvdb 0.1.0", "kvdb-memorydb 0.1.0", "kvdb-rocksdb 0.1.0", @@ -561,12 +574,13 @@ dependencies = [ "lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "macros 0.1.0", "memory-cache 0.1.0", - "memorydb 0.1.1", + "memorydb 0.2.0", "num 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "parity-machine 0.1.0", "parking_lot 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "patricia-trie 0.1.0", + "patricia-trie 0.2.0", + "patricia-trie-ethereum 0.1.0", "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.2.1", @@ -641,18 +655,20 @@ dependencies = [ "ethcore-transaction 0.1.0", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", - "hashdb 0.1.1", + "hashdb 0.2.0", "heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", "keccak-hash 0.1.2", + "keccak-hasher 0.1.0", "kvdb 0.1.0", "kvdb-memorydb 0.1.0", "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", "memory-cache 0.1.0", - "memorydb 0.1.1", + "memorydb 0.2.0", "parking_lot 0.5.4 
(registry+https://github.com/rust-lang/crates.io-index)", - "patricia-trie 0.1.0", - "plain_hasher 0.1.0", + "patricia-trie 0.2.0", + "patricia-trie-ethereum 0.1.0", + "plain_hasher 0.2.0", "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.2.1", "rlp_derive 0.1.0", @@ -784,7 +800,8 @@ dependencies = [ "keccak-hash 0.1.2", "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "patricia-trie 0.1.0", + "patricia-trie 0.2.0", + "patricia-trie-ethereum 0.1.0", "rand 0.3.20 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.2.1", "rlp_derive 0.1.0", @@ -885,15 +902,17 @@ dependencies = [ "ethcore-transaction 0.1.0", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethkey 0.3.0", + "hashdb 0.2.0", "heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "ipnetwork 0.12.7 (registry+https://github.com/rust-lang/crates.io-index)", "keccak-hash 0.1.2", + "keccak-hasher 0.1.0", "kvdb 0.1.0", "kvdb-memorydb 0.1.0", "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", "macros 0.1.0", "parking_lot 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "plain_hasher 0.1.0", + "plain_hasher 0.2.0", "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.2.1", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1228,10 +1247,10 @@ dependencies = [ [[package]] name = "hashdb" -version = "0.1.1" +version = "0.2.0" dependencies = [ "elastic-array 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1269,6 +1288,14 @@ name = "httparse" version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "humantime" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "hyper" version = "0.11.24" @@ -1375,20 +1402,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "journaldb" -version = "0.1.0" +version = "0.2.0" dependencies = [ "ethcore-bytes 0.1.0", "ethcore-logger 1.12.0", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "hashdb 0.1.1", + "hashdb 0.2.0", "heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "keccak-hash 0.1.2", + "keccak-hasher 0.1.0", "kvdb 0.1.0", "kvdb-memorydb 0.1.0", "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "memorydb 0.1.1", + "memorydb 0.2.0", "parking_lot 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "plain_hasher 0.1.0", + "plain_hasher 0.2.0", "rlp 0.2.1", "util-error 0.1.0", ] @@ -1498,6 +1526,16 @@ dependencies = [ "tiny-keccak 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "keccak-hasher" +version = "0.1.0" +dependencies = [ + "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "hashdb 0.2.0", + "plain_hasher 0.2.0", + "tiny-keccak 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "kernel32-sys" version = "0.2.2" @@ -1678,15 +1716,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] 
name = "memorydb" -version = "0.1.1" +version = "0.2.0" dependencies = [ "elastic-array 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "hashdb 0.1.1", + "hashdb 0.2.0", "heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "keccak-hash 0.1.2", - "plain_hasher 0.1.0", + "keccak-hasher 0.1.0", + "plain_hasher 0.2.0", "rlp 0.2.1", + "tiny-keccak 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2035,7 +2075,7 @@ dependencies = [ "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", "ipnetwork 0.12.7 (registry+https://github.com/rust-lang/crates.io-index)", - "journaldb 0.1.0", + "journaldb 0.2.0", "jsonrpc-core 8.0.1 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.11)", "keccak-hash 0.1.2", "kvdb 0.1.0", @@ -2254,7 +2294,7 @@ dependencies = [ "parity-updater 1.12.0", "parity-version 1.12.0", "parking_lot 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "patricia-trie 0.1.0", + "patricia-trie 0.2.0", "pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.2.1", @@ -2421,22 +2461,37 @@ version = "0.1.0" [[package]] name = "patricia-trie" -version = "0.1.0" +version = "0.2.0" dependencies = [ "elastic-array 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-bytes 0.1.0", - "ethcore-logger 1.12.0", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "hashdb 0.1.1", + "hashdb 0.2.0", "keccak-hash 0.1.2", + "keccak-hasher 0.1.0", "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "memorydb 0.1.1", + "memorydb 0.2.0", + "patricia-trie-ethereum 0.1.0", "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.2.1", "trie-standardmap 0.1.0", "triehash 0.1.0", ] +[[package]] +name = "patricia-trie-ethereum" +version = "0.1.0" +dependencies = [ + "elastic-array 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ethcore-bytes 0.1.0", + "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "hashdb 0.2.0", + "keccak-hasher 0.1.0", + "patricia-trie 0.2.0", + "rlp 0.2.1", +] + [[package]] name = "percent-encoding" version = "1.0.0" @@ -2488,10 +2543,11 @@ dependencies = [ [[package]] name = "plain_hasher" -version = "0.1.0" +version = "0.2.0" dependencies = [ "crunchy 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "hashdb 0.2.0", ] [[package]] @@ -2720,11 +2776,31 @@ dependencies = [ "utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "regex" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "aho-corasick 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "thread_local 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "regex-syntax" 
version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "regex-syntax" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "registrar" version = "0.0.1" @@ -3173,6 +3249,14 @@ dependencies = [ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "termcolor" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "wincolor 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "termion" version = "1.5.1" @@ -3505,6 +3589,11 @@ dependencies = [ "trie-standardmap 0.1.0", ] +[[package]] +name = "ucd-util" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "uint" version = "0.2.1" @@ -3649,7 +3738,8 @@ dependencies = [ "ethjson 0.1.0", "keccak-hash 0.1.2", "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "patricia-trie 0.1.0", + "patricia-trie 0.2.0", + "patricia-trie-ethereum 0.1.0", "rlp 0.2.1", ] @@ -3749,6 +3839,14 @@ name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "wincolor" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "ws" version = "0.7.5" @@ -3858,6 +3956,7 @@ dependencies = [ "checksum either 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "740178ddf48b1a9e878e6d6509a1442a2d42fd2928aae8e7a6f8a36fb01981b3" "checksum elastic-array 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "88d4851b005ef16de812ea9acdb7bece2f0a40dd86c07b85631d7dafa54537bb" "checksum env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3ddf21e73e016298f5cb37d6ef8e8da8e39f91f9ec8b0df44b7deb16a9f8cd5b" +"checksum env_logger 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)" = "0e6e40ebb0e66918a37b38c7acab4e10d299e0463fe2af5d29b9cc86710cfd2a" "checksum error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff511d5dc435d703f4971bc399647c9bc38e20cb41452e3b9feb4765419ed3f3" "checksum error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "07e791d3be96241c77c43846b665ef1384606da2cd2a48730abe606a12906e02" "checksum eth-secp256k1 0.5.7 (git+https://github.com/paritytech/rust-secp256k1)" = "" @@ -3888,6 +3987,7 @@ dependencies = [ "checksum hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d6a22814455d41612f41161581c2883c0c6a1c41852729b17d5ed88f01e153aa" "checksum hidapi 0.3.1 (git+https://github.com/paritytech/hidapi-rs)" = "" "checksum httparse 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "af2f2dd97457e8fb1ae7c5a420db346af389926e36f43768b96f101546b04a07" +"checksum humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0484fda3e7007f2a4a0d9c3a703ca38c71c54c55602ce4660c419fd32e188c9e" "checksum hyper 0.11.24 (registry+https://github.com/rust-lang/crates.io-index)" = "df4dd5dae401458087396b6db7fabc4d6760aa456a5fa8e92bda549f39cae661" "checksum hyper-rustls 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d6cdc1751771a14b8175764394f025e309a28c825ed9eaf97fa62bb831dc8c5" "checksum idna 0.1.4 
(registry+https://github.com/rust-lang/crates.io-index)" = "014b298351066f1512874135335d62a789ffe78a9974f94b43ed5621951eaf7d" @@ -3988,7 +4088,9 @@ dependencies = [ "checksum redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "c214e91d3ecf43e9a4e41e578973adeb14b474f2bee858742d127af75a0112b1" "checksum redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76" "checksum regex 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "744554e01ccbd98fff8c457c3b092cd67af62a555a43bfe97ae8a0451f7799fa" +"checksum regex 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "13c93d55961981ba9226a213b385216f83ab43bd6ac53ab16b2eeb47e337cf4e" "checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db" +"checksum regex-syntax 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "05b06a75f5217880fc5e905952a42750bf44787e56a6c6d6852ed0992f5e1d54" "checksum relay 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1576e382688d7e9deecea24417e350d3062d97e32e45d70b1cde65994ff1489a" "checksum ring 0.12.1 (git+https://github.com/paritytech/ring)" = "" "checksum rocksdb 0.4.5 (git+https://github.com/paritytech/rust-rocksdb)" = "" @@ -4038,6 +4140,7 @@ dependencies = [ "checksum tempfile 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "11ce2fe9db64b842314052e2421ac61a73ce41b898dc8e3750398b219c5fc1e0" "checksum term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "fa63644f74ce96fbeb9b794f66aff2a52d601cbd5e80f4b97123e3899f4570f1" "checksum term_size 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9e5b9a66db815dcfd2da92db471106457082577c3c278d4138ab3e3b4e189327" +"checksum termcolor 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "adc4587ead41bf016f11af03e55a624c06568b5a19db4e90fde573d805074f83" "checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096" "checksum textwrap 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c0b59b6b4b44d867f1370ef1bd91bfb262bf07bf0ae65c202ea2fbc16153b693" "checksum thread-id 3.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c7fbf4c9d56b320106cd64fd024dadfa0be7cb4706725fc44a7d7ce952d820c1" @@ -4065,6 +4168,7 @@ dependencies = [ "checksum toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a7540f4ffc193e0d3c94121edb19b055670d369f77d5804db11ae053a45b6e7e" "checksum transient-hashmap 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "715254c8f0811be1a79ad3ea5e6fa3c8eddec2b03d7f5ba78cf093e56d79c24f" "checksum trezor-sys 1.0.0 (git+https://github.com/paritytech/trezor-sys)" = "" +"checksum ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fd2be2d6639d0f8fe6cdda291ad456e23629558d466e2789d2c3e9892bda285d" "checksum uint 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "38051a96565903d81c9a9210ce11076b2218f3b352926baa1f5f6abbdfce8273" "checksum unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33" "checksum unicase 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "284b6d3db520d67fbe88fd778c21510d1b0ba4a551e5d0fbb023d33405f6de8a" @@ -4091,6 +4195,7 @@ dependencies = [ "checksum 
winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" "checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +"checksum wincolor 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "eeb06499a3a4d44302791052df005d5232b927ed1a9658146d842165c4de7767" "checksum ws 0.7.5 (git+https://github.com/tomusdrw/ws-rs)" = "" "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" "checksum xdg 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a66b7c2281ebde13cf4391d70d4c7e5946c3c25e72a7b859ca8f677dcd0b0c61" diff --git a/Cargo.toml b/Cargo.toml index 7424ba38b..a141f6dc5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -133,6 +133,8 @@ members = [ "transaction-pool", "whisper", "whisper/cli", + "util/keccak-hasher", + "util/patricia-trie-ethereum", ] [patch.crates-io] diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index 4d9309260..0e449f867 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -20,6 +20,7 @@ fetch = { path = "../util/fetch" } hashdb = { path = "../util/hashdb" } memorydb = { path = "../util/memorydb" } patricia-trie = { path = "../util/patricia_trie" } +patricia-trie-ethereum = { path = "../util/patricia-trie-ethereum" } ethcore-crypto = { path = "crypto" } error-chain = { version = "0.12", default-features = false } ethcore-io = { path = "../util/io" } @@ -66,8 +67,9 @@ keccak-hash = { path = "../util/hash" } triehash = { path = "../util/triehash" } unexpected = { path = "../util/unexpected" } journaldb = { path = "../util/journaldb" } -tempdir = { version = "0.3", optional = true } +keccak-hasher = { path = "../util/keccak-hasher" } kvdb-rocksdb = { path = "../util/kvdb-rocksdb" } +tempdir = {version="0.3", optional = true} [target.'cfg(any(target_os = "linux", target_os = "macos", target_os = "windows", target_os = "android"))'.dependencies] hardware-wallet = { path = "../hw" } @@ -76,8 +78,8 @@ hardware-wallet = { path = "../hw" } fake-hardware-wallet = { path = "../util/fake-hardware-wallet" } [dev-dependencies] -trie-standardmap = { path = "../util/trie-standardmap" } tempdir = "0.3" +trie-standardmap = { path = "../util/trie-standardmap" } [features] # Display EVM debug traces. 
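The lockfile and manifest changes above pull in the new `keccak-hasher` crate, which supplies the concrete hasher that ties the now-generic `HashDB` to Keccak-256. A minimal sketch of such an implementation, assuming the `hashdb` crate exposes a `Hasher` trait with an output type, a companion `std` hasher, an output length, and a static `hash` function (item names are inferred from the crate's dependencies in the lockfile, not quoted from its source):

```rust
// Sketch of a Keccak-256 `Hasher` for the generic `HashDB`, built from the dependencies
// the lockfile lists for `keccak-hasher` (ethereum-types, hashdb, plain_hasher, tiny-keccak).
// The trait items (`Out`, `StdHasher`, `LENGTH`, `hash`) are assumptions for illustration.
extern crate ethereum_types;
extern crate hashdb;
extern crate plain_hasher;
extern crate tiny_keccak;

use ethereum_types::H256;
use hashdb::Hasher;
use plain_hasher::PlainHasher; // cheap std hasher for maps keyed by already-uniform H256s
use tiny_keccak::Keccak;

/// Keccak-256 hasher with a 32-byte `H256` output.
#[derive(Default, Debug, Clone, PartialEq)]
pub struct KeccakHasher;

impl Hasher for KeccakHasher {
	type Out = H256;
	type StdHasher = PlainHasher;
	const LENGTH: usize = 32;

	fn hash(x: &[u8]) -> Self::Out {
		let mut out = [0u8; 32];
		Keccak::keccak256(x, &mut out);
		out.into()
	}
}
```

With a hasher like this in place, call sites select the hashing scheme by type parameter, e.g. `MemoryDB::<KeccakHasher>::new()` in the hunks below.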
diff --git a/ethcore/light/Cargo.toml b/ethcore/light/Cargo.toml index ac3a70c3d..9435dcee4 100644 --- a/ethcore/light/Cargo.toml +++ b/ethcore/light/Cargo.toml @@ -14,6 +14,7 @@ ethcore-transaction = { path = "../transaction" } ethereum-types = "0.3" memorydb = { path = "../../util/memorydb" } patricia-trie = { path = "../../util/patricia_trie" } +patricia-trie-ethereum = { path = "../../util/patricia-trie-ethereum" } ethcore-network = { path = "../../util/network" } ethcore-io = { path = "../../util/io" } hashdb = { path = "../../util/hashdb" } @@ -32,6 +33,7 @@ serde_derive = "1.0" parking_lot = "0.5" stats = { path = "../../util/stats" } keccak-hash = { path = "../../util/hash" } +keccak-hasher = { path = "../../util/keccak-hasher" } triehash = { path = "../../util/triehash" } kvdb = { path = "../../util/kvdb" } memory-cache = { path = "../../util/memory_cache" } diff --git a/ethcore/light/src/cht.rs b/ethcore/light/src/cht.rs index 805cca3cb..18b0e5b06 100644 --- a/ethcore/light/src/cht.rs +++ b/ethcore/light/src/cht.rs @@ -26,9 +26,11 @@ use ethcore::ids::BlockId; use ethereum_types::{H256, U256}; use hashdb::HashDB; +use keccak_hasher::KeccakHasher; use memorydb::MemoryDB; use bytes::Bytes; -use trie::{self, TrieMut, TrieDBMut, Trie, TrieDB, Recorder}; +use trie::{TrieMut, Trie, Recorder}; +use ethtrie::{self, TrieDB, TrieDBMut}; use rlp::{RlpStream, Rlp}; // encode a key. @@ -50,13 +52,13 @@ pub const SIZE: u64 = 2048; /// A canonical hash trie. This is generic over any database it can query. /// See module docs for more details. #[derive(Debug, Clone)] -pub struct CHT { +pub struct CHT> { db: DB, root: H256, // the root of this CHT. number: u64, } -impl CHT { +impl> CHT { /// Query the root of the CHT. pub fn root(&self) -> H256 { self.root } @@ -66,7 +68,7 @@ impl CHT { /// Generate an inclusion proof for the entry at a specific block. /// Nodes before level `from_level` will be omitted. /// Returns an error on an incomplete trie, and `Ok(None)` on an unprovable request. - pub fn prove(&self, num: u64, from_level: u32) -> trie::Result>> { + pub fn prove(&self, num: u64, from_level: u32) -> ethtrie::Result>> { if block_to_cht_number(num) != Some(self.number) { return Ok(None) } let mut recorder = Recorder::with_depth(from_level); @@ -90,10 +92,10 @@ pub struct BlockInfo { /// Build an in-memory CHT from a closure which provides necessary information /// about blocks. If the fetcher ever fails to provide the info, the CHT /// will not be generated. -pub fn build(cht_num: u64, mut fetcher: F) -> Option> +pub fn build(cht_num: u64, mut fetcher: F) -> Option>> where F: FnMut(BlockId) -> Option { - let mut db = MemoryDB::new(); + let mut db = MemoryDB::::new(); // start from the last block by number and work backwards. let last_num = start_number(cht_num + 1) - 1; @@ -147,7 +149,7 @@ pub fn compute_root(cht_num: u64, iterable: I) -> Option /// verify the given trie branch and extract the canonical hash and total difficulty. // TODO: better support for partially-checked queries. 
pub fn check_proof(proof: &[Bytes], num: u64, root: H256) -> Option<(H256, U256)> { - let mut db = MemoryDB::new(); + let mut db = MemoryDB::::new(); for node in proof { db.insert(&node[..]); } let res = match TrieDB::new(&db, &root) { diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 44fb311b6..957a1ea43 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -28,28 +28,21 @@ use std::collections::BTreeMap; use std::sync::Arc; +use cache::Cache; use cht; - use ethcore::block_status::BlockStatus; -use ethcore::error::{Error, BlockImportError, BlockImportErrorKind, BlockError}; use ethcore::encoded; +use ethcore::engines::epoch::{Transition as EpochTransition, PendingTransition as PendingEpochTransition}; +use ethcore::error::{Error, BlockImportError, BlockImportErrorKind, BlockError}; use ethcore::header::Header; use ethcore::ids::BlockId; use ethcore::spec::{Spec, SpecHardcodedSync}; -use ethcore::engines::epoch::{ - Transition as EpochTransition, - PendingTransition as PendingEpochTransition -}; - -use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp}; -use heapsize::HeapSizeOf; use ethereum_types::{H256, H264, U256}; -use plain_hasher::H256FastMap; +use heapsize::HeapSizeOf; use kvdb::{DBTransaction, KeyValueDB}; - -use cache::Cache; use parking_lot::{Mutex, RwLock}; - +use plain_hasher::H256FastMap; +use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp}; use smallvec::SmallVec; /// Store at least this many candidate headers at all times. diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index d7469fdce..2000131a6 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -64,8 +64,10 @@ extern crate hashdb; extern crate heapsize; extern crate futures; extern crate itertools; +extern crate keccak_hasher; extern crate memorydb; extern crate patricia_trie as trie; +extern crate patricia_trie_ethereum as ethtrie; extern crate plain_hasher; extern crate rand; extern crate rlp; diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 179f70900..27d240fed 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -20,22 +20,20 @@ use transaction::UnverifiedTransaction; -use io::TimerToken; -use network::{NetworkProtocolHandler, NetworkContext, PeerId}; -use rlp::{RlpStream, Rlp}; use ethereum_types::{H256, U256}; +use io::TimerToken; use kvdb::DBValue; +use network::{NetworkProtocolHandler, NetworkContext, PeerId}; use parking_lot::{Mutex, RwLock}; -use std::time::{Duration, Instant}; - -use std::collections::{HashMap, HashSet}; -use std::fmt; -use std::sync::Arc; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::ops::{BitOr, BitAnd, Not}; - use provider::Provider; use request::{Request, NetworkRequests as Requests, Response}; +use rlp::{RlpStream, Rlp}; +use std::collections::{HashMap, HashSet}; +use std::fmt; +use std::ops::{BitOr, BitAnd, Not}; +use std::sync::Arc; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::time::{Duration, Instant}; use self::request_credits::{Credits, FlowParams}; use self::context::{Ctx, TickCtx}; diff --git a/ethcore/light/src/net/status.rs b/ethcore/light/src/net/status.rs index d89db173c..98b29f3e3 100644 --- a/ethcore/light/src/net/status.rs +++ b/ethcore/light/src/net/status.rs @@ -16,8 +16,8 @@ //! Peer status and capabilities. 
-use rlp::{DecoderError, Encodable, Decodable, RlpStream, Rlp}; use ethereum_types::{H256, U256}; +use rlp::{DecoderError, Encodable, Decodable, RlpStream, Rlp}; use super::request_credits::FlowParams; diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index e182f38b8..0c9971d75 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -19,20 +19,18 @@ use ethcore::blockchain_info::BlockChainInfo; use ethcore::client::{EachBlockWith, TestBlockChainClient}; -use ethcore::ids::BlockId; use ethcore::encoded; -use network::{PeerId, NodeId}; -use transaction::{Action, PendingTransaction}; - +use ethcore::ids::BlockId; +use ethereum_types::{H256, U256, Address}; +use net::{LightProtocol, Params, packet, Peer}; use net::context::IoContext; use net::status::{Capabilities, Status}; -use net::{LightProtocol, Params, packet, Peer}; +use network::{PeerId, NodeId}; use provider::Provider; use request; use request::*; - use rlp::{Rlp, RlpStream}; -use ethereum_types::{H256, U256, Address}; +use transaction::{Action, PendingTransaction}; use std::sync::Arc; use std::time::Instant; diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index 4cac6b629..9feb0a670 100644 --- a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -18,26 +18,25 @@ use std::sync::Arc; +use bytes::Bytes; use ethcore::basic_account::BasicAccount; use ethcore::encoded; use ethcore::engines::{EthEngine, StateDependentProof}; use ethcore::machine::EthereumMachine; use ethcore::receipt::Receipt; use ethcore::state::{self, ProvedExecution}; -use transaction::SignedTransaction; -use vm::EnvInfo; -use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY, KECCAK_EMPTY_LIST_RLP, keccak}; - -use request::{self as net_request, IncompleteRequest, CompleteRequest, Output, OutputKind, Field}; - -use rlp::{RlpStream, Rlp}; use ethereum_types::{H256, U256, Address}; -use parking_lot::Mutex; +use ethtrie::{TrieError, TrieDB}; +use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY, KECCAK_EMPTY_LIST_RLP, keccak}; use hashdb::HashDB; use kvdb::DBValue; -use bytes::Bytes; use memorydb::MemoryDB; -use trie::{Trie, TrieDB, TrieError}; +use parking_lot::Mutex; +use request::{self as net_request, IncompleteRequest, CompleteRequest, Output, OutputKind, Field}; +use rlp::{RlpStream, Rlp}; +use transaction::SignedTransaction; +use trie::Trie; +use vm::EnvInfo; const SUPPLIED_MATCHES: &'static str = "supplied responses always match produced requests; enforced by `check_response`; qed"; @@ -935,11 +934,12 @@ mod tests { use ethereum_types::{H256, Address}; use memorydb::MemoryDB; use parking_lot::Mutex; - use trie::{Trie, TrieMut, SecTrieDB, SecTrieDBMut}; - use trie::recorder::Recorder; + use trie::{Trie, TrieMut}; + use ethtrie::{SecTrieDB, SecTrieDBMut}; + use trie::Recorder; use hash::keccak; - use ethcore::client::{BlockChainClient, BlockInfo, TestBlockChainClient, EachBlockWith}; + use ::ethcore::client::{BlockChainClient, BlockInfo, TestBlockChainClient, EachBlockWith}; use ethcore::header::Header; use ethcore::encoded; use ethcore::receipt::{Receipt, TransactionOutcome}; diff --git a/ethcore/private-tx/Cargo.toml b/ethcore/private-tx/Cargo.toml index e32c81bc2..d1e30fc25 100644 --- a/ethcore/private-tx/Cargo.toml +++ b/ethcore/private-tx/Cargo.toml @@ -26,6 +26,7 @@ keccak-hash = { path = "../../util/hash" } log = "0.3" parking_lot = "0.5" patricia-trie = { path = "../../util/patricia_trie" } +patricia-trie-ethereum = { path = 
"../../util/patricia-trie-ethereum" } rand = "0.3" rlp = { path = "../../util/rlp" } rlp_derive = { path = "../../util/rlp_derive" } diff --git a/ethcore/private-tx/src/error.rs b/ethcore/private-tx/src/error.rs index 0456b3305..55a75d6d9 100644 --- a/ethcore/private-tx/src/error.rs +++ b/ethcore/private-tx/src/error.rs @@ -16,7 +16,7 @@ use ethereum_types::Address; use rlp::DecoderError; -use trie::TrieError; +use ethtrie::TrieError; use ethcore::account_provider::SignError; use ethcore::error::{Error as EthcoreError, ExecutionError}; use transaction::Error as TransactionError; diff --git a/ethcore/private-tx/src/lib.rs b/ethcore/private-tx/src/lib.rs index 38285ed85..968b73be8 100644 --- a/ethcore/private-tx/src/lib.rs +++ b/ethcore/private-tx/src/lib.rs @@ -40,6 +40,7 @@ extern crate futures; extern crate keccak_hash as hash; extern crate parking_lot; extern crate patricia_trie as trie; +extern crate patricia_trie_ethereum as ethtrie; extern crate rlp; extern crate url; extern crate rustc_hex; diff --git a/ethcore/src/account_db.rs b/ethcore/src/account_db.rs index 8fc4b95c6..dd05d22cb 100644 --- a/ethcore/src/account_db.rs +++ b/ethcore/src/account_db.rs @@ -15,12 +15,13 @@ // along with Parity. If not, see . //! DB backend wrapper for Account trie -use std::collections::HashMap; -use hash::{KECCAK_NULL_RLP, keccak}; use ethereum_types::H256; +use hash::{KECCAK_NULL_RLP, keccak}; +use hashdb::{HashDB, AsHashDB}; +use keccak_hasher::KeccakHasher; use kvdb::DBValue; -use hashdb::HashDB; use rlp::NULL_RLP; +use std::collections::HashMap; #[cfg(test)] use ethereum_types::Address; @@ -44,7 +45,7 @@ fn combine_key<'a>(address_hash: &'a H256, key: &'a H256) -> H256 { /// A factory for different kinds of account dbs. #[derive(Debug, Clone)] pub enum Factory { - /// Mangle hashes based on address. + /// Mangle hashes based on address. This is the default. Mangled, /// Don't mangle hashes. Plain, @@ -57,7 +58,7 @@ impl Default for Factory { impl Factory { /// Create a read-only accountdb. /// This will panic when write operations are called. - pub fn readonly<'db>(&self, db: &'db HashDB, address_hash: H256) -> Box { + pub fn readonly<'db>(&self, db: &'db HashDB, address_hash: H256) -> Box + 'db> { match *self { Factory::Mangled => Box::new(AccountDB::from_hash(db, address_hash)), Factory::Plain => Box::new(Wrapping(db)), @@ -65,7 +66,7 @@ impl Factory { } /// Create a new mutable hashdb. - pub fn create<'db>(&self, db: &'db mut HashDB, address_hash: H256) -> Box { + pub fn create<'db>(&self, db: &'db mut HashDB, address_hash: H256) -> Box + 'db> { match *self { Factory::Mangled => Box::new(AccountDBMut::from_hash(db, address_hash)), Factory::Plain => Box::new(WrappingMut(db)), @@ -77,19 +78,19 @@ impl Factory { /// DB backend wrapper for Account trie /// Transforms trie node keys for the database pub struct AccountDB<'db> { - db: &'db HashDB, + db: &'db HashDB, address_hash: H256, } impl<'db> AccountDB<'db> { /// Create a new AccountDB from an address. #[cfg(test)] - pub fn new(db: &'db HashDB, address: &Address) -> Self { + pub fn new(db: &'db HashDB, address: &Address) -> Self { Self::from_hash(db, keccak(address)) } /// Create a new AcountDB from an address' hash. 
- pub fn from_hash(db: &'db HashDB, address_hash: H256) -> Self { + pub fn from_hash(db: &'db HashDB, address_hash: H256) -> Self { AccountDB { db: db, address_hash: address_hash, @@ -97,7 +98,12 @@ impl<'db> AccountDB<'db> { } } -impl<'db> HashDB for AccountDB<'db>{ +impl<'db> AsHashDB for AccountDB<'db> { + fn as_hashdb(&self) -> &HashDB { self } + fn as_hashdb_mut(&mut self) -> &mut HashDB { self } +} + +impl<'db> HashDB for AccountDB<'db> { fn keys(&self) -> HashMap { unimplemented!() } @@ -131,19 +137,19 @@ impl<'db> HashDB for AccountDB<'db>{ /// DB backend wrapper for Account trie pub struct AccountDBMut<'db> { - db: &'db mut HashDB, + db: &'db mut HashDB, address_hash: H256, } impl<'db> AccountDBMut<'db> { /// Create a new AccountDB from an address. #[cfg(test)] - pub fn new(db: &'db mut HashDB, address: &Address) -> Self { + pub fn new(db: &'db mut HashDB, address: &Address) -> Self { Self::from_hash(db, keccak(address)) } /// Create a new AcountDB from an address' hash. - pub fn from_hash(db: &'db mut HashDB, address_hash: H256) -> Self { + pub fn from_hash(db: &'db mut HashDB, address_hash: H256) -> Self { AccountDBMut { db: db, address_hash: address_hash, @@ -156,7 +162,7 @@ impl<'db> AccountDBMut<'db> { } } -impl<'db> HashDB for AccountDBMut<'db>{ +impl<'db> HashDB for AccountDBMut<'db>{ fn keys(&self) -> HashMap { unimplemented!() } @@ -202,9 +208,19 @@ impl<'db> HashDB for AccountDBMut<'db>{ } } -struct Wrapping<'db>(&'db HashDB); +impl<'db> AsHashDB for AccountDBMut<'db> { + fn as_hashdb(&self) -> &HashDB { self } + fn as_hashdb_mut(&mut self) -> &mut HashDB { self } +} -impl<'db> HashDB for Wrapping<'db> { +struct Wrapping<'db>(&'db HashDB); + +impl<'db> AsHashDB for Wrapping<'db> { + fn as_hashdb(&self) -> &HashDB { self } + fn as_hashdb_mut(&mut self) -> &mut HashDB { self } +} + +impl<'db> HashDB for Wrapping<'db> { fn keys(&self) -> HashMap { unimplemented!() } @@ -236,9 +252,13 @@ impl<'db> HashDB for Wrapping<'db> { } } -struct WrappingMut<'db>(&'db mut HashDB); +struct WrappingMut<'db>(&'db mut HashDB); +impl<'db> AsHashDB for WrappingMut<'db> { + fn as_hashdb(&self) -> &HashDB { self } + fn as_hashdb_mut(&mut self) -> &mut HashDB { self } +} -impl<'db> HashDB for WrappingMut<'db>{ +impl<'db> HashDB for WrappingMut<'db>{ fn keys(&self) -> HashMap { unimplemented!() } diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index 91de4d379..ba21cb417 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -17,28 +17,27 @@ //! Blockchain block. 
use std::cmp; -use std::sync::Arc; use std::collections::HashSet; -use hash::{keccak, KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP}; -use triehash::ordered_trie_root; +use std::sync::Arc; -use rlp::{Rlp, RlpStream, Encodable, Decodable, DecoderError, encode_list}; -use ethereum_types::{H256, U256, Address, Bloom}; use bytes::Bytes; -use unexpected::{Mismatch, OutOfBounds}; - -use vm::{EnvInfo, LastHashes}; use engines::EthEngine; use error::{Error, BlockError}; +use ethereum_types::{H256, U256, Address, Bloom}; use factory::Factories; +use hash::{keccak, KECCAK_NULL_RLP, KECCAK_EMPTY_LIST_RLP}; use header::{Header, ExtendedHeader}; use receipt::{Receipt, TransactionOutcome}; -use state::State; +use rlp::{Rlp, RlpStream, Encodable, Decodable, DecoderError, encode_list}; use state_db::StateDB; +use state::State; use trace::Tracing; use transaction::{UnverifiedTransaction, SignedTransaction, Error as TransactionError}; +use triehash::ordered_trie_root; +use unexpected::{Mismatch, OutOfBounds}; use verification::PreverifiedBlock; use views::BlockView; +use vm::{EnvInfo, LastHashes}; /// A block, encoded as it is on the block chain. #[derive(Default, Debug, Clone, PartialEq)] diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index 58a5a06b0..b2ba886b4 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -17,37 +17,38 @@ //! Blockchain database. use std::collections::{HashMap, HashSet}; +use std::{mem, io}; use std::path::Path; use std::sync::Arc; -use std::{mem, io}; -use itertools::Itertools; -use blooms_db; -use heapsize::HeapSizeOf; -use ethereum_types::{H256, Bloom, BloomRef, U256}; -use parking_lot::{Mutex, RwLock}; -use bytes::Bytes; -use rlp::RlpStream; -use rlp_compress::{compress, decompress, blocks_swapper}; -use header::*; -use transaction::*; -use views::{BlockView, HeaderView}; -use log_entry::{LogEntry, LocalizedLogEntry}; -use receipt::Receipt; + +use ansi_term::Colour; +use blockchain::{CacheSize, ImportRoute, Config}; use blockchain::best_block::{BestBlock, BestAncientBlock}; use blockchain::block_info::{BlockInfo, BlockLocation, BranchBecomingCanonChainData}; use blockchain::extras::{BlockReceipts, BlockDetails, TransactionAddress, EPOCH_KEY_PREFIX, EpochTransitions}; +use blockchain::update::{ExtrasUpdate, ExtrasInsert}; +use blooms_db; +use bytes::Bytes; +use cache_manager::CacheManager; +use db::{self, Writable, Readable, CacheUpdatePolicy}; +use encoded; +use engines::epoch::{Transition as EpochTransition, PendingTransition as PendingEpochTransition}; +use engines::ForkChoice; +use ethereum_types::{H256, Bloom, BloomRef, U256}; +use header::*; +use heapsize::HeapSizeOf; +use itertools::Itertools; +use kvdb::{DBTransaction, KeyValueDB}; +use log_entry::{LogEntry, LocalizedLogEntry}; +use parking_lot::{Mutex, RwLock}; +use rayon::prelude::*; +use receipt::Receipt; +use rlp_compress::{compress, decompress, blocks_swapper}; +use rlp::RlpStream; +use transaction::*; use types::blockchain_info::BlockChainInfo; use types::tree_route::TreeRoute; -use blockchain::update::{ExtrasUpdate, ExtrasInsert}; -use blockchain::{CacheSize, ImportRoute, Config}; -use db::{self, Writable, Readable, CacheUpdatePolicy}; -use cache_manager::CacheManager; -use encoded; -use engines::ForkChoice; -use engines::epoch::{Transition as EpochTransition, PendingTransition as PendingEpochTransition}; -use rayon::prelude::*; -use ansi_term::Colour; -use kvdb::{DBTransaction, KeyValueDB}; +use views::{BlockView, HeaderView}; /// 
Database backing `BlockChain`. pub trait BlockChainDB: Send + Sync { diff --git a/ethcore/src/blockchain/extras.rs b/ethcore/src/blockchain/extras.rs index 6b54fee2e..bbf67374a 100644 --- a/ethcore/src/blockchain/extras.rs +++ b/ethcore/src/blockchain/extras.rs @@ -16,18 +16,18 @@ //! Blockchain DB extras. -use std::ops; use std::io::Write; +use std::ops; + use db::Key; use engines::epoch::{Transition as EpochTransition}; +use ethereum_types::{H256, H264, U256}; use header::BlockNumber; +use heapsize::HeapSizeOf; +use kvdb::PREFIX_LEN as DB_PREFIX_LEN; use receipt::Receipt; use rlp; -use heapsize::HeapSizeOf; -use ethereum_types::{H256, H264, U256}; -use kvdb::PREFIX_LEN as DB_PREFIX_LEN; - /// Represents index of extra data in database #[derive(Copy, Debug, Hash, Eq, PartialEq, Clone)] pub enum ExtrasIndex { @@ -252,6 +252,7 @@ pub struct EpochTransitions { #[cfg(test)] mod tests { use rlp::*; + use super::BlockReceipts; #[test] diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 37dd71b9b..9d68f0841 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -1713,7 +1713,7 @@ impl BlockChainClient for Client { fn list_storage(&self, id: BlockId, account: &Address, after: Option<&H256>, count: u64) -> Option> { if !self.factories.trie.is_fat() { - trace!(target: "fatdb", "list_stroage: Not a fat DB"); + trace!(target: "fatdb", "list_storage: Not a fat DB"); return None; } diff --git a/ethcore/src/client/error.rs b/ethcore/src/client/error.rs index 803499b9c..d40fd261c 100644 --- a/ethcore/src/client/error.rs +++ b/ethcore/src/client/error.rs @@ -16,7 +16,7 @@ use std::fmt::{Display, Formatter, Error as FmtError}; use util_error::UtilError; -use trie::TrieError; +use ethtrie::TrieError; /// Client configuration errors. #[derive(Debug)] diff --git a/ethcore/src/client/evm_test_client.rs b/ethcore/src/client/evm_test_client.rs index eccb9971a..f11cae3b0 100644 --- a/ethcore/src/client/evm_test_client.rs +++ b/ethcore/src/client/evm_test_client.rs @@ -25,12 +25,13 @@ use {state, state_db, client, executive, trace, transaction, db, spec, pod_state use factory::Factories; use evm::{VMType, FinalizationResult}; use vm::{self, ActionParams}; +use ethtrie; /// EVM test Error. #[derive(Debug)] pub enum EvmTestError { /// Trie integrity error. - Trie(trie::TrieError), + Trie(Box), /// EVM error. Evm(vm::Error), /// Initialization error. 
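The call-site changes in these hunks follow one pattern: construct the backing `HashDB` with an explicit hasher (`MemoryDB::<KeccakHasher>::new()` in `cht.rs`), keep the generic traits from `patricia_trie` (`Trie`, `TrieMut`), and take the Keccak/RLP-specialised types (`TrieDB`, `TrieDBMut`, `Result`, `TrieError`) from the new `patricia-trie-ethereum` crate. A minimal usage sketch under those assumptions; type names are taken from the diff, exact signatures may differ slightly:

```rust
// Illustrative round-trip through the generic trie, mirroring the updated call sites.
extern crate ethereum_types;
extern crate hashdb;
extern crate keccak_hasher;
extern crate memorydb;
extern crate patricia_trie as trie;
extern crate patricia_trie_ethereum as ethtrie;

use ethereum_types::H256;
use hashdb::DBValue;
use keccak_hasher::KeccakHasher;
use memorydb::MemoryDB;
use trie::{Trie, TrieMut};
use ethtrie::{TrieDB, TrieDBMut}; // Keccak/RLP instantiations of the generic trie types

fn roundtrip() -> ethtrie::Result<Option<DBValue>> {
	// The in-memory backing store is now parameterised by the hasher.
	let mut db = MemoryDB::<KeccakHasher>::new();
	let mut root = H256::default();
	{
		// Build a mutable trie over the generic HashDB; dropping it writes nodes into `db`.
		let mut t = TrieDBMut::new(&mut db, &mut root);
		t.insert(b"key", b"value")?;
	}
	// Read back through the immutable view rooted at the updated `root`.
	let t = TrieDB::new(&db, &root)?;
	t.get(b"key")
}
```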
diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 620f1af33..3cb4660fe 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -52,7 +52,6 @@ use miner::{self, Miner, MinerService}; use spec::Spec; use types::basic_account::BasicAccount; use types::pruning_info::PruningInfo; - use verification::queue::QueueInfo; use block::{OpenBlock, SealedBlock, ClosedBlock}; use executive::Executed; @@ -62,7 +61,7 @@ use state_db::StateDB; use header::Header; use encoded; use engines::EthEngine; -use trie; +use ethtrie; use state::StateInfo; use views::BlockView; @@ -581,10 +580,10 @@ impl Call for TestBlockChainClient { } impl StateInfo for () { - fn nonce(&self, _address: &Address) -> trie::Result { unimplemented!() } - fn balance(&self, _address: &Address) -> trie::Result { unimplemented!() } - fn storage_at(&self, _address: &Address, _key: &H256) -> trie::Result { unimplemented!() } - fn code(&self, _address: &Address) -> trie::Result>> { unimplemented!() } + fn nonce(&self, _address: &Address) -> ethtrie::Result { unimplemented!() } + fn balance(&self, _address: &Address) -> ethtrie::Result { unimplemented!() } + fn storage_at(&self, _address: &Address, _key: &H256) -> ethtrie::Result { unimplemented!() } + fn code(&self, _address: &Address) -> ethtrie::Result>> { unimplemented!() } } impl StateClient for TestBlockChainClient { diff --git a/ethcore/src/engines/authority_round/mod.rs b/ethcore/src/engines/authority_round/mod.rs index dee9c7602..378b86c3e 100644 --- a/ethcore/src/engines/authority_round/mod.rs +++ b/ethcore/src/engines/authority_round/mod.rs @@ -34,12 +34,9 @@ use ethjson; use machine::{AuxiliaryData, Call, EthereumMachine}; use hash::keccak; use header::{Header, BlockNumber, ExtendedHeader}; - use super::signer::EngineSigner; use super::validator_set::{ValidatorSet, SimpleList, new_validator_set}; - use self::finality::RollingFinality; - use ethkey::{self, Password, Signature}; use io::{IoContext, IoHandler, TimerToken, IoService}; use itertools::{self, Itertools}; diff --git a/ethcore/src/engines/tendermint/message.rs b/ethcore/src/engines/tendermint/message.rs index d7a08b303..7d76c32a2 100644 --- a/ethcore/src/engines/tendermint/message.rs +++ b/ethcore/src/engines/tendermint/message.rs @@ -16,15 +16,15 @@ //! Tendermint message handling. -use std::cmp; -use hash::keccak; -use ethereum_types::{H256, H520, Address}; use bytes::Bytes; -use super::{Height, View, BlockHash, Step}; use error::Error; +use ethereum_types::{H256, H520, Address}; +use ethkey::{recover, public_to_address}; +use hash::keccak; use header::Header; use rlp::{Rlp, RlpStream, Encodable, Decodable, DecoderError}; -use ethkey::{recover, public_to_address}; +use std::cmp; +use super::{Height, View, BlockHash, Step}; use super::super::vote_collector::Message; /// Message transmitted between consensus participants. diff --git a/ethcore/src/engines/tendermint/mod.rs b/ethcore/src/engines/tendermint/mod.rs index 26b1cfd98..c95d61aa7 100644 --- a/ethcore/src/engines/tendermint/mod.rs +++ b/ethcore/src/engines/tendermint/mod.rs @@ -145,7 +145,7 @@ impl super::EpochVerifier for EpochVerifier fn check_finality_proof(&self, proof: &[u8]) -> Option> { match ::rlp::decode(proof) { Ok(header) => self.verify_light(&header).ok().map(|_| vec![header.hash()]), - Err(_) => None // REVIEW: log perhaps? Not sure what the policy is. 
+ Err(_) => None } } } diff --git a/ethcore/src/engines/validator_set/safe_contract.rs b/ethcore/src/engines/validator_set/safe_contract.rs index 2cecac8bd..a7f4f2c73 100644 --- a/ethcore/src/engines/validator_set/safe_contract.rs +++ b/ethcore/src/engines/validator_set/safe_contract.rs @@ -16,27 +16,23 @@ /// Validator set maintained in a contract, updated using `getValidators` method. -use std::sync::{Weak, Arc}; -use hash::keccak; - -use ethereum_types::{H256, U256, Address, Bloom}; -use parking_lot::RwLock; - use bytes::Bytes; -use memory_cache::MemoryLruCache; -use unexpected::Mismatch; -use rlp::{Rlp, RlpStream}; -use kvdb::DBValue; - use client::EngineClient; -use machine::{AuxiliaryData, Call, EthereumMachine, AuxiliaryRequest}; +use ethereum_types::{H256, U256, Address, Bloom}; +use hash::keccak; use header::Header; use ids::BlockId; +use kvdb::DBValue; use log_entry::LogEntry; +use machine::{AuxiliaryData, Call, EthereumMachine, AuxiliaryRequest}; +use memory_cache::MemoryLruCache; +use parking_lot::RwLock; use receipt::Receipt; - +use rlp::{Rlp, RlpStream}; +use std::sync::{Weak, Arc}; use super::{SystemCall, ValidatorSet}; use super::simple_list::SimpleList; +use unexpected::Mismatch; use_contract!(validator_set, "ValidatorSet", "res/contracts/validator_set.json"); diff --git a/ethcore/src/error.rs b/ethcore/src/error.rs index bc8d6cc33..020eea8a6 100644 --- a/ethcore/src/error.rs +++ b/ethcore/src/error.rs @@ -22,7 +22,7 @@ use ethereum_types::{H256, U256, Address, Bloom}; use util_error::{self, UtilError}; use snappy::InvalidInput; use unexpected::{Mismatch, OutOfBounds}; -use trie::TrieError; +use ethtrie::TrieError; use io::*; use header::BlockNumber; use client::Error as ClientError; diff --git a/ethcore/src/executed.rs b/ethcore/src/executed.rs index 3d0b9767c..2dc0c041b 100644 --- a/ethcore/src/executed.rs +++ b/ethcore/src/executed.rs @@ -18,7 +18,7 @@ use ethereum_types::{U256, U512, Address}; use bytes::Bytes; -use trie; +use ethtrie; use vm; use trace::{VMTrace, FlatTrace}; use log_entry::LogEntry; @@ -117,9 +117,14 @@ pub enum ExecutionError { TransactionMalformed(String), } -impl From> for ExecutionError { - fn from(err: Box) -> Self { - ExecutionError::Internal(format!("{}", err)) +impl From> for ExecutionError { + fn from(err: Box) -> Self { + ExecutionError::Internal(format!("{:?}", err)) + } +} +impl From for ExecutionError { + fn from(err: ethtrie::TrieError) -> Self { + ExecutionError::Internal(format!("{:?}", err)) } } diff --git a/ethcore/src/factory.rs b/ethcore/src/factory.rs index b429073b3..2eff7d760 100644 --- a/ethcore/src/factory.rs +++ b/ethcore/src/factory.rs @@ -15,10 +15,12 @@ // along with Parity. If not, see . use trie::TrieFactory; +use ethtrie::RlpCodec; use account_db::Factory as AccountFactory; use evm::{Factory as EvmFactory, VMType}; use vm::{Vm, ActionParams, Schedule}; use wasm::WasmInterpreter; +use keccak_hasher::KeccakHasher; const WASM_MAGIC_NUMBER: &'static [u8; 4] = b"\0asm"; @@ -54,7 +56,7 @@ pub struct Factories { /// factory for evm. pub vm: VmFactory, /// factory for tries. - pub trie: TrieFactory, + pub trie: TrieFactory, /// factory for account databases. 
pub accountdb: AccountFactory, } diff --git a/ethcore/src/json_tests/executive.rs b/ethcore/src/json_tests/executive.rs index 86c6d6efc..daeed18eb 100644 --- a/ethcore/src/json_tests/executive.rs +++ b/ethcore/src/json_tests/executive.rs @@ -31,7 +31,7 @@ use ethjson; use trace::{Tracer, NoopTracer}; use trace::{VMTracer, NoopVMTracer}; use bytes::{Bytes, BytesRef}; -use trie; +use ethtrie; use rlp::RlpStream; use hash::keccak; use machine::EthereumMachine as Machine; @@ -93,7 +93,7 @@ impl<'a, T: 'a, V: 'a, B: 'a> TestExt<'a, T, V, B> address: Address, tracer: &'a mut T, vm_tracer: &'a mut V, - ) -> trie::Result { + ) -> ethtrie::Result { let static_call = false; Ok(TestExt { nonce: state.nonce(&address)?, diff --git a/ethcore/src/json_tests/trie.rs b/ethcore/src/json_tests/trie.rs index 81bc5bcc0..0466c4de1 100644 --- a/ethcore/src/json_tests/trie.rs +++ b/ethcore/src/json_tests/trie.rs @@ -16,8 +16,10 @@ use ethjson; use trie::{TrieFactory, TrieSpec}; +use ethtrie::RlpCodec; use ethereum_types::H256; use memorydb::MemoryDB; +use keccak_hasher::KeccakHasher; use super::HookType; @@ -28,13 +30,13 @@ pub use self::secure::run_test_file as run_secure_test_file; fn test_trie(json: &[u8], trie: TrieSpec, start_stop_hook: &mut H) -> Vec { let tests = ethjson::trie::Test::load(json).unwrap(); - let factory = TrieFactory::new(trie); + let factory = TrieFactory::<_, RlpCodec>::new(trie); let mut result = vec![]; for (name, test) in tests.into_iter() { start_stop_hook(&name, HookType::OnStart); - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::default(); let mut t = factory.create(&mut memdb, &mut root); diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index f52afdcad..3663a1cc9 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -91,9 +91,11 @@ extern crate rayon; extern crate rlp; extern crate rlp_compress; extern crate keccak_hash as hash; +extern crate keccak_hasher; extern crate heapsize; extern crate memorydb; extern crate patricia_trie as trie; +extern crate patricia_trie_ethereum as ethtrie; extern crate triehash; extern crate ansi_term; extern crate unexpected; diff --git a/ethcore/src/pod_account.rs b/ethcore/src/pod_account.rs index 281299b3b..f35f05177 100644 --- a/ethcore/src/pod_account.rs +++ b/ethcore/src/pod_account.rs @@ -20,9 +20,11 @@ use itertools::Itertools; use hash::{keccak}; use ethereum_types::{H256, U256}; use hashdb::HashDB; +use keccak_hasher::KeccakHasher; use triehash::sec_trie_root; use bytes::Bytes; use trie::TrieFactory; +use ethtrie::RlpCodec; use state::Account; use ethjson; use types::account_diff::*; @@ -65,7 +67,7 @@ impl PodAccount { } /// Place additional data into given hash DB. 
- pub fn insert_additional(&self, db: &mut HashDB, factory: &TrieFactory) { + pub fn insert_additional(&self, db: &mut HashDB, factory: &TrieFactory) { match self.code { Some(ref c) if !c.is_empty() => { db.insert(c); } _ => {} diff --git a/ethcore/src/snapshot/account.rs b/ethcore/src/snapshot/account.rs index 71c10068f..13ab4b227 100644 --- a/ethcore/src/snapshot/account.rs +++ b/ethcore/src/snapshot/account.rs @@ -18,16 +18,15 @@ use account_db::{AccountDB, AccountDBMut}; use basic_account::BasicAccount; -use snapshot::Error; -use hash::{KECCAK_EMPTY, KECCAK_NULL_RLP}; - -use ethereum_types::{H256, U256}; -use hashdb::HashDB; use bytes::Bytes; -use trie::{TrieDB, Trie}; +use ethereum_types::{H256, U256}; +use ethtrie::{TrieDB, TrieDBMut}; +use hash::{KECCAK_EMPTY, KECCAK_NULL_RLP}; +use hashdb::HashDB; use rlp::{RlpStream, Rlp}; - +use snapshot::Error; use std::collections::HashSet; +use trie::{Trie, TrieMut}; // An empty account -- these were replaced with RLP null data for a space optimization in v1. const ACC_EMPTY: BasicAccount = BasicAccount { @@ -151,7 +150,6 @@ pub fn from_fat_rlp( rlp: Rlp, mut storage_root: H256, ) -> Result<(BasicAccount, Option), Error> { - use trie::{TrieDBMut, TrieMut}; // check for special case of empty account. if rlp.is_empty() { diff --git a/ethcore/src/snapshot/block.rs b/ethcore/src/snapshot/block.rs index cec2e491a..3a18015e0 100644 --- a/ethcore/src/snapshot/block.rs +++ b/ethcore/src/snapshot/block.rs @@ -19,7 +19,6 @@ use block::Block; use header::Header; use hash::keccak; - use views::BlockView; use rlp::{DecoderError, RlpStream, Rlp}; use ethereum_types::H256; diff --git a/ethcore/src/snapshot/consensus/authority.rs b/ethcore/src/snapshot/consensus/authority.rs index 7adaf8340..8ae4cc33c 100644 --- a/ethcore/src/snapshot/consensus/authority.rs +++ b/ethcore/src/snapshot/consensus/authority.rs @@ -38,6 +38,7 @@ use ethereum_types::{H256, U256}; use kvdb::KeyValueDB; use bytes::Bytes; + /// Snapshot creation and restoration for PoA chains. /// Chunk format: /// diff --git a/ethcore/src/snapshot/error.rs b/ethcore/src/snapshot/error.rs index 36fb0927a..527b4e288 100644 --- a/ethcore/src/snapshot/error.rs +++ b/ethcore/src/snapshot/error.rs @@ -21,7 +21,7 @@ use std::fmt; use ids::BlockId; use ethereum_types::H256; -use trie::TrieError; +use ethtrie::TrieError; use rlp::DecoderError; /// Snapshot-related errors. diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 30a61b779..f4b9bf699 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -32,13 +32,15 @@ use ids::BlockId; use ethereum_types::{H256, U256}; use hashdb::HashDB; +use keccak_hasher::KeccakHasher; use kvdb::DBValue; use snappy; use bytes::Bytes; use parking_lot::Mutex; use journaldb::{self, Algorithm, JournalDB}; use kvdb::KeyValueDB; -use trie::{TrieDB, TrieDBMut, Trie, TrieMut}; +use trie::{Trie, TrieMut}; +use ethtrie::{TrieDB, TrieDBMut}; use rlp::{RlpStream, Rlp}; use bloom_journal::Bloom; @@ -126,7 +128,7 @@ pub fn take_snapshot( engine: &EthEngine, chain: &BlockChain, block_at: H256, - state_db: &HashDB, + state_db: &HashDB, writer: W, p: &Progress ) -> Result<(), Error> { @@ -264,7 +266,7 @@ impl<'a> StateChunker<'a> { /// /// Returns a list of hashes of chunks created, or any error it may /// have encountered. 
-pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex, progress: &'a Progress) -> Result, Error> { +pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex, progress: &'a Progress) -> Result, Error> { let account_trie = TrieDB::new(db, &root)?; let mut chunker = StateChunker { @@ -414,7 +416,7 @@ struct RebuiltStatus { // rebuild a set of accounts and their storage. // returns a status detailing newly-loaded code and accounts missing code. fn rebuild_accounts( - db: &mut HashDB, + db: &mut HashDB, account_fat_rlps: Rlp, out_chunk: &mut [(H256, Bytes)], known_code: &HashMap, diff --git a/ethcore/src/snapshot/tests/helpers.rs b/ethcore/src/snapshot/tests/helpers.rs index f01cad45c..19f50e946 100644 --- a/ethcore/src/snapshot/tests/helpers.rs +++ b/ethcore/src/snapshot/tests/helpers.rs @@ -36,8 +36,10 @@ use rand::Rng; use kvdb::DBValue; use ethereum_types::H256; use hashdb::HashDB; +use keccak_hasher::KeccakHasher; use journaldb; -use trie::{SecTrieDBMut, TrieMut, TrieDB, TrieDBMut, Trie}; +use trie::{TrieMut, Trie}; +use ethtrie::{SecTrieDBMut, TrieDB, TrieDBMut}; use self::trie_standardmap::{Alphabet, StandardMap, ValueMode}; // the proportion of accounts we will alter each tick. @@ -60,7 +62,7 @@ impl StateProducer { /// Tick the state producer. This alters the state, writing new data into /// the database. - pub fn tick(&mut self, rng: &mut R, db: &mut HashDB) { + pub fn tick(&mut self, rng: &mut R, db: &mut HashDB) { // modify existing accounts. let mut accounts_to_modify: Vec<_> = { let trie = TrieDB::new(&*db, &self.state_root).unwrap(); @@ -129,7 +131,7 @@ pub fn fill_storage(mut db: AccountDBMut, root: &mut H256, seed: &mut H256) { } /// Compare two state dbs. -pub fn compare_dbs(one: &HashDB, two: &HashDB) { +pub fn compare_dbs(one: &HashDB, two: &HashDB) { let keys = one.keys(); for key in keys.keys() { diff --git a/ethcore/src/state/account.rs b/ethcore/src/state/account.rs index a7a40e6a3..359b84b1e 100644 --- a/ethcore/src/state/account.rs +++ b/ethcore/src/state/account.rs @@ -23,10 +23,11 @@ use hash::{KECCAK_EMPTY, KECCAK_NULL_RLP, keccak}; use ethereum_types::{H256, U256, Address}; use error::Error; use hashdb::HashDB; +use keccak_hasher::KeccakHasher; use kvdb::DBValue; use bytes::{Bytes, ToPretty}; -use trie; -use trie::{SecTrieDB, Trie, TrieFactory, TrieError}; +use trie::{Trie, Recorder}; +use ethtrie::{TrieFactory, TrieDB, SecTrieDB, Result as TrieResult}; use pod_account::*; use rlp::{RlpStream, encode}; use lru_cache::LruCache; @@ -199,7 +200,7 @@ impl Account { /// Get (and cache) the contents of the trie's storage at `key`. /// Takes modified storage into account. - pub fn storage_at(&self, db: &HashDB, key: &H256) -> trie::Result { + pub fn storage_at(&self, db: &HashDB, key: &H256) -> TrieResult { if let Some(value) = self.cached_storage_at(key) { return Ok(value); } @@ -278,7 +279,7 @@ impl Account { } /// Provide a database to get `code_hash`. Should not be called if it is a contract without code. - pub fn cache_code(&mut self, db: &HashDB) -> Option> { + pub fn cache_code(&mut self, db: &HashDB) -> Option> { // TODO: fill out self.code_cache; trace!("Account::cache_code: ic={}; self.code_hash={:?}, self.code_cache={}", self.is_cached(), self.code_hash, self.code_cache.pretty()); @@ -307,7 +308,7 @@ impl Account { } /// Provide a database to get `code_size`. Should not be called if it is a contract without code. 
- pub fn cache_code_size(&mut self, db: &HashDB) -> bool { + pub fn cache_code_size(&mut self, db: &HashDB) -> bool { // TODO: fill out self.code_cache; trace!("Account::cache_code_size: ic={}; self.code_hash={:?}, self.code_cache={}", self.is_cached(), self.code_hash, self.code_cache.pretty()); self.code_size.is_some() || @@ -374,7 +375,7 @@ impl Account { } /// Commit the `storage_changes` to the backing DB and update `storage_root`. - pub fn commit_storage(&mut self, trie_factory: &TrieFactory, db: &mut HashDB) -> trie::Result<()> { + pub fn commit_storage(&mut self, trie_factory: &TrieFactory, db: &mut HashDB) -> TrieResult<()> { let mut t = trie_factory.from_existing(db, &mut self.storage_root)?; for (k, v) in self.storage_changes.drain() { // cast key and value to trait type, @@ -390,7 +391,7 @@ impl Account { } /// Commit any unsaved code. `code_hash` will always return the hash of the `code_cache` after this. - pub fn commit_code(&mut self, db: &mut HashDB) { + pub fn commit_code(&mut self, db: &mut HashDB) { trace!("Commiting code of {:?} - {:?}, {:?}", self, self.code_filth == Filth::Dirty, self.code_cache.is_empty()); match (self.code_filth == Filth::Dirty, self.code_cache.is_empty()) { (true, true) => { @@ -472,10 +473,7 @@ impl Account { /// trie. /// `storage_key` is the hash of the desired storage key, meaning /// this will only work correctly under a secure trie. - pub fn prove_storage(&self, db: &HashDB, storage_key: H256) -> Result<(Vec, H256), Box> { - use trie::{Trie, TrieDB}; - use trie::recorder::Recorder; - + pub fn prove_storage(&self, db: &HashDB, storage_key: H256) -> TrieResult<(Vec, H256)> { let mut recorder = Recorder::new(); let trie = TrieDB::new(db, &self.storage_root)?; diff --git a/ethcore/src/state/backend.rs b/ethcore/src/state/backend.rs index 6b2e21cb4..d07124d8d 100644 --- a/ethcore/src/state/backend.rs +++ b/ethcore/src/state/backend.rs @@ -29,14 +29,15 @@ use parking_lot::Mutex; use ethereum_types::{Address, H256}; use memorydb::MemoryDB; use hashdb::{AsHashDB, HashDB, DBValue}; +use keccak_hasher::KeccakHasher; /// State backend. See module docs for more details. pub trait Backend: Send { /// Treat the backend as a read-only hashdb. - fn as_hashdb(&self) -> &HashDB; + fn as_hashdb(&self) -> &HashDB; /// Treat the backend as a writeable hashdb. - fn as_hashdb_mut(&mut self) -> &mut HashDB; + fn as_hashdb_mut(&mut self) -> &mut HashDB; /// Add an account entry to the cache. fn add_to_account_cache(&mut self, addr: Address, data: Option, modified: bool); @@ -75,18 +76,18 @@ pub trait Backend: Send { // TODO: when account lookup moved into backends, this won't rely as tenuously on intended // usage. #[derive(Clone, PartialEq)] -pub struct ProofCheck(MemoryDB); +pub struct ProofCheck(MemoryDB); impl ProofCheck { /// Create a new `ProofCheck` backend from the given state items. 
pub fn new(proof: &[DBValue]) -> Self { - let mut db = MemoryDB::new(); + let mut db = MemoryDB::::new(); for item in proof { db.insert(item); } ProofCheck(db) } } -impl HashDB for ProofCheck { +impl HashDB for ProofCheck { fn keys(&self) -> HashMap { self.0.keys() } fn get(&self, key: &H256) -> Option { self.0.get(key) @@ -107,9 +108,14 @@ impl HashDB for ProofCheck { fn remove(&mut self, _key: &H256) { } } +impl AsHashDB for ProofCheck { + fn as_hashdb(&self) -> &HashDB { self } + fn as_hashdb_mut(&mut self) -> &mut HashDB { self } +} + impl Backend for ProofCheck { - fn as_hashdb(&self) -> &HashDB { self } - fn as_hashdb_mut(&mut self) -> &mut HashDB { self } + fn as_hashdb(&self) -> &HashDB { self } + fn as_hashdb_mut(&mut self) -> &mut HashDB { self } fn add_to_account_cache(&mut self, _addr: Address, _data: Option, _modified: bool) {} fn cache_code(&self, _hash: H256, _code: Arc>) {} fn get_cached_account(&self, _addr: &Address) -> Option> { None } @@ -128,13 +134,18 @@ impl Backend for ProofCheck { /// The proof-of-execution can be extracted with `extract_proof`. /// /// This doesn't cache anything or rely on the canonical state caches. -pub struct Proving { +pub struct Proving> { base: H, // state we're proving values from. - changed: MemoryDB, // changed state via insertions. + changed: MemoryDB, // changed state via insertions. proof: Mutex>, } -impl HashDB for Proving { +impl + Send + Sync> AsHashDB for Proving { + fn as_hashdb(&self) -> &HashDB { self } + fn as_hashdb_mut(&mut self) -> &mut HashDB { self } +} + +impl + Send + Sync> HashDB for Proving { fn keys(&self) -> HashMap { let mut keys = self.base.as_hashdb().keys(); keys.extend(self.changed.keys()); @@ -171,14 +182,10 @@ impl HashDB for Proving { } } -impl Backend for Proving { - fn as_hashdb(&self) -> &HashDB { - self - } +impl + Send + Sync> Backend for Proving { + fn as_hashdb(&self) -> &HashDB { self } - fn as_hashdb_mut(&mut self) -> &mut HashDB { - self - } + fn as_hashdb_mut(&mut self) -> &mut HashDB { self } fn add_to_account_cache(&mut self, _: Address, _: Option, _: bool) { } @@ -197,13 +204,13 @@ impl Backend for Proving { fn is_known_null(&self, _: &Address) -> bool { false } } -impl Proving { +impl> Proving { /// Create a new `Proving` over a base database. /// This will store all values ever fetched from that base. pub fn new(base: H) -> Self { Proving { base: base, - changed: MemoryDB::new(), + changed: MemoryDB::::new(), proof: Mutex::new(HashSet::new()), } } @@ -215,7 +222,7 @@ impl Proving { } } -impl Clone for Proving { +impl + Clone> Clone for Proving { fn clone(&self) -> Self { Proving { base: self.base.clone(), @@ -229,12 +236,12 @@ impl Clone for Proving { /// it. Doesn't cache anything. 
pub struct Basic(pub H); -impl Backend for Basic { - fn as_hashdb(&self) -> &HashDB { +impl + Send + Sync> Backend for Basic { + fn as_hashdb(&self) -> &HashDB { self.0.as_hashdb() } - fn as_hashdb_mut(&mut self) -> &mut HashDB { + fn as_hashdb_mut(&mut self) -> &mut HashDB { self.0.as_hashdb_mut() } diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index ccca20b71..f785e24a3 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -44,12 +44,12 @@ use factory::VmFactory; use ethereum_types::{H256, U256, Address}; use hashdb::{HashDB, AsHashDB}; +use keccak_hasher::KeccakHasher; use kvdb::DBValue; use bytes::Bytes; -use trie; -use trie::{Trie, TrieError, TrieDB}; -use trie::recorder::Recorder; +use trie::{Trie, TrieError, Recorder}; +use ethtrie::{TrieDB, Result as TrieResult}; mod account; mod substate; @@ -225,7 +225,7 @@ pub fn check_proof( /// Prove a transaction on the given state. /// Returns `None` when the transacion could not be proved, /// and a proof otherwise. -pub fn prove_transaction( +pub fn prove_transaction + Send + Sync>( db: H, root: H256, transaction: &SignedTransaction, @@ -336,23 +336,23 @@ pub enum CleanupMode<'a> { /// Provides subset of `State` methods to query state information pub trait StateInfo { /// Get the nonce of account `a`. - fn nonce(&self, a: &Address) -> trie::Result; + fn nonce(&self, a: &Address) -> TrieResult; /// Get the balance of account `a`. - fn balance(&self, a: &Address) -> trie::Result; + fn balance(&self, a: &Address) -> TrieResult; /// Mutate storage of account `address` so that it is `value` for `key`. - fn storage_at(&self, address: &Address, key: &H256) -> trie::Result; + fn storage_at(&self, address: &Address, key: &H256) -> TrieResult; /// Get accounts' code. - fn code(&self, a: &Address) -> trie::Result>>; + fn code(&self, a: &Address) -> TrieResult>>; } impl StateInfo for State { - fn nonce(&self, a: &Address) -> trie::Result { State::nonce(self, a) } - fn balance(&self, a: &Address) -> trie::Result { State::balance(self, a) } - fn storage_at(&self, address: &Address, key: &H256) -> trie::Result { State::storage_at(self, address, key) } - fn code(&self, address: &Address) -> trie::Result>> { State::code(self, address) } + fn nonce(&self, a: &Address) -> TrieResult { State::nonce(self, a) } + fn balance(&self, a: &Address) -> TrieResult { State::balance(self, a) } + fn storage_at(&self, address: &Address, key: &H256) -> TrieResult { State::storage_at(self, address, key) } + fn code(&self, address: &Address) -> TrieResult>> { State::code(self, address) } } const SEC_TRIE_DB_UNWRAP_STR: &'static str = "A state can only be created with valid root. Creating a SecTrieDB with a valid root will not fail. \ @@ -379,9 +379,9 @@ impl State { } /// Creates new state with existing state root - pub fn from_existing(db: B, root: H256, account_start_nonce: U256, factories: Factories) -> Result, TrieError> { + pub fn from_existing(db: B, root: H256, account_start_nonce: U256, factories: Factories) -> TrieResult> { if !db.as_hashdb().contains(&root) { - return Err(TrieError::InvalidStateRoot(root)); + return Err(Box::new(TrieError::InvalidStateRoot(root))); } let state = State { @@ -481,7 +481,7 @@ impl State { } /// Destroy the current object and return single account data. - pub fn into_account(self, account: &Address) -> trie::Result<(Option>, HashMap)> { + pub fn into_account(self, account: &Address) -> TrieResult<(Option>, HashMap)> { // TODO: deconstruct without cloning. 
let account = self.require(account, true)?; Ok((account.code().clone(), account.storage_changes().clone())) @@ -504,43 +504,43 @@ impl State { } /// Determine whether an account exists. - pub fn exists(&self, a: &Address) -> trie::Result { + pub fn exists(&self, a: &Address) -> TrieResult { // Bloom filter does not contain empty accounts, so it is important here to // check if account exists in the database directly before EIP-161 is in effect. self.ensure_cached(a, RequireCache::None, false, |a| a.is_some()) } /// Determine whether an account exists and if not empty. - pub fn exists_and_not_null(&self, a: &Address) -> trie::Result { + pub fn exists_and_not_null(&self, a: &Address) -> TrieResult { self.ensure_cached(a, RequireCache::None, false, |a| a.map_or(false, |a| !a.is_null())) } /// Determine whether an account exists and has code or non-zero nonce. - pub fn exists_and_has_code_or_nonce(&self, a: &Address) -> trie::Result { + pub fn exists_and_has_code_or_nonce(&self, a: &Address) -> TrieResult { self.ensure_cached(a, RequireCache::CodeSize, false, |a| a.map_or(false, |a| a.code_hash() != KECCAK_EMPTY || *a.nonce() != self.account_start_nonce)) } /// Get the balance of account `a`. - pub fn balance(&self, a: &Address) -> trie::Result { + pub fn balance(&self, a: &Address) -> TrieResult { self.ensure_cached(a, RequireCache::None, true, |a| a.as_ref().map_or(U256::zero(), |account| *account.balance())) } /// Get the nonce of account `a`. - pub fn nonce(&self, a: &Address) -> trie::Result { + pub fn nonce(&self, a: &Address) -> TrieResult { self.ensure_cached(a, RequireCache::None, true, |a| a.as_ref().map_or(self.account_start_nonce, |account| *account.nonce())) } /// Get the storage root of account `a`. - pub fn storage_root(&self, a: &Address) -> trie::Result> { + pub fn storage_root(&self, a: &Address) -> TrieResult> { self.ensure_cached(a, RequireCache::None, true, |a| a.as_ref().and_then(|account| account.storage_root().cloned())) } /// Mutate storage of account `address` so that it is `value` for `key`. - pub fn storage_at(&self, address: &Address, key: &H256) -> trie::Result { + pub fn storage_at(&self, address: &Address, key: &H256) -> TrieResult { // Storage key search and update works like this: // 1. If there's an entry for the account in the local cache check for the key and return it if found. // 2. If there's an entry for the account in the global cache check for the key or load it into that account. @@ -602,25 +602,25 @@ impl State { } /// Get accounts' code. - pub fn code(&self, a: &Address) -> trie::Result>> { + pub fn code(&self, a: &Address) -> TrieResult>> { self.ensure_cached(a, RequireCache::Code, true, |a| a.as_ref().map_or(None, |a| a.code().clone())) } /// Get an account's code hash. - pub fn code_hash(&self, a: &Address) -> trie::Result { + pub fn code_hash(&self, a: &Address) -> TrieResult { self.ensure_cached(a, RequireCache::None, true, |a| a.as_ref().map_or(KECCAK_EMPTY, |a| a.code_hash())) } /// Get accounts' code size. - pub fn code_size(&self, a: &Address) -> trie::Result> { + pub fn code_size(&self, a: &Address) -> TrieResult> { self.ensure_cached(a, RequireCache::CodeSize, true, |a| a.as_ref().and_then(|a| a.code_size())) } /// Add `incr` to the balance of account `a`. 
- pub fn add_balance(&mut self, a: &Address, incr: &U256, cleanup_mode: CleanupMode) -> trie::Result<()> { + pub fn add_balance(&mut self, a: &Address, incr: &U256, cleanup_mode: CleanupMode) -> TrieResult<()> { trace!(target: "state", "add_balance({}, {}): {}", a, incr, self.balance(a)?); let is_value_transfer = !incr.is_zero(); if is_value_transfer || (cleanup_mode == CleanupMode::ForceCreate && !self.exists(a)?) { @@ -635,7 +635,7 @@ impl State { } /// Subtract `decr` from the balance of account `a`. - pub fn sub_balance(&mut self, a: &Address, decr: &U256, cleanup_mode: &mut CleanupMode) -> trie::Result<()> { + pub fn sub_balance(&mut self, a: &Address, decr: &U256, cleanup_mode: &mut CleanupMode) -> TrieResult<()> { trace!(target: "state", "sub_balance({}, {}): {}", a, decr, self.balance(a)?); if !decr.is_zero() || !self.exists(a)? { self.require(a, false)?.sub_balance(decr); @@ -647,19 +647,19 @@ impl State { } /// Subtracts `by` from the balance of `from` and adds it to that of `to`. - pub fn transfer_balance(&mut self, from: &Address, to: &Address, by: &U256, mut cleanup_mode: CleanupMode) -> trie::Result<()> { + pub fn transfer_balance(&mut self, from: &Address, to: &Address, by: &U256, mut cleanup_mode: CleanupMode) -> TrieResult<()> { self.sub_balance(from, by, &mut cleanup_mode)?; self.add_balance(to, by, cleanup_mode)?; Ok(()) } /// Increment the nonce of account `a` by 1. - pub fn inc_nonce(&mut self, a: &Address) -> trie::Result<()> { + pub fn inc_nonce(&mut self, a: &Address) -> TrieResult<()> { self.require(a, false).map(|mut x| x.inc_nonce()) } /// Mutate storage of account `a` so that it is `value` for `key`. - pub fn set_storage(&mut self, a: &Address, key: H256, value: H256) -> trie::Result<()> { + pub fn set_storage(&mut self, a: &Address, key: H256, value: H256) -> TrieResult<()> { trace!(target: "state", "set_storage({}:{:x} to {:x})", a, key, value); if self.storage_at(a, &key)? != value { self.require(a, false)?.set_storage(key, value) @@ -670,13 +670,13 @@ impl State { /// Initialise the code of account `a` so that it is `code`. /// NOTE: Account should have been created with `new_contract`. - pub fn init_code(&mut self, a: &Address, code: Bytes) -> trie::Result<()> { + pub fn init_code(&mut self, a: &Address, code: Bytes) -> TrieResult<()> { self.require_or_from(a, true, || Account::new_contract(0.into(), self.account_start_nonce), |_|{})?.init_code(code); Ok(()) } /// Reset the code of account `a` so that it is `code`. - pub fn reset_code(&mut self, a: &Address, code: Bytes) -> trie::Result<()> { + pub fn reset_code(&mut self, a: &Address, code: Bytes) -> TrieResult<()> { self.require_or_from(a, true, || Account::new_contract(0.into(), self.account_start_nonce), |_|{})?.reset_code(code); Ok(()) } @@ -753,7 +753,7 @@ impl State { } } - fn touch(&mut self, a: &Address) -> trie::Result<()> { + fn touch(&mut self, a: &Address) -> TrieResult<()> { self.require(a, false)?; Ok(()) } @@ -809,7 +809,7 @@ impl State { } /// Remove any touched empty or dust accounts. - pub fn kill_garbage(&mut self, touched: &HashSet
<Address>, remove_empty_touched: bool, min_balance: &Option<U256>, kill_contracts: bool) -> trie::Result<()> { + pub fn kill_garbage(&mut self, touched: &HashSet
, remove_empty_touched: bool, min_balance: &Option, kill_contracts: bool) -> TrieResult<()> { let to_kill: HashSet<_> = { self.cache.borrow().iter().filter_map(|(address, ref entry)| if touched.contains(address) && // Check all touched accounts @@ -850,7 +850,7 @@ impl State { } /// Populate a PodAccount map from this state, with another state as the account and storage query. - pub fn to_pod_diff(&mut self, query: &State) -> trie::Result { + pub fn to_pod_diff(&mut self, query: &State) -> TrieResult { assert!(self.checkpoints.borrow().is_empty()); // Merge PodAccount::to_pod for cache of self and `query`. @@ -858,7 +858,7 @@ impl State { .chain(query.cache.borrow().keys().cloned()) .collect::>(); - Ok(PodState::from(all_addresses.into_iter().fold(Ok(BTreeMap::new()), |m: trie::Result<_>, address| { + Ok(PodState::from(all_addresses.into_iter().fold(Ok(BTreeMap::new()), |m: TrieResult<_>, address| { let mut m = m?; let account = self.ensure_cached(&address, RequireCache::Code, true, |acc| { @@ -886,7 +886,7 @@ impl State { })?; if let Some((balance, nonce, storage_keys, code)) = account { - let storage = storage_keys.into_iter().fold(Ok(BTreeMap::new()), |s: trie::Result<_>, key| { + let storage = storage_keys.into_iter().fold(Ok(BTreeMap::new()), |s: TrieResult<_>, key| { let mut s = s?; s.insert(key, self.storage_at(&address, &key)?); @@ -904,14 +904,14 @@ impl State { /// Returns a `StateDiff` describing the difference from `orig` to `self`. /// Consumes self. - pub fn diff_from(&self, mut orig: State) -> trie::Result { + pub fn diff_from(&self, mut orig: State) -> TrieResult { let pod_state_post = self.to_pod(); let pod_state_pre = orig.to_pod_diff(self)?; Ok(pod_state::diff_pod(&pod_state_pre, &pod_state_post)) } // load required account data from the databases. - fn update_account_cache(require: RequireCache, account: &mut Account, state_db: &B, db: &HashDB) { + fn update_account_cache(require: RequireCache, account: &mut Account, state_db: &B, db: &HashDB) { if let RequireCache::None = require { return; } @@ -943,7 +943,7 @@ impl State { /// Check caches for required data /// First searches for account in the local, then the shared cache. /// Populates local cache if nothing found. - fn ensure_cached(&self, a: &Address, require: RequireCache, check_null: bool, f: F) -> trie::Result + fn ensure_cached(&self, a: &Address, require: RequireCache, check_null: bool, f: F) -> TrieResult where F: Fn(Option<&Account>) -> U { // check local cache first if let Some(ref mut maybe_acc) = self.cache.borrow_mut().get_mut(a) { @@ -984,13 +984,13 @@ impl State { } /// Pull account `a` in our cache from the trie DB. `require_code` requires that the code be cached, too. - fn require<'a>(&'a self, a: &Address, require_code: bool) -> trie::Result> { + fn require<'a>(&'a self, a: &Address, require_code: bool) -> TrieResult> { self.require_or_from(a, require_code, || Account::new_basic(0u8.into(), self.account_start_nonce), |_|{}) } /// Pull account `a` in our cache from the trie DB. `require_code` requires that the code be cached, too. /// If it doesn't exist, make account equal the evaluation of `default`. 
- fn require_or_from<'a, F, G>(&'a self, a: &Address, require_code: bool, default: F, not_default: G) -> trie::Result> + fn require_or_from<'a, F, G>(&'a self, a: &Address, require_code: bool, default: F, not_default: G) -> TrieResult> where F: FnOnce() -> Account, G: FnOnce(&mut Account), { let contains_key = self.cache.borrow().contains_key(a); @@ -1037,7 +1037,7 @@ impl State { } /// Replace account code and storage. Creates account if it does not exist. - pub fn patch_account(&self, a: &Address, code: Arc, storage: HashMap) -> trie::Result<()> { + pub fn patch_account(&self, a: &Address, code: Arc, storage: HashMap) -> TrieResult<()> { Ok(self.require(a, false)?.reset_code_and_storage(code, storage)) } } @@ -1049,7 +1049,7 @@ impl State { /// If the account doesn't exist in the trie, prove that and return defaults. /// Requires a secure trie to be used for accurate results. /// `account_key` == keccak(address) - pub fn prove_account(&self, account_key: H256) -> trie::Result<(Vec, BasicAccount)> { + pub fn prove_account(&self, account_key: H256) -> TrieResult<(Vec, BasicAccount)> { let mut recorder = Recorder::new(); let trie = TrieDB::new(self.db.as_hashdb(), &self.root)?; let maybe_account: Option = { @@ -1074,7 +1074,7 @@ impl State { /// Requires a secure trie to be used for correctness. /// `account_key` == keccak(address) /// `storage_key` == keccak(key) - pub fn prove_storage(&self, account_key: H256, storage_key: H256) -> trie::Result<(Vec, H256)> { + pub fn prove_storage(&self, account_key: H256, storage_key: H256) -> TrieResult<(Vec, H256)> { // TODO: probably could look into cache somehow but it's keyed by // address, not keccak(address). let trie = TrieDB::new(self.db.as_hashdb(), &self.root)?; diff --git a/ethcore/src/state_db.rs b/ethcore/src/state_db.rs index c3704828c..95cf73328 100644 --- a/ethcore/src/state_db.rs +++ b/ethcore/src/state_db.rs @@ -16,22 +16,23 @@ //! State database abstraction. For more info, see the doc for `StateDB` -use std::collections::{VecDeque, HashSet}; -use std::sync::Arc; -use lru_cache::LruCache; -use memory_cache::MemoryLruCache; +use bloom_journal::{Bloom, BloomJournal}; +use byteorder::{LittleEndian, ByteOrder}; +use db::COL_ACCOUNT_BLOOM; +use ethereum_types::{H256, Address}; +use hash::keccak; +use hashdb::HashDB; +use keccak_hasher::KeccakHasher; +use header::BlockNumber; use journaldb::JournalDB; use kvdb::{KeyValueDB, DBTransaction}; -use ethereum_types::{H256, Address}; -use hashdb::HashDB; -use state::{self, Account}; -use header::BlockNumber; -use hash::keccak; +use lru_cache::LruCache; +use memory_cache::MemoryLruCache; use parking_lot::Mutex; +use state::{self, Account}; +use std::collections::{VecDeque, HashSet}; +use std::sync::Arc; use util_error::UtilError; -use bloom_journal::{Bloom, BloomJournal}; -use db::COL_ACCOUNT_BLOOM; -use byteorder::{LittleEndian, ByteOrder}; /// Value used to initialize bloom bitmap size. 
/// @@ -310,12 +311,12 @@ impl StateDB { } /// Conversion method to interpret self as `HashDB` reference - pub fn as_hashdb(&self) -> &HashDB { + pub fn as_hashdb(&self) -> &HashDB { self.db.as_hashdb() } /// Conversion method to interpret self as mutable `HashDB` reference - pub fn as_hashdb_mut(&mut self) -> &mut HashDB { + pub fn as_hashdb_mut(&mut self) -> &mut HashDB { self.db.as_hashdb_mut() } @@ -410,11 +411,9 @@ impl StateDB { } impl state::Backend for StateDB { - fn as_hashdb(&self) -> &HashDB { - self.db.as_hashdb() - } + fn as_hashdb(&self) -> &HashDB { self.db.as_hashdb() } - fn as_hashdb_mut(&mut self) -> &mut HashDB { + fn as_hashdb_mut(&mut self) -> &mut HashDB { self.db.as_hashdb_mut() } diff --git a/ethcore/sync/Cargo.toml b/ethcore/sync/Cargo.toml index 66ee56621..5ca13c3b2 100644 --- a/ethcore/sync/Cargo.toml +++ b/ethcore/sync/Cargo.toml @@ -16,10 +16,12 @@ ethcore-light = { path = "../light" } ethcore-transaction = { path = "../transaction" } ethcore = { path = ".." } ethereum-types = "0.3" -plain_hasher = { path = "../../util/plain_hasher" } +hashdb = { version = "0.2", path = "../../util/hashdb" } +plain_hasher = { version = "0.2", path = "../../util/plain_hasher" } rlp = { path = "../../util/rlp" } rustc-hex = "1.0" keccak-hash = { path = "../../util/hash" } +keccak-hasher = { path = "../../util/keccak-hasher" } triehash = { path = "../../util/triehash" } kvdb = { path = "../../util/kvdb" } macros = { path = "../../util/macros" } diff --git a/ethcore/sync/src/chain/supplier.rs b/ethcore/sync/src/chain/supplier.rs index 9b6efbacb..e8a5c93ea 100644 --- a/ethcore/sync/src/chain/supplier.rs +++ b/ethcore/sync/src/chain/supplier.rs @@ -22,6 +22,7 @@ use network::{self, PeerId}; use parking_lot::RwLock; use rlp::{Rlp, RlpStream}; use std::cmp; + use sync_io::SyncIo; use super::{ diff --git a/ethcore/sync/src/lib.rs b/ethcore/sync/src/lib.rs index 35483f4ec..6e49b1ce7 100644 --- a/ethcore/sync/src/lib.rs +++ b/ethcore/sync/src/lib.rs @@ -30,6 +30,7 @@ extern crate ethcore_transaction as transaction; extern crate ethcore; extern crate ethereum_types; extern crate env_logger; +extern crate hashdb; extern crate plain_hasher; extern crate rand; extern crate semver; @@ -38,6 +39,7 @@ extern crate smallvec; extern crate rlp; extern crate ipnetwork; extern crate keccak_hash as hash; +extern crate keccak_hasher; extern crate triehash; extern crate kvdb; diff --git a/ethcore/vm/Cargo.toml b/ethcore/vm/Cargo.toml index c5d31f58e..7348951d7 100644 --- a/ethcore/vm/Cargo.toml +++ b/ethcore/vm/Cargo.toml @@ -8,6 +8,7 @@ byteorder = "1.0" ethcore-bytes = { path = "../../util/bytes" } ethereum-types = "0.3" patricia-trie = { path = "../../util/patricia_trie" } +patricia-trie-ethereum = { path = "../../util/patricia-trie-ethereum" } log = "0.3" common-types = { path = "../types" } ethjson = { path = "../../json" } diff --git a/ethcore/vm/src/error.rs b/ethcore/vm/src/error.rs index ad23e3e02..b5e337a75 100644 --- a/ethcore/vm/src/error.rs +++ b/ethcore/vm/src/error.rs @@ -16,8 +16,8 @@ //! VM errors module -use trie; use std::fmt; +use ethtrie; /// VM errors. 
#[derive(Debug, Clone, PartialEq)] @@ -71,8 +71,13 @@ pub enum Error { Reverted, } -impl From> for Error { - fn from(err: Box) -> Self { +impl From> for Error { + fn from(err: Box) -> Self { + Error::Internal(format!("Internal error: {}", err)) + } +} +impl From for Error { + fn from(err: ethtrie::TrieError) -> Self { Error::Internal(format!("Internal error: {}", err)) } } diff --git a/ethcore/vm/src/lib.rs b/ethcore/vm/src/lib.rs index 0dc1b7995..658420f74 100644 --- a/ethcore/vm/src/lib.rs +++ b/ethcore/vm/src/lib.rs @@ -22,6 +22,7 @@ extern crate common_types as types; extern crate ethjson; extern crate rlp; extern crate keccak_hash as hash; +extern crate patricia_trie_ethereum as ethtrie; extern crate patricia_trie as trie; mod action_params; diff --git a/util/hashdb/Cargo.toml b/util/hashdb/Cargo.toml index d4e055f9f..f5e63fb1b 100644 --- a/util/hashdb/Cargo.toml +++ b/util/hashdb/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "hashdb" -version = "0.1.1" +version = "0.2.0" authors = ["Parity Technologies "] description = "trait for hash-keyed databases." license = "GPL-3.0" [dependencies] elastic-array = "0.10" -ethereum-types = "0.3" +heapsize = "0.4" \ No newline at end of file diff --git a/util/hashdb/src/lib.rs b/util/hashdb/src/lib.rs index 182e81c5d..5961d90f9 100644 --- a/util/hashdb/src/lib.rs +++ b/util/hashdb/src/lib.rs @@ -14,65 +14,70 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -//! Database of byte-slices keyed to their Keccak hash. +//! Database of byte-slices keyed to their hash. extern crate elastic_array; -extern crate ethereum_types; +extern crate heapsize; -use std::collections::HashMap; use elastic_array::ElasticArray128; -use ethereum_types::H256; +use heapsize::HeapSizeOf; +use std::collections::HashMap; +use std::{fmt::Debug, hash::Hash}; + +/// Trait describing an object that can hash a slice of bytes. Used to abstract +/// other types over the hashing algorithm. Defines a single `hash` method and an +/// `Out` associated type with the necessary bounds. +pub trait Hasher: Sync + Send { + /// The output type of the `Hasher` + type Out: AsRef<[u8]> + AsMut<[u8]> + Default + HeapSizeOf + Debug + PartialEq + Eq + Hash + Send + Sync + Clone + Copy; + /// What to use to build `HashMap`s with this `Hasher` + type StdHasher: Sync + Send + Default + std::hash::Hasher; + /// The length in bytes of the `Hasher` output + const LENGTH: usize; + + /// Compute the hash of the provided slice of bytes returning the `Out` type of the `Hasher` + fn hash(x: &[u8]) -> Self::Out; +} /// `HashDB` value type. pub type DBValue = ElasticArray128; -/// Trait modelling datastore keyed by a 32-byte Keccak hash. -pub trait HashDB: AsHashDB + Send + Sync { +/// Trait modelling datastore keyed by a hash defined by the `Hasher`. +pub trait HashDB: Send + Sync + AsHashDB { /// Get the keys in the database together with number of underlying references. - fn keys(&self) -> HashMap; + fn keys(&self) -> HashMap; /// Look up a given hash into the bytes that hash to it, returning None if the /// hash is not known. - fn get(&self, key: &H256) -> Option; + fn get(&self, key: &H::Out) -> Option; /// Check for the existance of a hash-key. - fn contains(&self, key: &H256) -> bool; + fn contains(&self, key: &H::Out) -> bool; /// Insert a datum item into the DB and return the datum's hash for a later lookup. Insertions /// are counted and the equivalent number of `remove()`s must be performed before the data /// is considered dead. 
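// Because `HashDB` is now parametrised over a `Hasher`, client code can stay generic
// over the hashing algorithm. A minimal sketch of such a consumer (the `count_live`
// helper below is illustrative and not part of this patch; it only assumes the
// `hashdb`, `memorydb` and `keccak-hasher` crates changed or added in this diff):
extern crate hashdb;
extern crate memorydb;
extern crate keccak_hasher;

use hashdb::{AsHashDB, HashDB, Hasher};
use memorydb::MemoryDB;
use keccak_hasher::KeccakHasher;

// Counts entries whose reference count is positive, for any `Hasher`-keyed DB.
fn count_live<H: Hasher>(db: &HashDB<H>) -> usize {
	db.keys().values().filter(|&&rc| rc > 0).count()
}

fn main() {
	let mut db = MemoryDB::<KeccakHasher>::new();
	let key = db.insert(b"some value");
	assert!(db.contains(&key));
	assert_eq!(count_live(db.as_hashdb()), 1);
}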
- fn insert(&mut self, value: &[u8]) -> H256; + fn insert(&mut self, value: &[u8]) -> H::Out; - /// Like `insert()` , except you provide the key and the data is all moved. - fn emplace(&mut self, key: H256, value: DBValue); + /// Like `insert()`, except you provide the key and the data is all moved. + fn emplace(&mut self, key: H::Out, value: DBValue); /// Remove a datum previously inserted. Insertions can be "owed" such that the same number of `insert()`s may /// happen without the data being eventually being inserted into the DB. It can be "owed" more than once. - fn remove(&mut self, key: &H256); + fn remove(&mut self, key: &H::Out); } /// Upcast trait. -pub trait AsHashDB { +pub trait AsHashDB { /// Perform upcast to HashDB for anything that derives from HashDB. - fn as_hashdb(&self) -> &HashDB; + fn as_hashdb(&self) -> &HashDB; /// Perform mutable upcast to HashDB for anything that derives from HashDB. - fn as_hashdb_mut(&mut self) -> &mut HashDB; + fn as_hashdb_mut(&mut self) -> &mut HashDB; } -impl AsHashDB for T { - fn as_hashdb(&self) -> &HashDB { - self - } - fn as_hashdb_mut(&mut self) -> &mut HashDB { - self - } +// NOTE: There used to be a `impl AsHashDB for T` but that does not work with generics. See https://stackoverflow.com/questions/48432842/implementing-a-trait-for-reference-and-non-reference-types-causes-conflicting-im +// This means we need concrete impls of AsHashDB in several places, which somewhat defeats the point of the trait. +impl<'a, H: Hasher> AsHashDB for &'a mut HashDB { + fn as_hashdb(&self) -> &HashDB { &**self } + fn as_hashdb_mut(&mut self) -> &mut HashDB { &mut **self } } -impl<'a> AsHashDB for &'a mut HashDB { - fn as_hashdb(&self) -> &HashDB { - &**self - } - - fn as_hashdb_mut(&mut self) -> &mut HashDB { - &mut **self - } -} diff --git a/util/journaldb/Cargo.toml b/util/journaldb/Cargo.toml index dea70bd6a..56412533c 100644 --- a/util/journaldb/Cargo.toml +++ b/util/journaldb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "journaldb" -version = "0.1.0" +version = "0.2.0" authors = ["Parity Technologies "] description = "A `HashDB` which can manage a short-term journal potentially containing many forks of mutually exclusive actions" license = "GPL3" @@ -8,11 +8,12 @@ license = "GPL3" [dependencies] ethcore-bytes = { path = "../bytes" } ethereum-types = "0.3" -hashdb = { path = "../hashdb" } +hashdb = { version = "0.2.0", path = "../hashdb" } heapsize = "0.4" +keccak-hasher = { path = "../keccak-hasher" } kvdb = { path = "../kvdb" } log = "0.3" -memorydb = { path = "../memorydb" } +memorydb = { version="0.2.0", path = "../memorydb" } parking_lot = "0.5" plain_hasher = { path = "../plain_hasher" } rlp = { path = "../rlp" } diff --git a/util/journaldb/src/archivedb.rs b/util/journaldb/src/archivedb.rs index e2d8c8007..2978e86c2 100644 --- a/util/journaldb/src/archivedb.rs +++ b/util/journaldb/src/archivedb.rs @@ -19,15 +19,17 @@ use std::collections::HashMap; use std::collections::hash_map::Entry; use std::sync::Arc; -use rlp::{encode, decode}; -use hashdb::*; -use super::memorydb::*; -use super::{DB_PREFIX_LEN, LATEST_ERA_KEY}; -use traits::JournalDB; -use kvdb::{KeyValueDB, DBTransaction}; -use ethereum_types::H256; -use error::{BaseDataError, UtilError}; + use bytes::Bytes; +use error::{BaseDataError, UtilError}; +use ethereum_types::H256; +use hashdb::*; +use keccak_hasher::KeccakHasher; +use kvdb::{KeyValueDB, DBTransaction}; +use rlp::{encode, decode}; +use super::{DB_PREFIX_LEN, LATEST_ERA_KEY}; +use super::memorydb::*; +use traits::JournalDB; /// 
Implementation of the `HashDB` trait for a disk-backed database with a memory overlay /// and latent-removal semantics. @@ -37,7 +39,7 @@ use bytes::Bytes; /// immediately. As this is an "archive" database, nothing is ever removed. This means /// that the states of any block the node has ever processed will be accessible. pub struct ArchiveDB { - overlay: MemoryDB, + overlay: MemoryDB, backing: Arc, latest_era: Option, column: Option, @@ -62,7 +64,7 @@ impl ArchiveDB { } } -impl HashDB for ArchiveDB { +impl HashDB for ArchiveDB { fn keys(&self) -> HashMap { let mut ret: HashMap = self.backing.iter(self.column) .map(|(key, _)| (H256::from_slice(&*key), 1)) @@ -191,7 +193,7 @@ impl JournalDB for ArchiveDB { &self.backing } - fn consolidate(&mut self, with: MemoryDB) { + fn consolidate(&mut self, with: MemoryDB) { self.overlay.consolidate(with); } } diff --git a/util/journaldb/src/as_hash_db_impls.rs b/util/journaldb/src/as_hash_db_impls.rs new file mode 100644 index 000000000..bd3b0e2d7 --- /dev/null +++ b/util/journaldb/src/as_hash_db_impls.rs @@ -0,0 +1,49 @@ +// Copyright 2015-2018 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Impls of the `AsHashDB` upcast trait for all different variants of DB +use hashdb::{HashDB, AsHashDB}; +use keccak_hasher::KeccakHasher; +use archivedb::ArchiveDB; +use earlymergedb::EarlyMergeDB; +use overlayrecentdb::OverlayRecentDB; +use refcounteddb::RefCountedDB; +use overlaydb::OverlayDB; + +impl AsHashDB for ArchiveDB { + fn as_hashdb(&self) -> &HashDB { self } + fn as_hashdb_mut(&mut self) -> &mut HashDB { self } +} + +impl AsHashDB for EarlyMergeDB { + fn as_hashdb(&self) -> &HashDB { self } + fn as_hashdb_mut(&mut self) -> &mut HashDB { self } +} + +impl AsHashDB for OverlayRecentDB { + fn as_hashdb(&self) -> &HashDB { self } + fn as_hashdb_mut(&mut self) -> &mut HashDB { self } +} + +impl AsHashDB for RefCountedDB { + fn as_hashdb(&self) -> &HashDB { self } + fn as_hashdb_mut(&mut self) -> &mut HashDB { self } +} + +impl AsHashDB for OverlayDB { + fn as_hashdb(&self) -> &HashDB { self } + fn as_hashdb_mut(&mut self) -> &mut HashDB { self } +} \ No newline at end of file diff --git a/util/journaldb/src/earlymergedb.rs b/util/journaldb/src/earlymergedb.rs index 25e078bda..bee63ae7f 100644 --- a/util/journaldb/src/earlymergedb.rs +++ b/util/journaldb/src/earlymergedb.rs @@ -19,17 +19,19 @@ use std::collections::HashMap; use std::collections::hash_map::Entry; use std::sync::Arc; -use parking_lot::RwLock; -use heapsize::HeapSizeOf; -use rlp::{encode, decode}; + +use bytes::Bytes; +use error::{BaseDataError, UtilError}; +use ethereum_types::H256; use hashdb::*; +use heapsize::HeapSizeOf; +use keccak_hasher::KeccakHasher; +use kvdb::{KeyValueDB, DBTransaction}; use memorydb::*; +use parking_lot::RwLock; +use rlp::{encode, decode}; use super::{DB_PREFIX_LEN, LATEST_ERA_KEY}; use super::traits::JournalDB; -use kvdb::{KeyValueDB, DBTransaction}; 
-use ethereum_types::H256; -use error::{BaseDataError, UtilError}; -use bytes::Bytes; use util::{DatabaseKey, DatabaseValueView, DatabaseValueRef}; #[derive(Debug, Clone, PartialEq, Eq)] @@ -105,7 +107,7 @@ enum RemoveFrom { /// /// TODO: `store_reclaim_period` pub struct EarlyMergeDB { - overlay: MemoryDB, + overlay: MemoryDB, backing: Arc, refs: Option>>>, latest_era: Option, @@ -285,7 +287,7 @@ impl EarlyMergeDB { } } -impl HashDB for EarlyMergeDB { +impl HashDB for EarlyMergeDB { fn keys(&self) -> HashMap { let mut ret: HashMap = self.backing.iter(self.column) .map(|(key, _)| (H256::from_slice(&*key), 1)) @@ -512,7 +514,7 @@ impl JournalDB for EarlyMergeDB { Ok(ops) } - fn consolidate(&mut self, with: MemoryDB) { + fn consolidate(&mut self, with: MemoryDB) { self.overlay.consolidate(with); } } diff --git a/util/journaldb/src/lib.rs b/util/journaldb/src/lib.rs index 7607271c8..652d89039 100644 --- a/util/journaldb/src/lib.rs +++ b/util/journaldb/src/lib.rs @@ -23,6 +23,7 @@ extern crate log; extern crate ethereum_types; extern crate ethcore_bytes as bytes; extern crate hashdb; +extern crate keccak_hasher; extern crate kvdb; extern crate memorydb; extern crate parking_lot; @@ -47,6 +48,7 @@ mod earlymergedb; mod overlayrecentdb; mod refcounteddb; mod util; +mod as_hash_db_impls; pub mod overlaydb; diff --git a/util/journaldb/src/overlaydb.rs b/util/journaldb/src/overlaydb.rs index 46bf42c0a..1e01edfea 100644 --- a/util/journaldb/src/overlaydb.rs +++ b/util/journaldb/src/overlaydb.rs @@ -23,6 +23,7 @@ use error::{Result, BaseDataError}; use ethereum_types::H256; use rlp::{Rlp, RlpStream, Encodable, DecoderError, Decodable, encode, decode}; use hashdb::*; +use keccak_hasher::KeccakHasher; use memorydb::*; use kvdb::{KeyValueDB, DBTransaction}; @@ -36,7 +37,7 @@ use kvdb::{KeyValueDB, DBTransaction}; /// queries have an immediate effect in terms of these functions. #[derive(Clone)] pub struct OverlayDB { - overlay: MemoryDB, + overlay: MemoryDB, backing: Arc, column: Option, } @@ -152,7 +153,7 @@ impl OverlayDB { } } -impl HashDB for OverlayDB { +impl HashDB for OverlayDB { fn keys(&self) -> HashMap { let mut ret: HashMap = self.backing.iter(self.column) .map(|(key, _)| { diff --git a/util/journaldb/src/overlayrecentdb.rs b/util/journaldb/src/overlayrecentdb.rs index c7153b889..d38f91c7c 100644 --- a/util/journaldb/src/overlayrecentdb.rs +++ b/util/journaldb/src/overlayrecentdb.rs @@ -19,18 +19,20 @@ use std::collections::HashMap; use std::collections::hash_map::Entry; use std::sync::Arc; -use parking_lot::RwLock; -use heapsize::HeapSizeOf; -use rlp::{Rlp, RlpStream, encode, decode, DecoderError, Decodable, Encodable}; -use hashdb::*; -use memorydb::*; -use super::{DB_PREFIX_LEN, LATEST_ERA_KEY}; -use kvdb::{KeyValueDB, DBTransaction}; -use super::JournalDB; -use ethereum_types::H256; -use plain_hasher::H256FastMap; -use error::{BaseDataError, UtilError}; + use bytes::Bytes; +use error::{BaseDataError, UtilError}; +use ethereum_types::H256; +use hashdb::*; +use heapsize::HeapSizeOf; +use keccak_hasher::KeccakHasher; +use kvdb::{KeyValueDB, DBTransaction}; +use memorydb::*; +use parking_lot::RwLock; +use plain_hasher::H256FastMap; +use rlp::{Rlp, RlpStream, encode, decode, DecoderError, Decodable, Encodable}; +use super::{DB_PREFIX_LEN, LATEST_ERA_KEY}; +use super::JournalDB; use util::DatabaseKey; /// Implementation of the `JournalDB` trait for a disk-backed database with a memory overlay @@ -65,7 +67,7 @@ use util::DatabaseKey; /// 7. Delete ancient record from memory and disk. 
pub struct OverlayRecentDB { - transaction_overlay: MemoryDB, + transaction_overlay: MemoryDB, backing: Arc, journal_overlay: Arc>, column: Option, @@ -119,7 +121,7 @@ impl<'a> Encodable for DatabaseValueRef<'a> { #[derive(PartialEq)] struct JournalOverlay { - backing_overlay: MemoryDB, // Nodes added in the history period + backing_overlay: MemoryDB, // Nodes added in the history period pending_overlay: H256FastMap, // Nodes being transfered from backing_overlay to backing db journal: HashMap>, latest_era: Option, @@ -433,12 +435,12 @@ impl JournalDB for OverlayRecentDB { Ok(ops) } - fn consolidate(&mut self, with: MemoryDB) { + fn consolidate(&mut self, with: MemoryDB) { self.transaction_overlay.consolidate(with); } } -impl HashDB for OverlayRecentDB { +impl HashDB for OverlayRecentDB { fn keys(&self) -> HashMap { let mut ret: HashMap = self.backing.iter(self.column) .map(|(key, _)| (H256::from_slice(&*key), 1)) diff --git a/util/journaldb/src/refcounteddb.rs b/util/journaldb/src/refcounteddb.rs index cc81bbfba..bca8d9305 100644 --- a/util/journaldb/src/refcounteddb.rs +++ b/util/journaldb/src/refcounteddb.rs @@ -18,17 +18,19 @@ use std::collections::HashMap; use std::sync::Arc; -use heapsize::HeapSizeOf; -use rlp::{encode, decode}; + +use bytes::Bytes; +use error::UtilError; +use ethereum_types::H256; use hashdb::*; -use overlaydb::OverlayDB; +use heapsize::HeapSizeOf; +use keccak_hasher::KeccakHasher; +use kvdb::{KeyValueDB, DBTransaction}; use memorydb::MemoryDB; +use overlaydb::OverlayDB; +use rlp::{encode, decode}; use super::{DB_PREFIX_LEN, LATEST_ERA_KEY}; use super::traits::JournalDB; -use kvdb::{KeyValueDB, DBTransaction}; -use ethereum_types::H256; -use error::UtilError; -use bytes::Bytes; use util::{DatabaseKey, DatabaseValueView, DatabaseValueRef}; /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay @@ -78,7 +80,7 @@ impl RefCountedDB { } } -impl HashDB for RefCountedDB { +impl HashDB for RefCountedDB { fn keys(&self) -> HashMap { self.forward.keys() } fn get(&self, key: &H256) -> Option { self.forward.get(key) } fn contains(&self, key: &H256) -> bool { self.forward.contains(key) } @@ -197,7 +199,7 @@ impl JournalDB for RefCountedDB { self.forward.commit_to_batch(batch) } - fn consolidate(&mut self, mut with: MemoryDB) { + fn consolidate(&mut self, mut with: MemoryDB) { for (key, (value, rc)) in with.drain() { for _ in 0..rc { self.emplace(key, value.clone()); diff --git a/util/journaldb/src/traits.rs b/util/journaldb/src/traits.rs index e37ac8aab..470761614 100644 --- a/util/journaldb/src/traits.rs +++ b/util/journaldb/src/traits.rs @@ -16,16 +16,17 @@ //! Disk-backed `HashDB` implementation. -use std::sync::Arc; -use hashdb::*; -use kvdb::{self, DBTransaction}; -use ethereum_types::H256; -use error::UtilError; use bytes::Bytes; +use error::UtilError; +use ethereum_types::H256; +use hashdb::*; +use keccak_hasher::KeccakHasher; +use kvdb::{self, DBTransaction}; +use std::sync::Arc; /// A `HashDB` which can manage a short-term journal potentially containing many forks of mutually /// exclusive actions. -pub trait JournalDB: HashDB { +pub trait JournalDB: HashDB { /// Return a copy of ourself, in a box. fn boxed_clone(&self) -> Box; @@ -76,7 +77,7 @@ pub trait JournalDB: HashDB { fn flush(&self) {} /// Consolidate all the insertions and deletions in the given memory overlay. 
- fn consolidate(&mut self, overlay: ::memorydb::MemoryDB); + fn consolidate(&mut self, overlay: ::memorydb::MemoryDB); /// Commit all changes in a single batch #[cfg(test)] diff --git a/util/keccak-hasher/Cargo.toml b/util/keccak-hasher/Cargo.toml new file mode 100644 index 000000000..e7c8e950e --- /dev/null +++ b/util/keccak-hasher/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "keccak-hasher" +version = "0.1.0" +authors = ["Parity Technologies "] +description = "Keccak-256 implementation of the Hasher trait" +license = "GPL-3.0" + +[dependencies] +ethereum-types = "0.3" +tiny-keccak = "1.4.2" +hashdb = { path = "../hashdb" } +plain_hasher = { path = "../plain_hasher" } \ No newline at end of file diff --git a/util/keccak-hasher/src/lib.rs b/util/keccak-hasher/src/lib.rs new file mode 100644 index 000000000..bb2b5b45f --- /dev/null +++ b/util/keccak-hasher/src/lib.rs @@ -0,0 +1,39 @@ +// Copyright 2015-2018 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Hasher implementation for the Keccak-256 hash +extern crate hashdb; +extern crate ethereum_types; +extern crate tiny_keccak; +extern crate plain_hasher; + +use hashdb::Hasher; +use ethereum_types::H256; +use tiny_keccak::Keccak; +use plain_hasher::PlainHasher; +/// Concrete `Hasher` impl for the Keccak-256 hash +#[derive(Default, Debug, Clone, PartialEq)] +pub struct KeccakHasher; +impl Hasher for KeccakHasher { + type Out = H256; + type StdHasher = PlainHasher; + const LENGTH: usize = 32; + fn hash(x: &[u8]) -> Self::Out { + let mut out = [0;32]; + Keccak::keccak256(x, &mut out); + out.into() + } +} diff --git a/util/memorydb/Cargo.toml b/util/memorydb/Cargo.toml index 41c41bb62..57d7439cf 100644 --- a/util/memorydb/Cargo.toml +++ b/util/memorydb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "memorydb" -version = "0.1.1" +version = "0.2.0" authors = ["Parity Technologies "] description = "in-memory implementation of hashdb" license = "GPL-3.0" @@ -8,8 +8,12 @@ license = "GPL-3.0" [dependencies] elastic-array = "0.10" heapsize = "0.4" -ethereum-types = "0.3" -keccak-hash = { version = "0.1.0", path = "../hash" } -hashdb = { version = "0.1.1", path = "../hashdb" } +hashdb = { version = "0.2.0", path = "../hashdb" } plain_hasher = { path = "../plain_hasher" } rlp = { version = "0.2.1", path = "../rlp" } + +[dev-dependencies] +tiny-keccak = "1.4.2" +ethereum-types = "0.3" +keccak-hasher = { path = "../keccak-hasher" } +keccak-hash = { path = "../hash" } \ No newline at end of file diff --git a/util/memorydb/benches/memdb.rs b/util/memorydb/benches/memdb.rs new file mode 100644 index 000000000..cfc676ccd --- /dev/null +++ b/util/memorydb/benches/memdb.rs @@ -0,0 +1,79 @@ +// Copyright 2015-2018 Parity Technologies (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +#![feature(test)] + +extern crate hashdb; +extern crate memorydb; +extern crate keccak_hasher; +extern crate keccak_hash; +extern crate rlp; +extern crate test; + +use memorydb::MemoryDB; +use keccak_hasher::KeccakHasher; +use hashdb::{HashDB, Hasher}; +use keccak_hash::KECCAK_NULL_RLP; +use rlp::NULL_RLP; +use test::{Bencher, black_box}; + + +#[bench] +fn instantiation(b: &mut Bencher) { + b.iter(|| { + MemoryDB::::new(); + }) +} + +#[bench] +fn compare_to_null_embedded_in_struct(b: &mut Bencher) { + struct X {a_hash: ::Out}; + let x = X {a_hash: KeccakHasher::hash(&NULL_RLP)}; + let key = KeccakHasher::hash(b"abc"); + + b.iter(|| { + black_box(key == x.a_hash); + }) +} + +#[bench] +fn compare_to_null_in_const(b: &mut Bencher) { + let key = KeccakHasher::hash(b"abc"); + + b.iter(|| { + black_box(key == KECCAK_NULL_RLP); + }) +} + +#[bench] +fn contains_with_non_null_key(b: &mut Bencher) { + let mut m = MemoryDB::::new(); + let key = KeccakHasher::hash(b"abc"); + m.insert(b"abcefghijklmnopqrstuvxyz"); + b.iter(|| { + m.contains(&key); + }) +} + +#[bench] +fn contains_with_null_key(b: &mut Bencher) { + let mut m = MemoryDB::::new(); + let null_key = KeccakHasher::hash(&NULL_RLP); + m.insert(b"abcefghijklmnopqrstuvxyz"); + b.iter(|| { + m.contains(&null_key); + }) +} \ No newline at end of file diff --git a/util/memorydb/src/lib.rs b/util/memorydb/src/lib.rs index e297d1e6d..538738995 100644 --- a/util/memorydb/src/lib.rs +++ b/util/memorydb/src/lib.rs @@ -15,23 +15,25 @@ // along with Parity. If not, see . //! Reference-counted memory-based `HashDB` implementation. -extern crate heapsize; -extern crate ethereum_types; -extern crate hashdb; -extern crate keccak_hash as keccak; -extern crate plain_hasher; -extern crate rlp; extern crate elastic_array; +extern crate hashdb; +extern crate heapsize; +extern crate rlp; +#[cfg(test)] extern crate keccak_hasher; +#[cfg(test)] extern crate tiny_keccak; +#[cfg(test)] extern crate ethereum_types; -use std::mem; -use std::collections::HashMap; -use std::collections::hash_map::Entry; +use hashdb::{HashDB, Hasher as KeyHasher, DBValue, AsHashDB}; use heapsize::HeapSizeOf; -use ethereum_types::H256; -use hashdb::{HashDB, DBValue}; -use keccak::{KECCAK_NULL_RLP, keccak}; -use plain_hasher::H256FastMap; use rlp::NULL_RLP; +use std::collections::hash_map::Entry; +use std::collections::HashMap; +use std::hash; +use std::mem; + +// Backing `HashMap` parametrized with a `Hasher` for the keys `Hasher::Out` and the `Hasher::StdHasher` as hash map builder. +type FastMap = HashMap<::Out, T, hash::BuildHasherDefault<::StdHasher>>; + /// Reference-counted memory-based `HashDB` implementation. /// /// Use `new()` to create a new database. 
Insert items with `insert()`, remove items @@ -42,11 +44,14 @@ use rlp::NULL_RLP; /// # Example /// ```rust /// extern crate hashdb; +/// extern crate keccak_hasher; /// extern crate memorydb; +/// /// use hashdb::*; +/// use keccak_hasher::KeccakHasher; /// use memorydb::*; /// fn main() { -/// let mut m = MemoryDB::new(); +/// let mut m = MemoryDB::::new(); /// let d = "Hello world!".as_bytes(); /// /// let k = m.insert(d); @@ -77,15 +82,17 @@ use rlp::NULL_RLP; /// } /// ``` #[derive(Default, Clone, PartialEq)] -pub struct MemoryDB { - data: H256FastMap<(DBValue, i32)>, +pub struct MemoryDB { + data: FastMap, + hashed_null_node: H::Out, } -impl MemoryDB { +impl MemoryDB { /// Create a new instance of the memory DB. - pub fn new() -> MemoryDB { + pub fn new() -> MemoryDB { MemoryDB { - data: H256FastMap::default(), + data: FastMap::::default(), + hashed_null_node: H::hash(&NULL_RLP) } } @@ -94,11 +101,15 @@ impl MemoryDB { /// # Examples /// ```rust /// extern crate hashdb; + /// extern crate keccak_hasher; /// extern crate memorydb; + /// /// use hashdb::*; + /// use keccak_hasher::KeccakHasher; /// use memorydb::*; + /// /// fn main() { - /// let mut m = MemoryDB::new(); + /// let mut m = MemoryDB::::new(); /// let hello_bytes = "Hello world!".as_bytes(); /// let hash = m.insert(hello_bytes); /// assert!(m.contains(&hash)); @@ -116,8 +127,8 @@ impl MemoryDB { } /// Return the internal map of hashes to data, clearing the current state. - pub fn drain(&mut self) -> H256FastMap<(DBValue, i32)> { - mem::replace(&mut self.data, H256FastMap::default()) + pub fn drain(&mut self) -> FastMap { + mem::replace(&mut self.data, FastMap::::default()) } /// Grab the raw information associated with a key. Returns None if the key @@ -125,8 +136,8 @@ impl MemoryDB { /// /// Even when Some is returned, the data is only guaranteed to be useful /// when the refs > 0. - pub fn raw(&self, key: &H256) -> Option<(DBValue, i32)> { - if key == &KECCAK_NULL_RLP { + pub fn raw(&self, key: &::Out) -> Option<(DBValue, i32)> { + if key == &self.hashed_null_node { return Some((DBValue::from_slice(&NULL_RLP), 1)); } self.data.get(key).cloned() @@ -139,8 +150,8 @@ impl MemoryDB { /// Remove an element and delete it from storage if reference count reaches zero. /// If the value was purged, return the old value. 
- pub fn remove_and_purge(&mut self, key: &H256) -> Option { - if key == &KECCAK_NULL_RLP { + pub fn remove_and_purge(&mut self, key: &::Out) -> Option { + if key == &self.hashed_null_node { return None; } match self.data.entry(key.clone()) { @@ -177,19 +188,9 @@ impl MemoryDB { } } -impl HashDB for MemoryDB { - fn get(&self, key: &H256) -> Option { - if key == &KECCAK_NULL_RLP { - return Some(DBValue::from_slice(&NULL_RLP)); - } +impl HashDB for MemoryDB { - match self.data.get(key) { - Some(&(ref d, rc)) if rc > 0 => Some(d.clone()), - _ => None - } - } - - fn keys(&self) -> HashMap { + fn keys(&self) -> HashMap { self.data.iter() .filter_map(|(k, v)| if v.1 != 0 { Some((*k, v.1)) @@ -199,8 +200,19 @@ impl HashDB for MemoryDB { .collect() } - fn contains(&self, key: &H256) -> bool { - if key == &KECCAK_NULL_RLP { + fn get(&self, key: &H::Out) -> Option { + if key == &self.hashed_null_node { + return Some(DBValue::from_slice(&NULL_RLP)); + } + + match self.data.get(key) { + Some(&(ref d, rc)) if rc > 0 => Some(d.clone()), + _ => None + } + } + + fn contains(&self, key: &H::Out) -> bool { + if key == &self.hashed_null_node { return true; } @@ -210,15 +222,15 @@ impl HashDB for MemoryDB { } } - fn insert(&mut self, value: &[u8]) -> H256 { + fn insert(&mut self, value: &[u8]) -> H::Out { if value == &NULL_RLP { - return KECCAK_NULL_RLP.clone(); + return self.hashed_null_node.clone(); } - let key = keccak(value); + let key = H::hash(value); match self.data.entry(key) { Entry::Occupied(mut entry) => { let &mut (ref mut old_value, ref mut rc) = entry.get_mut(); - if *rc >= -0x80000000i32 && *rc <= 0 { + if *rc <= 0 { *old_value = DBValue::from_slice(value); } *rc += 1; @@ -230,7 +242,7 @@ impl HashDB for MemoryDB { key } - fn emplace(&mut self, key: H256, value: DBValue) { + fn emplace(&mut self, key:H::Out, value: DBValue) { if &*value == &NULL_RLP { return; } @@ -238,7 +250,7 @@ impl HashDB for MemoryDB { match self.data.entry(key) { Entry::Occupied(mut entry) => { let &mut (ref mut old_value, ref mut rc) = entry.get_mut(); - if *rc >= -0x80000000i32 && *rc <= 0 { + if *rc <= 0 { *old_value = value; } *rc += 1; @@ -249,8 +261,8 @@ impl HashDB for MemoryDB { } } - fn remove(&mut self, key: &H256) { - if key == &KECCAK_NULL_RLP { + fn remove(&mut self, key: &H::Out) { + if key == &self.hashed_null_node { return; } @@ -266,17 +278,26 @@ impl HashDB for MemoryDB { } } +impl AsHashDB for MemoryDB { + fn as_hashdb(&self) -> &HashDB { self } + fn as_hashdb_mut(&mut self) -> &mut HashDB { self } +} + #[cfg(test)] mod tests { - use keccak::keccak; use super::*; + use tiny_keccak::Keccak; + use ethereum_types::H256; + use keccak_hasher::KeccakHasher; #[test] fn memorydb_remove_and_purge() { let hello_bytes = b"Hello world!"; - let hello_key = keccak(hello_bytes); + let mut hello_key = [0;32]; + Keccak::keccak256(hello_bytes, &mut hello_key); + let hello_key = H256(hello_key); - let mut m = MemoryDB::new(); + let mut m = MemoryDB::::new(); m.remove(&hello_key); assert_eq!(m.raw(&hello_key).unwrap().1, -1); m.purge(); @@ -286,7 +307,7 @@ mod tests { m.purge(); assert_eq!(m.raw(&hello_key), None); - let mut m = MemoryDB::new(); + let mut m = MemoryDB::::new(); assert!(m.remove_and_purge(&hello_key).is_none()); assert_eq!(m.raw(&hello_key).unwrap().1, -1); m.insert(hello_bytes); @@ -299,8 +320,8 @@ mod tests { #[test] fn consolidate() { - let mut main = MemoryDB::new(); - let mut other = MemoryDB::new(); + let mut main = MemoryDB::::new(); + let mut other = MemoryDB::::new(); let remove_key = 
other.insert(b"doggo"); main.remove(&remove_key); diff --git a/util/patricia-trie-ethereum/Cargo.toml b/util/patricia-trie-ethereum/Cargo.toml new file mode 100644 index 000000000..239c7c99f --- /dev/null +++ b/util/patricia-trie-ethereum/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "patricia-trie-ethereum" +version = "0.1.0" +authors = ["Parity Technologies "] +description = "Merkle-Patricia Trie (Ethereum Style)" +license = "GPL-3.0" + +[dependencies] +patricia-trie = { path = "../patricia_trie" } +keccak-hasher = { path = "../keccak-hasher" } +hashdb = { path = "../hashdb" } +rlp = { path = "../rlp" } +ethcore-bytes = { path = "../bytes" } +ethereum-types = "0.3" +elastic-array = "0.10" \ No newline at end of file diff --git a/util/patricia-trie-ethereum/src/lib.rs b/util/patricia-trie-ethereum/src/lib.rs new file mode 100644 index 000000000..a252152c7 --- /dev/null +++ b/util/patricia-trie-ethereum/src/lib.rs @@ -0,0 +1,62 @@ +// Copyright 2015-2018 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Façade crate for `patricia_trie` for Ethereum specific impls + +pub extern crate patricia_trie as trie; // `pub` because we need to import this crate for the tests in `patricia_trie` and there were issues: https://gist.github.com/dvdplm/869251ee557a1b4bd53adc7c971979aa +extern crate elastic_array; +extern crate ethcore_bytes; +extern crate ethereum_types; +extern crate hashdb; +extern crate keccak_hasher; +extern crate rlp; + +mod rlp_node_codec; + +pub use rlp_node_codec::RlpNodeCodec; + +use ethereum_types::H256; +use keccak_hasher::KeccakHasher; +use rlp::DecoderError; + +/// Convenience type alias to instantiate a Keccak-flavoured `RlpNodeCodec` +pub type RlpCodec = RlpNodeCodec; + +/// Convenience type alias to instantiate a Keccak/Rlp-flavoured `TrieDB` +pub type TrieDB<'db> = trie::TrieDB<'db, KeccakHasher, RlpCodec>; + +/// Convenience type alias to instantiate a Keccak/Rlp-flavoured `SecTrieDB` +pub type SecTrieDB<'db> = trie::SecTrieDB<'db, KeccakHasher, RlpCodec>; + +/// Convenience type alias to instantiate a Keccak/Rlp-flavoured `FatDB` +pub type FatDB<'db> = trie::FatDB<'db, KeccakHasher, RlpCodec>; + +/// Convenience type alias to instantiate a Keccak/Rlp-flavoured `TrieDBMut` +pub type TrieDBMut<'db> = trie::TrieDBMut<'db, KeccakHasher, RlpCodec>; + +/// Convenience type alias to instantiate a Keccak/Rlp-flavoured `SecTrieDBMut` +pub type SecTrieDBMut<'db> = trie::SecTrieDBMut<'db, KeccakHasher, RlpCodec>; + +/// Convenience type alias to instantiate a Keccak/Rlp-flavoured `FatDBMut` +pub type FatDBMut<'db> = trie::FatDBMut<'db, KeccakHasher, RlpCodec>; + +/// Convenience type alias to instantiate a Keccak/Rlp-flavoured `TrieFactory` +pub type TrieFactory = trie::TrieFactory; + +/// Convenience type alias for Keccak/Rlp flavoured trie errors +pub type TrieError = trie::TrieError; +/// Convenience type alias for Keccak/Rlp flavoured trie results +pub type 
Result = trie::Result; diff --git a/util/patricia-trie-ethereum/src/rlp_node_codec.rs b/util/patricia-trie-ethereum/src/rlp_node_codec.rs new file mode 100644 index 000000000..414a129ef --- /dev/null +++ b/util/patricia-trie-ethereum/src/rlp_node_codec.rs @@ -0,0 +1,124 @@ +// Copyright 2015-2018 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! `NodeCodec` implementation for Rlp + +use elastic_array::{ElasticArray1024, ElasticArray128}; +use ethereum_types::H256; +use hashdb::Hasher; +use keccak_hasher::KeccakHasher; +use rlp::{DecoderError, RlpStream, Rlp, Prototype}; +use std::marker::PhantomData; +use trie::{NibbleSlice, NodeCodec, node::Node, ChildReference}; + +/// Concrete implementation of a `NodeCodec` with Rlp encoding, generic over the `Hasher` +#[derive(Default, Clone)] +pub struct RlpNodeCodec {mark: PhantomData} + +// NOTE: what we'd really like here is: +// `impl NodeCodec for RlpNodeCodec where H::Out: Decodable` +// but due to the current limitations of Rust const evaluation we can't +// do `const HASHED_NULL_NODE: H::Out = H::Out( … … )`. Perhaps one day soon? +impl NodeCodec for RlpNodeCodec { + type Error = DecoderError; + const HASHED_NULL_NODE : H256 = H256( [0x56, 0xe8, 0x1f, 0x17, 0x1b, 0xcc, 0x55, 0xa6, 0xff, 0x83, 0x45, 0xe6, 0x92, 0xc0, 0xf8, 0x6e, 0x5b, 0x48, 0xe0, 0x1b, 0x99, 0x6c, 0xad, 0xc0, 0x01, 0x62, 0x2f, 0xb5, 0xe3, 0x63, 0xb4, 0x21] ); + fn decode(data: &[u8]) -> ::std::result::Result { + let r = Rlp::new(data); + match r.prototype()? { + // either leaf or extension - decode first item with NibbleSlice::??? + // and use is_leaf return to figure out which. + // if leaf, second item is a value (is_data()) + // if extension, second item is a node (either SHA3 to be looked up and + // fed back into this function or inline RLP which can be fed back into this function). + Prototype::List(2) => match NibbleSlice::from_encoded(r.at(0)?.data()?) { + (slice, true) => Ok(Node::Leaf(slice, r.at(1)?.data()?)), + (slice, false) => Ok(Node::Extension(slice, r.at(1)?.as_raw())), + }, + // branch - first 16 are nodes, 17th is a value (or empty). + Prototype::List(17) => { + let mut nodes = [&[] as &[u8]; 16]; + for i in 0..16 { + nodes[i] = r.at(i)?.as_raw(); + } + Ok(Node::Branch(nodes, if r.at(16)?.is_empty() { None } else { Some(r.at(16)?.data()?) })) + }, + // an empty branch index. + Prototype::Data(0) => Ok(Node::Empty), + // something went wrong. 
+ _ => Err(DecoderError::Custom("Rlp is not valid.")) + } + } + fn try_decode_hash(data: &[u8]) -> Option<::Out> { + let r = Rlp::new(data); + if r.is_data() && r.size() == KeccakHasher::LENGTH { + Some(r.as_val().expect("Hash is the correct size; qed")) + } else { + None + } + } + fn is_empty_node(data: &[u8]) -> bool { + Rlp::new(data).is_empty() + } + fn empty_node() -> ElasticArray1024 { + let mut stream = RlpStream::new(); + stream.append_empty_data(); + stream.drain() + } + + fn leaf_node(partial: &[u8], value: &[u8]) -> ElasticArray1024 { + let mut stream = RlpStream::new_list(2); + stream.append(&partial); + stream.append(&value); + stream.drain() + } + + fn ext_node(partial: &[u8], child_ref: ChildReference<::Out>) -> ElasticArray1024 { + let mut stream = RlpStream::new_list(2); + stream.append(&partial); + match child_ref { + ChildReference::Hash(h) => stream.append(&h), + ChildReference::Inline(inline_data, len) => { + let bytes = &AsRef::<[u8]>::as_ref(&inline_data)[..len]; + stream.append_raw(bytes, 1) + }, + }; + stream.drain() + } + + fn branch_node(children: I, value: Option>) -> ElasticArray1024 + where I: IntoIterator::Out>>> + { + let mut stream = RlpStream::new_list(17); + for child_ref in children { + match child_ref { + Some(c) => match c { + ChildReference::Hash(h) => stream.append(&h), + ChildReference::Inline(inline_data, len) => { + let bytes = &AsRef::<[u8]>::as_ref(&inline_data)[..len]; + stream.append_raw(bytes, 1) + }, + }, + None => stream.append_empty_data() + }; + } + if let Some(value) = value { + stream.append(&&*value); + } else { + stream.append_empty_data(); + } + stream.drain() + } +} diff --git a/util/patricia_trie/Cargo.toml b/util/patricia_trie/Cargo.toml index 48b06b214..532465678 100644 --- a/util/patricia_trie/Cargo.toml +++ b/util/patricia_trie/Cargo.toml @@ -1,22 +1,24 @@ [package] name = "patricia-trie" -version = "0.1.0" +version = "0.2.0" authors = ["Parity Technologies "] -description = "Merkle-Patricia Trie (Ethereum Style)" +description = "Merkle-Patricia Trie generic over key hasher and node encoding" license = "GPL-3.0" [dependencies] elastic-array = "0.10" log = "0.3" rand = "0.4" +hashdb = { version = "0.2", path = "../hashdb" } ethcore-bytes = { version = "0.1.0", path = "../bytes" } -ethereum-types = "0.3" -keccak-hash = { version = "0.1.0", path = "../hash" } -hashdb = { version = "0.1.1", path = "../hashdb" } -rlp = { version = "0.2.1", path = "../rlp" } -triehash = { version = "0.1.0", path = "../triehash" } -memorydb = { version = "0.1.0", path = "../memorydb" } -ethcore-logger = { version = "1.9.0", path = "../../logger" } [dev-dependencies] +env_logger = "0.5" +ethereum-types = "0.3" +keccak-hash = { version = "0.1.0", path = "../hash" } +keccak-hasher = { path = "../keccak-hasher" } +memorydb = { version = "0.2", path = "../memorydb" } +patricia-trie-ethereum = { path = "../patricia-trie-ethereum" } +rlp = { version = "0.2.1", path = "../rlp" } trie-standardmap = { path = "../trie-standardmap" } +triehash = { version = "0.1.0", path = "../triehash" } diff --git a/util/patricia_trie/benches/trie.rs b/util/patricia_trie/benches/trie.rs index f26febdb5..114006ebe 100644 --- a/util/patricia_trie/benches/trie.rs +++ b/util/patricia_trie/benches/trie.rs @@ -21,16 +21,21 @@ extern crate ethcore_bytes; extern crate ethereum_types; extern crate memorydb; extern crate patricia_trie as trie; +extern crate patricia_trie_ethereum as ethtrie; +extern crate keccak_hasher; extern crate keccak_hash; extern crate trie_standardmap; +extern 
crate hashdb; use ethcore_bytes::Bytes; use ethereum_types::H256; use keccak_hash::keccak; use memorydb::MemoryDB; use test::{Bencher, black_box}; -use trie::{TrieDBMut, TrieDB, TrieMut, Trie}; +use trie::{TrieMut, Trie}; use trie_standardmap::{Alphabet, ValueMode, StandardMap}; +use keccak_hasher::KeccakHasher; +use ethtrie::{TrieDB, TrieDBMut}; fn random_word(alphabet: &[u8], min_count: usize, diff_count: usize, seed: &mut H256) -> Vec { assert!(min_count + diff_count <= 32); @@ -69,7 +74,7 @@ fn trie_insertions_32_mir_1k(b: &mut Bencher) { }; let d = st.make(); b.iter(&mut ||{ - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); let mut t = TrieDBMut::new(&mut memdb, &mut root); for i in d.iter() { @@ -87,7 +92,7 @@ fn trie_iter(b: &mut Bencher) { count: 1000, }; let d = st.make(); - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); { let mut t = TrieDBMut::new(&mut memdb, &mut root); @@ -116,7 +121,7 @@ fn trie_insertions_32_ran_1k(b: &mut Bencher) { let d = st.make(); let mut r = H256::new(); b.iter(&mut ||{ - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); let mut t = TrieDBMut::new(&mut memdb, &mut root); for i in d.iter() { @@ -137,7 +142,7 @@ fn trie_insertions_six_high(b: &mut Bencher) { } b.iter(||{ - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); let mut t = TrieDBMut::new(&mut memdb, &mut root); for i in d.iter() { @@ -157,7 +162,7 @@ fn trie_insertions_six_mid(b: &mut Bencher) { d.push((k, v)) } b.iter(||{ - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); let mut t = TrieDBMut::new(&mut memdb, &mut root); for i in d.iter() { @@ -178,7 +183,7 @@ fn trie_insertions_random_mid(b: &mut Bencher) { } b.iter(||{ - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); let mut t = TrieDBMut::new(&mut memdb, &mut root); for i in d.iter() { @@ -199,7 +204,7 @@ fn trie_insertions_six_low(b: &mut Bencher) { } b.iter(||{ - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); let mut t = TrieDBMut::new(&mut memdb, &mut root); for i in d.iter() { diff --git a/util/patricia_trie/src/fatdb.rs b/util/patricia_trie/src/fatdb.rs index 90cdef902..34a49a5d0 100644 --- a/util/patricia_trie/src/fatdb.rs +++ b/util/patricia_trie/src/fatdb.rs @@ -14,66 +14,77 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use ethereum_types::H256; -use keccak::keccak; -use hashdb::HashDB; -use super::{TrieDB, Trie, TrieDBIterator, TrieItem, TrieIterator, Query}; +use hashdb::{HashDB, Hasher}; +use super::{Result, TrieDB, Trie, TrieDBIterator, TrieItem, TrieIterator, Query}; +use node_codec::NodeCodec; /// A `Trie` implementation which hashes keys and uses a generic `HashDB` backing database. /// Additionaly it stores inserted hash-key mappings for later retrieval. /// /// Use it as a `Trie` or `TrieMut` trait object. -pub struct FatDB<'db> { - raw: TrieDB<'db>, +pub struct FatDB<'db, H, C> +where + H: Hasher + 'db, + C: NodeCodec +{ + raw: TrieDB<'db, H, C>, } -impl<'db> FatDB<'db> { +impl<'db, H, C> FatDB<'db, H, C> +where + H: Hasher, + C: NodeCodec +{ /// Create a new trie with the backing database `db` and empty `root` /// Initialise to the state entailed by the genesis block. /// This guarantees the trie is built correctly. 
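For reference, the pattern the benches above follow is what downstream users are expected to write: pull the Keccak/RLP type aliases from the façade crate and the generic traits from `patricia_trie`. A minimal sketch, assuming the crate names declared in this patch:

```rust
// Sketch only: assumes the crates/aliases introduced in this PR.
extern crate ethereum_types;
extern crate hashdb;
extern crate keccak_hasher;
extern crate memorydb;
extern crate patricia_trie as trie;
extern crate patricia_trie_ethereum as ethtrie;

use ethereum_types::H256;
use hashdb::DBValue;
use keccak_hasher::KeccakHasher;
use memorydb::MemoryDB;
use trie::{Trie, TrieMut};        // the generic traits stay in `patricia_trie`
use ethtrie::{TrieDB, TrieDBMut}; // Keccak/RLP-flavoured aliases from the façade crate

fn main() {
    let mut memdb = MemoryDB::<KeccakHasher>::new();
    let mut root = H256::new();
    {
        let mut t = TrieDBMut::new(&mut memdb, &mut root);
        t.insert(b"foo", b"bar").unwrap();
    }
    let t = TrieDB::new(&memdb, &root).unwrap();
    assert_eq!(t.get(b"foo").unwrap().unwrap(), DBValue::from_slice(b"bar"));
}
```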
- pub fn new(db: &'db HashDB, root: &'db H256) -> super::Result { - let fatdb = FatDB { - raw: TrieDB::new(db, root)? - }; - - Ok(fatdb) + pub fn new(db: &'db HashDB, root: &'db H::Out) -> Result { + Ok(FatDB { raw: TrieDB::new(db, root)? }) } /// Get the backing database. - pub fn db(&self) -> &HashDB { - self.raw.db() - } + pub fn db(&self) -> &HashDB { self.raw.db() } } -impl<'db> Trie for FatDB<'db> { - fn iter<'a>(&'a self) -> super::Result + 'a>> { - FatDBIterator::new(&self.raw).map(|iter| Box::new(iter) as Box<_>) +impl<'db, H, C> Trie for FatDB<'db, H, C> +where + H: Hasher, + C: NodeCodec +{ + fn root(&self) -> &H::Out { self.raw.root() } + + fn contains(&self, key: &[u8]) -> Result { + self.raw.contains(H::hash(key).as_ref()) } - fn root(&self) -> &H256 { - self.raw.root() - } - - fn contains(&self, key: &[u8]) -> super::Result { - self.raw.contains(&keccak(key)) - } - - fn get_with<'a, 'key, Q: Query>(&'a self, key: &'key [u8], query: Q) -> super::Result> + fn get_with<'a, 'key, Q: Query>(&'a self, key: &'key [u8], query: Q) -> Result, H::Out, C::Error> where 'a: 'key { - self.raw.get_with(&keccak(key), query) + self.raw.get_with(H::hash(key).as_ref(), query) + } + + fn iter<'a>(&'a self) -> Result> + 'a>, ::Out, C::Error> { + FatDBIterator::::new(&self.raw).map(|iter| Box::new(iter) as Box<_>) } } /// Itarator over inserted pairs of key values. -pub struct FatDBIterator<'db> { - trie_iterator: TrieDBIterator<'db>, - trie: &'db TrieDB<'db>, +pub struct FatDBIterator<'db, H, C> +where + H: Hasher + 'db, + C: NodeCodec + 'db +{ + trie_iterator: TrieDBIterator<'db, H, C>, + trie: &'db TrieDB<'db, H, C>, } -impl<'db> FatDBIterator<'db> { +impl<'db, H, C> FatDBIterator<'db, H, C> +where + H: Hasher, + C: NodeCodec +{ /// Creates new iterator. 
- pub fn new(trie: &'db TrieDB) -> super::Result { + pub fn new(trie: &'db TrieDB) -> Result { Ok(FatDBIterator { trie_iterator: TrieDBIterator::new(trie)?, trie: trie, @@ -81,40 +92,56 @@ impl<'db> FatDBIterator<'db> { } } -impl<'db> TrieIterator for FatDBIterator<'db> { - fn seek(&mut self, key: &[u8]) -> super::Result<()> { - self.trie_iterator.seek(&keccak(key)) +impl<'db, H, C> TrieIterator for FatDBIterator<'db, H, C> +where + H: Hasher, + C: NodeCodec +{ + fn seek(&mut self, key: &[u8]) -> Result<(), H::Out, C::Error> { + let hashed_key = H::hash(key); + self.trie_iterator.seek(hashed_key.as_ref()) } } -impl<'db> Iterator for FatDBIterator<'db> { - type Item = TrieItem<'db>; +impl<'db, H, C> Iterator for FatDBIterator<'db, H, C> +where + H: Hasher, + C: NodeCodec +{ + type Item = TrieItem<'db, H::Out, C::Error>; fn next(&mut self) -> Option { self.trie_iterator.next() - .map(|res| + .map(|res| { res.map(|(hash, value)| { - let aux_hash = keccak(hash); + let aux_hash = H::hash(&hash); (self.trie.db().get(&aux_hash).expect("Missing fatdb hash").into_vec(), value) }) - ) + }) } } -#[test] -fn fatdb_to_trie() { +#[cfg(test)] +mod test { use memorydb::MemoryDB; use hashdb::DBValue; - use super::fatdbmut::FatDBMut; - use super::TrieMut; + use keccak_hasher::KeccakHasher; + use ethtrie::trie::{Trie, TrieMut}; + use ethtrie::{FatDB, FatDBMut}; + use ethereum_types::H256; - let mut memdb = MemoryDB::new(); - let mut root = H256::default(); - { - let mut t = FatDBMut::new(&mut memdb, &mut root); - t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]).unwrap(); + #[test] + fn fatdb_to_trie() { + let mut memdb = MemoryDB::::new(); + let mut root = H256::new(); + { + let mut t = FatDBMut::new(&mut memdb, &mut root); + t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]).unwrap(); + } + let t = FatDB::new(&memdb, &root).unwrap(); + assert_eq!(t.get(&[0x01u8, 0x23]).unwrap().unwrap(), DBValue::from_slice(&[0x01u8, 0x23])); + assert_eq!( + t.iter().unwrap().map(Result::unwrap).collect::>(), + vec![(vec![0x01u8, 0x23], DBValue::from_slice(&[0x01u8, 0x23] as &[u8]))]); } - let t = FatDB::new(&memdb, &root).unwrap(); - assert_eq!(t.get(&[0x01u8, 0x23]).unwrap().unwrap(), DBValue::from_slice(&[0x01u8, 0x23])); - assert_eq!(t.iter().unwrap().map(Result::unwrap).collect::>(), vec![(vec![0x01u8, 0x23], DBValue::from_slice(&[0x01u8, 0x23] as &[u8]))]); } diff --git a/util/patricia_trie/src/fatdbmut.rs b/util/patricia_trie/src/fatdbmut.rs index 9bf7b8803..67f4f14a7 100644 --- a/util/patricia_trie/src/fatdbmut.rs +++ b/util/patricia_trie/src/fatdbmut.rs @@ -14,105 +14,116 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use ethereum_types::H256; -use keccak::keccak; -use hashdb::{HashDB, DBValue}; -use super::{TrieDBMut, TrieMut}; +use hashdb::{HashDB, DBValue, Hasher}; +use super::{Result, TrieDBMut, TrieMut}; +use node_codec::NodeCodec; /// A mutable `Trie` implementation which hashes keys and uses a generic `HashDB` backing database. /// Additionaly it stores inserted hash-key mappings for later retrieval. /// /// Use it as a `Trie` or `TrieMut` trait object. -pub struct FatDBMut<'db> { - raw: TrieDBMut<'db>, +pub struct FatDBMut<'db, H, C> +where + H: Hasher + 'db, + C: NodeCodec +{ + raw: TrieDBMut<'db, H, C>, } -impl<'db> FatDBMut<'db> { +impl<'db, H, C> FatDBMut<'db, H, C> +where + H: Hasher, + C: NodeCodec +{ /// Create a new trie with the backing database `db` and empty `root` /// Initialise to the state entailed by the genesis block. 
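A sketch of what the fat-DB pair provides, namely that the original (pre-hash) keys come back out of the iterator; assumes the crates as wired up in this patch:

```rust
// Sketch only: assumes the crates/APIs introduced in this PR.
extern crate ethereum_types;
extern crate keccak_hasher;
extern crate memorydb;
extern crate patricia_trie as trie;
extern crate patricia_trie_ethereum as ethtrie;

use ethereum_types::H256;
use keccak_hasher::KeccakHasher;
use memorydb::MemoryDB;
use trie::{Trie, TrieMut};
use ethtrie::{FatDB, FatDBMut};

fn main() {
    let mut memdb = MemoryDB::<KeccakHasher>::new();
    let mut root = H256::new();
    {
        // Keys are hashed before they reach the underlying TrieDBMut, but the
        // hash -> key mapping is stored alongside so it can be recovered later.
        let mut t = FatDBMut::new(&mut memdb, &mut root);
        t.insert(b"dog", b"puppy").unwrap();
        t.insert(b"cat", b"kitten").unwrap();
    }
    let t = FatDB::new(&memdb, &root).unwrap();
    // Iteration order follows the hashed keys, so sort before comparing.
    let mut keys: Vec<Vec<u8>> = t.iter().unwrap()
        .map(|item| item.unwrap().0) // keep the original key of each (key, value) pair
        .collect();
    keys.sort();
    assert_eq!(keys, vec![b"cat".to_vec(), b"dog".to_vec()]);
}
```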
/// This guarantees the trie is built correctly. - pub fn new(db: &'db mut HashDB, root: &'db mut H256) -> Self { + pub fn new(db: &'db mut HashDB, root: &'db mut H::Out) -> Self { FatDBMut { raw: TrieDBMut::new(db, root) } } /// Create a new trie with the backing database `db` and `root`. /// /// Returns an error if root does not exist. - pub fn from_existing(db: &'db mut HashDB, root: &'db mut H256) -> super::Result { + pub fn from_existing(db: &'db mut HashDB, root: &'db mut H::Out) -> Result { Ok(FatDBMut { raw: TrieDBMut::from_existing(db, root)? }) } /// Get the backing database. - pub fn db(&self) -> &HashDB { + pub fn db(&self) -> &HashDB { self.raw.db() } /// Get the backing database. - pub fn db_mut(&mut self) -> &mut HashDB { + pub fn db_mut(&mut self) -> &mut HashDB { self.raw.db_mut() } - - fn to_aux_key(key: &[u8]) -> H256 { - keccak(key) - } } -impl<'db> TrieMut for FatDBMut<'db> { - fn root(&mut self) -> &H256 { - self.raw.root() +impl<'db, H, C> TrieMut for FatDBMut<'db, H, C> +where + H: Hasher, + C: NodeCodec +{ + fn root(&mut self) -> &H::Out { self.raw.root() } + + fn is_empty(&self) -> bool { self.raw.is_empty() } + + fn contains(&self, key: &[u8]) -> Result { + self.raw.contains(H::hash(key).as_ref()) } - fn is_empty(&self) -> bool { - self.raw.is_empty() - } - - fn contains(&self, key: &[u8]) -> super::Result { - self.raw.contains(&keccak(key)) - } - - fn get<'a, 'key>(&'a self, key: &'key [u8]) -> super::Result> + fn get<'a, 'key>(&'a self, key: &'key [u8]) -> Result, H::Out, C::Error> where 'a: 'key { - self.raw.get(&keccak(key)) + self.raw.get(H::hash(key).as_ref()) } - fn insert(&mut self, key: &[u8], value: &[u8]) -> super::Result> { - let hash = keccak(key); - let out = self.raw.insert(&hash, value)?; + fn insert(&mut self, key: &[u8], value: &[u8]) -> Result, H::Out, C::Error> { + let hash = H::hash(key); + let out = self.raw.insert(hash.as_ref(), value)?; let db = self.raw.db_mut(); // don't insert if it doesn't exist. if out.is_none() { - db.emplace(Self::to_aux_key(&hash), DBValue::from_slice(key)); + let aux_hash = H::hash(hash.as_ref()); + db.emplace(aux_hash, DBValue::from_slice(key)); } Ok(out) } - fn remove(&mut self, key: &[u8]) -> super::Result> { - let hash = keccak(key); - let out = self.raw.remove(&hash)?; + fn remove(&mut self, key: &[u8]) -> Result, H::Out, C::Error> { + let hash = H::hash(key); + let out = self.raw.remove(hash.as_ref())?; // don't remove if it already exists. 
if out.is_some() { - self.raw.db_mut().remove(&Self::to_aux_key(&hash)); + self.raw.db_mut().remove(&hash); } Ok(out) } } -#[test] -fn fatdb_to_trie() { +#[cfg(test)] +mod test { + use hashdb::DBValue; use memorydb::MemoryDB; - use super::TrieDB; - use super::Trie; + use ethtrie::trie::{Trie, TrieMut}; + use ethtrie::{TrieDB, FatDBMut}; + use keccak_hasher::KeccakHasher; + use keccak; + use ethereum_types::H256; - let mut memdb = MemoryDB::new(); - let mut root = H256::default(); - { - let mut t = FatDBMut::new(&mut memdb, &mut root); - t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]).unwrap(); + #[test] + fn fatdbmut_to_trie() { + let mut memdb = MemoryDB::::new(); + let mut root = H256::new(); + { + let mut t = FatDBMut::new(&mut memdb, &mut root); + t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]).unwrap(); + } + let t = TrieDB::new(&memdb, &root).unwrap(); + assert_eq!(t.get(&keccak::keccak(&[0x01u8, 0x23])).unwrap().unwrap(), DBValue::from_slice(&[0x01u8, 0x23])); } - let t = TrieDB::new(&memdb, &root).unwrap(); - assert_eq!(t.get(&keccak(&[0x01u8, 0x23])).unwrap().unwrap(), DBValue::from_slice(&[0x01u8, 0x23])); -} +} \ No newline at end of file diff --git a/util/patricia_trie/src/lib.rs b/util/patricia_trie/src/lib.rs index 8e0e44f03..7cc623b1a 100644 --- a/util/patricia_trie/src/lib.rs +++ b/util/patricia_trie/src/lib.rs @@ -15,26 +15,35 @@ // along with Parity. If not, see . //! Trie interface and implementation. -extern crate rand; -extern crate ethereum_types; -extern crate keccak_hash as keccak; -extern crate rlp; -extern crate hashdb; -extern crate ethcore_bytes as bytes; extern crate elastic_array; -extern crate memorydb; -extern crate ethcore_logger; - -#[cfg(test)] -extern crate trie_standardmap as standardmap; - +extern crate ethcore_bytes as bytes; +extern crate hashdb; +extern crate rand; #[macro_use] extern crate log; +#[cfg(test)] +extern crate env_logger; +#[cfg(test)] +extern crate ethereum_types; +#[cfg(test)] +extern crate trie_standardmap as standardmap; +#[cfg(test)] +extern crate patricia_trie_ethereum as ethtrie; +#[cfg(test)] +extern crate memorydb; +#[cfg(test)] +extern crate rlp; +#[cfg(test)] +extern crate keccak_hash as keccak; +#[cfg(test)] +extern crate keccak_hasher; +#[cfg(test)] +extern crate triehash; + use std::{fmt, error}; -use ethereum_types::H256; -use keccak::KECCAK_NULL_RLP; -use hashdb::{HashDB, DBValue}; +use hashdb::{HashDB, DBValue, Hasher}; +use std::marker::PhantomData; pub mod node; pub mod triedb; @@ -46,158 +55,154 @@ pub mod recorder; mod fatdb; mod fatdbmut; mod lookup; -mod nibbleslice; mod nibblevec; +mod nibbleslice; +mod node_codec; -pub use self::triedbmut::TrieDBMut; pub use self::triedb::{TrieDB, TrieDBIterator}; +pub use self::triedbmut::{TrieDBMut, ChildReference}; pub use self::sectriedbmut::SecTrieDBMut; pub use self::sectriedb::SecTrieDB; pub use self::fatdb::{FatDB, FatDBIterator}; pub use self::fatdbmut::FatDBMut; pub use self::recorder::Recorder; +pub use self::lookup::Lookup; +pub use self::nibbleslice::NibbleSlice; +pub use node_codec::NodeCodec; /// Trie Errors. /// /// These borrow the data within them to avoid excessive copying on every /// trie operation. #[derive(Debug, PartialEq, Eq, Clone)] -pub enum TrieError { +pub enum TrieError { /// Attempted to create a trie with a state root not in the DB. 
- InvalidStateRoot(H256), + InvalidStateRoot(T), /// Trie item not found in the database, - IncompleteDatabase(H256), + IncompleteDatabase(T), /// Corrupt Trie item - DecoderError(rlp::DecoderError), + DecoderError(T, E), } -impl fmt::Display for TrieError { +impl fmt::Display for TrieError where T: std::fmt::Debug, E: std::fmt::Debug { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { - TrieError::InvalidStateRoot(ref root) => write!(f, "Invalid state root: {}", root), - TrieError::IncompleteDatabase(ref missing) => - write!(f, "Database missing expected key: {}", missing), - TrieError::DecoderError(ref err) => write!(f, "Decoding failed with {}", err), + TrieError::InvalidStateRoot(ref root) => write!(f, "Invalid state root: {:?}", root), + TrieError::IncompleteDatabase(ref missing) => write!(f, "Database missing expected key: {:?}", missing), + TrieError::DecoderError(ref hash, ref decoder_err) => write!(f, "Decoding failed for hash {:?}; err: {:?}", hash, decoder_err), } } } -impl error::Error for TrieError { +impl error::Error for TrieError where T: std::fmt::Debug, E: std::error::Error { fn description(&self) -> &str { match *self { TrieError::InvalidStateRoot(_) => "Invalid state root", TrieError::IncompleteDatabase(_) => "Incomplete database", - TrieError::DecoderError(ref e) => e.description(), + TrieError::DecoderError(_, ref err) => err.description(), } } } -impl From for Box { - fn from(e: rlp::DecoderError) -> Self { Box::new(TrieError::DecoderError(e)) } -} +/// Trie result type. Boxed to avoid copying around extra space for the `Hasher`s `Out` on successful queries. +pub type Result = ::std::result::Result>>; -/// Trie result type. Boxed to avoid copying around extra space for `H256`s on successful queries. -pub type Result = ::std::result::Result>; -/// Trie-Item type. -pub type TrieItem<'a> = Result<(Vec, DBValue)>; +/// Trie-Item type used for iterators over trie data. +pub type TrieItem<'a, U, E> = Result<(Vec, DBValue), U, E>; /// Description of what kind of query will be made to the trie. /// /// This is implemented for any &mut recorder (where the query will return /// a DBValue), any function taking raw bytes (where no recording will be made), /// or any tuple of (&mut Recorder, FnOnce(&[u8])) -pub trait Query { +pub trait Query { /// Output item. type Item; /// Decode a byte-slice into the desired item. - fn decode(self, &[u8]) -> Self::Item; + fn decode(self, data: &[u8]) -> Self::Item; /// Record that a node has been passed through. 
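With the error enum generic over the hash and codec error types, call sites match like this; a sketch against the Keccak/RLP instantiation, assuming the crate names in this patch:

```rust
// Sketch only: assumes the crates/APIs introduced in this PR.
extern crate ethereum_types;
extern crate keccak_hasher;
extern crate memorydb;
extern crate patricia_trie_ethereum as ethtrie;

use ethereum_types::H256;
use keccak_hasher::KeccakHasher;
use memorydb::MemoryDB;
use ethtrie::TrieDB;
use ethtrie::trie::TrieError; // the generic enum re-exported through the façade

fn main() {
    let memdb = MemoryDB::<KeccakHasher>::new();
    let bogus_root = H256::new(); // all-zero root, not present in the empty database

    match TrieDB::new(&memdb, &bogus_root) {
        Err(e) => match *e {
            // The hash type is now H::Out rather than a hard-coded H256 ...
            TrieError::InvalidStateRoot(root) => println!("no such root: {:?}", root),
            TrieError::IncompleteDatabase(missing) => println!("missing node: {:?}", missing),
            // ... and decode failures carry the codec's own error type.
            TrieError::DecoderError(hash, err) => println!("bad node {:?}: {:?}", hash, err),
        },
        Ok(_) => unreachable!("the bogus root is not in the empty MemoryDB"),
    }
}
```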
- fn record(&mut self, &H256, &[u8], u32) { } + fn record(&mut self, _hash: &H::Out, _data: &[u8], _depth: u32) {} } -impl<'a> Query for &'a mut Recorder { +impl<'a, H: Hasher> Query for &'a mut Recorder { type Item = DBValue; - fn decode(self, value: &[u8]) -> DBValue { DBValue::from_slice(value) } - fn record(&mut self, hash: &H256, data: &[u8], depth: u32) { + fn record(&mut self, hash: &H::Out, data: &[u8], depth: u32) { (&mut **self).record(hash, data, depth); } } -impl Query for F where F: for<'a> FnOnce(&'a [u8]) -> T { +impl Query for F where F: for<'a> FnOnce(&'a [u8]) -> T { type Item = T; - fn decode(self, value: &[u8]) -> T { (self)(value) } } -impl<'a, F, T> Query for (&'a mut Recorder, F) where F: FnOnce(&[u8]) -> T { +impl<'a, F, T, H: Hasher> Query for (&'a mut Recorder, F) where F: FnOnce(&[u8]) -> T { type Item = T; - fn decode(self, value: &[u8]) -> T { (self.1)(value) } - fn record(&mut self, hash: &H256, data: &[u8], depth: u32) { + fn record(&mut self, hash: &H::Out, data: &[u8], depth: u32) { self.0.record(hash, data, depth) } } /// A key-value datastore implemented as a database-backed modified Merkle tree. -pub trait Trie { +pub trait Trie> { /// Return the root of the trie. - fn root(&self) -> &H256; + fn root(&self) -> &H::Out; /// Is the trie empty? - fn is_empty(&self) -> bool { *self.root() == KECCAK_NULL_RLP } + fn is_empty(&self) -> bool { *self.root() == C::HASHED_NULL_NODE } /// Does the trie contain a given key? - fn contains(&self, key: &[u8]) -> Result { - self.get(key).map(|x| x.is_some()) + fn contains(&self, key: &[u8]) -> Result { + self.get(key).map(|x|x.is_some() ) } /// What is the value of the given key in this trie? - fn get<'a, 'key>(&'a self, key: &'key [u8]) -> Result> where 'a: 'key { + fn get<'a, 'key>(&'a self, key: &'key [u8]) -> Result, H::Out, C::Error> where 'a: 'key { self.get_with(key, DBValue::from_slice) } /// Search for the key with the given query parameter. See the docs of the `Query` /// trait for more details. - fn get_with<'a, 'key, Q: Query>(&'a self, key: &'key [u8], query: Q) - -> Result> where 'a: 'key; + fn get_with<'a, 'key, Q: Query>(&'a self, key: &'key [u8], query: Q) -> Result, H::Out, C::Error> where 'a: 'key; /// Returns a depth-first iterator over the elements of trie. - fn iter<'a>(&'a self) -> Result + 'a>>; + fn iter<'a>(&'a self) -> Result> + 'a>, H::Out, C::Error>; } /// A key-value datastore implemented as a database-backed modified Merkle tree. -pub trait TrieMut { +pub trait TrieMut> { /// Return the root of the trie. - fn root(&mut self) -> &H256; + fn root(&mut self) -> &H::Out; /// Is the trie empty? fn is_empty(&self) -> bool; /// Does the trie contain a given key? - fn contains(&self, key: &[u8]) -> Result { + fn contains(&self, key: &[u8]) -> Result { self.get(key).map(|x| x.is_some()) } /// What is the value of the given key in this trie? - fn get<'a, 'key>(&'a self, key: &'key [u8]) -> Result> where 'a: 'key; + fn get<'a, 'key>(&'a self, key: &'key [u8]) -> Result, H::Out, C::Error> where 'a: 'key; /// Insert a `key`/`value` pair into the trie. An empty value is equivalent to removing /// `key` from the trie. Returns the old value associated with this key, if it existed. - fn insert(&mut self, key: &[u8], value: &[u8]) -> Result>; + fn insert(&mut self, key: &[u8], value: &[u8]) -> Result, H::Out, C::Error>; /// Remove a `key` from the trie. Equivalent to making it equal to the empty /// value. Returns the old value associated with this key, if it existed. 
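Thanks to the blanket `Query` impl for closures above, `get_with` can decode a value in place without copying it into a `DBValue` first. A sketch, assuming this patch's crates:

```rust
// Sketch only: assumes the crates/APIs introduced in this PR.
extern crate ethereum_types;
extern crate keccak_hasher;
extern crate memorydb;
extern crate patricia_trie as trie;
extern crate patricia_trie_ethereum as ethtrie;

use ethereum_types::H256;
use keccak_hasher::KeccakHasher;
use memorydb::MemoryDB;
use trie::{Trie, TrieMut};
use ethtrie::{TrieDB, TrieDBMut};

fn main() {
    let mut memdb = MemoryDB::<KeccakHasher>::new();
    let mut root = H256::new();
    {
        let mut t = TrieDBMut::new(&mut memdb, &mut root);
        t.insert(b"key", b"some value").unwrap();
    }
    let t = TrieDB::new(&memdb, &root).unwrap();

    // The closure is the Query: it sees the raw value bytes and returns
    // whatever the caller wants, here just the length.
    let len = t.get_with(b"key", |value: &[u8]| value.len()).unwrap();
    assert_eq!(len, Some(10));
}
```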
- fn remove(&mut self, key: &[u8]) -> Result>; + fn remove(&mut self, key: &[u8]) -> Result, H::Out, C::Error>; } -/// A trie iterator that also supports random access. -pub trait TrieIterator : Iterator { +/// A trie iterator that also supports random access (`seek()`). +pub trait TrieIterator>: Iterator { /// Position the iterator on the first element with key > `key` - fn seek(&mut self, key: &[u8]) -> Result<()>; + fn seek(&mut self, key: &[u8]) -> Result<(), H::Out, >::Error>; } /// Trie types @@ -219,19 +224,21 @@ impl Default for TrieSpec { /// Trie factory. #[derive(Default, Clone)] -pub struct TrieFactory { +pub struct TrieFactory> { spec: TrieSpec, + mark_hash: PhantomData, + mark_codec: PhantomData, } /// All different kinds of tries. /// This is used to prevent a heap allocation for every created trie. -pub enum TrieKinds<'db> { +pub enum TrieKinds<'db, H: Hasher + 'db, C: NodeCodec> { /// A generic trie db. - Generic(TrieDB<'db>), + Generic(TrieDB<'db, H, C>), /// A secure trie db. - Secure(SecTrieDB<'db>), + Secure(SecTrieDB<'db, H, C>), /// A fat trie db. - Fat(FatDB<'db>), + Fat(FatDB<'db, H, C>), } // wrapper macro for making the match easier to deal with. @@ -245,8 +252,8 @@ macro_rules! wrapper { } } -impl<'db> Trie for TrieKinds<'db> { - fn root(&self) -> &H256 { +impl<'db, H: Hasher, C: NodeCodec> Trie for TrieKinds<'db, H, C> { + fn root(&self) -> &H::Out { wrapper!(self, root,) } @@ -254,31 +261,33 @@ impl<'db> Trie for TrieKinds<'db> { wrapper!(self, is_empty,) } - fn contains(&self, key: &[u8]) -> Result { + fn contains(&self, key: &[u8]) -> Result { wrapper!(self, contains, key) } - fn get_with<'a, 'key, Q: Query>(&'a self, key: &'key [u8], query: Q) -> Result> + fn get_with<'a, 'key, Q: Query>(&'a self, key: &'key [u8], query: Q) -> Result, H::Out, C::Error> where 'a: 'key { wrapper!(self, get_with, key, query) } - fn iter<'a>(&'a self) -> Result + 'a>> { + fn iter<'a>(&'a self) -> Result> + 'a>, H::Out, C::Error> { wrapper!(self, iter,) } } -impl TrieFactory { +impl<'db, H, C> TrieFactory +where + H: Hasher, + C: NodeCodec + 'db +{ /// Creates new factory. pub fn new(spec: TrieSpec) -> Self { - TrieFactory { - spec: spec, - } + TrieFactory { spec, mark_hash: PhantomData, mark_codec: PhantomData } } /// Create new immutable instance of Trie. - pub fn readonly<'db>(&self, db: &'db HashDB, root: &'db H256) -> Result> { + pub fn readonly(&self, db: &'db HashDB, root: &'db H::Out) -> Result, H::Out, >::Error> { match self.spec { TrieSpec::Generic => Ok(TrieKinds::Generic(TrieDB::new(db, root)?)), TrieSpec::Secure => Ok(TrieKinds::Secure(SecTrieDB::new(db, root)?)), @@ -287,20 +296,20 @@ impl TrieFactory { } /// Create new mutable instance of Trie. - pub fn create<'db>(&self, db: &'db mut HashDB, root: &'db mut H256) -> Box { + pub fn create(&self, db: &'db mut HashDB, root: &'db mut H::Out) -> Box + 'db> { match self.spec { - TrieSpec::Generic => Box::new(TrieDBMut::new(db, root)), - TrieSpec::Secure => Box::new(SecTrieDBMut::new(db, root)), - TrieSpec::Fat => Box::new(FatDBMut::new(db, root)), + TrieSpec::Generic => Box::new(TrieDBMut::<_, C>::new(db, root)), + TrieSpec::Secure => Box::new(SecTrieDBMut::<_, C>::new(db, root)), + TrieSpec::Fat => Box::new(FatDBMut::<_, C>::new(db, root)), } } /// Create new mutable instance of trie and check for errors. 
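The factory keeps its three flavours but now has the `Hasher`/`NodeCodec` pair pinned once via the `ethtrie::TrieFactory` alias. A sketch, assuming the crate and alias names in this patch:

```rust
// Sketch only: assumes the crates/aliases introduced in this PR.
extern crate ethereum_types;
extern crate keccak_hasher;
extern crate memorydb;
extern crate patricia_trie as trie;
extern crate patricia_trie_ethereum as ethtrie;

use ethereum_types::H256;
use keccak_hasher::KeccakHasher;
use memorydb::MemoryDB;
use trie::{Trie, TrieMut, TrieSpec};
use ethtrie::TrieFactory; // Keccak/RLP-flavoured alias of the generic factory

fn main() {
    let factory = TrieFactory::new(TrieSpec::Secure);
    let mut memdb = MemoryDB::<KeccakHasher>::new();
    let mut root = H256::new();
    {
        // `create` hands back a boxed TrieMut of the requested flavour.
        let mut t = factory.create(&mut memdb, &mut root);
        t.insert(b"foo", b"bar").unwrap();
    }
    // `readonly` yields the TrieKinds wrapper, which itself implements Trie.
    let t = factory.readonly(&memdb, &root).unwrap();
    assert!(t.contains(b"foo").unwrap());
}
```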
- pub fn from_existing<'db>(&self, db: &'db mut HashDB, root: &'db mut H256) -> Result> { + pub fn from_existing(&self, db: &'db mut HashDB, root: &'db mut H::Out) -> Result + 'db>, H::Out, >::Error> { match self.spec { - TrieSpec::Generic => Ok(Box::new(TrieDBMut::from_existing(db, root)?)), - TrieSpec::Secure => Ok(Box::new(SecTrieDBMut::from_existing(db, root)?)), - TrieSpec::Fat => Ok(Box::new(FatDBMut::from_existing(db, root)?)), + TrieSpec::Generic => Ok(Box::new(TrieDBMut::<_, C>::from_existing(db, root)?)), + TrieSpec::Secure => Ok(Box::new(SecTrieDBMut::<_, C>::from_existing(db, root)?)), + TrieSpec::Fat => Ok(Box::new(FatDBMut::<_, C>::from_existing(db, root)?)), } } diff --git a/util/patricia_trie/src/lookup.rs b/util/patricia_trie/src/lookup.rs index ae91689a3..6e9c7ff75 100644 --- a/util/patricia_trie/src/lookup.rs +++ b/util/patricia_trie/src/lookup.rs @@ -16,27 +16,33 @@ //! Trie lookup via HashDB. -use hashdb::HashDB; +use hashdb::{HashDB, Hasher}; use nibbleslice::NibbleSlice; -use ethereum_types::H256; - -use super::{TrieError, Query}; -use super::node::Node; +use node::Node; +use node_codec::NodeCodec; +use super::{Result, TrieError, Query}; +use std::marker::PhantomData; /// Trie lookup helper object. -pub struct Lookup<'a, Q: Query> { +pub struct Lookup<'a, H: Hasher + 'a, C: NodeCodec, Q: Query> { /// database to query from. - pub db: &'a HashDB, + pub db: &'a HashDB, /// Query object to record nodes and transform data. pub query: Q, /// Hash to start at - pub hash: H256, + pub hash: H::Out, + pub marker: PhantomData, // TODO: probably not needed when all is said and done? When Query is made generic? } -impl<'a, Q: Query> Lookup<'a, Q> { +impl<'a, H, C, Q> Lookup<'a, H, C, Q> +where + H: Hasher + 'a, + C: NodeCodec + 'a, + Q: Query, +{ /// Look up the given key. If the value is found, it will be passed to the given /// function to decode or copy. - pub fn look_up(mut self, mut key: NibbleSlice) -> super::Result> { + pub fn look_up(mut self, mut key: NibbleSlice) -> Result, H::Out, C::Error> { let mut hash = self.hash; // this loop iterates through non-inline nodes. @@ -55,7 +61,13 @@ impl<'a, Q: Query> Lookup<'a, Q> { // without incrementing the depth. let mut node_data = &node_data[..]; loop { - match Node::decoded(node_data)? { + let decoded = match C::decode(node_data) { + Ok(node) => node, + Err(e) => { + return Err(Box::new(TrieError::DecoderError(hash, e))) + } + }; + match decoded { Node::Leaf(slice, value) => { return Ok(match slice == key { true => Some(self.query.decode(value)), @@ -81,7 +93,7 @@ impl<'a, Q: Query> Lookup<'a, Q> { } // check if new node data is inline or hash. - if let Some(h) = Node::try_decode_hash(&node_data) { + if let Some(h) = C::try_decode_hash(&node_data) { hash = h; break } diff --git a/util/patricia_trie/src/nibbleslice.rs b/util/patricia_trie/src/nibbleslice.rs index 8cb03e53d..b87a66369 100644 --- a/util/patricia_trie/src/nibbleslice.rs +++ b/util/patricia_trie/src/nibbleslice.rs @@ -105,9 +105,11 @@ impl<'a> NibbleSlice<'a> { pub fn is_empty(&self) -> bool { self.len() == 0 } /// Get the length (in nibbles, naturally) of this slice. + #[inline] pub fn len(&self) -> usize { (self.data.len() + self.data_encode_suffix.len()) * 2 - self.offset - self.offset_encode_suffix } /// Get the nibble at position `i`. 
+ #[inline(always)] pub fn at(&self, i: usize) -> u8 { let l = self.data.len() * 2 - self.offset; if i < l { @@ -154,6 +156,7 @@ impl<'a> NibbleSlice<'a> { } /// Encode while nibble slice in prefixed hex notation, noting whether it `is_leaf`. + #[inline] pub fn encoded(&self, is_leaf: bool) -> ElasticArray36 { let l = self.len(); let mut r = ElasticArray36::new(); diff --git a/util/patricia_trie/src/nibblevec.rs b/util/patricia_trie/src/nibblevec.rs index 4398dbc6f..3fe8c9fb7 100644 --- a/util/patricia_trie/src/nibblevec.rs +++ b/util/patricia_trie/src/nibblevec.rs @@ -41,12 +41,14 @@ impl NibbleVec { } /// Length of the `NibbleVec` + #[inline(always)] pub fn len(&self) -> usize { self.len } /// Retrurns true if `NibbleVec` has zero length pub fn is_empty(&self) -> bool { self.len == 0 } /// Try to get the nibble at the given offset. + #[inline] pub fn at(&self, idx: usize) -> u8 { if idx % 2 == 0 { self.inner[idx / 2] >> 4 diff --git a/util/patricia_trie/src/node.rs b/util/patricia_trie/src/node.rs index 0ded1f66d..72c344fc3 100644 --- a/util/patricia_trie/src/node.rs +++ b/util/patricia_trie/src/node.rs @@ -14,12 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use ethereum_types::H256; use elastic_array::ElasticArray36; use nibbleslice::NibbleSlice; use nibblevec::NibbleVec; -use bytes::*; -use rlp::{Rlp, RlpStream, Prototype, DecoderError}; use hashdb::DBValue; /// Partial node key type. @@ -35,83 +32,7 @@ pub enum Node<'a> { /// Extension node; has key slice and node data. Data may not be null. Extension(NibbleSlice<'a>, &'a [u8]), /// Branch node; has array of 16 child nodes (each possibly null) and an optional immediate node data. - Branch([&'a [u8]; 16], Option<&'a [u8]>) -} - -impl<'a> Node<'a> { - /// Decode the `node_rlp` and return the Node. - pub fn decoded(node_rlp: &'a [u8]) -> Result { - let r = Rlp::new(node_rlp); - match r.prototype()? { - // either leaf or extension - decode first item with NibbleSlice::??? - // and use is_leaf return to figure out which. - // if leaf, second item is a value (is_data()) - // if extension, second item is a node (either SHA3 to be looked up and - // fed back into this function or inline RLP which can be fed back into this function). - Prototype::List(2) => match NibbleSlice::from_encoded(r.at(0)?.data()?) { - (slice, true) => Ok(Node::Leaf(slice, r.at(1)?.data()?)), - (slice, false) => Ok(Node::Extension(slice, r.at(1)?.as_raw())), - }, - // branch - first 16 are nodes, 17th is a value (or empty). - Prototype::List(17) => { - let mut nodes = [&[] as &[u8]; 16]; - for i in 0..16 { - nodes[i] = r.at(i)?.as_raw(); - } - Ok(Node::Branch(nodes, if r.at(16)?.is_empty() { None } else { Some(r.at(16)?.data()?) })) - }, - // an empty branch index. - Prototype::Data(0) => Ok(Node::Empty), - // something went wrong. - _ => Err(DecoderError::Custom("Rlp is not valid.")) - } - } - - /// Encode the node into RLP. - /// - /// Will always return the direct node RLP even if it's 32 or more bytes. To get the - /// RLP which would be valid for using in another node, use `encoded_and_added()`. 
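As an aside, the half-byte indexing that `at` performs above amounts to the following (std-only illustration for a zero offset, not the real API):

```rust
// Illustration only: each byte stores two nibbles, high half first.
fn nibble_at(data: &[u8], i: usize) -> u8 {
    if i % 2 == 0 { data[i / 2] >> 4 } else { data[i / 2] & 0x0f }
}

fn main() {
    let data = [0x01u8, 0x23, 0x45];
    let nibbles: Vec<u8> = (0..data.len() * 2).map(|i| nibble_at(&data, i)).collect();
    assert_eq!(nibbles, vec![0x0, 0x1, 0x2, 0x3, 0x4, 0x5]);
}
```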
- pub fn encoded(&self) -> Bytes { - match *self { - Node::Leaf(ref slice, ref value) => { - let mut stream = RlpStream::new_list(2); - stream.append(&&*slice.encoded(true)); - stream.append(value); - stream.out() - }, - Node::Extension(ref slice, ref raw_rlp) => { - let mut stream = RlpStream::new_list(2); - stream.append(&&*slice.encoded(false)); - stream.append_raw(raw_rlp, 1); - stream.out() - }, - Node::Branch(ref nodes, ref value) => { - let mut stream = RlpStream::new_list(17); - for i in 0..16 { - stream.append_raw(nodes[i], 1); - } - match *value { - Some(ref n) => { stream.append(n); }, - None => { stream.append_empty_data(); }, - } - stream.out() - }, - Node::Empty => { - let mut stream = RlpStream::new(); - stream.append_empty_data(); - stream.out() - } - } - } - - pub fn try_decode_hash(node_data: &[u8]) -> Option { - let r = Rlp::new(node_data); - if r.is_data() && r.size() == 32 { - Some(r.as_val().expect("Hash is the correct size of 32 bytes; qed")) - } else { - None - } - } + Branch([&'a [u8]; 16], Option<&'a [u8]>), } /// An owning node type. Useful for trie iterators. diff --git a/util/patricia_trie/src/node_codec.rs b/util/patricia_trie/src/node_codec.rs new file mode 100644 index 000000000..1dec20f90 --- /dev/null +++ b/util/patricia_trie/src/node_codec.rs @@ -0,0 +1,55 @@ +// Copyright 2015-2018 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Generic trait for trie node encoding/decoding. Takes a `hashdb::Hasher` +//! to parametrize the hashes used in the codec. + +use hashdb::Hasher; +use node::Node; +use ChildReference; + +use elastic_array::{ElasticArray1024, ElasticArray128}; + +/// Trait for trie node encoding/decoding +pub trait NodeCodec: Sized { + /// Encoding error type + type Error: ::std::error::Error; + + /// Null node type + const HASHED_NULL_NODE: H::Out; + + /// Decode bytes to a `Node`. Returns `Self::E` on failure. + fn decode(data: &[u8]) -> Result; + + /// Decode bytes to the `Hasher`s output type. Returns `None` on failure. + fn try_decode_hash(data: &[u8]) -> Option; + + /// Check if the provided bytes correspond to the codecs "empty" node. + fn is_empty_node(data: &[u8]) -> bool; + + /// Returns an empty node + fn empty_node() -> ElasticArray1024; + + /// Returns an encoded leaft node + fn leaf_node(partial: &[u8], value: &[u8]) -> ElasticArray1024; + + /// Returns an encoded extension node + fn ext_node(partial: &[u8], child_ref: ChildReference) -> ElasticArray1024; + + /// Returns an encoded branch node. Takes an iterator yielding `ChildReference` and an optional value + fn branch_node(children: I, value: Option>) -> ElasticArray1024 + where I: IntoIterator>>; +} diff --git a/util/patricia_trie/src/recorder.rs b/util/patricia_trie/src/recorder.rs index 6a0f9b45e..9ad7d8c33 100644 --- a/util/patricia_trie/src/recorder.rs +++ b/util/patricia_trie/src/recorder.rs @@ -16,13 +16,11 @@ //! Trie query recorder. 
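A round-trip sketch through the `NodeCodec` abstraction defined above, using the concrete Keccak/RLP codec from the façade crate; crate names are assumed from this patch, and the calls are fully qualified because `RlpNodeCodec` is generic over the hasher:

```rust
// Sketch only: assumes the crates/APIs introduced in this PR.
extern crate keccak_hasher;
extern crate patricia_trie as trie;
extern crate patricia_trie_ethereum as ethtrie;

use keccak_hasher::KeccakHasher;
use trie::{NibbleSlice, NodeCodec};
use trie::node::Node;
use ethtrie::RlpCodec; // the Keccak-flavoured RlpNodeCodec alias

fn main() {
    // An "empty" node is the RLP empty-data item...
    let empty = <RlpCodec as NodeCodec<KeccakHasher>>::empty_node();
    assert!(<RlpCodec as NodeCodec<KeccakHasher>>::is_empty_node(&empty));

    // ...and a leaf survives an encode/decode round trip.
    let partial = NibbleSlice::new(&[0x12u8, 0x34]).encoded(true); // hex-prefix encoded, leaf flag set
    let leaf = <RlpCodec as NodeCodec<KeccakHasher>>::leaf_node(&partial, b"value");
    match <RlpCodec as NodeCodec<KeccakHasher>>::decode(&leaf) {
        Ok(Node::Leaf(_, value)) => assert_eq!(value, &b"value"[..]),
        _ => panic!("expected a leaf node"),
    }
}
```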
-use keccak::keccak; -use ethereum_types::H256; use bytes::Bytes; /// A record of a visited node. #[derive(PartialEq, Eq, Debug, Clone)] -pub struct Record { +pub struct Record { /// The depth of this node. pub depth: u32, @@ -30,23 +28,23 @@ pub struct Record { pub data: Bytes, /// The hash of the data. - pub hash: H256, + pub hash: HO, } /// Records trie nodes as they pass it. #[derive(Debug)] -pub struct Recorder { - nodes: Vec, +pub struct Recorder { + nodes: Vec>, min_depth: u32, } -impl Default for Recorder { +impl Default for Recorder { fn default() -> Self { Recorder::new() } } -impl Recorder { +impl Recorder { /// Create a new `Recorder` which records all given nodes. #[inline] pub fn new() -> Self { @@ -62,9 +60,7 @@ impl Recorder { } /// Record a visited node, given its hash, data, and depth. - pub fn record(&mut self, hash: &H256, data: &[u8], depth: u32) { - debug_assert_eq!(keccak(data), *hash); - + pub fn record(&mut self, hash: &HO, data: &[u8], depth: u32) { if depth >= self.min_depth { self.nodes.push(Record { depth: depth, @@ -75,7 +71,7 @@ impl Recorder { } /// Drain all visited records. - pub fn drain(&mut self) -> Vec { + pub fn drain(&mut self) -> Vec> { ::std::mem::replace(&mut self.nodes, Vec::new()) } } @@ -83,11 +79,13 @@ impl Recorder { #[cfg(test)] mod tests { use super::*; + use keccak::keccak; + use keccak_hasher::KeccakHasher; use ethereum_types::H256; #[test] fn basic_recorder() { - let mut basic = Recorder::new(); + let mut basic = Recorder::::new(); let node1 = vec![1, 2, 3, 4]; let node2 = vec![4, 5, 6, 7, 8, 9, 10]; @@ -105,15 +103,16 @@ mod tests { let record2 = Record { data: node2, hash: hash2, - depth: 456 + depth: 456, }; + assert_eq!(basic.drain(), vec![record1, record2]); } #[test] fn basic_recorder_min_depth() { - let mut basic = Recorder::with_depth(400); + let mut basic = Recorder::::with_depth(400); let node1 = vec![1, 2, 3, 4]; let node2 = vec![4, 5, 6, 7, 8, 9, 10]; @@ -136,10 +135,11 @@ mod tests { #[test] fn trie_record() { - use super::super::{TrieDB, TrieDBMut, Trie, TrieMut}; + use ethtrie::trie::{Trie, TrieMut, Recorder}; use memorydb::MemoryDB; + use ethtrie::{TrieDB, TrieDBMut}; - let mut db = MemoryDB::new(); + let mut db = MemoryDB::::new(); let mut root = H256::default(); @@ -157,7 +157,7 @@ mod tests { } let trie = TrieDB::new(&db, &root).unwrap(); - let mut recorder = Recorder::new(); + let mut recorder = Recorder::::new(); trie.get_with(b"pirate", &mut recorder).unwrap().unwrap(); diff --git a/util/patricia_trie/src/sectriedb.rs b/util/patricia_trie/src/sectriedb.rs index c8d5ec0ec..a340de947 100644 --- a/util/patricia_trie/src/sectriedb.rs +++ b/util/patricia_trie/src/sectriedb.rs @@ -14,71 +14,87 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use ethereum_types::H256; -use keccak::keccak; -use hashdb::HashDB; +use hashdb::{HashDB, Hasher}; use super::triedb::TrieDB; -use super::{Trie, TrieItem, TrieIterator, Query}; +use super::{Result, Trie, TrieItem, TrieIterator, Query}; +use node_codec::NodeCodec; /// A `Trie` implementation which hashes keys and uses a generic `HashDB` backing database. /// /// Use it as a `Trie` trait object. You can use `raw()` to get the backing `TrieDB` object. 
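With `Recorder` generic over the hash output, proof recording looks like this; a sketch mirroring the `trie_record` test above, assuming `Recorder<H256>` is the right instantiation for the Keccak trie:

```rust
// Sketch only: assumes the crates/APIs introduced in this PR.
extern crate ethereum_types;
extern crate keccak_hasher;
extern crate memorydb;
extern crate patricia_trie as trie;
extern crate patricia_trie_ethereum as ethtrie;

use ethereum_types::H256;
use keccak_hasher::KeccakHasher;
use memorydb::MemoryDB;
use trie::{Trie, TrieMut, Recorder};
use ethtrie::{TrieDB, TrieDBMut};

fn main() {
    let mut memdb = MemoryDB::<KeccakHasher>::new();
    let mut root = H256::new();
    {
        let mut t = TrieDBMut::new(&mut memdb, &mut root);
        t.insert(b"dog", b"puppy").unwrap();
        t.insert(b"doge", b"coin").unwrap();
    }
    let t = TrieDB::new(&memdb, &root).unwrap();

    // Passing `&mut Recorder` as the Query records every node visited on the
    // way to the value: the raw material for a Merkle proof.
    let mut recorder = Recorder::<H256>::new();
    t.get_with(b"dog", &mut recorder).unwrap().unwrap();
    for record in recorder.drain() {
        println!("depth {}: node {:?} ({} bytes)", record.depth, record.hash, record.data.len());
    }
}
```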
-pub struct SecTrieDB<'db> { - raw: TrieDB<'db> +pub struct SecTrieDB<'db, H, C> +where + H: Hasher + 'db, + C: NodeCodec +{ + raw: TrieDB<'db, H, C> } -impl<'db> SecTrieDB<'db> { +impl<'db, H, C> SecTrieDB<'db, H, C> +where + H: Hasher, + C: NodeCodec +{ /// Create a new trie with the backing database `db` and empty `root` /// /// Initialise to the state entailed by the genesis block. /// This guarantees the trie is built correctly. /// Returns an error if root does not exist. - pub fn new(db: &'db HashDB, root: &'db H256) -> super::Result { + pub fn new(db: &'db HashDB, root: &'db H::Out) -> Result { Ok(SecTrieDB { raw: TrieDB::new(db, root)? }) } /// Get a reference to the underlying raw `TrieDB` struct. - pub fn raw(&self) -> &TrieDB { + pub fn raw(&self) -> &TrieDB { &self.raw } /// Get a mutable reference to the underlying raw `TrieDB` struct. - pub fn raw_mut(&mut self) -> &mut TrieDB<'db> { + pub fn raw_mut(&mut self) -> &mut TrieDB<'db, H, C> { &mut self.raw } } -impl<'db> Trie for SecTrieDB<'db> { - fn iter<'a>(&'a self) -> super::Result + 'a>> { - TrieDB::iter(&self.raw) +impl<'db, H, C> Trie for SecTrieDB<'db, H, C> +where + H: Hasher, + C: NodeCodec +{ + fn root(&self) -> &H::Out { self.raw.root() } + + fn contains(&self, key: &[u8]) -> Result { + self.raw.contains(H::hash(key).as_ref()) } - fn root(&self) -> &H256 { self.raw.root() } - - fn contains(&self, key: &[u8]) -> super::Result { - self.raw.contains(&keccak(key)) - } - - fn get_with<'a, 'key, Q: Query>(&'a self, key: &'key [u8], query: Q) -> super::Result> + fn get_with<'a, 'key, Q: Query>(&'a self, key: &'key [u8], query: Q) -> Result, H::Out, C::Error> where 'a: 'key { - self.raw.get_with(&keccak(key), query) + self.raw.get_with(H::hash(key).as_ref(), query) + } + + fn iter<'a>(&'a self) -> Result> + 'a>, H::Out, C::Error> { + TrieDB::iter(&self.raw) } } -#[test] -fn trie_to_sectrie() { +#[cfg(test)] +mod test { use memorydb::MemoryDB; use hashdb::DBValue; - use super::triedbmut::TrieDBMut; - use super::TrieMut; + use keccak; + use keccak_hasher::KeccakHasher; + use ethtrie::{TrieDBMut, SecTrieDB, trie::{Trie, TrieMut}}; + use ethereum_types::H256; - let mut memdb = MemoryDB::new(); - let mut root = H256::default(); - { - let mut t = TrieDBMut::new(&mut memdb, &mut root); - t.insert(&keccak(&[0x01u8, 0x23]), &[0x01u8, 0x23]).unwrap(); + #[test] + fn trie_to_sectrie() { + let mut db = MemoryDB::::new(); + let mut root = H256::new(); + { + let mut t = TrieDBMut::new(&mut db, &mut root); + t.insert(&keccak::keccak(&[0x01u8, 0x23]), &[0x01u8, 0x23]).unwrap(); + } + let t = SecTrieDB::new(&db, &root).unwrap(); + assert_eq!(t.get(&[0x01u8, 0x23]).unwrap().unwrap(), DBValue::from_slice(&[0x01u8, 0x23])); } - let t = SecTrieDB::new(&memdb, &root).unwrap(); - assert_eq!(t.get(&[0x01u8, 0x23]).unwrap().unwrap(), DBValue::from_slice(&[0x01u8, 0x23])); -} +} \ No newline at end of file diff --git a/util/patricia_trie/src/sectriedbmut.rs b/util/patricia_trie/src/sectriedbmut.rs index 335fb2f18..8750c2dd5 100644 --- a/util/patricia_trie/src/sectriedbmut.rs +++ b/util/patricia_trie/src/sectriedbmut.rs @@ -14,43 +14,53 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
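The secure wrappers now hash keys through the generic `Hasher` rather than calling `keccak` directly, with unchanged observable behaviour; a sketch mirroring the `trie_to_sectrie` test above, crate names assumed from this patch:

```rust
// Sketch only: assumes the crates/APIs introduced in this PR.
extern crate ethereum_types;
extern crate keccak_hash;
extern crate keccak_hasher;
extern crate memorydb;
extern crate patricia_trie as trie;
extern crate patricia_trie_ethereum as ethtrie;

use ethereum_types::H256;
use keccak_hash::keccak;
use keccak_hasher::KeccakHasher;
use memorydb::MemoryDB;
use trie::{Trie, TrieMut};
use ethtrie::{SecTrieDBMut, TrieDB};

fn main() {
    let mut memdb = MemoryDB::<KeccakHasher>::new();
    let mut root = H256::new();
    {
        // Insert under the plain key; SecTrieDBMut hashes it before storing.
        let mut t = SecTrieDBMut::new(&mut memdb, &mut root);
        t.insert(b"secure key", b"value").unwrap();
    }
    // The same entry is visible through a plain TrieDB under keccak(key).
    let t = TrieDB::new(&memdb, &root).unwrap();
    assert!(t.contains(&keccak(b"secure key")).unwrap());
}
```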
-use ethereum_types::H256; -use keccak::keccak; -use hashdb::{HashDB, DBValue}; -use super::triedbmut::TrieDBMut; -use super::TrieMut; +use hashdb::{HashDB, DBValue, Hasher}; +use super::{Result, TrieMut, TrieDBMut}; +use node_codec::NodeCodec; /// A mutable `Trie` implementation which hashes keys and uses a generic `HashDB` backing database. /// /// Use it as a `Trie` or `TrieMut` trait object. You can use `raw()` to get the backing `TrieDBMut` object. -pub struct SecTrieDBMut<'db> { - raw: TrieDBMut<'db> +pub struct SecTrieDBMut<'db, H, C> +where + H: Hasher + 'db, + C: NodeCodec +{ + raw: TrieDBMut<'db, H, C> } -impl<'db> SecTrieDBMut<'db> { +impl<'db, H, C> SecTrieDBMut<'db, H, C> +where + H: Hasher, + C: NodeCodec +{ /// Create a new trie with the backing database `db` and empty `root` /// Initialise to the state entailed by the genesis block. /// This guarantees the trie is built correctly. - pub fn new(db: &'db mut HashDB, root: &'db mut H256) -> Self { + pub fn new(db: &'db mut HashDB, root: &'db mut H::Out) -> Self { SecTrieDBMut { raw: TrieDBMut::new(db, root) } } /// Create a new trie with the backing database `db` and `root`. /// /// Returns an error if root does not exist. - pub fn from_existing(db: &'db mut HashDB, root: &'db mut H256) -> super::Result { + pub fn from_existing(db: &'db mut HashDB, root: &'db mut H::Out) -> Result { Ok(SecTrieDBMut { raw: TrieDBMut::from_existing(db, root)? }) } /// Get the backing database. - pub fn db(&self) -> &HashDB { self.raw.db() } + pub fn db(&self) -> &HashDB { self.raw.db() } /// Get the backing database. - pub fn db_mut(&mut self) -> &mut HashDB { self.raw.db_mut() } + pub fn db_mut(&mut self) -> &mut HashDB { self.raw.db_mut() } } -impl<'db> TrieMut for SecTrieDBMut<'db> { - fn root(&mut self) -> &H256 { +impl<'db, H, C> TrieMut for SecTrieDBMut<'db, H, C> +where + H: Hasher, + C: NodeCodec +{ + fn root(&mut self) -> &H::Out { self.raw.root() } @@ -58,37 +68,43 @@ impl<'db> TrieMut for SecTrieDBMut<'db> { self.raw.is_empty() } - fn contains(&self, key: &[u8]) -> super::Result { - self.raw.contains(&keccak(key)) + fn contains(&self, key: &[u8]) -> Result { + self.raw.contains(&H::hash(key).as_ref()) } - fn get<'a, 'key>(&'a self, key: &'key [u8]) -> super::Result> + fn get<'a, 'key>(&'a self, key: &'key [u8]) -> Result, H::Out, C::Error> where 'a: 'key { - self.raw.get(&keccak(key)) + self.raw.get(&H::hash(key).as_ref()) } - fn insert(&mut self, key: &[u8], value: &[u8]) -> super::Result> { - self.raw.insert(&keccak(key), value) + fn insert(&mut self, key: &[u8], value: &[u8]) -> Result, H::Out, C::Error> { + self.raw.insert(&H::hash(key).as_ref(), value) } - fn remove(&mut self, key: &[u8]) -> super::Result> { - self.raw.remove(&keccak(key)) + fn remove(&mut self, key: &[u8]) -> Result, H::Out, C::Error> { + self.raw.remove(&H::hash(key).as_ref()) } } -#[test] -fn sectrie_to_trie() { - use memorydb::*; - use super::triedb::*; - use super::Trie; +#[cfg(test)] +mod test { + use memorydb::MemoryDB; + use hashdb::DBValue; + use keccak; + use keccak_hasher::KeccakHasher; + use ethtrie::{TrieDB, SecTrieDBMut, trie::{Trie, TrieMut}}; + use ethereum_types::H256; - let mut memdb = MemoryDB::new(); - let mut root = H256::default(); - { - let mut t = SecTrieDBMut::new(&mut memdb, &mut root); - t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]).unwrap(); + #[test] + fn sectrie_to_trie() { + let mut memdb = MemoryDB::::new(); + let mut root = H256::new(); + { + let mut t = SecTrieDBMut::new(&mut memdb, &mut root); + t.insert(&[0x01u8, 0x23], &[0x01u8, 
0x23]).unwrap(); + } + let t = TrieDB::new(&memdb, &root).unwrap(); + assert_eq!(t.get(&keccak::keccak(&[0x01u8, 0x23])).unwrap().unwrap(), DBValue::from_slice(&[0x01u8, 0x23])); } - let t = TrieDB::new(&memdb, &root).unwrap(); - assert_eq!(t.get(&keccak(&[0x01u8, 0x23])).unwrap().unwrap(), DBValue::from_slice(&[0x01u8, 0x23])); } diff --git a/util/patricia_trie/src/triedb.rs b/util/patricia_trie/src/triedb.rs index 65ce3caba..bf88ab2ec 100644 --- a/util/patricia_trie/src/triedb.rs +++ b/util/patricia_trie/src/triedb.rs @@ -18,12 +18,15 @@ use std::fmt; use hashdb::*; use nibbleslice::NibbleSlice; use super::node::{Node, OwnedNode}; +use node_codec::NodeCodec; use super::lookup::Lookup; -use super::{Trie, TrieItem, TrieError, TrieIterator, Query}; -use ethereum_types::H256; +use super::{Result, Trie, TrieItem, TrieError, TrieIterator, Query}; use bytes::Bytes; +use std::marker::PhantomData; -/// A `Trie` implementation using a generic `HashDB` backing database. +/// A `Trie` implementation using a generic `HashDB` backing database, a `Hasher` +/// implementation to generate keys and a `NodeCodec` implementation to encode/decode +/// the nodes. /// /// Use it as a `Trie` trait object. You can use `db()` to get the backing database object. /// Use `get` and `contains` to query values associated with keys in the trie. @@ -31,17 +34,22 @@ use bytes::Bytes; /// # Example /// ``` /// extern crate patricia_trie as trie; +/// extern crate patricia_trie_ethereum as ethtrie; /// extern crate hashdb; +/// extern crate keccak_hasher; /// extern crate memorydb; /// extern crate ethereum_types; /// /// use trie::*; /// use hashdb::*; +/// use keccak_hasher::KeccakHasher; /// use memorydb::*; /// use ethereum_types::H256; +/// use ethtrie::{TrieDB, TrieDBMut}; +/// /// /// fn main() { -/// let mut memdb = MemoryDB::new(); +/// let mut memdb = MemoryDB::::new(); /// let mut root = H256::new(); /// TrieDBMut::new(&mut memdb, &mut root).insert(b"foo", b"bar").unwrap(); /// let t = TrieDB::new(&memdb, &root).unwrap(); @@ -49,35 +57,38 @@ use bytes::Bytes; /// assert_eq!(t.get(b"foo").unwrap().unwrap(), DBValue::from_slice(b"bar")); /// } /// ``` -pub struct TrieDB<'db> { - db: &'db HashDB, - root: &'db H256, +pub struct TrieDB<'db, H, C> +where + H: Hasher + 'db, + C: NodeCodec +{ + db: &'db HashDB, + root: &'db H::Out, /// The number of hashes performed so far in operations on this trie. hash_count: usize, + codec_marker: PhantomData, } -impl<'db> TrieDB<'db> { +impl<'db, H, C> TrieDB<'db, H, C> +where + H: Hasher, + C: NodeCodec +{ /// Create a new trie with the backing database `db` and `root` /// Returns an error if `root` does not exist - pub fn new(db: &'db HashDB, root: &'db H256) -> super::Result { + pub fn new(db: &'db HashDB, root: &'db H::Out) -> Result { if !db.contains(root) { Err(Box::new(TrieError::InvalidStateRoot(*root))) } else { - Ok(TrieDB { - db: db, - root: root, - hash_count: 0 - }) + Ok(TrieDB {db, root, hash_count: 0, codec_marker: PhantomData}) } } /// Get the backing database. - pub fn db(&'db self) -> &'db HashDB { - self.db - } + pub fn db(&'db self) -> &'db HashDB { self.db } /// Get the data of the root node. - fn root_data(&self) -> super::Result { + fn root_data(&self) -> Result { self.db .get(self.root) .ok_or_else(|| Box::new(TrieError::InvalidStateRoot(*self.root))) @@ -86,49 +97,57 @@ impl<'db> TrieDB<'db> { /// Given some node-describing data `node`, return the actual node RLP. 
/// This could be a simple identity operation in the case that the node is sufficiently small, but /// may require a database lookup. - fn get_raw_or_lookup(&'db self, node: &'db [u8]) -> super::Result { - match Node::try_decode_hash(node) { + fn get_raw_or_lookup(&'db self, node: &'db [u8]) -> Result { + match C::try_decode_hash(node) { Some(key) => { self.db.get(&key).ok_or_else(|| Box::new(TrieError::IncompleteDatabase(key))) } None => Ok(DBValue::from_slice(node)) } } - - /// Create a node from raw rlp bytes, assumes valid rlp because encoded locally - fn decode_node(node: &'db [u8]) -> Node { - Node::decoded(node).expect("rlp read from db; qed") - } } -impl<'db> Trie for TrieDB<'db> { - fn iter<'a>(&'a self) -> super::Result + 'a>> { - TrieDBIterator::new(self).map(|iter| Box::new(iter) as Box<_>) - } +impl<'db, H, C> Trie for TrieDB<'db, H, C> +where + H: Hasher, + C: NodeCodec +{ + fn root(&self) -> &H::Out { self.root } - fn root(&self) -> &H256 { self.root } - - fn get_with<'a, 'key, Q: Query>(&'a self, key: &'key [u8], query: Q) -> super::Result> + fn get_with<'a, 'key, Q: Query>(&'a self, key: &'key [u8], query: Q) -> Result, H::Out, C::Error> where 'a: 'key { Lookup { db: self.db, query: query, hash: self.root.clone(), + marker: PhantomData::, }.look_up(NibbleSlice::new(key)) } + + fn iter<'a>(&'a self) -> Result> + 'a>, H::Out, C::Error> { + TrieDBIterator::new(self).map(|iter| Box::new(iter) as Box<_>) + } } // This is for pretty debug output only -struct TrieAwareDebugNode<'db, 'a> { - trie: &'db TrieDB<'db>, +struct TrieAwareDebugNode<'db, 'a, H, C> +where + H: Hasher + 'db, + C: NodeCodec + 'db +{ + trie: &'db TrieDB<'db, H, C>, key: &'a[u8] } -impl<'db, 'a> fmt::Debug for TrieAwareDebugNode<'db, 'a> { +impl<'db, 'a, H, C> fmt::Debug for TrieAwareDebugNode<'db, 'a, H, C> +where + H: Hasher, + C: NodeCodec +{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if let Ok(node) = self.trie.get_raw_or_lookup(self.key) { - match Node::decoded(&node) { + match C::decode(&node) { Ok(Node::Leaf(slice, value)) => f.debug_struct("Node::Leaf") .field("slice", &slice) .field("value", &value) @@ -138,7 +157,7 @@ impl<'db, 'a> fmt::Debug for TrieAwareDebugNode<'db, 'a> { .field("item", &TrieAwareDebugNode{trie: self.trie, key: item}) .finish(), Ok(Node::Branch(ref nodes, ref value)) => { - let nodes: Vec = nodes.into_iter().map(|n| TrieAwareDebugNode{trie: self.trie, key: n} ).collect(); + let nodes: Vec> = nodes.into_iter().map(|n| TrieAwareDebugNode{trie: self.trie, key: n} ).collect(); f.debug_struct("Node::Branch") .field("nodes", &nodes) .field("value", &value) @@ -160,8 +179,11 @@ impl<'db, 'a> fmt::Debug for TrieAwareDebugNode<'db, 'a> { } } - -impl<'db> fmt::Debug for TrieDB<'db> { +impl<'db, H, C> fmt::Debug for TrieDB<'db, H, C> +where + H: Hasher, + C: NodeCodec +{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let root_rlp = self.db.get(self.root).expect("Trie root not found!"); f.debug_struct("TrieDB") @@ -202,29 +224,24 @@ impl Crumb { } /// Iterator for going through all values in the trie. -pub struct TrieDBIterator<'a> { - db: &'a TrieDB<'a>, +pub struct TrieDBIterator<'a, H: Hasher + 'a, C: NodeCodec + 'a> { + db: &'a TrieDB<'a, H, C>, trail: Vec, key_nibbles: Bytes, } -impl<'a> TrieDBIterator<'a> { +impl<'a, H: Hasher, C: NodeCodec> TrieDBIterator<'a, H, C> { /// Create a new iterator. 
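The lookups above now go through the `C: NodeCodec<H>` parameter (`C::decode`, `C::try_decode_hash`) instead of hard-coded RLP handling. The trait itself is defined in the new `node_codec` module, which is not part of this excerpt; the sketch below is only an approximate shape inferred from the call sites in this patch and may differ from the real definition in detail.

// Assumes Hasher (from hashdb), Node, ChildReference, DBValue and ElasticArray1024
// are in scope; signatures are reconstructed from usage, not copied from node_codec.rs.
pub trait NodeCodec<H: Hasher>: Sized {
    /// Codec-specific decoding error, e.g. rlp::DecoderError for the RLP codec.
    type Error: ::std::error::Error;
    /// Hash of the codec's encoded empty node (KECCAK_NULL_RLP for Keccak/RLP).
    const HASHED_NULL_NODE: H::Out;
    /// Decode bytes into a borrowed `Node` view (std Result, not the trie Result alias).
    fn decode(data: &[u8]) -> ::std::result::Result<Node, Self::Error>;
    /// If `data` encodes a bare hash, return it; otherwise the node is inline.
    fn try_decode_hash(data: &[u8]) -> Option<H::Out>;
    /// Is `data` the codec's empty-node encoding?
    fn is_empty_node(data: &[u8]) -> bool;
    // Encoders used by TrieDBMut when committing nodes.
    fn empty_node() -> ElasticArray1024<u8>;
    fn leaf_node(partial: &[u8], value: &[u8]) -> ElasticArray1024<u8>;
    fn ext_node(partial: &[u8], child: ChildReference<H::Out>) -> ElasticArray1024<u8>;
    fn branch_node<I>(children: I, value: Option<DBValue>) -> ElasticArray1024<u8>
        where I: IntoIterator<Item = Option<ChildReference<H::Out>>>;
}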
- pub fn new(db: &'a TrieDB) -> super::Result> { - let mut r = TrieDBIterator { - db: db, - trail: vec![], - key_nibbles: Vec::new(), - }; - + pub fn new(db: &'a TrieDB) -> Result, H::Out, C::Error> { + let mut r = TrieDBIterator { db, trail: Vec::with_capacity(8), key_nibbles: Vec::with_capacity(64) }; db.root_data().and_then(|root| r.descend(&root))?; Ok(r) } - fn seek<'key>(&mut self, mut node_data: DBValue, mut key: NibbleSlice<'key>) -> super::Result<()> { + fn seek<'key>(&mut self, mut node_data: DBValue, mut key: NibbleSlice<'key>) -> Result<(), H::Out, C::Error> { loop { let (data, mid) = { - let node = TrieDB::decode_node(&node_data); + let node = C::decode(&node_data).expect("encoded data read from db; qed"); match node { Node::Leaf(slice, _) => { if slice == key { @@ -285,17 +302,15 @@ impl<'a> TrieDBIterator<'a> { } /// Descend into a payload. - fn descend(&mut self, d: &[u8]) -> super::Result<()> { - let node = TrieDB::decode_node(&self.db.get_raw_or_lookup(d)?).into(); - Ok(self.descend_into_node(node)) + fn descend(&mut self, d: &[u8]) -> Result<(), H::Out, C::Error> { + let node_data = &self.db.get_raw_or_lookup(d)?; + let node = C::decode(&node_data).expect("encoded node read from db; qed"); + Ok(self.descend_into_node(node.into())) } /// Descend into a payload. fn descend_into_node(&mut self, node: OwnedNode) { - self.trail.push(Crumb { - status: Status::Entering, - node: node, - }); + self.trail.push(Crumb { status: Status::Entering, node }); match &self.trail.last().expect("just pushed item; qed").node { &OwnedNode::Leaf(ref n, _) | &OwnedNode::Extension(ref n, _) => { self.key_nibbles.extend((0..n.len()).map(|i| n.at(i))); @@ -319,26 +334,25 @@ impl<'a> TrieDBIterator<'a> { } } -impl<'a> TrieIterator for TrieDBIterator<'a> { +impl<'a, H: Hasher, C: NodeCodec> TrieIterator for TrieDBIterator<'a, H, C> { /// Position the iterator on the first element with key >= `key` - fn seek(&mut self, key: &[u8]) -> super::Result<()> { + fn seek(&mut self, key: &[u8]) -> Result<(), H::Out, C::Error> { self.trail.clear(); self.key_nibbles.clear(); let root_rlp = self.db.root_data()?; - self.seek(root_rlp, NibbleSlice::new(key)) + self.seek(root_rlp, NibbleSlice::new(key.as_ref())) } } -impl<'a> Iterator for TrieDBIterator<'a> { - type Item = TrieItem<'a>; +impl<'a, H: Hasher, C: NodeCodec> Iterator for TrieDBIterator<'a, H, C> { + type Item = TrieItem<'a, H::Out, C::Error>; fn next(&mut self) -> Option { - enum IterStep { + enum IterStep { Continue, PopTrail, - Descend(super::Result), + Descend(Result), } - loop { let iter_step = { self.trail.last_mut()?.increment(); @@ -359,7 +373,9 @@ impl<'a> Iterator for TrieDBIterator<'a> { (Status::At, &OwnedNode::Leaf(_, ref v)) | (Status::At, &OwnedNode::Branch(_, Some(ref v))) => { return Some(Ok((self.key(), v.clone()))); }, - (Status::At, &OwnedNode::Extension(_, ref d)) => IterStep::Descend(self.db.get_raw_or_lookup(&*d)), + (Status::At, &OwnedNode::Extension(_, ref d)) => { + IterStep::Descend::(self.db.get_raw_or_lookup(&*d)) + }, (Status::At, &OwnedNode::Branch(_, _)) => IterStep::Continue, (Status::AtChild(i), &OwnedNode::Branch(ref children, _)) if children[i].len() > 0 => { match i { @@ -367,7 +383,7 @@ impl<'a> Iterator for TrieDBIterator<'a> { i => *self.key_nibbles.last_mut() .expect("pushed as 0; moves sequentially; removed afterwards; qed") = i as u8, } - IterStep::Descend(self.db.get_raw_or_lookup(&*children[i])) + IterStep::Descend::(self.db.get_raw_or_lookup(&*children[i])) }, (Status::AtChild(i), &OwnedNode::Branch(_, _)) => 
{ if i == 0 { @@ -383,10 +399,11 @@ impl<'a> Iterator for TrieDBIterator<'a> { IterStep::PopTrail => { self.trail.pop(); }, - IterStep::Descend(Ok(d)) => { - self.descend_into_node(TrieDB::decode_node(&d).into()) + IterStep::Descend::(Ok(d)) => { + let node = C::decode(&d).expect("encoded data read from db; qed"); + self.descend_into_node(node.into()) }, - IterStep::Descend(Err(e)) => { + IterStep::Descend::(Err(e)) => { return Some(Err(e)) } IterStep::Continue => {}, @@ -395,115 +412,106 @@ impl<'a> Iterator for TrieDBIterator<'a> { } } -#[test] -fn iterator() { - use memorydb::*; - use super::TrieMut; - use super::triedbmut::*; +#[cfg(test)] +mod tests { + use hashdb::DBValue; + use keccak_hasher::KeccakHasher; + use memorydb::MemoryDB; + use ethtrie::{TrieDB, TrieDBMut, RlpCodec, trie::{Trie, TrieMut, Lookup}}; + use ethereum_types::H256; - let d = vec![ DBValue::from_slice(b"A"), DBValue::from_slice(b"AA"), DBValue::from_slice(b"AB"), DBValue::from_slice(b"B") ]; + #[test] + fn iterator() { + let d = vec![DBValue::from_slice(b"A"), DBValue::from_slice(b"AA"), DBValue::from_slice(b"AB"), DBValue::from_slice(b"B")]; - let mut memdb = MemoryDB::new(); - let mut root = H256::new(); - { - let mut t = TrieDBMut::new(&mut memdb, &mut root); - for x in &d { - t.insert(x, x).unwrap(); + let mut memdb = MemoryDB::::new(); + let mut root = H256::new(); + { + let mut t = TrieDBMut::new(&mut memdb, &mut root); + for x in &d { + t.insert(x, x).unwrap(); + } } + + let t = TrieDB::new(&memdb, &root).unwrap(); + assert_eq!(d.iter().map(|i| i.clone().into_vec()).collect::>(), t.iter().unwrap().map(|x| x.unwrap().0).collect::>()); + assert_eq!(d, t.iter().unwrap().map(|x| x.unwrap().1).collect::>()); } - let t = TrieDB::new(&memdb, &root).unwrap(); - assert_eq!(d.iter().map(|i| i.clone().into_vec()).collect::>(), t.iter().unwrap().map(|x| x.unwrap().0).collect::>()); - assert_eq!(d, t.iter().unwrap().map(|x| x.unwrap().1).collect::>()); -} - -#[test] -fn iterator_seek() { - use memorydb::*; - use super::TrieMut; - use super::triedbmut::*; - - let d = vec![ DBValue::from_slice(b"A"), DBValue::from_slice(b"AA"), DBValue::from_slice(b"AB"), DBValue::from_slice(b"B") ]; - - let mut memdb = MemoryDB::new(); - let mut root = H256::new(); - { - let mut t = TrieDBMut::new(&mut memdb, &mut root); - for x in &d { - t.insert(x, x).unwrap(); + #[test] + fn iterator_seek() { + let d = vec![ DBValue::from_slice(b"A"), DBValue::from_slice(b"AA"), DBValue::from_slice(b"AB"), DBValue::from_slice(b"B") ]; + + let mut memdb = MemoryDB::::new(); + let mut root = H256::new(); + { + let mut t = TrieDBMut::new(&mut memdb, &mut root); + for x in &d { + t.insert(x, x).unwrap(); + } } + + let t = TrieDB::new(&memdb, &root).unwrap(); + let mut iter = t.iter().unwrap(); + assert_eq!(iter.next().unwrap().unwrap(), (b"A".to_vec(), DBValue::from_slice(b"A"))); + iter.seek(b"!").unwrap(); + assert_eq!(d, iter.map(|x| x.unwrap().1).collect::>()); + let mut iter = t.iter().unwrap(); + iter.seek(b"A").unwrap(); + assert_eq!(&d[1..], &iter.map(|x| x.unwrap().1).collect::>()[..]); + let mut iter = t.iter().unwrap(); + iter.seek(b"AA").unwrap(); + assert_eq!(&d[2..], &iter.map(|x| x.unwrap().1).collect::>()[..]); + let mut iter = t.iter().unwrap(); + iter.seek(b"A!").unwrap(); + assert_eq!(&d[1..], &iter.map(|x| x.unwrap().1).collect::>()[..]); + let mut iter = t.iter().unwrap(); + iter.seek(b"AB").unwrap(); + assert_eq!(&d[3..], &iter.map(|x| x.unwrap().1).collect::>()[..]); + let mut iter = t.iter().unwrap(); + iter.seek(b"AB!").unwrap(); 
+ assert_eq!(&d[3..], &iter.map(|x| x.unwrap().1).collect::>()[..]); + let mut iter = t.iter().unwrap(); + iter.seek(b"B").unwrap(); + assert_eq!(&d[4..], &iter.map(|x| x.unwrap().1).collect::>()[..]); + let mut iter = t.iter().unwrap(); + iter.seek(b"C").unwrap(); + assert_eq!(&d[4..], &iter.map(|x| x.unwrap().1).collect::>()[..]); } - let t = TrieDB::new(&memdb, &root).unwrap(); - let mut iter = t.iter().unwrap(); - assert_eq!(iter.next(), Some(Ok((b"A".to_vec(), DBValue::from_slice(b"A"))))); - iter.seek(b"!").unwrap(); - assert_eq!(d, iter.map(|x| x.unwrap().1).collect::>()); - let mut iter = t.iter().unwrap(); - iter.seek(b"A").unwrap(); - assert_eq!(&d[1..], &iter.map(|x| x.unwrap().1).collect::>()[..]); - let mut iter = t.iter().unwrap(); - iter.seek(b"AA").unwrap(); - assert_eq!(&d[2..], &iter.map(|x| x.unwrap().1).collect::>()[..]); - let mut iter = t.iter().unwrap(); - iter.seek(b"A!").unwrap(); - assert_eq!(&d[1..], &iter.map(|x| x.unwrap().1).collect::>()[..]); - let mut iter = t.iter().unwrap(); - iter.seek(b"AB").unwrap(); - assert_eq!(&d[3..], &iter.map(|x| x.unwrap().1).collect::>()[..]); - let mut iter = t.iter().unwrap(); - iter.seek(b"AB!").unwrap(); - assert_eq!(&d[3..], &iter.map(|x| x.unwrap().1).collect::>()[..]); - let mut iter = t.iter().unwrap(); - iter.seek(b"B").unwrap(); - assert_eq!(&d[4..], &iter.map(|x| x.unwrap().1).collect::>()[..]); - let mut iter = t.iter().unwrap(); - iter.seek(b"C").unwrap(); - assert_eq!(&d[4..], &iter.map(|x| x.unwrap().1).collect::>()[..]); -} + #[test] + fn get_len() { + let mut memdb = MemoryDB::::new(); + let mut root = H256::new(); + { + let mut t = TrieDBMut::new(&mut memdb, &mut root); + t.insert(b"A", b"ABC").unwrap(); + t.insert(b"B", b"ABCBA").unwrap(); + } -#[test] -fn get_len() { - use memorydb::*; - use super::TrieMut; - use super::triedbmut::*; - - let mut memdb = MemoryDB::new(); - let mut root = H256::new(); - { - let mut t = TrieDBMut::new(&mut memdb, &mut root); - t.insert(b"A", b"ABC").unwrap(); - t.insert(b"B", b"ABCBA").unwrap(); + let t = TrieDB::new(&memdb, &root).unwrap(); + assert_eq!(t.get_with(b"A", |x: &[u8]| x.len()).unwrap(), Some(3)); + assert_eq!(t.get_with(b"B", |x: &[u8]| x.len()).unwrap(), Some(5)); + assert_eq!(t.get_with(b"C", |x: &[u8]| x.len()).unwrap(), None); } - let t = TrieDB::new(&memdb, &root).unwrap(); - assert_eq!(t.get_with(b"A", |x: &[u8]| x.len()), Ok(Some(3))); - assert_eq!(t.get_with(b"B", |x: &[u8]| x.len()), Ok(Some(5))); - assert_eq!(t.get_with(b"C", |x: &[u8]| x.len()), Ok(None)); -} + #[test] + fn debug_output_supports_pretty_print() { + let d = vec![ DBValue::from_slice(b"A"), DBValue::from_slice(b"AA"), DBValue::from_slice(b"AB"), DBValue::from_slice(b"B") ]; + let mut memdb = MemoryDB::::new(); + let mut root = H256::new(); + let root = { + let mut t = TrieDBMut::new(&mut memdb, &mut root); + for x in &d { + t.insert(x, x).unwrap(); + } + t.root().clone() + }; + let t = TrieDB::new(&memdb, &root).unwrap(); -#[test] -fn debug_output_supports_pretty_print() { - use memorydb::*; - use super::TrieMut; - use super::triedbmut::*; - - let d = vec![ DBValue::from_slice(b"A"), DBValue::from_slice(b"AA"), DBValue::from_slice(b"AB"), DBValue::from_slice(b"B") ]; - - let mut memdb = MemoryDB::new(); - let mut root = H256::new(); - let root = { - let mut t = TrieDBMut::new(&mut memdb, &mut root); - for x in &d { - t.insert(x, x).unwrap(); - } - t.root().clone() - }; - let t = TrieDB::new(&memdb, &root).unwrap(); - - assert_eq!(format!("{:?}", t), "TrieDB { hash_count: 0, root: 
Node::Extension { slice: 4, item: Node::Branch { nodes: [Node::Empty, Node::Branch { nodes: [Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Branch { nodes: [Node::Empty, Node::Leaf { slice: , value: [65, 65] }, Node::Leaf { slice: , value: [65, 66] }, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty], value: None }, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty], value: Some([65]) }, Node::Leaf { slice: , value: [66] }, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty], value: None } } }"); - assert_eq!(format!("{:#?}", t), + assert_eq!(format!("{:?}", t), "TrieDB { hash_count: 0, root: Node::Extension { slice: 4, item: Node::Branch { nodes: [Node::Empty, Node::Branch { nodes: [Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Branch { nodes: [Node::Empty, Node::Leaf { slice: , value: [65, 65] }, Node::Leaf { slice: , value: [65, 66] }, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty], value: None }, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty], value: Some([65]) }, Node::Leaf { slice: , value: [66] }, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty, Node::Empty], value: None } } }"); + assert_eq!(format!("{:#?}", t), "TrieDB { hash_count: 0, root: Node::Extension { @@ -592,29 +600,29 @@ fn debug_output_supports_pretty_print() { } } }"); -} - -#[test] -fn test_lookup_with_corrupt_data_returns_decoder_error() { - use memorydb::*; - use super::TrieMut; - use super::triedbmut::*; - use rlp; - use ethereum_types::H512; - - let mut memdb = MemoryDB::new(); - let mut root = H256::new(); - { - let mut t = TrieDBMut::new(&mut memdb, &mut root); - t.insert(b"A", b"ABC").unwrap(); - t.insert(b"B", b"ABCBA").unwrap(); } - let t = TrieDB::new(&memdb, &root).unwrap(); + #[test] + fn test_lookup_with_corrupt_data_returns_decoder_error() { + use rlp; + use ethereum_types::H512; + use std::marker::PhantomData; + use ethtrie::trie::NibbleSlice; - // query for an invalid data type to trigger an error - let q = rlp::decode::; - let lookup = Lookup{ db: t.db, query: q, hash: root }; - let query_result = lookup.look_up(NibbleSlice::new(b"A")); - assert_eq!(query_result.unwrap().unwrap().unwrap_err(), rlp::DecoderError::RlpIsTooShort); -} + let mut memdb = MemoryDB::::new(); + let mut root = H256::new(); + { + let mut t = TrieDBMut::new(&mut memdb, &mut root); + t.insert(b"A", b"ABC").unwrap(); + t.insert(b"B", b"ABCBA").unwrap(); + } + + let t = TrieDB::new(&memdb, &root).unwrap(); + + // query for an invalid data type to trigger an error + let q = rlp::decode::; + let lookup = Lookup::<_, RlpCodec, _>{ db: t.db(), query: q, hash: root, marker: PhantomData }; + let query_result = lookup.look_up(NibbleSlice::new(b"A")); + assert_eq!(query_result.unwrap().unwrap().unwrap_err(), rlp::DecoderError::RlpIsTooShort); + } +} \ No newline at end of file diff --git a/util/patricia_trie/src/triedbmut.rs b/util/patricia_trie/src/triedbmut.rs index 
994045eb3..f5c28bac3 100644 --- a/util/patricia_trie/src/triedbmut.rs +++ b/util/patricia_trie/src/triedbmut.rs @@ -16,23 +16,21 @@ //! In-memory trie representation. -use super::{TrieError, TrieMut}; +use super::{Result, TrieError, TrieMut}; use super::lookup::Lookup; -use super::node::Node as RlpNode; +use super::node::Node as EncodedNode; +use node_codec::NodeCodec; use super::node::NodeKey; -use hashdb::HashDB; use bytes::ToPretty; +use hashdb::{HashDB, Hasher, DBValue}; use nibbleslice::NibbleSlice; -use rlp::{Rlp, RlpStream}; -use hashdb::DBValue; +use elastic_array::ElasticArray1024; use std::collections::{HashSet, VecDeque}; +use std::marker::PhantomData; use std::mem; use std::ops::Index; -use ethereum_types::H256; -use elastic_array::ElasticArray1024; -use keccak::{KECCAK_NULL_RLP}; // For lookups into the Node storage buffer. // This is deliberately non-copyable. @@ -41,26 +39,20 @@ struct StorageHandle(usize); // Handles to nodes in the trie. #[derive(Debug)] -enum NodeHandle { +enum NodeHandle { /// Loaded into memory. InMemory(StorageHandle), /// Either a hash or an inline node - Hash(H256), + Hash(H::Out), } -impl From for NodeHandle { +impl From for NodeHandle { fn from(handle: StorageHandle) -> Self { NodeHandle::InMemory(handle) } } -impl From for NodeHandle { - fn from(hash: H256) -> Self { - NodeHandle::Hash(hash) - } -} - -fn empty_children() -> Box<[Option; 16]> { +fn empty_children() -> Box<[Option>; 16]> { Box::new([ None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, @@ -69,7 +61,7 @@ fn empty_children() -> Box<[Option; 16]> { /// Node types in the Trie. #[derive(Debug)] -enum Node { +enum Node { /// Empty node. Empty, /// A leaf node contains the end of a key and a value. @@ -80,36 +72,41 @@ enum Node { /// The shared portion is encoded from a `NibbleSlice` meaning it contains /// a flag indicating it is an extension. /// The child node is always a branch. - Extension(NodeKey, NodeHandle), + Extension(NodeKey, NodeHandle), /// A branch has up to 16 children and an optional value. - Branch(Box<[Option; 16]>, Option) + Branch(Box<[Option>; 16]>, Option) } -impl Node { +impl Node { // load an inline node into memory or get the hash to do the lookup later. - fn inline_or_hash(node: &[u8], db: &HashDB, storage: &mut NodeStorage) -> NodeHandle { - RlpNode::try_decode_hash(&node) + fn inline_or_hash(node: &[u8], db: &HashDB, storage: &mut NodeStorage) -> NodeHandle + where C: NodeCodec + { + C::try_decode_hash(&node) .map(NodeHandle::Hash) .unwrap_or_else(|| { - let child = Node::from_rlp(node, db, storage); + let child = Node::from_encoded::(node, db, storage); NodeHandle::InMemory(storage.alloc(Stored::New(child))) }) } - // decode a node from rlp without getting its children. - fn from_rlp(rlp: &[u8], db: &HashDB, storage: &mut NodeStorage) -> Self { - match RlpNode::decoded(rlp).expect("rlp read from db; qed") { - RlpNode::Empty => Node::Empty, - RlpNode::Leaf(k, v) => Node::Leaf(k.encoded(true), DBValue::from_slice(&v)), - RlpNode::Extension(key, cb) => { - Node::Extension(key.encoded(false), Self::inline_or_hash(cb, db, storage)) + // decode a node from encoded bytes without getting its children. 
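`inline_or_hash` above is where the codec decides whether a child slot carries a hash to chase in the `HashDB` or a node encoded inline. For the Keccak/RLP instantiation (`RlpCodec` in patricia-trie-ethereum) this amounts to checking for a 32-byte RLP data item; the function below is only a rough, hypothetical restatement of that check, not the codec's actual code:

extern crate rlp;
extern crate ethereum_types;

use rlp::Rlp;
use ethereum_types::H256;

// A 32-byte RLP data item is taken to be a hash reference; anything else is an inline node.
fn try_decode_hash(node: &[u8]) -> Option<H256> {
    let r = Rlp::new(node);
    if r.is_data() && r.data().map(|d| d.len() == 32).unwrap_or(false) {
        r.as_val().ok()
    } else {
        None
    }
}

fn main() {
    // The RLP encoding of a 32-byte hash is a data item of length 32.
    let hash_rlp = rlp::encode(&H256::default());
    assert!(try_decode_hash(&hash_rlp).is_some());
    // A tiny inline node (here an RLP empty list) is not mistaken for a hash.
    assert!(try_decode_hash(&[0xc0]).is_none());
}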
+ fn from_encoded(data: &[u8], db: &HashDB, storage: &mut NodeStorage) -> Self + where C: NodeCodec + { + match C::decode(data).expect("encoded bytes read from db; qed") { + EncodedNode::Empty => Node::Empty, + EncodedNode::Leaf(k, v) => Node::Leaf(k.encoded(true), DBValue::from_slice(&v)), + EncodedNode::Extension(key, cb) => { + Node::Extension( + key.encoded(false), + Self::inline_or_hash::(cb, db, storage)) } - RlpNode::Branch(ref children_rlp, val) => { - let mut child = |i| { - let raw = children_rlp[i]; - let child_rlp = Rlp::new(raw); - if !child_rlp.is_empty() { - Some(Self::inline_or_hash(raw, db, storage)) + EncodedNode::Branch(ref encoded_children, val) => { + let mut child = |i:usize| { + let raw = encoded_children[i]; + if !C::is_empty_node(raw) { + Some(Self::inline_or_hash::(raw, db, storage)) } else { None } @@ -127,70 +124,51 @@ impl Node { } } - // encode a node to RLP // TODO: parallelize - fn into_rlp(self, mut child_cb: F) -> ElasticArray1024 - where F: FnMut(NodeHandle, &mut RlpStream) + fn into_encoded(self, mut child_cb: F) -> ElasticArray1024 + where + C: NodeCodec, + F: FnMut(NodeHandle) -> ChildReference { match self { - Node::Empty => { - let mut stream = RlpStream::new(); - stream.append_empty_data(); - stream.drain() - } - Node::Leaf(partial, value) => { - let mut stream = RlpStream::new_list(2); - stream.append(&&*partial); - stream.append(&&*value); - stream.drain() - } - Node::Extension(partial, child) => { - let mut stream = RlpStream::new_list(2); - stream.append(&&*partial); - child_cb(child, &mut stream); - stream.drain() - } + Node::Empty => C::empty_node(), + Node::Leaf(partial, value) => C::leaf_node(&partial, &value), + Node::Extension(partial, child) => C::ext_node(&partial, child_cb(child)), Node::Branch(mut children, value) => { - let mut stream = RlpStream::new_list(17); - for child in children.iter_mut().map(Option::take) { - if let Some(handle) = child { - child_cb(handle, &mut stream); - } else { - stream.append_empty_data(); - } - } - if let Some(value) = value { - stream.append(&&*value); - } else { - stream.append_empty_data(); - } - - stream.drain() + C::branch_node( + // map the `NodeHandle`s from the Branch to `ChildReferences` + children.iter_mut() + .map(Option::take) + .map(|maybe_child| + maybe_child.map(|child| child_cb(child)) + ), + value + ) } } } } // post-inspect action. -enum Action { +enum Action { // Replace a node with a new one. - Replace(Node), + Replace(Node), // Restore the original node. This trusts that the node is actually the original. - Restore(Node), + Restore(Node), // if it is a new node, just clears the storage. Delete, } // post-insert action. Same as action without delete -enum InsertAction { +enum InsertAction { // Replace a node with a new one. - Replace(Node), + Replace(Node), // Restore the original node. - Restore(Node), + Restore(Node), } -impl InsertAction { - fn into_action(self) -> Action { +impl InsertAction { + fn into_action(self) -> Action { match self { InsertAction::Replace(n) => Action::Replace(n), InsertAction::Restore(n) => Action::Restore(n), @@ -198,7 +176,7 @@ impl InsertAction { } // unwrap the node, disregarding replace or restore state. - fn unwrap_node(self) -> Node { + fn unwrap_node(self) -> Node { match self { InsertAction::Replace(n) | InsertAction::Restore(n) => n, } @@ -206,20 +184,26 @@ impl InsertAction { } // What kind of node is stored here. -enum Stored { +enum Stored { // A new node. - New(Node), + New(Node), // A cached node, loaded from the DB. 
- Cached(Node, H256), + Cached(Node, H::Out), +} + +/// Used to build a collection of child nodes from a collection of `NodeHandle`s +pub enum ChildReference { // `HO` is e.g. `H256`, i.e. the output of a `Hasher` + Hash(HO), + Inline(HO, usize), // usize is the length of the node data we store in the `H::Out` } /// Compact and cache-friendly storage for Trie nodes. -struct NodeStorage { - nodes: Vec, +struct NodeStorage { + nodes: Vec>, free_indices: VecDeque, } -impl NodeStorage { +impl NodeStorage { /// Create a new storage. fn empty() -> Self { NodeStorage { @@ -229,7 +213,7 @@ impl NodeStorage { } /// Allocate a new node in the storage. - fn alloc(&mut self, stored: Stored) -> StorageHandle { + fn alloc(&mut self, stored: Stored) -> StorageHandle { if let Some(idx) = self.free_indices.pop_front() { self.nodes[idx] = stored; StorageHandle(idx) @@ -240,7 +224,7 @@ impl NodeStorage { } /// Remove a node from the storage, consuming the handle and returning the node. - fn destroy(&mut self, handle: StorageHandle) -> Stored { + fn destroy(&mut self, handle: StorageHandle) -> Stored { let idx = handle.0; self.free_indices.push_back(idx); @@ -248,10 +232,10 @@ impl NodeStorage { } } -impl<'a> Index<&'a StorageHandle> for NodeStorage { - type Output = Node; +impl<'a, H: Hasher> Index<&'a StorageHandle> for NodeStorage { + type Output = Node; - fn index(&self, handle: &'a StorageHandle) -> &Node { + fn index(&self, handle: &'a StorageHandle) -> &Node { match self.nodes[handle.0] { Stored::New(ref node) => node, Stored::Cached(ref node, _) => node, @@ -268,19 +252,22 @@ impl<'a> Index<&'a StorageHandle> for NodeStorage { /// # Example /// ``` /// extern crate patricia_trie as trie; -/// extern crate keccak_hash; +/// extern crate patricia_trie_ethereum as ethtrie; /// extern crate hashdb; +/// extern crate keccak_hash; +/// extern crate keccak_hasher; /// extern crate memorydb; /// extern crate ethereum_types; /// /// use keccak_hash::KECCAK_NULL_RLP; -/// use trie::*; -/// use hashdb::*; +/// use ethtrie::{TrieDBMut, trie::TrieMut}; +/// use hashdb::DBValue; +/// use keccak_hasher::KeccakHasher; /// use memorydb::*; /// use ethereum_types::H256; /// /// fn main() { -/// let mut memdb = MemoryDB::new(); +/// let mut memdb = MemoryDB::::new(); /// let mut root = H256::new(); /// let mut t = TrieDBMut::new(&mut memdb, &mut root); /// assert!(t.is_empty()); @@ -292,22 +279,31 @@ impl<'a> Index<&'a StorageHandle> for NodeStorage { /// assert!(!t.contains(b"foo").unwrap()); /// } /// ``` -pub struct TrieDBMut<'a> { - storage: NodeStorage, - db: &'a mut HashDB, - root: &'a mut H256, - root_handle: NodeHandle, - death_row: HashSet, +pub struct TrieDBMut<'a, H, C> +where + H: Hasher + 'a, + C: NodeCodec +{ + storage: NodeStorage, + db: &'a mut HashDB, + root: &'a mut H::Out, + root_handle: NodeHandle, + death_row: HashSet, /// The number of hash operations this trie has performed. /// Note that none are performed until changes are committed. hash_count: usize, + marker: PhantomData, // TODO: rpheimer: "we could have the NodeCodec trait take &self to its methods and then we don't need PhantomData. we can just store an instance of C: NodeCodec in the trie struct. If it's a ZST it won't have any additional overhead anyway" } -impl<'a> TrieDBMut<'a> { +impl<'a, H, C> TrieDBMut<'a, H, C> +where + H: Hasher, + C: NodeCodec +{ /// Create a new trie with backing database `db` and empty `root`. 
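`ChildReference`, introduced above, is the value the encoding and commit paths exchange: either a real hash, or a node whose encoding is so short that it is packed directly into the hasher's output type together with its length. A toy illustration of what `Inline(hash, len)` carries, assuming `H::Out = H256` as with `KeccakHasher`:

extern crate ethereum_types;

use ethereum_types::H256;

fn main() {
    // A small encoded node (3 bytes), far too short to be worth hashing and storing.
    let encoded_node = [0xc2u8, 0x80, 0x80];
    let len = encoded_node.len();

    // ChildReference::Inline(h, len): the first `len` bytes of `h` hold the encoding,
    // the remaining bytes of the H256 are just padding.
    let mut h = H256::default();
    h.as_mut()[..len].copy_from_slice(&encoded_node);

    let packed: &[u8] = h.as_ref();
    assert_eq!(&packed[..len], &encoded_node[..]);
}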
- pub fn new(db: &'a mut HashDB, root: &'a mut H256) -> Self { - *root = KECCAK_NULL_RLP; - let root_handle = NodeHandle::Hash(KECCAK_NULL_RLP); + pub fn new(db: &'a mut HashDB, root: &'a mut H::Out) -> Self { + *root = C::HASHED_NULL_NODE; + let root_handle = NodeHandle::Hash(C::HASHED_NULL_NODE); TrieDBMut { storage: NodeStorage::empty(), @@ -316,12 +312,13 @@ impl<'a> TrieDBMut<'a> { root_handle: root_handle, death_row: HashSet::new(), hash_count: 0, + marker: PhantomData, } } /// Create a new trie with the backing database `db` and `root. /// Returns an error if `root` does not exist. - pub fn from_existing(db: &'a mut HashDB, root: &'a mut H256) -> super::Result { + pub fn from_existing(db: &'a mut HashDB, root: &'a mut H::Out) -> Result { if !db.contains(root) { return Err(Box::new(TrieError::InvalidStateRoot(*root))); } @@ -334,29 +331,34 @@ impl<'a> TrieDBMut<'a> { root_handle: root_handle, death_row: HashSet::new(), hash_count: 0, + marker: PhantomData, }) } /// Get the backing database. - pub fn db(&self) -> &HashDB { + pub fn db(&self) -> &HashDB { self.db } /// Get the backing database mutably. - pub fn db_mut(&mut self) -> &mut HashDB { + pub fn db_mut(&mut self) -> &mut HashDB { self.db } // cache a node by hash - fn cache(&mut self, hash: H256) -> super::Result { - let node_rlp = self.db.get(&hash).ok_or_else(|| Box::new(TrieError::IncompleteDatabase(hash)))?; - let node = Node::from_rlp(&node_rlp, &*self.db, &mut self.storage); + fn cache(&mut self, hash: H::Out) -> Result { + let node_encoded = self.db.get(&hash).ok_or_else(|| Box::new(TrieError::IncompleteDatabase(hash)))?; + let node = Node::from_encoded::( + &node_encoded, + &*self.db, + &mut self.storage + ); Ok(self.storage.alloc(Stored::Cached(node, hash))) } // inspect a node, choosing either to replace, restore, or delete it. // if restored or replaced, returns the new node along with a flag of whether it was changed. - fn inspect(&mut self, stored: Stored, inspector: F) -> super::Result> - where F: FnOnce(&mut Self, Node) -> super::Result { + fn inspect(&mut self, stored: Stored, inspector: F) -> Result, bool)>, H::Out, C::Error> + where F: FnOnce(&mut Self, Node) -> Result, H::Out, C::Error> { Ok(match stored { Stored::New(node) => match inspector(self, node)? { Action::Restore(node) => Some((Stored::New(node), false)), @@ -378,16 +380,17 @@ impl<'a> TrieDBMut<'a> { } // walk the trie, attempting to find the key's node. - fn lookup<'x, 'key>(&'x self, mut partial: NibbleSlice<'key>, handle: &NodeHandle) -> super::Result> + fn lookup<'x, 'key>(&'x self, mut partial: NibbleSlice<'key>, handle: &NodeHandle) -> Result, H::Out, C::Error> where 'x: 'key { let mut handle = handle; loop { let (mid, child) = match *handle { - NodeHandle::Hash(ref hash) => return Lookup { + NodeHandle::Hash(ref hash) => return Lookup{ db: &*self.db, query: DBValue::from_slice, hash: hash.clone(), + marker: PhantomData::, }.look_up(partial), NodeHandle::InMemory(ref handle) => match self.storage[handle] { Node::Empty => return Ok(None), @@ -425,10 +428,8 @@ impl<'a> TrieDBMut<'a> { } } - /// insert a key, value pair into the trie, creating new nodes if necessary. - fn insert_at(&mut self, handle: NodeHandle, partial: NibbleSlice, value: DBValue, old_val: &mut Option) - -> super::Result<(StorageHandle, bool)> - { + /// insert a key-value pair into the trie, creating new nodes if necessary. 
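Two behaviours of the constructors above are worth spelling out: `new` unconditionally resets `root` to the codec's `HASHED_NULL_NODE` (`KECCAK_NULL_RLP` for the RLP codec), while `from_existing` returns `TrieError::InvalidStateRoot` when the given root is not present in the backing database. A small sketch, assuming the concrete Keccak/RLP aliases from patricia-trie-ethereum:

extern crate patricia_trie_ethereum as ethtrie;
extern crate memorydb;
extern crate keccak_hasher;
extern crate keccak_hash;
extern crate ethereum_types;

use ethtrie::{TrieDBMut, trie::TrieMut};
use memorydb::MemoryDB;
use keccak_hasher::KeccakHasher;
use keccak_hash::KECCAK_NULL_RLP;
use ethereum_types::H256;

fn main() {
    let mut memdb = MemoryDB::<KeccakHasher>::new();

    // An all-zero root was never written to memdb, so from_existing refuses it.
    let mut missing = H256::new();
    assert!(TrieDBMut::from_existing(&mut memdb, &mut missing).is_err());

    // new() overwrites root with the hashed null node, KECCAK_NULL_RLP for the RLP codec.
    let mut root = H256::new();
    {
        let mut t = TrieDBMut::new(&mut memdb, &mut root);
        assert!(t.is_empty());
        assert_eq!(*t.root(), KECCAK_NULL_RLP);
    }
}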
+ fn insert_at(&mut self, handle: NodeHandle, partial: NibbleSlice, value: DBValue, old_val: &mut Option) -> Result<(StorageHandle, bool), H::Out, C::Error> { let h = match handle { NodeHandle::InMemory(h) => h, NodeHandle::Hash(h) => self.cache(h)?, @@ -442,9 +443,7 @@ impl<'a> TrieDBMut<'a> { } /// the insertion inspector. - fn insert_inspector(&mut self, node: Node, partial: NibbleSlice, value: DBValue, old_val: &mut Option) - -> super::Result - { + fn insert_inspector(&mut self, node: Node, partial: NibbleSlice, value: DBValue, old_val: &mut Option) -> Result, H::Out, C::Error> { trace!(target: "trie", "augmented (partial: {:?}, value: {:?})", partial, value.pretty()); Ok(match node { @@ -605,9 +604,7 @@ impl<'a> TrieDBMut<'a> { } /// Remove a node from the trie based on key. - fn remove_at(&mut self, handle: NodeHandle, partial: NibbleSlice, old_val: &mut Option) - -> super::Result> - { + fn remove_at(&mut self, handle: NodeHandle, partial: NibbleSlice, old_val: &mut Option) -> Result, H::Out, C::Error> { let stored = match handle { NodeHandle::InMemory(h) => self.storage.destroy(h), NodeHandle::Hash(h) => { @@ -622,7 +619,7 @@ impl<'a> TrieDBMut<'a> { } /// the removal inspector - fn remove_inspector(&mut self, node: Node, partial: NibbleSlice, old_val: &mut Option) -> super::Result { + fn remove_inspector(&mut self, node: Node, partial: NibbleSlice, old_val: &mut Option) -> Result, H::Out, C::Error> { Ok(match (node, partial.is_empty()) { (Node::Empty, _) => Action::Delete, (Node::Branch(c, None), true) => Action::Restore(Node::Branch(c, None)), @@ -708,7 +705,7 @@ impl<'a> TrieDBMut<'a> { /// _invalid state_ means: /// - Branch node where there is only a single entry; /// - Extension node followed by anything other than a Branch node. - fn fix(&mut self, node: Node) -> super::Result { + fn fix(&mut self, node: Node) -> Result, H::Out, C::Error> { match node { Node::Branch(mut children, value) => { // if only a single value, transmute to leaf/extension and feed through fixed. @@ -828,11 +825,11 @@ impl<'a> TrieDBMut<'a> { match self.storage.destroy(handle) { Stored::New(node) => { - let root_rlp = node.into_rlp(|child, stream| self.commit_node(child, stream)); - *self.root = self.db.insert(&root_rlp[..]); + let encoded_root = node.into_encoded::<_, C>(|child| self.commit_child(child) ); + *self.root = self.db.insert(&encoded_root[..]); self.hash_count += 1; - trace!(target: "trie", "root node rlp: {:?}", (&root_rlp[..]).pretty()); + trace!(target: "trie", "encoded root node: {:?}", (&encoded_root[..]).pretty()); self.root_handle = NodeHandle::Hash(*self.root); } Stored::Cached(node, hash) => { @@ -843,29 +840,38 @@ impl<'a> TrieDBMut<'a> { } } - /// commit a node, hashing it, committing it to the db, - /// and writing it to the rlp stream as necessary. - fn commit_node(&mut self, handle: NodeHandle, stream: &mut RlpStream) { + /// Commit a node by hashing it and writing it to the db. Returns a + /// `ChildReference` which in most cases carries a normal hash but for the + /// case where we can fit the actual data in the `Hasher`s output type, we + /// store the data inline. This function is used as the callback to the + /// `into_encoded` method of `Node`. 
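The commit path described in the doc comment above applies a simple size rule: a child whose encoding is at least `H::LENGTH` bytes (32 for Keccak-256) is hashed and written to the `HashDB`, anything shorter is kept inline in the parent node. A condensed, dependency-free restatement of that decision (not the PR's code):

// Stand-in for the `encoded.len() >= H::LENGTH` check, with Keccak's 32-byte output
// (KeccakHasher::LENGTH) hard-coded.
const KECCAK_LENGTH: usize = 32;

enum ChildKind {
    HashReference,  // encoding is hashed and inserted into the HashDB
    Inline(usize),  // encoding is packed into the parent, tagged with its length
}

fn classify(encoded: &[u8]) -> ChildKind {
    if encoded.len() >= KECCAK_LENGTH {
        ChildKind::HashReference
    } else {
        ChildKind::Inline(encoded.len())
    }
}

fn main() {
    match classify(&[0u8; 40]) {
        ChildKind::HashReference => (),
        ChildKind::Inline(_) => panic!("a 40-byte encoding must be stored by hash"),
    }
    match classify(&[0xc2, 0x80, 0x80]) {
        ChildKind::Inline(len) => assert_eq!(len, 3),
        ChildKind::HashReference => panic!("a 3-byte encoding must be inlined"),
    }
}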
+ fn commit_child(&mut self, handle: NodeHandle) -> ChildReference { match handle { - NodeHandle::Hash(h) => stream.append(&h), - NodeHandle::InMemory(h) => match self.storage.destroy(h) { - Stored::Cached(_, h) => stream.append(&h), - Stored::New(node) => { - let node_rlp = node.into_rlp(|child, stream| self.commit_node(child, stream)); - if node_rlp.len() >= 32 { - let hash = self.db.insert(&node_rlp[..]); - self.hash_count += 1; - stream.append(&hash) - } else { - stream.append_raw(&node_rlp, 1) + NodeHandle::Hash(hash) => ChildReference::Hash(hash), + NodeHandle::InMemory(storage_handle) => { + match self.storage.destroy(storage_handle) { + Stored::Cached(_, hash) => ChildReference::Hash(hash), + Stored::New(node) => { + let encoded = node.into_encoded::<_, C>(|node_handle| self.commit_child(node_handle) ); + if encoded.len() >= H::LENGTH { + let hash = self.db.insert(&encoded[..]); + self.hash_count +=1; + ChildReference::Hash(hash) + } else { + // it's a small value, so we cram it into a `H::Out` and tag with length + let mut h = H::Out::default(); + let len = encoded.len(); + h.as_mut()[..len].copy_from_slice(&encoded[..len]); + ChildReference::Inline(h, len) + } } } } - }; + } } // a hack to get the root node's handle - fn root_handle(&self) -> NodeHandle { + fn root_handle(&self) -> NodeHandle { match self.root_handle { NodeHandle::Hash(h) => NodeHandle::Hash(h), NodeHandle::InMemory(StorageHandle(x)) => NodeHandle::InMemory(StorageHandle(x)), @@ -873,15 +879,19 @@ impl<'a> TrieDBMut<'a> { } } -impl<'a> TrieMut for TrieDBMut<'a> { - fn root(&mut self) -> &H256 { +impl<'a, H, C> TrieMut for TrieDBMut<'a, H, C> +where + H: Hasher, + C: NodeCodec +{ + fn root(&mut self) -> &H::Out { self.commit(); self.root } fn is_empty(&self) -> bool { match self.root_handle { - NodeHandle::Hash(h) => h == KECCAK_NULL_RLP, + NodeHandle::Hash(h) => h == C::HASHED_NULL_NODE, NodeHandle::InMemory(ref h) => match self.storage[h] { Node::Empty => true, _ => false, @@ -889,11 +899,13 @@ impl<'a> TrieMut for TrieDBMut<'a> { } } - fn get<'x, 'key>(&'x self, key: &'key [u8]) -> super::Result> where 'x: 'key { + fn get<'x, 'key>(&'x self, key: &'key [u8]) -> Result, H::Out, C::Error> + where 'x: 'key + { self.lookup(NibbleSlice::new(key), &self.root_handle) } - fn insert(&mut self, key: &[u8], value: &[u8]) -> super::Result> { + fn insert(&mut self, key: &[u8], value: &[u8]) -> Result, H::Out, C::Error> { if value.is_empty() { return self.remove(key) } let mut old_val = None; @@ -914,7 +926,7 @@ impl<'a> TrieMut for TrieDBMut<'a> { Ok(old_val) } - fn remove(&mut self, key: &[u8]) -> super::Result> { + fn remove(&mut self, key: &[u8]) -> Result, H::Out, C::Error> { trace!(target: "trie", "remove: key={:?}", key.pretty()); let root_handle = self.root_handle(); @@ -928,8 +940,8 @@ impl<'a> TrieMut for TrieDBMut<'a> { } None => { trace!(target: "trie", "remove: obliterated trie"); - self.root_handle = NodeHandle::Hash(KECCAK_NULL_RLP); - *self.root = KECCAK_NULL_RLP; + self.root_handle = NodeHandle::Hash(C::HASHED_NULL_NODE); + *self.root = C::HASHED_NULL_NODE; } } @@ -937,7 +949,11 @@ impl<'a> TrieMut for TrieDBMut<'a> { } } -impl<'a> Drop for TrieDBMut<'a> { +impl<'a, H, C> Drop for TrieDBMut<'a, H, C> +where + H: Hasher, + C: NodeCodec +{ fn drop(&mut self) { self.commit(); } @@ -945,18 +961,20 @@ impl<'a> Drop for TrieDBMut<'a> { #[cfg(test)] mod tests { - extern crate triehash; - - use self::triehash::trie_root; - use hashdb::*; - use memorydb::*; - use super::*; use bytes::ToPretty; - use 
keccak::KECCAK_NULL_RLP; - use super::super::TrieMut; + use hashdb::{DBValue, Hasher, HashDB}; + use keccak_hasher::KeccakHasher; + use memorydb::MemoryDB; + use rlp::{Decodable, Encodable}; + use triehash::trie_root; use standardmap::*; + use ethtrie::{TrieDBMut, RlpCodec, trie::{TrieMut, NodeCodec}}; + use env_logger; + use ethereum_types::H256; - fn populate_trie<'db>(db: &'db mut HashDB, root: &'db mut H256, v: &[(Vec, Vec)]) -> TrieDBMut<'db> { + fn populate_trie<'db, H, C>(db: &'db mut HashDB, root: &'db mut H256, v: &[(Vec, Vec)]) -> TrieDBMut<'db> + where H: Hasher, H::Out: Decodable + Encodable, C: NodeCodec + { let mut t = TrieDBMut::new(db, root); for i in 0..v.len() { let key: &[u8]= &v[i].0; @@ -975,8 +993,7 @@ mod tests { #[test] fn playpen() { - ::ethcore_logger::init_log(); - + env_logger::init(); let mut seed = H256::new(); for test_i in 0..10 { if test_i % 50 == 0 { @@ -991,9 +1008,9 @@ mod tests { }.make_with(&mut seed); let real = trie_root(x.clone()); - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); - let mut memtrie = populate_trie(&mut memdb, &mut root, &x); + let mut memtrie = populate_trie::<_, RlpCodec>(&mut memdb, &mut root, &x); memtrie.commit(); if *memtrie.root() != real { @@ -1007,7 +1024,7 @@ mod tests { assert_eq!(*memtrie.root(), real); unpopulate_trie(&mut memtrie, &x); memtrie.commit(); - if *memtrie.root() != KECCAK_NULL_RLP { + if *memtrie.root() != RlpCodec::HASHED_NULL_NODE { println!("- TRIE MISMATCH"); println!(""); println!("{:?} vs {:?}", memtrie.root(), real); @@ -1015,21 +1032,21 @@ mod tests { println!("{:?} -> {:?}", i.0.pretty(), i.1.pretty()); } } - assert_eq!(*memtrie.root(), KECCAK_NULL_RLP); + assert_eq!(*memtrie.root(), RlpCodec::HASHED_NULL_NODE); } } #[test] fn init() { - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); let mut t = TrieDBMut::new(&mut memdb, &mut root); - assert_eq!(*t.root(), KECCAK_NULL_RLP); + assert_eq!(*t.root(), RlpCodec::HASHED_NULL_NODE); } #[test] fn insert_on_empty() { - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); let mut t = TrieDBMut::new(&mut memdb, &mut root); t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]).unwrap(); @@ -1040,14 +1057,15 @@ mod tests { fn remove_to_empty() { let big_value = b"00000000000000000000000000000000"; - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); let mut t1 = TrieDBMut::new(&mut memdb, &mut root); t1.insert(&[0x01, 0x23], big_value).unwrap(); t1.insert(&[0x01, 0x34], big_value).unwrap(); - let mut memdb2 = MemoryDB::new(); + let mut memdb2 = MemoryDB::::new(); let mut root2 = H256::new(); let mut t2 = TrieDBMut::new(&mut memdb2, &mut root2); + t2.insert(&[0x01], big_value).unwrap(); t2.insert(&[0x01, 0x23], big_value).unwrap(); t2.insert(&[0x01, 0x34], big_value).unwrap(); @@ -1056,7 +1074,7 @@ mod tests { #[test] fn insert_replace_root() { - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); let mut t = TrieDBMut::new(&mut memdb, &mut root); t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]).unwrap(); @@ -1066,7 +1084,7 @@ mod tests { #[test] fn insert_make_branch_root() { - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); let mut t = TrieDBMut::new(&mut memdb, &mut root); t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]).unwrap(); @@ -1079,7 +1097,7 @@ mod tests { #[test] fn 
insert_into_branch_root() { - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); let mut t = TrieDBMut::new(&mut memdb, &mut root); t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]).unwrap(); @@ -1094,7 +1112,7 @@ mod tests { #[test] fn insert_value_into_branch_root() { - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); let mut t = TrieDBMut::new(&mut memdb, &mut root); t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]).unwrap(); @@ -1107,7 +1125,7 @@ mod tests { #[test] fn insert_split_leaf() { - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); let mut t = TrieDBMut::new(&mut memdb, &mut root); t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]).unwrap(); @@ -1120,7 +1138,7 @@ mod tests { #[test] fn insert_split_extenstion() { - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); let mut t = TrieDBMut::new(&mut memdb, &mut root); t.insert(&[0x01, 0x23, 0x45], &[0x01]).unwrap(); @@ -1138,7 +1156,7 @@ mod tests { let big_value0 = b"00000000000000000000000000000000"; let big_value1 = b"11111111111111111111111111111111"; - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); let mut t = TrieDBMut::new(&mut memdb, &mut root); t.insert(&[0x01u8, 0x23], big_value0).unwrap(); @@ -1153,7 +1171,7 @@ mod tests { fn insert_duplicate_value() { let big_value = b"00000000000000000000000000000000"; - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); let mut t = TrieDBMut::new(&mut memdb, &mut root); t.insert(&[0x01u8, 0x23], big_value).unwrap(); @@ -1166,15 +1184,15 @@ mod tests { #[test] fn test_at_empty() { - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); let t = TrieDBMut::new(&mut memdb, &mut root); - assert_eq!(t.get(&[0x5]), Ok(None)); + assert_eq!(t.get(&[0x5]).unwrap(), None); } #[test] fn test_at_one() { - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); let mut t = TrieDBMut::new(&mut memdb, &mut root); t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]).unwrap(); @@ -1185,7 +1203,7 @@ mod tests { #[test] fn test_at_three() { - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); let mut t = TrieDBMut::new(&mut memdb, &mut root); t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]).unwrap(); @@ -1194,12 +1212,12 @@ mod tests { assert_eq!(t.get(&[0x01, 0x23]).unwrap().unwrap(), DBValue::from_slice(&[0x01u8, 0x23])); assert_eq!(t.get(&[0xf1, 0x23]).unwrap().unwrap(), DBValue::from_slice(&[0xf1u8, 0x23])); assert_eq!(t.get(&[0x81, 0x23]).unwrap().unwrap(), DBValue::from_slice(&[0x81u8, 0x23])); - assert_eq!(t.get(&[0x82, 0x23]), Ok(None)); + assert_eq!(t.get(&[0x82, 0x23]).unwrap(), None); t.commit(); assert_eq!(t.get(&[0x01, 0x23]).unwrap().unwrap(), DBValue::from_slice(&[0x01u8, 0x23])); assert_eq!(t.get(&[0xf1, 0x23]).unwrap().unwrap(), DBValue::from_slice(&[0xf1u8, 0x23])); assert_eq!(t.get(&[0x81, 0x23]).unwrap().unwrap(), DBValue::from_slice(&[0x81u8, 0x23])); - assert_eq!(t.get(&[0x82, 0x23]), Ok(None)); + assert_eq!(t.get(&[0x82, 0x23]).unwrap(), None); } #[test] @@ -1215,14 +1233,14 @@ mod tests { }.make_with(&mut seed); let real = trie_root(x.clone()); - let mut memdb = MemoryDB::new(); + let mut memdb = MemoryDB::::new(); let mut root = H256::new(); - let mut memtrie = populate_trie(&mut memdb, &mut root, 
&x); + let mut memtrie = populate_trie::<_, RlpCodec>(&mut memdb, &mut root, &x); let mut y = x.clone(); y.sort_by(|ref a, ref b| a.0.cmp(&b.0)); - let mut memdb2 = MemoryDB::new(); + let mut memdb2 = MemoryDB::::new(); let mut root2 = H256::new(); - let mut memtrie_sorted = populate_trie(&mut memdb2, &mut root2, &y); + let mut memtrie_sorted = populate_trie::<_, RlpCodec>(&mut memdb2, &mut root2, &y); if *memtrie.root() != real || *memtrie_sorted.root() != real { println!("TRIE MISMATCH"); println!(""); @@ -1242,15 +1260,15 @@ mod tests { #[test] fn test_trie_existing() { + let mut db = MemoryDB::::new(); let mut root = H256::new(); - let mut db = MemoryDB::new(); { let mut t = TrieDBMut::new(&mut db, &mut root); t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]).unwrap(); } { - let _ = TrieDBMut::from_existing(&mut db, &mut root); + let _ = TrieDBMut::from_existing(&mut db, &mut root); } } @@ -1265,7 +1283,7 @@ mod tests { count: 4, }.make_with(&mut seed); - let mut db = MemoryDB::new(); + let mut db = MemoryDB::::new(); let mut root = H256::new(); let mut t = TrieDBMut::new(&mut db, &mut root); for &(ref key, ref value) in &x { @@ -1279,7 +1297,7 @@ mod tests { } assert!(t.is_empty()); - assert_eq!(*t.root(), KECCAK_NULL_RLP); + assert_eq!(*t.root(), RlpCodec::HASHED_NULL_NODE); } #[test] @@ -1293,7 +1311,7 @@ mod tests { count: 4, }.make_with(&mut seed); - let mut db = MemoryDB::new(); + let mut db = MemoryDB::::new(); let mut root = H256::new(); let mut t = TrieDBMut::new(&mut db, &mut root); for &(ref key, ref value) in &x { diff --git a/util/plain_hasher/Cargo.toml b/util/plain_hasher/Cargo.toml index d909f8f9d..9b2cd5503 100644 --- a/util/plain_hasher/Cargo.toml +++ b/util/plain_hasher/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "plain_hasher" description = "Hasher for 32-bit keys." -version = "0.1.0" +version = "0.2.0" authors = ["Parity Technologies "] license = "MIT" keywords = ["hash", "hasher"] @@ -10,3 +10,4 @@ homepage = "https://github.com/paritytech/plain_hasher" [dependencies] crunchy = "0.1.6" ethereum-types = "0.3" +hashdb = { version = "0.2.0", path = "../hashdb" } \ No newline at end of file diff --git a/util/plain_hasher/src/lib.rs b/util/plain_hasher/src/lib.rs index 74e2225dc..4a8a10441 100644 --- a/util/plain_hasher/src/lib.rs +++ b/util/plain_hasher/src/lib.rs @@ -17,11 +17,12 @@ #[macro_use] extern crate crunchy; extern crate ethereum_types; +extern crate hashdb; use ethereum_types::H256; -use std::collections::{HashMap, HashSet}; +// use hashdb::Hasher; use std::hash; - +use std::collections::{HashMap, HashSet}; /// Specialized version of `HashMap` with H256 keys and fast hashing function. pub type H256FastMap = HashMap>; /// Specialized version of `HashSet` with H256 keys and fast hashing function. 
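plain_hasher itself only gains a version bump and a `hashdb` dependency here; its fast-path map aliases are unchanged. For orientation, a tiny usage sketch of the `H256FastMap` type declared above, assuming the crate's existing public API:

extern crate plain_hasher;
extern crate ethereum_types;

use plain_hasher::H256FastMap;
use ethereum_types::H256;

fn main() {
    // Same surface as a std HashMap, but keyed by H256 with the cheap PlainHasher
    // build-hasher; the keys are already uniformly distributed hashes.
    let mut balances: H256FastMap<u64> = H256FastMap::default();
    balances.insert(H256::new(), 1);
    assert_eq!(balances.get(&H256::new()), Some(&1));
}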
diff --git a/util/rlp/benches/rlp.rs b/util/rlp/benches/rlp.rs index 6aeabaf5d..7f2e9f645 100644 --- a/util/rlp/benches/rlp.rs +++ b/util/rlp/benches/rlp.rs @@ -14,13 +14,13 @@ #![feature(test)] -extern crate test; -extern crate ethcore_bigint as bigint; +extern crate ethereum_types; extern crate rlp; +extern crate test; -use test::Bencher; -use bigint::prelude::U256; +use ethereum_types::U256; use rlp::{RlpStream, Rlp}; +use test::Bencher; #[bench] fn bench_stream_u64_value(b: &mut Bencher) { @@ -38,7 +38,7 @@ fn bench_decode_u64_value(b: &mut Bencher) { // u64 let data = vec![0x88, 0x10, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]; let rlp = Rlp::new(&data); - let _: u64 = rlp.as_val(); + let _: u64 = rlp.as_val().unwrap(); }); } @@ -61,7 +61,7 @@ fn bench_decode_u256_value(b: &mut Bencher) { 0x30, 0x40, 0x50, 0x60, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0xf0]; let rlp = Rlp::new(&data); - let _ : U256 = rlp.as_val(); + let _ : U256 = rlp.as_val().unwrap(); }); } @@ -83,11 +83,11 @@ fn bench_decode_nested_empty_lists(b: &mut Bencher) { // [ [], [[]], [ [], [[]] ] ] let data = vec![0xc7, 0xc0, 0xc1, 0xc0, 0xc3, 0xc0, 0xc1, 0xc0]; let rlp = Rlp::new(&data); - let _v0: Vec = rlp.at(0).as_list(); - let _v1: Vec = rlp.at(1).at(0).as_list(); - let nested_rlp = rlp.at(2); - let _v2a: Vec = nested_rlp.at(0).as_list(); - let _v2b: Vec = nested_rlp.at(1).at(0).as_list(); + let _v0: Vec = rlp.at(0).unwrap().as_list().unwrap(); + let _v1: Vec = rlp.at(1).unwrap().at(0).unwrap().as_list().unwrap(); + let nested_rlp = rlp.at(2).unwrap(); + let _v2a: Vec = nested_rlp.at(0).unwrap().as_list().unwrap(); + let _v2b: Vec = nested_rlp.at(1).unwrap().at(0).unwrap().as_list().unwrap(); }); } diff --git a/util/rlp/src/lib.rs b/util/rlp/src/lib.rs index b416b1c25..08c36522d 100644 --- a/util/rlp/src/lib.rs +++ b/util/rlp/src/lib.rs @@ -43,8 +43,8 @@ mod rlpin; mod stream; mod impls; -use std::borrow::Borrow; use elastic_array::ElasticArray1024; +use std::borrow::Borrow; pub use error::DecoderError; pub use traits::{Decodable, Encodable}; diff --git a/util/rlp/src/stream.rs b/util/rlp/src/stream.rs index 550ede039..13ccddaa7 100644 --- a/util/rlp/src/stream.rs +++ b/util/rlp/src/stream.rs @@ -58,6 +58,50 @@ impl RlpStream { stream } + /// Apends null to the end of stream, chainable. + /// + /// ```rust + /// extern crate rlp; + /// use rlp::*; + /// + /// fn main () { + /// let mut stream = RlpStream::new_list(2); + /// stream.append_empty_data().append_empty_data(); + /// let out = stream.out(); + /// assert_eq!(out, vec![0xc2, 0x80, 0x80]); + /// } + /// ``` + pub fn append_empty_data(&mut self) -> &mut Self { + // self push raw item + self.buffer.push(0x80); + + // try to finish and prepend the length + self.note_appended(1); + + // return chainable self + self + } + + /// Drain the object and return the underlying ElasticArray. Panics if it is not finished. + pub fn drain(self) -> ElasticArray1024 { + match self.is_finished() { + true => self.buffer, + false => panic!() + } + } + + /// Appends raw (pre-serialised) RLP data. Use with caution. Chainable. + pub fn append_raw<'a>(&'a mut self, bytes: &[u8], item_count: usize) -> &'a mut Self { + // push raw items + self.buffer.append_slice(bytes); + + // try to finish and prepend the length + self.note_appended(item_count); + + // return chainable self + self + } + /// Appends value to the end of stream, chainable. 
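The benchmark changes above follow the rlp API in this workspace: the `Rlp` getters (`as_val`, `at`, `as_list`) return `Result<_, DecoderError>` rather than the value itself, so call sites unwrap or propagate. A minimal sketch of the decoding pattern the benches now use:

extern crate rlp;

use rlp::Rlp;

fn main() {
    // The u64 value 0x1023456789abcdef encoded as an 8-byte RLP string, as in the bench.
    let data = vec![0x88, 0x10, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef];
    let rlp = Rlp::new(&data);
    let value: u64 = rlp.as_val().unwrap();
    assert_eq!(value, 0x1023_4567_89ab_cdef);

    // Nested empty lists, [ [], [[]] ]: every access step yields a Result.
    let nested = vec![0xc3, 0xc0, 0xc1, 0xc0];
    let rlp = Rlp::new(&nested);
    let v0: Vec<u8> = rlp.at(0).unwrap().as_list().unwrap();
    let v1: Vec<u8> = rlp.at(1).unwrap().at(0).unwrap().as_list().unwrap();
    assert!(v0.is_empty() && v1.is_empty());
}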
/// /// ```rust @@ -145,42 +189,6 @@ impl RlpStream { self } - /// Apends null to the end of stream, chainable. - /// - /// ```rust - /// extern crate rlp; - /// use rlp::*; - /// - /// fn main () { - /// let mut stream = RlpStream::new_list(2); - /// stream.append_empty_data().append_empty_data(); - /// let out = stream.out(); - /// assert_eq!(out, vec![0xc2, 0x80, 0x80]); - /// } - /// ``` - pub fn append_empty_data(&mut self) -> &mut RlpStream { - // self push raw item - self.buffer.push(0x80); - - // try to finish and prepend the length - self.note_appended(1); - - // return chainable self - self - } - - /// Appends raw (pre-serialised) RLP data. Use with caution. Chainable. - pub fn append_raw<'a>(&'a mut self, bytes: &[u8], item_count: usize) -> &'a mut RlpStream { - // push raw items - self.buffer.append_slice(bytes); - - // try to finish and prepend the length - self.note_appended(item_count); - - // return chainable self - self - } - /// Appends raw (pre-serialised) RLP data. Checks for size oveflow. pub fn append_raw_checked<'a>(&'a mut self, bytes: &[u8], item_count: usize, max_size: usize) -> bool { if self.estimate_size(bytes.len()) > max_size { @@ -215,7 +223,7 @@ impl RlpStream { /// ```rust /// extern crate rlp; /// use rlp::*; - /// + /// /// fn main () { /// let mut stream = RlpStream::new_list(3); /// stream.append(&"cat"); @@ -300,14 +308,6 @@ impl RlpStream { BasicEncoder::new(self) } - /// Drain the object and return the underlying ElasticArray. - pub fn drain(self) -> ElasticArray1024 { - match self.is_finished() { - true => self.buffer, - false => panic!() - } - } - /// Finalize current ubnbound list. Panics if no unbounded list has been opened. pub fn complete_unbounded_list(&mut self) { let list = self.unfinished_lists.pop().expect("No open list."); diff --git a/util/rlp/tests/tests.rs b/util/rlp/tests/tests.rs index 041c26766..7aa2920c6 100644 --- a/util/rlp/tests/tests.rs +++ b/util/rlp/tests/tests.rs @@ -423,4 +423,3 @@ fn test_rlp_stream_unbounded_list() { stream.complete_unbounded_list(); assert!(stream.is_finished()); } -
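For completeness, the `RlpStream` methods shuffled around in the stream.rs hunk above (`append_empty_data`, `append_raw`, `drain`) keep their behaviour; only their position in the file changes. A short usage sketch:

extern crate rlp;

use rlp::RlpStream;

fn main() {
    let mut stream = RlpStream::new_list(3);
    stream.append_empty_data().append_empty_data();
    // append_raw splices pre-serialised RLP in as-is and counts it as one appended item.
    stream.append_raw(&[0x80], 1);
    // drain() hands back the underlying buffer; it panics if the list is not yet complete.
    let out = stream.drain();
    assert_eq!(out.to_vec(), vec![0xc3, 0x80, 0x80, 0x80]);
}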