Merge remote-tracking branch 'util/master' into gav

Gav Wood 2016-01-17 13:05:18 +01:00
commit 9b87bae322
88 changed files with 13538 additions and 0 deletions

4
.gitignore vendored
View File

@@ -20,3 +20,7 @@ Cargo.lock
# gdb files
.gdb_history
/json-tests/target/

33
.travis.yml Normal file
View File

@@ -0,0 +1,33 @@
language: rust
rust:
- beta
os:
#- linux
- osx
before_script:
# g++-4.8 for C++11, which is required by rocksdb
#- if [ $TRAVIS_OS_NAME == "linux" ]; then
#sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
#sudo apt-get update -y -qq
#sudo apt-get install -qq --yes --force-yes g++-4.8
#sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.8 50
#wget https://github.com/facebook/rocksdb/archive/rocksdb-3.13.tar.gz
#tar xvf rocksdb-3.13.tar.gz && cd rocksdb-rocksdb-3.13 && make shared_lib
#sudo make install
#cd ..
#else
- brew update
- brew install rocksdb
#fi
after_success:
#- if [ $TRAVIS_OS_NAME == "linux" ]; then
#sudo apt-get install libcurl4-openssl-dev libelf-dev libdw-dev
#wget https://github.com/SimonKagstrom/kcov/archive/master.tar.gz
#tar xzf master.tar.gz && mkdir kcov-master/build && cd kcov-master/build && cmake .. && make && sudo make install && cd ../.. &&
#kcov --coveralls-id=$TRAVIS_JOB_ID --exclude-pattern=/.cargo target/kcov target/debug/ethcore_util-*
#fi

32
Cargo.toml Normal file
View File

@@ -0,0 +1,32 @@
[package]
description = "Ethcore utility library"
homepage = "http://ethcore.io"
license = "GPL-3.0"
name = "ethcore-util"
version = "0.1.0"
authors = ["Ethcore <admin@ethcore.io>"]
build = "build.rs"
[build-dependencies]
gcc = "0.3"
[dependencies]
log = "0.3"
env_logger = "0.3"
rustc-serialize = "0.3"
arrayvec = "0.3"
mio = "0.5.0"
rand = "0.3.12"
time = "0.1.34"
tiny-keccak = "1.0"
rocksdb = "0.3"
lazy_static = "0.1"
eth-secp256k1 = { git = "https://github.com/arkpar/rust-secp256k1.git" }
rust-crypto = "0.2.34"
elastic-array = "0.4"
heapsize = "0.2"
itertools = "0.4"
slab = { git = "https://github.com/arkpar/slab.git" }
[dev-dependencies]
json-tests = { path = "json-tests" }

675
LICENSE Normal file
View File

@@ -0,0 +1,675 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
{one line to give the program's name and a brief idea of what it does.}
Copyright (C) {year} {name of author}
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
{project} Copyright (C) {year} {fullname}
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.

12
Makefile Normal file
View File

@@ -0,0 +1,12 @@
# Makefile for cross-compilation
IOS_ARCHS = i386-apple-ios x86_64-apple-ios armv7-apple-ios armv7s-apple-ios aarch64-apple-ios
IOS_LIB = libethcore_util.a
ios: $(IOS_LIB)
.PHONY: $(IOS_ARCHS)
$(IOS_ARCHS): %:
multirust run ios cargo build --target $@
$(IOS_LIB): $(IOS_ARCHS)
lipo -create -output $@ $(foreach arch,$(IOS_ARCHS),$(wildcard target/$(arch)/debug/$(IOS_LIB)))

1
README.md Normal file
View File

@@ -0,0 +1 @@
# ethcore-util

96
benches/rlp.rs Normal file
View File

@@ -0,0 +1,96 @@
//! benchmarking for rlp
//! should be started with:
//! ```bash
//! multirust run nightly cargo bench
//! ```
#![feature(test)]
extern crate test;
extern crate ethcore_util;
use test::Bencher;
use std::str::FromStr;
use ethcore_util::rlp::*;
use ethcore_util::uint::U256;
#[bench]
fn bench_stream_u64_value(b: &mut Bencher) {
b.iter(|| {
// u64
let mut stream = RlpStream::new();
stream.append(&0x1023456789abcdefu64);
let _ = stream.out();
});
}
#[bench]
fn bench_decode_u64_value(b: &mut Bencher) {
b.iter(|| {
// u64
let data = vec![0x88, 0x10, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef];
let rlp = Rlp::new(&data);
let _: u64 = rlp.as_val();
});
}
#[bench]
fn bench_stream_u256_value(b: &mut Bencher) {
b.iter(|| {
// u256
let mut stream = RlpStream::new();
stream.append(&U256::from_str("8090a0b0c0d0e0f009102030405060770000000000000001000000000\
00012f0")
.unwrap());
let _ = stream.out();
});
}
#[bench]
fn bench_decode_u256_value(b: &mut Bencher) {
b.iter(|| {
// u256
let data = vec![0xa0, 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0x09, 0x10, 0x20,
0x30, 0x40, 0x50, 0x60, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0xf0];
let rlp = Rlp::new(&data);
let _ : U256 = rlp.as_val();
});
}
#[bench]
fn bench_stream_nested_empty_lists(b: &mut Bencher) {
b.iter(|| {
// [ [], [[]], [ [], [[]] ] ]
let mut stream = RlpStream::new_list(3);
stream.append_list(0);
stream.append_list(1).append_list(0);
stream.append_list(2).append_list(0).append_list(1).append_list(0);
let _ = stream.out();
});
}
#[bench]
fn bench_decode_nested_empty_lists(b: &mut Bencher) {
b.iter(|| {
// [ [], [[]], [ [], [[]] ] ]
let data = vec![0xc7, 0xc0, 0xc1, 0xc0, 0xc3, 0xc0, 0xc1, 0xc0];
let rlp = Rlp::new(&data);
let _v0: Vec<u16> = rlp.val_at(0);
let _v1: Vec<Vec<u16>> = rlp.val_at(1);
let nested_rlp = rlp.at(2);
let _v2a: Vec<u16> = nested_rlp.val_at(0);
let _v2b: Vec<Vec<u16>> = nested_rlp.val_at(1);
});
}
#[bench]
fn bench_stream_1000_empty_lists(b: &mut Bencher) {
b.iter(|| {
let mut stream = RlpStream::new_list(1000);
for _ in 0..1000 {
stream.append_list(0);
}
let _ = stream.out();
});
}

198
benches/trie.rs Normal file
View File

@@ -0,0 +1,198 @@
#![feature(test)]
extern crate test;
extern crate rand;
extern crate ethcore_util;
#[macro_use]
extern crate log;
use test::Bencher;
use ethcore_util::hash::*;
use ethcore_util::bytes::*;
use ethcore_util::trie::*;
use ethcore_util::hashdb::*;
use ethcore_util::memorydb::*;
use ethcore_util::triehash::*;
use ethcore_util::sha3::*;
fn random_word(alphabet: &[u8], min_count: usize, diff_count: usize, seed: &mut H256) -> Vec<u8> {
assert!(min_count + diff_count <= 32);
*seed = seed.sha3();
let r = min_count + (seed.bytes()[31] as usize % (diff_count + 1));
let mut ret: Vec<u8> = Vec::with_capacity(r);
for i in 0..r {
ret.push(alphabet[seed.bytes()[i] as usize % alphabet.len()]);
}
ret
}
fn random_bytes(min_count: usize, diff_count: usize, seed: &mut H256) -> Vec<u8> {
assert!(min_count + diff_count <= 32);
*seed = seed.sha3();
let r = min_count + (seed.bytes()[31] as usize % (diff_count + 1));
seed.bytes()[0..r].to_vec()
}
fn random_value(seed: &mut H256) -> Bytes {
*seed = seed.sha3();
match seed.bytes()[0] % 2 {
1 => vec![seed.bytes()[31];1],
_ => seed.bytes().to_vec(),
}
}
#[bench]
fn trie_insertions_six_high(b: &mut Bencher) {
let mut d: Vec<(Bytes, Bytes)> = Vec::new();
let mut seed = H256::new();
for _ in 0..1000 {
let k = random_bytes(6, 0, &mut seed);
let v = random_value(&mut seed);
d.push((k, v))
}
b.iter(||{
let mut memdb = MemoryDB::new();
let mut root = H256::new();
let mut t = TrieDBMut::new(&mut memdb, &mut root);
for i in d.iter() {
t.insert(&i.0, &i.1);
}
})
}
#[bench]
fn triehash_insertions_six_high(b: &mut Bencher) {
let mut d: Vec<(Bytes, Bytes)> = Vec::new();
let mut seed = H256::new();
for _ in 0..1000 {
let k = random_bytes(6, 0, &mut seed);
let v = random_value(&mut seed);
d.push((k, v))
}
b.iter(||{
trie_root(d.clone());
})
}
#[bench]
fn trie_insertions_six_mid(b: &mut Bencher) {
let alphabet = b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_";
let mut d: Vec<(Bytes, Bytes)> = Vec::new();
let mut seed = H256::new();
for _ in 0..1000 {
let k = random_word(alphabet, 6, 0, &mut seed);
let v = random_value(&mut seed);
d.push((k, v))
}
b.iter(||{
let mut memdb = MemoryDB::new();
let mut root = H256::new();
let mut t = TrieDBMut::new(&mut memdb, &mut root);
for i in d.iter() {
t.insert(&i.0, &i.1);
}
debug!("hash_count={:?}", t.hash_count);
})
}
#[bench]
fn triehash_insertions_six_mid(b: &mut Bencher) {
let alphabet = b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_";
let mut d: Vec<(Bytes, Bytes)> = Vec::new();
let mut seed = H256::new();
for _ in 0..1000 {
let k = random_word(alphabet, 6, 0, &mut seed);
let v = random_value(&mut seed);
d.push((k, v))
}
b.iter(||{
trie_root(d.clone());
})
}
#[bench]
fn trie_insertions_random_mid(b: &mut Bencher) {
let alphabet = b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_";
let mut d: Vec<(Bytes, Bytes)> = Vec::new();
let mut seed = H256::new();
for _ in 0..1000 {
let k = random_word(alphabet, 1, 5, &mut seed);
let v = random_value(&mut seed);
d.push((k, v))
}
b.iter(||{
let mut memdb = MemoryDB::new();
let mut root = H256::new();
let mut t = TrieDBMut::new(&mut memdb, &mut root);
for i in d.iter() {
t.insert(&i.0, &i.1);
}
})
}
#[bench]
fn triehash_insertions_random_mid(b: &mut Bencher) {
let alphabet = b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_";
let mut d: Vec<(Bytes, Bytes)> = Vec::new();
let mut seed = H256::new();
for _ in 0..1000 {
let k = random_word(alphabet, 1, 5, &mut seed);
let v = random_value(&mut seed);
d.push((k, v))
}
b.iter(||{
trie_root(d.clone());
})
}
#[bench]
fn trie_insertions_six_low(b: &mut Bencher) {
let alphabet = b"abcdef";
let mut d: Vec<(Bytes, Bytes)> = Vec::new();
let mut seed = H256::new();
for _ in 0..1000 {
let k = random_word(alphabet, 6, 0, &mut seed);
let v = random_value(&mut seed);
d.push((k, v))
}
b.iter(||{
let mut memdb = MemoryDB::new();
let mut root = H256::new();
let mut t = TrieDBMut::new(&mut memdb, &mut root);
for i in d.iter() {
t.insert(&i.0, &i.1);
}
})
}
#[bench]
fn triehash_insertions_six_low(b: &mut Bencher) {
let alphabet = b"abcdef";
let mut d: Vec<(Bytes, Bytes)> = Vec::new();
let mut seed = H256::new();
for _ in 0..1000 {
let k = random_word(alphabet, 6, 0, &mut seed);
let v = random_value(&mut seed);
d.push((k, v))
}
b.iter(||{
trie_root(d.clone());
})
}
#[bench]
fn sha3x10000(b: &mut Bencher) {
b.iter(||{
let mut seed = H256::new();
for _ in 0..10000 {
seed = seed.sha3()
}
})
}

10
build.rs Normal file
View File

@@ -0,0 +1,10 @@
// build.rs
// Bring in a dependency on an externally maintained `gcc` package which manages
// invoking the C compiler.
extern crate gcc;
fn main() {
gcc::compile_library("libtinykeccak.a", &["src/tinykeccak.c"]);
}

8
json-tests/Cargo.toml Normal file
View File

@@ -0,0 +1,8 @@
[package]
name = "json-tests"
version = "0.1.0"
authors = ["debris <marek.kotewicz@gmail.com>"]
[dependencies]
rustc-serialize = "0.3"
glob = "*"

15
json-tests/README.md Normal file
View File

@@ -0,0 +1,15 @@
# How to write a JSON test file?
Because it is hard to write generic JSON test files, each subdirectory should follow its own
convention. However, all JSON files within the same directory should be consistent.
### Test files should always contain a single test with input and output.
```json
{
"input": ...,
"output": ...
}
```
As a reference, please use the trie tests.
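A loader for these fixtures can stay small. The sketch below is hypothetical: it assumes only the `glob` and `rustc-serialize` dependencies declared in `json-tests/Cargo.toml` above, and the function name and layout are illustrative rather than the crate's actual API.
```rust
// Hypothetical loader sketch: collect (input, output) pairs from every JSON
// fixture matching a glob pattern, e.g. "json/trie/*.json". Uses only the
// glob and rustc-serialize crates listed in json-tests/Cargo.toml; the real
// harness in this change may be organized differently.
extern crate glob;
extern crate rustc_serialize;

use std::fs::File;
use std::io::Read;
use rustc_serialize::json::Json;

fn load_tests(pattern: &str) -> Vec<(Json, Json)> {
    let mut tests = Vec::new();
    for path in glob::glob(pattern).unwrap().filter_map(Result::ok) {
        let mut raw = String::new();
        File::open(&path).unwrap().read_to_string(&mut raw).unwrap();
        let json = Json::from_str(&raw).unwrap();
        // Every fixture is a single test object with "input" and "output".
        tests.push((json["input"].clone(), json["output"].clone()));
    }
    tests
}
```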

BIN
json-tests/json/.DS_Store vendored Normal file

Binary file not shown.

View File

@@ -0,0 +1,39 @@
# Rlp tests guideline
Rlp can be tested in various ways: it can encode/decode a single value or an array of values. Let's start with encoding.
Each operation must have an `operation` field:
- `operation` - one of `append`, `append_list`, `append_empty` or `append_raw`
`append` and `append_raw` must additionally define a `value` field:
- `value` - data
`append_raw` and `append_list` also require a `len` field:
- `len` - integer
### Encoding Test Example
```json
{
"input":
[
{
"operation": "append_list",
"len": 2
},
{
"operation": "append",
"value": "cat"
},
{
"operation": "append",
"value": "dog"
}
],
"output": "0xc88363617183646f67"
}
```
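Replayed against `ethcore_util`, the example above corresponds roughly to the sketch below; `RlpStream::new`, `append_list` and `out` mirror the usage in `benches/rlp.rs` in this change, while appending `&str` values directly is an assumption about the available `Encodable` impls.
```rust
// Illustrative replay of the "cat"/"dog" encoding test above.
extern crate ethcore_util;
use ethcore_util::rlp::RlpStream;

fn encode_example() {
    let mut stream = RlpStream::new();
    stream.append_list(2);    // {"operation": "append_list", "len": 2}
    stream.append(&"cat");    // {"operation": "append", "value": "cat"} (assumes &str is appendable)
    stream.append(&"dog");    // {"operation": "append", "value": "dog"}
    let _out = stream.out();  // expected bytes: 0xc88363617183646f67
}
```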

View File

@@ -0,0 +1,10 @@
{
"input":
[
{
"operation": "append",
"value": "\u0000"
}
],
"output": "0x00"
}

View File

@@ -0,0 +1,10 @@
{
"input":
[
{
"operation": "append",
"value": "\u0001"
}
],
"output": "0x01"
}

View File

@@ -0,0 +1,10 @@
{
"input":
[
{
"operation": "append",
"value": "\u007f"
}
],
"output": "0x7f"
}

View File

@@ -0,0 +1,10 @@
{
"input":
[
{
"operation": "append",
"value": "\u0000"
}
],
"output": "0x00"
}

View File

@@ -0,0 +1,9 @@
{
"input":
[
{
"operation": "append_empty"
}
],
"output": "0x80"
}

View File

@@ -0,0 +1,38 @@
{
"input":
[
{
"operation": "append_list",
"len": 3
},
{
"operation": "append_list",
"len": 0
},
{
"operation": "append_list",
"len": 1
},
{
"operation": "append_list",
"len": 0
},
{
"operation": "append_list",
"len": 2
},
{
"operation": "append_list",
"len": 0
},
{
"operation": "append_list",
"len": 1
},
{
"operation": "append_list",
"len": 0
}
],
"output": "0xc7c0c1c0c3c0c1c0"
}

View File

@@ -0,0 +1,10 @@
{
"input":
[
{
"operation": "append",
"value": "0x0400"
}
],
"output": "0x820400"
}

View File

@@ -0,0 +1,22 @@
{
"input":
[
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": ""
},
{
"operation": "append",
"value": ""
},
{
"operation": "append",
"value": ""
}
],
"output": "0xc3808080"
}

View File

@@ -0,0 +1,19 @@
{
"input":
[
{
"operation": "append_list",
"len": 3
},
{
"operation": "append_empty"
},
{
"operation": "append_empty"
},
{
"operation": "append_empty"
}
],
"output": "0xc3808080"
}

View File

@@ -0,0 +1,521 @@
{
"input": [
{
"operation": "append_list",
"len": 32
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
},
{
"operation": "append_list",
"len": 3
},
{
"operation": "append",
"value": "asdf"
},
{
"operation": "append",
"value": "qwer"
},
{
"operation": "append",
"value": "zxcv"
}],
"output": "0xf90200cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376cf84617364668471776572847a786376"
}

View File

@@ -0,0 +1,10 @@
{
"input":
[
{
"operation": "append",
"value": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur mauris magna, suscipit sed vehicula non, iaculis faucibus tortor. Proin suscipit ultricies malesuada. Duis tortor elit, dictum quis tristique eu, ultrices at risus. Morbi a est imperdiet mi ullamcorper aliquet suscipit nec lorem. Aenean quis leo mollis, vulputate elit varius, consequat enim. Nulla ultrices turpis justo, et posuere urna consectetur nec. Proin non convallis metus. Donec tempor ipsum in mauris congue sollicitudin. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Suspendisse convallis sem vel massa faucibus, eget lacinia lacus tempor. Nulla quis ultricies purus. Proin auctor rhoncus nibh condimentum mollis. Aliquam consequat enim at metus luctus, a eleifend purus egestas. Curabitur at nibh metus. Nam bibendum, neque at auctor tristique, lorem libero aliquet arcu, non interdum tellus lectus sit amet eros. Cras rhoncus, metus ac ornare cursus, dolor justo ultrices metus, at ullamcorper volutpat"
}
],
"output": "0xb904004c6f72656d20697073756d20646f6c6f722073697420616d65742c20636f6e73656374657475722061646970697363696e6720656c69742e20437572616269747572206d6175726973206d61676e612c20737573636970697420736564207665686963756c61206e6f6e2c20696163756c697320666175636962757320746f72746f722e2050726f696e20737573636970697420756c74726963696573206d616c6573756164612e204475697320746f72746f7220656c69742c2064696374756d2071756973207472697374697175652065752c20756c7472696365732061742072697375732e204d6f72626920612065737420696d70657264696574206d6920756c6c616d636f7270657220616c6971756574207375736369706974206e6563206c6f72656d2e2041656e65616e2071756973206c656f206d6f6c6c69732c2076756c70757461746520656c6974207661726975732c20636f6e73657175617420656e696d2e204e756c6c6120756c74726963657320747572706973206a7573746f2c20657420706f73756572652075726e6120636f6e7365637465747572206e65632e2050726f696e206e6f6e20636f6e76616c6c6973206d657475732e20446f6e65632074656d706f7220697073756d20696e206d617572697320636f6e67756520736f6c6c696369747564696e2e20566573746962756c756d20616e746520697073756d207072696d697320696e206661756369627573206f726369206c756374757320657420756c74726963657320706f737565726520637562696c69612043757261653b2053757370656e646973736520636f6e76616c6c69732073656d2076656c206d617373612066617563696275732c2065676574206c6163696e6961206c616375732074656d706f722e204e756c6c61207175697320756c747269636965732070757275732e2050726f696e20617563746f722072686f6e637573206e69626820636f6e64696d656e74756d206d6f6c6c69732e20416c697175616d20636f6e73657175617420656e696d206174206d65747573206c75637475732c206120656c656966656e6420707572757320656765737461732e20437572616269747572206174206e696268206d657475732e204e616d20626962656e64756d2c206e6571756520617420617563746f72207472697374697175652c206c6f72656d206c696265726f20616c697175657420617263752c206e6f6e20696e74657264756d2074656c6c7573206c65637475732073697420616d65742065726f732e20437261732072686f6e6375732c206d65747573206163206f726e617265206375727375732c20646f6c6f72206a7573746f20756c747269636573206d657475732c20617420756c6c616d636f7270657220766f6c7574706174"
}

View File

@@ -0,0 +1,35 @@
# Trie tests guideline
Trie test input is an array of operations. Each operation must have 2 fields:
- `operation` - string, either `insert` or `remove`
- `key` - string, or hex value prefixed with `0x`
And an optional field:
- `value` - used by the `insert` operation
### Example
```json
{
"input":
[
{
"operation": "insert",
"key": "world",
"value": "hello"
},
{
"operation": "insert",
"key": "0x1234",
"value": "ooooops"
},
{
"operation": "remove",
"key": "0x1234"
}
],
"output": "0x5991bb8c6514148a29db676a14ac506cd2cd5775ace63c30a4fe457715e9ac84"
}
```
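
These fixtures are consumed by the `json-tests` crate added in this change. As a rough sketch (the `./json/trie/*.json` glob is an assumed path, not necessarily the repository's actual layout), the operations can be replayed against a trie implementation like so:

```rust
// Illustrative only: replay every trie fixture matched by an assumed glob path.
extern crate json_tests;

use json_tests::execute_tests_from_directory;
use json_tests::trie::{TrieTest, Operation};

fn replay_trie_fixtures() {
	execute_tests_from_directory::<TrieTest, _>("./json/trie/*.json", &mut |file, input, output| {
		// `input` is a Vec<Operation>, `output` the expected root hash bytes.
		for op in input {
			match op {
				Operation::Insert(key, value) => { /* insert into the trie under test */ let _ = (key, value); }
				Operation::Remove(key) => { /* remove from the trie under test */ let _ = key; }
			}
		}
		// compare the trie root computed for `file` against `output` here
		let _ = (file, output);
	});
}
```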

View File

@ -0,0 +1,11 @@
{
"input":
[
{
"operation": "insert",
"key": "A",
"value": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
],
"output": "0xd23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab"
}

View File

@ -0,0 +1,229 @@
{
"input": [
{
"operation": "insert",
"key": "0x04110d816c380812a427968ece99b1c963dfbce6",
"value": "something"
},
{
"operation": "insert",
"key": "0x095e7baea6a6c7c4c2dfeb977efac326af552d87",
"value": "something"
},
{
"operation": "insert",
"key": "0x0a517d755cebbf66312b30fff713666a9cb917e0",
"value": "something"
},
{
"operation": "insert",
"key": "0x24dd378f51adc67a50e339e8031fe9bd4aafab36",
"value": "something"
},
{
"operation": "insert",
"key": "0x293f982d000532a7861ab122bdc4bbfd26bf9030",
"value": "something"
},
{
"operation": "insert",
"key": "0x2cf5732f017b0cf1b1f13a1478e10239716bf6b5",
"value": "something"
},
{
"operation": "insert",
"key": "0x31c640b92c21a1f1465c91070b4b3b4d6854195f",
"value": "something"
},
{
"operation": "insert",
"key": "0x37f998764813b136ddf5a754f34063fd03065e36",
"value": "something"
},
{
"operation": "insert",
"key": "0x37fa399a749c121f8a15ce77e3d9f9bec8020d7a",
"value": "something"
},
{
"operation": "insert",
"key": "0x4f36659fa632310b6ec438dea4085b522a2dd077",
"value": "something"
},
{
"operation": "insert",
"key": "0x62c01474f089b07dae603491675dc5b5748f7049",
"value": "something"
},
{
"operation": "insert",
"key": "0x729af7294be595a0efd7d891c9e51f89c07950c7",
"value": "something"
},
{
"operation": "insert",
"key": "0x83e3e5a16d3b696a0314b30b2534804dd5e11197",
"value": "something"
},
{
"operation": "insert",
"key": "0x8703df2417e0d7c59d063caa9583cb10a4d20532",
"value": "something"
},
{
"operation": "insert",
"key": "0x8dffcd74e5b5923512916c6a64b502689cfa65e1",
"value": "something"
},
{
"operation": "insert",
"key": "0x95a4d7cccb5204733874fa87285a176fe1e9e240",
"value": "something"
},
{
"operation": "insert",
"key": "0x99b2fcba8120bedd048fe79f5262a6690ed38c39",
"value": "something"
},
{
"operation": "insert",
"key": "0xa4202b8b8afd5354e3e40a219bdc17f6001bf2cf",
"value": "something"
},
{
"operation": "insert",
"key": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
"value": "something"
},
{
"operation": "insert",
"key": "0xa9647f4a0a14042d91dc33c0328030a7157c93ae",
"value": "something"
},
{
"operation": "insert",
"key": "0xaa6cffe5185732689c18f37a7f86170cb7304c2a",
"value": "something"
},
{
"operation": "insert",
"key": "0xaae4a2e3c51c04606dcb3723456e58f3ed214f45",
"value": "something"
},
{
"operation": "insert",
"key": "0xc37a43e940dfb5baf581a0b82b351d48305fc885",
"value": "something"
},
{
"operation": "insert",
"key": "0xd2571607e241ecf590ed94b12d87c94babe36db6",
"value": "something"
},
{
"operation": "insert",
"key": "0xf735071cbee190d76b704ce68384fc21e389fbe7",
"value": "something"
},
{
"operation": "remove",
"key": "0x04110d816c380812a427968ece99b1c963dfbce6"
},
{
"operation": "remove",
"key": "0x095e7baea6a6c7c4c2dfeb977efac326af552d87"
},
{
"operation": "remove",
"key": "0x0a517d755cebbf66312b30fff713666a9cb917e0"
},
{
"operation": "remove",
"key": "0x24dd378f51adc67a50e339e8031fe9bd4aafab36"
},
{
"operation": "remove",
"key": "0x293f982d000532a7861ab122bdc4bbfd26bf9030"
},
{
"operation": "remove",
"key": "0x2cf5732f017b0cf1b1f13a1478e10239716bf6b5"
},
{
"operation": "remove",
"key": "0x31c640b92c21a1f1465c91070b4b3b4d6854195f"
},
{
"operation": "remove",
"key": "0x37f998764813b136ddf5a754f34063fd03065e36"
},
{
"operation": "remove",
"key": "0x37fa399a749c121f8a15ce77e3d9f9bec8020d7a"
},
{
"operation": "remove",
"key": "0x4f36659fa632310b6ec438dea4085b522a2dd077"
},
{
"operation": "remove",
"key": "0x62c01474f089b07dae603491675dc5b5748f7049"
},
{
"operation": "remove",
"key": "0x729af7294be595a0efd7d891c9e51f89c07950c7"
},
{
"operation": "remove",
"key": "0x83e3e5a16d3b696a0314b30b2534804dd5e11197"
},
{
"operation": "remove",
"key": "0x8703df2417e0d7c59d063caa9583cb10a4d20532"
},
{
"operation": "remove",
"key": "0x8dffcd74e5b5923512916c6a64b502689cfa65e1"
},
{
"operation": "remove",
"key": "0x95a4d7cccb5204733874fa87285a176fe1e9e240"
},
{
"operation": "remove",
"key": "0x99b2fcba8120bedd048fe79f5262a6690ed38c39"
},
{
"operation": "remove",
"key": "0xa4202b8b8afd5354e3e40a219bdc17f6001bf2cf"
},
{
"operation": "remove",
"key": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b"
},
{
"operation": "remove",
"key": "0xa9647f4a0a14042d91dc33c0328030a7157c93ae"
},
{
"operation": "remove",
"key": "0xaa6cffe5185732689c18f37a7f86170cb7304c2a"
},
{
"operation": "remove",
"key": "0xaae4a2e3c51c04606dcb3723456e58f3ed214f45"
},
{
"operation": "remove",
"key": "0xc37a43e940dfb5baf581a0b82b351d48305fc885"
},
{
"operation": "remove",
"key": "0xd2571607e241ecf590ed94b12d87c94babe36db6"
},
{
"operation": "remove",
"key": "0xf735071cbee190d76b704ce68384fc21e389fbe7"
}],
"output": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
}

View File

@ -0,0 +1,21 @@
{
"input":
[
{
"operation": "insert",
"key": "doe",
"value": "reindeer"
},
{
"operation": "insert",
"key": "dogglesworth",
"value": "cat"
},
{
"operation": "insert",
"key": "dog",
"value": "puppy"
}
],
"output": "0x8aad789dff2f538bca5d8ea56e8abe10f4c7ba3a5dea95fea4cd6e7c3a1168d3"
}

View File

@ -0,0 +1,4 @@
{
"input": [],
"output": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
}

View File

@ -0,0 +1,44 @@
{
"input":
[
{
"operation": "insert",
"key": "do",
"value": "verb"
},
{
"operation": "insert",
"key": "ether",
"value": "wookiedoo"
},
{
"operation": "insert",
"key": "horse",
"value": "stallion"
},
{
"operation": "insert",
"key": "shaman",
"value": "horse"
},
{
"operation": "insert",
"key": "doge",
"value": "coin"
},
{
"operation": "remove",
"key": "ether"
},
{
"operation": "insert",
"key": "dog",
"value": "puppy"
},
{
"operation": "remove",
"key": "shaman"
}
],
"output": "0x5991bb8c6514148a29db676a14ac506cd2cd5775ace63c30a4fe457715e9ac84"
}

View File

@ -0,0 +1,16 @@
{
"input":
[
{
"operation": "insert",
"key": "foo",
"value": "bar"
},
{
"operation": "insert",
"key": "food",
"value": "bass"
}
],
"output": "0x17beaa1648bafa633cda809c90c04af50fc8aed3cb40d16efbddee6fdf63c4c3"
}

View File

@ -0,0 +1,58 @@
{
"input": [
{
"operation": "insert",
"key": "0x0000000000000000000000000000000000000000000000000000000000000045",
"value": "0x22b224a1420a802ab51d326e29fa98e34c4f24ea"
},
{
"operation": "insert",
"key": "0x0000000000000000000000000000000000000000000000000000000000000046",
"value": "0x67706c2076330000000000000000000000000000000000000000000000000000"
},
{
"operation": "insert",
"key": "0x0000000000000000000000000000000000000000000000000000001234567890",
"value": "0x697c7b8c961b56f675d570498424ac8de1a918f6"
},
{
"operation": "insert",
"key": "0x000000000000000000000000697c7b8c961b56f675d570498424ac8de1a918f6",
"value": "0x1234567890"
},
{
"operation": "insert",
"key": "0x0000000000000000000000007ef9e639e2733cb34e4dfc576d4b23f72db776b2",
"value": "0x4655474156000000000000000000000000000000000000000000000000000000"
},
{
"operation": "insert",
"key": "0x000000000000000000000000ec4f34c97e43fbb2816cfd95e388353c7181dab1",
"value": "0x4e616d6552656700000000000000000000000000000000000000000000000000"
},
{
"operation": "insert",
"key": "0x4655474156000000000000000000000000000000000000000000000000000000",
"value": "0x7ef9e639e2733cb34e4dfc576d4b23f72db776b2"
},
{
"operation": "insert",
"key": "0x4e616d6552656700000000000000000000000000000000000000000000000000",
"value": "0xec4f34c97e43fbb2816cfd95e388353c7181dab1"
},
{
"operation": "remove",
"key": "0x0000000000000000000000000000000000000000000000000000001234567890"
},
{
"operation": "insert",
"key": "0x000000000000000000000000697c7b8c961b56f675d570498424ac8de1a918f6",
"value": "0x6f6f6f6820736f2067726561742c207265616c6c6c793f000000000000000000"
},
{
"operation": "insert",
"key": "0x6f6f6f6820736f2067726561742c207265616c6c6c793f000000000000000000",
"value": "0x697c7b8c961b56f675d570498424ac8de1a918f6"
}],
"output": "0x9f6221ebb8efe7cff60a716ecb886e67dd042014be444669f0159d8e68b42100"
}

66
json-tests/src/lib.rs Normal file
View File

@ -0,0 +1,66 @@
extern crate rustc_serialize;
extern crate glob;
use std::str::from_utf8;
use std::path::*;
use std::io::prelude::*;
use std::fs::File;
use glob::glob;
use rustc_serialize::*;
mod util;
pub mod trie;
pub mod rlp;
pub trait JsonTest: Sized {
type Input;
type Output;
fn new(data: &[u8]) -> Self;
fn input(&self) -> Self::Input;
fn output(&self) -> Self::Output;
}
pub struct JsonLoader {
json: json::Json
}
impl JsonTest for JsonLoader {
type Input = json::Json;
type Output = json::Json;
fn new(data: &[u8]) -> Self {
JsonLoader {
json: json::Json::from_str(from_utf8(data).unwrap()).unwrap()
}
}
fn input(&self) -> Self::Input {
self.json.as_object().unwrap()["input"].clone()
}
fn output(&self) -> Self::Output {
self.json.as_object().unwrap()["output"].clone()
}
}
pub fn execute_test<T, F>(data: &[u8], f: &mut F) where T: JsonTest, F: FnMut(T::Input, T::Output) {
let test = T::new(data);
f(test.input(), test.output())
}
pub fn execute_test_from_file<T, F>(path: &Path, f: &mut F) where T: JsonTest, F: FnMut(T::Input, T::Output) {
let mut file = File::open(path).unwrap();
let mut buffer = vec![];
let _ = file.read_to_end(&mut buffer);
let test = T::new(&buffer);
f(test.input(), test.output())
}
pub fn execute_tests_from_directory<T, F>(pattern: &str, f: &mut F) where T: JsonTest, F: FnMut(String, T::Input, T::Output) {
for path in glob(pattern).unwrap().filter_map(Result::ok) {
execute_test_from_file::<T, _>(&path, &mut | input, output | {
f(path.to_str().unwrap().to_string(), input, output);
});
}
}
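
The harness above can also be driven one fixture at a time; a minimal sketch using `JsonLoader` with an invented inline fixture (not one of the real test files) looks like this:

```rust
// Minimal sketch of the json-tests harness API; the JSON literal is invented for illustration.
extern crate json_tests;

use json_tests::{execute_test, JsonLoader};

fn inspect_single_fixture() {
	let data = br#"{ "input": [], "output": "0x00" }"#;
	execute_test::<JsonLoader, _>(data, &mut |input, output| {
		// `input` and `output` arrive as raw `rustc_serialize::json::Json` values.
		println!("input: {:?}, output: {:?}", input, output);
	});
}
```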

52
json-tests/src/rlp.rs Normal file
View File

@ -0,0 +1,52 @@
//! json rlp tests
use rustc_serialize::*;
use super::{JsonTest, JsonLoader};
use util::*;
pub enum Operation {
Append(Vec<u8>),
AppendList(usize),
AppendRaw(Vec<u8>, usize),
AppendEmpty
}
impl Into<Operation> for json::Json {
fn into(self) -> Operation {
let obj = self.as_object().unwrap();
match obj["operation"].as_string().unwrap().as_ref() {
"append" => Operation::Append(hex_or_string(obj["value"].as_string().unwrap())),
"append_list" => Operation::AppendList(obj["len"].as_u64().unwrap() as usize),
"append_raw" => Operation::AppendRaw(hex_or_string(obj["value"].as_string().unwrap()), obj["len"].as_u64().unwrap() as usize),
"append_empty" => Operation::AppendEmpty,
other => { panic!("Unsupported operation: {}", other); }
}
}
}
pub struct RlpStreamTest {
loader: JsonLoader
}
impl JsonTest for RlpStreamTest {
type Input = Vec<Operation>;
type Output = Vec<u8>;
fn new(data: &[u8]) -> Self {
RlpStreamTest {
loader: JsonLoader::new(data)
}
}
fn input(&self) -> Self::Input {
self.loader.input().as_array().unwrap()
.iter()
.cloned()
.map(|i| i.into())
.collect()
}
fn output(&self) -> Self::Output {
hex_or_string(self.loader.output().as_string().unwrap())
}
}
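
For reference, the RLP fixtures follow the same `input`/`output` shape as the trie fixtures. A hypothetical input exercising each operation the decoder above accepts could look like the following; the `output` value here is only a placeholder, not a real encoding:

```json
{
	"input": [
		{ "operation": "append", "value": "cat" },
		{ "operation": "append_list", "len": 2 },
		{ "operation": "append_raw", "value": "0xc0", "len": 1 },
		{ "operation": "append_empty" }
	],
	"output": "0x..."
}
```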

89
json-tests/src/trie.rs Normal file
View File

@ -0,0 +1,89 @@
//! json trie tests
use std::collections::HashMap;
use rustc_serialize::*;
use super::{JsonTest, JsonLoader};
use util::*;
#[derive(RustcDecodable)]
struct RawOperation {
operation: String,
key: String,
value: Option<String>
}
pub enum Operation {
Insert(Vec<u8>, Vec<u8>),
Remove(Vec<u8>)
}
impl Into<Operation> for RawOperation {
fn into(self) -> Operation {
match self.operation.as_ref() {
"insert" => Operation::Insert(hex_or_string(&self.key), hex_or_string(&self.value.unwrap())),
"remove" => Operation::Remove(hex_or_string(&self.key)),
other => panic!("invalid operation type: {}", other)
}
}
}
pub struct TrieTest {
loader: JsonLoader
}
impl JsonTest for TrieTest {
type Input = Vec<Operation>;
type Output = Vec<u8>;
fn new(data: &[u8]) -> Self {
TrieTest {
loader: JsonLoader::new(data)
}
}
fn input(&self) -> Self::Input {
let mut decoder = json::Decoder::new(self.loader.input());
let raw: Vec<RawOperation> = Decodable::decode(&mut decoder).unwrap();
raw.into_iter()
.map(|i| i.into())
.collect()
}
fn output(&self) -> Self::Output {
hex_or_string(self.loader.output().as_string().unwrap())
}
}
pub struct TriehashTest {
trietest: TrieTest
}
impl JsonTest for TriehashTest {
type Input = Vec<(Vec<u8>, Vec<u8>)>;
type Output = Vec<u8>;
fn new(data: &[u8]) -> Self {
TriehashTest {
trietest: TrieTest::new(data)
}
}
fn input(&self) -> Self::Input {
self.trietest.input()
.into_iter()
.fold(HashMap::new(), | mut map, o | {
match o {
Operation::Insert(k, v) => map.insert(k, v),
Operation::Remove(k) => map.remove(&k)
};
map
})
.into_iter()
.map(|p| { p })
.collect()
}
fn output(&self) -> Self::Output {
self.trietest.output()
}
}

8
json-tests/src/util.rs Normal file
View File

@ -0,0 +1,8 @@
use rustc_serialize::hex::FromHex;
pub fn hex_or_string(s: &str) -> Vec<u8> {
match s.starts_with("0x") {
true => s[2..].from_hex().unwrap(),
false => From::from(s)
}
}

1
rustfmt.toml Normal file
View File

@ -0,0 +1 @@
hard_tabs = true

465
src/bytes.rs Normal file
View File

@ -0,0 +1,465 @@
//! Unified interfaces for bytes operations on basic types
//!
//! # Examples
//! ```rust
//! extern crate ethcore_util as util;
//!
//! fn bytes_convertable() {
//! use util::bytes::BytesConvertable;
//!
//! let arr = [0; 5];
//! let slice: &[u8] = arr.bytes();
//! }
//!
//! fn to_bytes() {
//! use util::bytes::ToBytes;
//!
//! let a: Vec<u8> = "hello_world".to_bytes();
//! let b: Vec<u8> = 400u32.to_bytes();
//! let c: Vec<u8> = 0xffffffffffffffffu64.to_bytes();
//! }
//!
//! fn from_bytes() {
//! use util::bytes::FromBytes;
//!
//! let a = String::from_bytes(&[b'd', b'o', b'g']);
//! let b = u16::from_bytes(&[0xfa]);
//! let c = u64::from_bytes(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]);
//! }
//!
//! fn main() {
//! bytes_convertable();
//! to_bytes();
//! from_bytes();
//! }
//! ```
use std::mem;
use std::fmt;
use std::slice;
use std::cmp::Ordering;
use std::error::Error as StdError;
use std::ops::{Deref, DerefMut};
use uint::{Uint, U128, U256};
use hash::FixedHash;
pub struct PrettySlice<'a> (&'a [u8]);
impl<'a> fmt::Debug for PrettySlice<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for i in 0..self.0.len() {
match i > 0 {
true => { try!(write!(f, "·{:02x}", self.0[i])); },
false => { try!(write!(f, "{:02x}", self.0[i])); },
}
}
Ok(())
}
}
impl<'a> fmt::Display for PrettySlice<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for i in 0..self.0.len() {
try!(write!(f, "{:02x}", self.0[i]));
}
Ok(())
}
}
pub trait ToPretty {
fn pretty(&self) -> PrettySlice;
fn to_hex(&self) -> String {
format!("{}", self.pretty())
}
}
impl<'a> ToPretty for &'a [u8] {
fn pretty(&self) -> PrettySlice {
PrettySlice(self)
}
}
impl<'a> ToPretty for &'a Bytes {
fn pretty(&self) -> PrettySlice {
PrettySlice(self.bytes())
}
}
impl ToPretty for Bytes {
fn pretty(&self) -> PrettySlice {
PrettySlice(self.bytes())
}
}
pub enum BytesRef<'a> {
Flexible(&'a mut Bytes),
Fixed(&'a mut [u8])
}
impl<'a> Deref for BytesRef<'a> {
type Target = [u8];
fn deref(&self) -> &[u8] {
match self {
&BytesRef::Flexible(ref bytes) => bytes,
&BytesRef::Fixed(ref bytes) => bytes
}
}
}
impl <'a> DerefMut for BytesRef<'a> {
fn deref_mut(&mut self) -> &mut [u8] {
match self {
&mut BytesRef::Flexible(ref mut bytes) => bytes,
&mut BytesRef::Fixed(ref mut bytes) => bytes
}
}
}
/// Vector of bytes
pub type Bytes = Vec<u8>;
/// Slice of bytes to underlying memory
pub trait BytesConvertable {
// TODO: rename to as_slice
fn bytes(&self) -> &[u8];
fn as_slice(&self) -> &[u8] { self.bytes() }
fn to_bytes(&self) -> Bytes { self.as_slice().to_vec() }
}
impl<'a> BytesConvertable for &'a [u8] {
fn bytes(&self) -> &[u8] { self }
}
impl BytesConvertable for Vec<u8> {
fn bytes(&self) -> &[u8] { self }
}
macro_rules! impl_bytes_convertable_for_array {
($zero: expr) => ();
($len: expr, $($idx: expr),*) => {
impl BytesConvertable for [u8; $len] {
fn bytes(&self) -> &[u8] { self }
}
impl_bytes_convertable_for_array! { $($idx),* }
}
}
// -1 at the end is not expanded
impl_bytes_convertable_for_array! {
32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1
}
#[test]
fn bytes_convertable() {
assert_eq!(vec![0x12u8, 0x34].bytes(), &[0x12u8, 0x34]);
assert_eq!([0u8; 0].bytes(), &[]);
}
/// Converts given type to its shortest representation in bytes
///
/// TODO: optimise some conversions
pub trait ToBytes {
fn to_bytes(&self) -> Vec<u8>;
fn to_bytes_len(&self) -> usize { self.to_bytes().len() }
fn first_byte(&self) -> Option<u8> { self.to_bytes().first().map(|&x| { x })}
}
impl <'a> ToBytes for &'a str {
fn to_bytes(&self) -> Vec<u8> {
From::from(*self)
}
fn to_bytes_len(&self) -> usize { self.len() }
}
impl ToBytes for String {
fn to_bytes(&self) -> Vec<u8> {
let s: &str = self.as_ref();
From::from(s)
}
fn to_bytes_len(&self) -> usize { self.len() }
}
impl ToBytes for u64 {
fn to_bytes(&self) -> Vec<u8> {
let mut res= vec![];
let count = self.to_bytes_len();
res.reserve(count);
for i in 0..count {
let j = count - 1 - i;
res.push((*self >> (j * 8)) as u8);
}
res
}
fn to_bytes_len(&self) -> usize { 8 - self.leading_zeros() as usize / 8 }
}
impl ToBytes for bool {
fn to_bytes(&self) -> Vec<u8> {
vec![ if *self { 1u8 } else { 0u8 } ]
}
fn to_bytes_len(&self) -> usize { 1 }
}
macro_rules! impl_map_to_bytes {
($from: ident, $to: ty) => {
impl ToBytes for $from {
fn to_bytes(&self) -> Vec<u8> { (*self as $to).to_bytes() }
fn to_bytes_len(&self) -> usize { (*self as $to).to_bytes_len() }
}
}
}
impl_map_to_bytes!(usize, u64);
impl_map_to_bytes!(u16, u64);
impl_map_to_bytes!(u32, u64);
macro_rules! impl_uint_to_bytes {
($name: ident) => {
impl ToBytes for $name {
fn to_bytes(&self) -> Vec<u8> {
let mut res= vec![];
let count = self.to_bytes_len();
res.reserve(count);
for i in 0..count {
let j = count - 1 - i;
res.push(self.byte(j));
}
res
}
fn to_bytes_len(&self) -> usize { (self.bits() + 7) / 8 }
}
}
}
impl_uint_to_bytes!(U256);
impl_uint_to_bytes!(U128);
impl <T>ToBytes for T where T: FixedHash {
fn to_bytes(&self) -> Vec<u8> {
let mut res: Vec<u8> = vec![];
res.reserve(T::size());
unsafe {
use std::ptr;
ptr::copy(self.bytes().as_ptr(), res.as_mut_ptr(), T::size());
res.set_len(T::size());
}
res
}
}
/// Error returned when FromBytes conversion goes wrong
#[derive(Debug, PartialEq, Eq)]
pub enum FromBytesError {
DataIsTooShort,
DataIsTooLong
}
impl StdError for FromBytesError {
fn description(&self) -> &str { "from_bytes error" }
}
impl fmt::Display for FromBytesError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&self, f)
}
}
/// Alias for the result of FromBytes trait
pub type FromBytesResult<T> = Result<T, FromBytesError>;
/// Converts to given type from its bytes representation
///
/// TODO: check size of bytes before conversion and return appropriate error
pub trait FromBytes: Sized {
fn from_bytes(bytes: &[u8]) -> FromBytesResult<Self>;
}
impl FromBytes for String {
fn from_bytes(bytes: &[u8]) -> FromBytesResult<String> {
Ok(::std::str::from_utf8(bytes).unwrap().to_string())
}
}
macro_rules! impl_uint_from_bytes {
($to: ident) => {
impl FromBytes for $to {
fn from_bytes(bytes: &[u8]) -> FromBytesResult<$to> {
match bytes.len() {
0 => Ok(0),
l if l <= mem::size_of::<$to>() => {
let mut res = 0 as $to;
for i in 0..l {
let shift = (l - 1 - i) * 8;
res = res + ((bytes[i] as $to) << shift);
}
Ok(res)
}
_ => Err(FromBytesError::DataIsTooLong)
}
}
}
}
}
impl FromBytes for bool {
fn from_bytes(bytes: &[u8]) -> FromBytesResult<bool> {
match bytes.len() {
0 => Ok(false),
1 => Ok(bytes[0] != 0),
_ => Err(FromBytesError::DataIsTooLong),
}
}
}
//impl_uint_from_bytes!(u8);
impl_uint_from_bytes!(u16);
impl_uint_from_bytes!(u32);
impl_uint_from_bytes!(u64);
impl_uint_from_bytes!(usize);
macro_rules! impl_uint_from_bytes {
($name: ident) => {
impl FromBytes for $name {
fn from_bytes(bytes: &[u8]) -> FromBytesResult<$name> {
if bytes.len() <= $name::SIZE {
Ok($name::from(bytes))
} else {
Err(FromBytesError::DataIsTooLong)
}
}
}
}
}
impl_uint_from_bytes!(U256);
impl_uint_from_bytes!(U128);
impl <T>FromBytes for T where T: FixedHash {
fn from_bytes(bytes: &[u8]) -> FromBytesResult<T> {
match bytes.len().cmp(&T::size()) {
Ordering::Less => return Err(FromBytesError::DataIsTooShort),
Ordering::Greater => return Err(FromBytesError::DataIsTooLong),
Ordering::Equal => ()
};
unsafe {
use std::{mem, ptr};
let mut res: T = mem::uninitialized();
ptr::copy(bytes.as_ptr(), res.as_slice_mut().as_mut_ptr(), T::size());
Ok(res)
}
}
}
/// Simple trait to allow for raw population of a Sized object from a byte slice.
pub trait Populatable {
/// Copies a bunch of bytes `d` to `self`, overwriting as necessary.
///
/// If `d` is smaller, zero-out the remaining bytes.
fn populate_raw(&mut self, d: &[u8]) {
let mut s = self.as_slice_mut();
for i in 0..s.len() {
s[i] = if i < d.len() {d[i]} else {0};
}
}
/// Copies a bunch of bytes `d` to `self`, overwriting as necessary.
///
/// If `d` is smaller, will leave some bytes untouched.
fn copy_raw(&mut self, d: &[u8]) {
use std::io::Write;
self.as_slice_mut().write(&d).unwrap();
}
/// Copies the raw representation of an object `d` to `self`, overwriting as necessary.
///
/// If `d` is smaller, zero-out the remaining bytes.
fn populate_raw_from(&mut self, d: &BytesConvertable) { self.populate_raw(d.as_slice()); }
/// Copies the raw representation of an object `d` to `self`, overwriting as necessary.
///
/// If `d` is smaller, will leave some bytes untouched.
fn copy_raw_from(&mut self, d: &BytesConvertable) { self.copy_raw(d.as_slice()); }
/// Get the raw slice for this object.
fn as_slice_mut(&mut self) -> &mut [u8];
}
impl<T> Populatable for T where T: Sized {
fn as_slice_mut(&mut self) -> &mut [u8] {
use std::mem;
unsafe {
slice::from_raw_parts_mut(self as *mut T as *mut u8, mem::size_of::<T>())
}
}
}
impl<T> Populatable for [T] where T: Sized {
fn as_slice_mut(&mut self) -> &mut [u8] {
use std::mem;
unsafe {
slice::from_raw_parts_mut(self.as_mut_ptr() as *mut u8, mem::size_of::<T>() * self.len())
}
}
}
#[test]
fn fax_raw() {
let mut x = [255u8; 4];
x.copy_raw(&[1u8; 2][..]);
assert_eq!(x, [1u8, 1, 255, 255]);
let mut x = [255u8; 4];
x.copy_raw(&[1u8; 6][..]);
assert_eq!(x, [1u8, 1, 1, 1]);
}
#[test]
fn populate_raw() {
let mut x = [255u8; 4];
x.populate_raw(&[1u8; 2][..]);
assert_eq!(x, [1u8, 1, 0, 0]);
let mut x = [255u8; 4];
x.populate_raw(&[1u8; 6][..]);
assert_eq!(x, [1u8, 1, 1, 1]);
}
#[test]
fn populate_raw_dyn() {
let mut x = [255u8; 4];
x.populate_raw(&[1u8; 2][..]);
assert_eq!(&x[..], [1u8, 1, 0, 0]);
let mut x = [255u8; 4];
x.populate_raw(&[1u8; 6][..]);
assert_eq!(&x[..], [1u8, 1, 1, 1]);
}
#[test]
fn fax_raw_dyn() {
let mut x = [255u8; 4];
x.copy_raw(&[1u8; 2][..]);
assert_eq!(&x[..], [1u8, 1, 255, 255]);
let mut x = [255u8; 4];
x.copy_raw(&[1u8; 6][..]);
assert_eq!(&x[..], [1u8, 1, 1, 1]);
}
#[test]
fn populate_big_types() {
use hash::*;
let a = address_from_hex("ffffffffffffffffffffffffffffffffffffffff");
let mut h = h256_from_u64(0x69);
h.populate_raw_from(&a);
assert_eq!(h, h256_from_hex("ffffffffffffffffffffffffffffffffffffffff000000000000000000000000"));
let mut h = h256_from_u64(0x69);
h.copy_raw_from(&a);
assert_eq!(h, h256_from_hex("ffffffffffffffffffffffffffffffffffffffff000000000000000000000069"));
}

452
src/chainfilter.rs Normal file
View File

@ -0,0 +1,452 @@
//! Multilevel blockchain bloom filter.
//!
//! ```
//! extern crate ethcore_util as util;
//! use std::str::FromStr;
//! use util::chainfilter::*;
//! use util::sha3::*;
//! use util::hash::*;
//!
//! fn main() {
//! let (index_size, bloom_levels) = (16, 3);
//! let mut cache = MemoryCache::new();
//!
//! let address = Address::from_str("ef2d6d194084c2de36e0dabfce45d046b37d1106").unwrap();
//!
//! // borrow cache for reading inside the scope
//! let modified_blooms = {
//! let filter = ChainFilter::new(&cache, index_size, bloom_levels);
//! let block_number = 39;
//! let mut bloom = H2048::new();
//! bloom.shift_bloomed(&address.sha3());
//! filter.add_bloom(&bloom, block_number)
//! };
//!
//! // number of updated blooms is equal to the number of levels
//! assert_eq!(modified_blooms.len(), bloom_levels as usize);
//!
//! // let's insert the modified blooms into the cache
//! cache.insert_blooms(modified_blooms);
//!
//! // borrow the cache for further reading operations
//! {
//! let filter = ChainFilter::new(&cache, index_size, bloom_levels);
//! let blocks = filter.blocks_with_address(&address, 10, 40);
//! assert_eq!(blocks.len(), 1);
//! assert_eq!(blocks[0], 39);
//! }
//! }
//! ```
//!
use std::collections::{HashMap};
use hash::*;
use sha3::*;
/// Represents bloom index in cache
///
/// On cache level 0, every block bloom is represented by a different index.
/// On higher cache levels, multiple block blooms are represented by one
/// index. Their `BloomIndex` can be created from a block number and a given level.
#[derive(Eq, PartialEq, Hash, Clone, Debug)]
pub struct BloomIndex {
pub level: u8,
pub index: usize,
}
impl BloomIndex {
/// Default constructor for `BloomIndex`
pub fn new(level: u8, index: usize) -> BloomIndex {
BloomIndex {
level: level,
index: index,
}
}
}
/// Types implementing this trait should provide read access for bloom filters database.
pub trait FilterDataSource {
/// returns a reference to the bloom at the given index if it exists
fn bloom_at_index(&self, index: &BloomIndex) -> Option<&H2048>;
}
/// In memory cache for blooms.
///
/// Stores all blooms in HashMap, which indexes them by `BloomIndex`.
pub struct MemoryCache {
blooms: HashMap<BloomIndex, H2048>,
}
impl MemoryCache {
/// Default constructor for MemoryCache
pub fn new() -> MemoryCache {
MemoryCache { blooms: HashMap::new() }
}
/// inserts all blooms into cache
///
/// if bloom at given index already exists, overwrites it
pub fn insert_blooms(&mut self, blooms: HashMap<BloomIndex, H2048>) {
self.blooms.extend(blooms);
}
}
impl FilterDataSource for MemoryCache {
fn bloom_at_index(&self, index: &BloomIndex) -> Option<&H2048> {
self.blooms.get(index)
}
}
/// Should be used for search operations on blockchain.
pub struct ChainFilter<'a, D>
where D: FilterDataSource + 'a
{
data_source: &'a D,
index_size: usize,
level_sizes: Vec<usize>,
}
impl<'a, D> ChainFilter<'a, D> where D: FilterDataSource
{
/// Creates new filter instance.
///
/// Borrows `FilterDataSource` for reading.
pub fn new(data_source: &'a D, index_size: usize, levels: u8) -> Self {
if levels == 0 {
panic!("ChainFilter requires at least 1 level");
}
let mut filter = ChainFilter {
data_source: data_source,
index_size: index_size,
// level 0 always has a size of 1
level_sizes: vec![1]
};
// cache level sizes, so we do not have to calculate them all the time
// eg. if levels == 3, index_size = 16
// level_sizes = [1, 16, 256]
let additional: Vec<usize> = (1..).into_iter()
.scan(1, |acc, _| {
*acc = *acc * index_size;
Some(*acc)
})
.take(levels as usize - 1)
.collect();
filter.level_sizes.extend(additional);
filter
}
/// gets level size; panics if the level is out of range
fn level_size(&self, level: u8) -> usize {
self.level_sizes[level as usize]
}
/// converts block number and level to `BloomIndex`
fn bloom_index(&self, block_number: usize, level: u8) -> BloomIndex {
BloomIndex {
level: level,
index: block_number / self.level_size(level),
}
}
/// returns bloom indexes which are dependencies of the given index
///
/// bloom indexes are ordered from lowest to highest
fn lower_level_bloom_indexes(&self, index: &BloomIndex) -> Vec<BloomIndex> {
// this is the lowest level
if index.level == 0 {
return vec![];
}
let new_level = index.level - 1;
let offset = self.index_size * index.index;
(0..self.index_size).map(|i| BloomIndex::new(new_level, offset + i)).collect()
}
/// return number of levels
fn levels(&self) -> u8 {
self.level_sizes.len() as u8
}
/// returns max filter level
fn max_level(&self) -> u8 {
self.level_sizes.len() as u8 - 1
}
/// internal function which does bloom search recursively
fn blocks(&self, bloom: &H2048, from_block: usize, to_block: usize, level: u8, offset: usize) -> Option<Vec<usize>> {
let index = self.bloom_index(offset, level);
match self.data_source.bloom_at_index(&index) {
None => return None,
Some(level_bloom) => match level {
// if we are on the lowest level
// take the value, exclude to_block
0 if offset < to_block => return Some(vec![offset]),
// return None if it is equal to to_block
0 => return None,
// return None if the current level doesn't contain the given bloom
_ if !level_bloom.contains(bloom) => return None,
// continue processing && go down
_ => ()
}
};
let level_size = self.level_size(level - 1);
let from_index = self.bloom_index(from_block, level - 1);
let to_index = self.bloom_index(to_block, level - 1);
let res: Vec<usize> = self.lower_level_bloom_indexes(&index).into_iter()
// choose only blooms in range
.filter(|li| li.index >= from_index.index && li.index <= to_index.index)
// map them to offsets
.map(|li| li.index * level_size)
// get all blocks that may contain our bloom
.map(|off| self.blocks(bloom, from_block, to_block, level - 1, off))
// filter existing ones
.filter_map(|x| x)
// flatten nested structures
.flat_map(|v| v)
.collect();
Some(res)
}
/// Adds new bloom to all filter levels
pub fn add_bloom(&self, bloom: &H2048, block_number: usize) -> HashMap<BloomIndex, H2048> {
let mut result: HashMap<BloomIndex, H2048> = HashMap::new();
for level in 0..self.levels() {
let bloom_index = self.bloom_index(block_number, level);
let new_bloom = match self.data_source.bloom_at_index(&bloom_index) {
Some(old_bloom) => old_bloom | bloom,
None => bloom.clone(),
};
result.insert(bloom_index, new_bloom);
}
result
}
/// Adds new blooms starting from block number.
pub fn add_blooms(&self, blooms: &[H2048], block_number: usize) -> HashMap<BloomIndex, H2048> {
let mut result: HashMap<BloomIndex, H2048> = HashMap::new();
for level in 0..self.levels() {
for i in 0..blooms.len() {
let bloom_index = self.bloom_index(block_number + i, level);
let is_new_bloom = match result.get_mut(&bloom_index) {
// it was already modified
Some(to_shift) => {
*to_shift = &blooms[i] | to_shift;
false
}
None => true,
};
// it hasn't been modified yet
if is_new_bloom {
let new_bloom = match self.data_source.bloom_at_index(&bloom_index) {
Some(old_bloom) => old_bloom | &blooms[i],
None => blooms[i].clone(),
};
result.insert(bloom_index, new_bloom);
}
}
}
result
}
/// Resets bloom at level 0 and forces rebuild on higher levels.
pub fn reset_bloom(&self, bloom: &H2048, block_number: usize) -> HashMap<BloomIndex, H2048> {
let mut result: HashMap<BloomIndex, H2048> = HashMap::new();
let mut reset_index = self.bloom_index(block_number, 0);
result.insert(reset_index.clone(), bloom.clone());
for level in 1..self.levels() {
let index = self.bloom_index(block_number, level);
// get all bloom indexes that were used to construct this bloom
let lower_indexes = self.lower_level_bloom_indexes(&index);
let new_bloom = lower_indexes.into_iter()
// skip the reset one
.filter(|li| li != &reset_index)
// get blooms for these indexes
.map(|li| self.data_source.bloom_at_index(&li))
// filter existing ones
.filter_map(|b| b)
// BitOr all of them
.fold(H2048::new(), |acc, bloom| &acc | bloom);
reset_index = index.clone();
result.insert(index, &new_bloom | bloom);
}
result
}
/// Sets lowest level bloom to 0 and forces rebuild on higher levels.
pub fn clear_bloom(&self, block_number: usize) -> HashMap<BloomIndex, H2048> {
self.reset_bloom(&H2048::new(), block_number)
}
/// Returns numbers of blocks that may contain Address.
pub fn blocks_with_address(&self, address: &Address, from_block: usize, to_block: usize) -> Vec<usize> {
let mut bloom = H2048::new();
bloom.shift_bloomed(&address.sha3());
self.blocks_with_bloom(&bloom, from_block, to_block)
}
/// Returns numbers of blocks that may contain Topic.
pub fn blocks_with_topic(&self, topic: &H256, from_block: usize, to_block: usize) -> Vec<usize> {
let mut bloom = H2048::new();
bloom.shift_bloomed(&topic.sha3());
self.blocks_with_bloom(&bloom, from_block, to_block)
}
/// Returns numbers of blocks that may log bloom.
pub fn blocks_with_bloom(&self, bloom: &H2048, from_block: usize, to_block: usize) -> Vec<usize> {
let mut result = vec![];
// let's start from the highest level
let max_level = self.max_level();
let level_size = self.level_size(max_level);
let from_index = self.bloom_index(from_block, max_level);
let to_index = self.bloom_index(to_block, max_level);
for index in from_index.index..to_index.index + 1 {
// offset will be used to calculate where we are right now
let offset = level_size * index;
// go doooown!
match self.blocks(bloom, from_block, to_block, max_level, offset) {
Some(blocks) => result.extend(blocks),
None => ()
};
}
result
}
}
#[cfg(test)]
mod tests {
use hash::*;
use chainfilter::*;
use sha3::*;
use std::str::FromStr;
#[test]
fn test_level_size() {
let cache = MemoryCache::new();
let filter = ChainFilter::new(&cache, 16, 3);
assert_eq!(filter.level_size(0), 1);
assert_eq!(filter.level_size(1), 16);
assert_eq!(filter.level_size(2), 256);
}
#[test]
fn test_bloom_index() {
let cache = MemoryCache::new();
let filter = ChainFilter::new(&cache, 16, 3);
let bi0 = filter.bloom_index(0, 0);
assert_eq!(bi0.level, 0);
assert_eq!(bi0.index, 0);
let bi1 = filter.bloom_index(1, 0);
assert_eq!(bi1.level, 0);
assert_eq!(bi1.index, 1);
let bi2 = filter.bloom_index(2, 0);
assert_eq!(bi2.level, 0);
assert_eq!(bi2.index, 2);
let bi3 = filter.bloom_index(3, 1);
assert_eq!(bi3.level, 1);
assert_eq!(bi3.index, 0);
let bi4 = filter.bloom_index(15, 1);
assert_eq!(bi4.level, 1);
assert_eq!(bi4.index, 0);
let bi5 = filter.bloom_index(16, 1);
assert_eq!(bi5.level, 1);
assert_eq!(bi5.index, 1);
let bi6 = filter.bloom_index(255, 2);
assert_eq!(bi6.level, 2);
assert_eq!(bi6.index, 0);
let bi7 = filter.bloom_index(256, 2);
assert_eq!(bi7.level, 2);
assert_eq!(bi7.index, 1);
}
#[test]
fn test_lower_level_bloom_indexes() {
let cache = MemoryCache::new();
let filter = ChainFilter::new(&cache, 16, 3);
let bi = filter.bloom_index(256, 2);
assert_eq!(bi.level, 2);
assert_eq!(bi.index, 1);
let mut ebis = vec![];
for i in 16..32 {
ebis.push(BloomIndex::new(1, i));
}
let bis = filter.lower_level_bloom_indexes(&bi);
assert_eq!(ebis, bis);
}
#[test]
fn test_topic_basic_search() {
let index_size = 16;
let bloom_levels = 3;
let mut cache = MemoryCache::new();
let topic = H256::from_str("8d936b1bd3fc635710969ccfba471fb17d598d9d1971b538dd712e1e4b4f4dba").unwrap();
let modified_blooms = {
let filter = ChainFilter::new(&cache, index_size, bloom_levels);
let block_number = 23;
let mut bloom = H2048::new();
bloom.shift_bloomed(&topic.sha3());
filter.add_bloom(&bloom, block_number)
};
// number of modified blooms should always be equal to the number of levels
assert_eq!(modified_blooms.len(), bloom_levels as usize);
cache.insert_blooms(modified_blooms);
{
let filter = ChainFilter::new(&cache, index_size, bloom_levels);
let blocks = filter.blocks_with_topic(&topic, 0, 100);
assert_eq!(blocks.len(), 1);
assert_eq!(blocks[0], 23);
}
{
let filter = ChainFilter::new(&cache, index_size, bloom_levels);
let blocks = filter.blocks_with_topic(&topic, 0, 23);
assert_eq!(blocks.len(), 0);
}
{
let filter = ChainFilter::new(&cache, index_size, bloom_levels);
let blocks = filter.blocks_with_topic(&topic, 23, 24);
assert_eq!(blocks.len(), 1);
assert_eq!(blocks[0], 23);
}
{
let filter = ChainFilter::new(&cache, index_size, bloom_levels);
let blocks = filter.blocks_with_topic(&topic, 24, 100);
assert_eq!(blocks.len(), 0);
}
}
}

57
src/common.rs Normal file
View File

@ -0,0 +1,57 @@
pub use standard::*;
pub use from_json::*;
pub use error::*;
pub use hash::*;
pub use uint::*;
pub use bytes::*;
pub use vector::*;
pub use sha3::*;
#[macro_export]
macro_rules! map {
( $( $x:expr => $y:expr ),* ) => {
vec![ $( ($x, $y) ),* ].into_iter().collect::<BTreeMap<_, _>>()
}
}
#[macro_export]
macro_rules! mapx {
( $( $x:expr => $y:expr ),* ) => {
vec![ $( ( From::from($x), From::from($y) ) ),* ].into_iter().collect::<BTreeMap<_, _>>()
}
}
#[macro_export]
macro_rules! x {
( $x:expr ) => {
From::from($x)
}
}
#[macro_export]
macro_rules! xx {
( $x:expr ) => {
From::from(From::from($x))
}
}
#[macro_export]
macro_rules! flush {
($($arg:tt)*) => ($crate::flush(format!("{}", format_args!($($arg)*))));
}
#[macro_export]
macro_rules! flushln {
($fmt:expr) => (flush!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => (flush!(concat!($fmt, "\n"), $($arg)*));
}
pub fn flush(s: String) {
::std::io::stdout().write(s.as_bytes()).unwrap();
::std::io::stdout().flush().unwrap();
}
#[test]
fn test_flush() {
flushln!("hello_world {:?}", 1);
}

361
src/crypto.rs Normal file
View File

@ -0,0 +1,361 @@
use hash::*;
use bytes::*;
use uint::*;
use secp256k1::{key, Secp256k1};
use rand::os::OsRng;
pub type Secret = H256;
pub type Public = H512;
pub type Signature = H520;
impl Signature {
/// Create a new signature from the R, S and V components.
pub fn from_rsv(r: &H256, s: &H256, v: u8) -> Signature {
use std::ptr;
let mut ret: Signature = Signature::new();
unsafe {
let retslice: &mut [u8] = &mut ret;
ptr::copy(r.as_ptr(), retslice.as_mut_ptr(), 32);
ptr::copy(s.as_ptr(), retslice.as_mut_ptr().offset(32), 32);
}
ret[64] = v;
ret
}
/// Convert the signature into its R, S and V components.
pub fn to_rsv(&self) -> (U256, U256, u8) {
(U256::from(&self.as_slice()[0..32]), U256::from(&self.as_slice()[32..64]), self[64])
}
}
#[derive(Debug)]
pub enum CryptoError {
InvalidSecret,
InvalidPublic,
InvalidSignature,
InvalidMessage,
Io(::std::io::Error),
}
impl From<::secp256k1::Error> for CryptoError {
fn from(e: ::secp256k1::Error) -> CryptoError {
match e {
::secp256k1::Error::InvalidMessage => CryptoError::InvalidMessage,
::secp256k1::Error::InvalidPublicKey => CryptoError::InvalidPublic,
::secp256k1::Error::InvalidSignature => CryptoError::InvalidSignature,
::secp256k1::Error::InvalidSecretKey => CryptoError::InvalidSecret,
_ => CryptoError::InvalidSignature,
}
}
}
impl From<::std::io::Error> for CryptoError {
fn from(err: ::std::io::Error) -> CryptoError {
CryptoError::Io(err)
}
}
#[derive(Debug, PartialEq, Eq)]
/// secp256k1 Key pair
///
/// Use `create()` to create a new random key pair.
///
/// # Example
/// ```rust
/// extern crate ethcore_util;
/// use ethcore_util::crypto::*;
/// use ethcore_util::hash::*;
/// fn main() {
/// let pair = KeyPair::create().unwrap();
/// let message = H256::random();
/// let signature = ec::sign(pair.secret(), &message).unwrap();
///
/// assert!(ec::verify(pair.public(), &signature, &message).unwrap());
/// assert_eq!(ec::recover(&signature, &message).unwrap(), *pair.public());
/// }
/// ```
pub struct KeyPair {
secret: Secret,
public: Public,
}
impl KeyPair {
/// Create a pair from secret key
pub fn from_secret(secret: Secret) -> Result<KeyPair, CryptoError> {
let context = Secp256k1::new();
let s: key::SecretKey = try!(key::SecretKey::from_slice(&context, &secret));
let pub_key = try!(key::PublicKey::from_secret_key(&context, &s));
let serialized = pub_key.serialize_vec(&context, false);
let p: Public = Public::from_slice(&serialized[1..65]);
Ok(KeyPair {
secret: secret,
public: p,
})
}
/// Create a new random key pair
pub fn create() -> Result<KeyPair, CryptoError> {
let context = Secp256k1::new();
let mut rng = try!(OsRng::new());
let (sec, publ) = try!(context.generate_keypair(&mut rng));
let serialized = publ.serialize_vec(&context, false);
let p: Public = Public::from_slice(&serialized[1..65]);
let s: Secret = unsafe { ::std::mem::transmute(sec) };
Ok(KeyPair {
secret: s,
public: p,
})
}
/// Returns public key
pub fn public(&self) -> &Public {
&self.public
}
/// Returns private key
pub fn secret(&self) -> &Secret {
&self.secret
}
/// Sign a message with our secret key.
pub fn sign(&self, message: &H256) -> Result<Signature, CryptoError> { ec::sign(&self.secret, message) }
}
pub mod ec {
use hash::*;
use uint::*;
use standard::*;
use crypto::*;
use crypto::{self};
/// Recovers Public key from signed message hash.
pub fn recover(signature: &Signature, message: &H256) -> Result<Public, CryptoError> {
use secp256k1::*;
let context = Secp256k1::new();
let rsig = try!(RecoverableSignature::from_compact(&context, &signature[0..64], try!(RecoveryId::from_i32(signature[64] as i32))));
let publ = try!(context.recover(&try!(Message::from_slice(&message)), &rsig));
let serialized = publ.serialize_vec(&context, false);
let p: Public = Public::from_slice(&serialized[1..65]);
//TODO: check if it's the zero key and fail if so.
Ok(p)
}
/// Returns the signature of the message hash.
pub fn sign(secret: &Secret, message: &H256) -> Result<Signature, CryptoError> {
// TODO: allow creation of only low-s signatures.
use secp256k1::*;
let context = Secp256k1::new();
let sec: &key::SecretKey = unsafe { ::std::mem::transmute(secret) };
let s = try!(context.sign_recoverable(&try!(Message::from_slice(&message)), sec));
let (rec_id, data) = s.serialize_compact(&context);
let mut signature: crypto::Signature = unsafe { ::std::mem::uninitialized() };
signature.clone_from_slice(&data);
signature[64] = rec_id.to_i32() as u8;
Ok(signature)
}
/// Verify signature.
pub fn verify(public: &Public, signature: &Signature, message: &H256) -> Result<bool, CryptoError> {
use secp256k1::*;
let context = Secp256k1::new();
let rsig = try!(RecoverableSignature::from_compact(&context, &signature[0..64], try!(RecoveryId::from_i32(signature[64] as i32))));
let sig = rsig.to_standard(&context);
let mut pdata: [u8; 65] = [4u8; 65];
let ptr = pdata[1..].as_mut_ptr();
let src = public.as_ptr();
unsafe { ::std::ptr::copy_nonoverlapping(src, ptr, 64) };
let publ = try!(key::PublicKey::from_slice(&context, &pdata));
match context.verify(&try!(Message::from_slice(&message)), &sig, &publ) {
Ok(_) => Ok(true),
Err(Error::IncorrectSignature) => Ok(false),
Err(x) => Err(<CryptoError as From<Error>>::from(x))
}
}
/// Check if this is a "low" signature.
pub fn is_low(sig: &Signature) -> bool {
H256::from_slice(&sig[32..64]) <= h256_from_hex("7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0")
}
/// Check if the given `s` value is "low".
pub fn is_low_s(s: &U256) -> bool {
s <= &U256::from_str("7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0").unwrap()
}
/// Check if each component of the signature is in range.
pub fn is_valid(sig: &Signature) -> bool {
sig[64] <= 1 &&
H256::from_slice(&sig[0..32]) < h256_from_hex("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141") &&
H256::from_slice(&sig[32..64]) < h256_from_hex("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141") &&
H256::from_slice(&sig[32..64]) >= h256_from_u64(1) &&
H256::from_slice(&sig[0..32]) >= h256_from_u64(1)
}
}
pub mod ecdh {
use crypto::*;
pub fn agree(secret: &Secret, public: &Public) -> Result<Secret, CryptoError> {
use secp256k1::*;
let context = Secp256k1::new();
let mut pdata: [u8; 65] = [4u8; 65];
let ptr = pdata[1..].as_mut_ptr();
let src = public.as_ptr();
unsafe { ::std::ptr::copy_nonoverlapping(src, ptr, 64) };
let publ = try!(key::PublicKey::from_slice(&context, &pdata));
let sec: &key::SecretKey = unsafe { ::std::mem::transmute(secret) };
let shared = ecdh::SharedSecret::new_raw(&context, &publ, &sec);
let s: Secret = unsafe { ::std::mem::transmute(shared) };
Ok(s)
}
}
pub mod ecies {
use hash::*;
use bytes::*;
use crypto::*;
pub fn encrypt(public: &Public, plain: &[u8]) -> Result<Bytes, CryptoError> {
use ::rcrypto::digest::Digest;
use ::rcrypto::sha2::Sha256;
use ::rcrypto::hmac::Hmac;
use ::rcrypto::mac::Mac;
let r = try!(KeyPair::create());
let z = try!(ecdh::agree(r.secret(), public));
let mut key = [0u8; 32];
let mut mkey = [0u8; 32];
kdf(&z, &[0u8; 0], &mut key);
let mut hasher = Sha256::new();
let mkey_material = &key[16..32];
hasher.input(mkey_material);
hasher.result(&mut mkey);
let ekey = &key[0..16];
let mut msg = vec![0u8; (1 + 64 + 16 + plain.len() + 32)];
msg[0] = 0x04u8;
{
let msgd = &mut msg[1..];
r.public().copy_to(&mut msgd[0..64]);
{
let cipher = &mut msgd[(64 + 16)..(64 + 16 + plain.len())];
aes::encrypt(ekey, &H128::new(), plain, cipher);
}
let mut hmac = Hmac::new(Sha256::new(), &mkey);
{
let cipher_iv = &msgd[64..(64 + 16 + plain.len())];
hmac.input(cipher_iv);
}
hmac.raw_result(&mut msgd[(64 + 16 + plain.len())..]);
}
Ok(msg)
}
pub fn decrypt(secret: &Secret, encrypted: &[u8]) -> Result<Bytes, CryptoError> {
use ::rcrypto::digest::Digest;
use ::rcrypto::sha2::Sha256;
use ::rcrypto::hmac::Hmac;
use ::rcrypto::mac::Mac;
let meta_len = 1 + 64 + 16 + 32;
if encrypted.len() < meta_len || encrypted[0] < 2 || encrypted[0] > 4 {
return Err(CryptoError::InvalidMessage); //invalid message: publickey
}
let e = &encrypted[1..];
let p = Public::from_slice(&e[0..64]);
let z = try!(ecdh::agree(secret, &p));
let mut key = [0u8; 32];
kdf(&z, &[0u8; 0], &mut key);
let ekey = &key[0..16];
let mkey_material = &key[16..32];
let mut hasher = Sha256::new();
let mut mkey = [0u8; 32];
hasher.input(mkey_material);
hasher.result(&mut mkey);
let clen = encrypted.len() - meta_len;
let cipher_with_iv = &e[64..(64+16+clen)];
let cipher_iv = &cipher_with_iv[0..16];
let cipher_no_iv = &cipher_with_iv[16..];
let msg_mac = &e[(64+16+clen)..];
// Verify tag
let mut hmac = Hmac::new(Sha256::new(), &mkey);
hmac.input(cipher_with_iv);
let mut mac = H256::new();
hmac.raw_result(&mut mac);
if &mac[..] != msg_mac {
return Err(CryptoError::InvalidMessage);
}
let mut msg = vec![0u8; clen];
aes::decrypt(ekey, cipher_iv, cipher_no_iv, &mut msg[..]);
Ok(msg)
}
fn kdf(secret: &Secret, s1: &[u8], dest: &mut [u8]) {
use ::rcrypto::digest::Digest;
use ::rcrypto::sha2::Sha256;
let mut hasher = Sha256::new();
// SEC/ISO/Shoup specify counter size SHOULD be equivalent
// to size of hash output, however, it also notes that
// the 4 bytes is okay. NIST specifies 4 bytes.
let mut ctr = 1u32;
let mut written = 0usize;
while written < dest.len() {
let ctrs = [(ctr >> 24) as u8, (ctr >> 16) as u8, (ctr >> 8) as u8, ctr as u8];
hasher.input(&ctrs);
hasher.input(secret);
hasher.input(s1);
hasher.result(&mut dest[written..(written + 32)]);
hasher.reset();
written += 32;
ctr += 1;
}
}
}
pub mod aes {
use ::rcrypto::blockmodes::*;
use ::rcrypto::aessafe::*;
use ::rcrypto::symmetriccipher::*;
use ::rcrypto::buffer::*;
pub fn encrypt(k: &[u8], iv: &[u8], plain: &[u8], dest: &mut [u8]) {
let mut encryptor = CtrMode::new(AesSafe128Encryptor::new(k), iv.to_vec());
encryptor.encrypt(&mut RefReadBuffer::new(plain), &mut RefWriteBuffer::new(dest), true).expect("Invalid length or padding");
}
pub fn decrypt(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) {
let mut encryptor = CtrMode::new(AesSafe128Encryptor::new(k), iv.to_vec());
encryptor.decrypt(&mut RefReadBuffer::new(encrypted), &mut RefWriteBuffer::new(dest), true).expect("Invalid length or padding");
}
}
#[cfg(test)]
mod tests {
use hash::*;
use crypto::*;
// TODO: tests for sign/recover roundtrip, at least.
#[test]
fn test_signature() {
let pair = KeyPair::create().unwrap();
let message = H256::random();
let signature = ec::sign(pair.secret(), &message).unwrap();
assert!(ec::verify(pair.public(), &signature, &message).unwrap());
assert_eq!(ec::recover(&signature, &message).unwrap(), *pair.public());
}
#[test]
fn test_invalid_key() {
assert!(KeyPair::from_secret(h256_from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")).is_err());
assert!(KeyPair::from_secret(h256_from_hex("0000000000000000000000000000000000000000000000000000000000000000")).is_err());
assert!(KeyPair::from_secret(h256_from_hex("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141")).is_err());
}
#[test]
fn test_key() {
let pair = KeyPair::from_secret(h256_from_hex("6f7b0d801bc7b5ce7bbd930b84fd0369b3eb25d09be58d64ba811091046f3aa2")).unwrap();
assert_eq!(pair.public().hex(), "101b3ef5a4ea7a1c7928e24c4c75fd053c235d7b80c22ae5c03d145d0ac7396e2a4ffff9adee3133a7b05044a5cee08115fd65145e5165d646bde371010d803c");
}
}
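
The `ecies` module composes with `KeyPair` in the obvious way; a hedged round-trip sketch (not part of the test suite above) would be:

```rust
// Sketch of an ECIES encrypt/decrypt round trip using the API defined above.
extern crate ethcore_util;

use ethcore_util::crypto::{ecies, KeyPair};

fn ecies_round_trip() {
	let pair = KeyPair::create().unwrap();
	let plain = b"hello ecies";
	// Encrypt to the public key, decrypt with the matching secret.
	let encrypted = ecies::encrypt(pair.public(), plain).unwrap();
	let decrypted = ecies::decrypt(pair.secret(), &encrypted).unwrap();
	assert_eq!(&decrypted[..], &plain[..]);
}
```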

88
src/error.rs Normal file
View File

@ -0,0 +1,88 @@
//! General error types for use in ethcore.
use rustc_serialize::hex::FromHexError;
use network::NetworkError;
use rlp::DecoderError;
use io;
#[derive(Debug)]
pub enum BaseDataError {
NegativelyReferencedHash,
}
#[derive(Debug)]
/// General error type which should be capable of representing all errors in ethcore.
pub enum UtilError {
Crypto(::crypto::CryptoError),
StdIo(::std::io::Error),
Io(io::IoError),
AddressParse(::std::net::AddrParseError),
AddressResolve(Option<::std::io::Error>),
FromHex(FromHexError),
BaseData(BaseDataError),
Network(NetworkError),
Decoder(DecoderError),
BadSize,
}
impl From<FromHexError> for UtilError {
fn from(err: FromHexError) -> UtilError {
UtilError::FromHex(err)
}
}
impl From<BaseDataError> for UtilError {
fn from(err: BaseDataError) -> UtilError {
UtilError::BaseData(err)
}
}
impl From<NetworkError> for UtilError {
fn from(err: NetworkError) -> UtilError {
UtilError::Network(err)
}
}
impl From<::std::io::Error> for UtilError {
fn from(err: ::std::io::Error) -> UtilError {
UtilError::StdIo(err)
}
}
impl From<io::IoError> for UtilError {
fn from(err: io::IoError) -> UtilError {
UtilError::Io(err)
}
}
impl From<::crypto::CryptoError> for UtilError {
fn from(err: ::crypto::CryptoError) -> UtilError {
UtilError::Crypto(err)
}
}
impl From<::std::net::AddrParseError> for UtilError {
fn from(err: ::std::net::AddrParseError) -> UtilError {
UtilError::AddressParse(err)
}
}
impl From<::rlp::DecoderError> for UtilError {
fn from(err: ::rlp::DecoderError) -> UtilError {
UtilError::Decoder(err)
}
}
// TODO: uncomment below once https://github.com/rust-lang/rust/issues/27336 sorted.
/*#![feature(concat_idents)]
macro_rules! assimilate {
($name:ident) => (
impl From<concat_idents!($name, Error)> for Error {
fn from(err: concat_idents!($name, Error)) -> Error {
Error:: $name (err)
}
}
)
}
assimilate!(FromHex);
assimilate!(BaseData);*/

12
src/from_json.rs Normal file
View File

@ -0,0 +1,12 @@
use standard::*;
#[macro_export]
macro_rules! xjson {
( $x:expr ) => {
FromJson::from_json($x)
}
}
pub trait FromJson {
fn from_json(json: &Json) -> Self;
}

614
src/hash.rs Normal file
View File

@ -0,0 +1,614 @@
//! General hash types, a fixed-size raw-data type used as the output of hash functions.
use standard::*;
use math::log2;
use error::UtilError;
use rand::Rng;
use rand::os::OsRng;
use bytes::{BytesConvertable,Populatable};
use from_json::*;
use uint::{Uint, U256};
/// Trait for a fixed-size byte array to be used as the output of hash functions.
///
/// Note: types implementing `FixedHash` must be also `BytesConvertable`.
pub trait FixedHash: Sized + BytesConvertable + Populatable + FromStr + Default {
fn new() -> Self;
/// Synonym for `new()`. Prefer this to `new()`, as it's more readable.
fn zero() -> Self;
fn random() -> Self;
fn randomize(&mut self);
fn size() -> usize;
fn from_slice(src: &[u8]) -> Self;
fn clone_from_slice(&mut self, src: &[u8]) -> usize;
fn copy_to(&self, dest: &mut [u8]);
fn shift_bloomed<'a, T>(&'a mut self, b: &T) -> &'a mut Self where T: FixedHash;
fn with_bloomed<T>(mut self, b: &T) -> Self where T: FixedHash { self.shift_bloomed(b); self }
fn bloom_part<T>(&self, m: usize) -> T where T: FixedHash;
fn contains_bloomed<T>(&self, b: &T) -> bool where T: FixedHash;
fn contains<'a>(&'a self, b: &'a Self) -> bool;
fn is_zero(&self) -> bool;
}
fn clean_0x(s: &str) -> &str {
if s.len() >= 2 && &s[0..2] == "0x" {
&s[2..]
} else {
s
}
}
macro_rules! impl_hash {
($from: ident, $size: expr) => {
#[derive(Eq)]
pub struct $from (pub [u8; $size]);
impl BytesConvertable for $from {
fn bytes(&self) -> &[u8] {
&self.0
}
}
impl Deref for $from {
type Target = [u8];
#[inline]
fn deref(&self) -> &[u8] {
&self.0
}
}
impl DerefMut for $from {
#[inline]
fn deref_mut(&mut self) -> &mut [u8] {
&mut self.0
}
}
impl FixedHash for $from {
fn new() -> $from {
$from([0; $size])
}
fn zero() -> $from {
$from([0; $size])
}
fn random() -> $from {
let mut hash = $from::new();
hash.randomize();
hash
}
fn randomize(&mut self) {
let mut rng = OsRng::new().unwrap();
rng.fill_bytes(&mut self.0);
}
fn size() -> usize {
$size
}
// TODO: remove once slice::clone_from_slice is stable
#[inline]
fn clone_from_slice(&mut self, src: &[u8]) -> usize {
let min = ::std::cmp::min($size, src.len());
let dst = &mut self.deref_mut()[.. min];
let src = &src[.. min];
for i in 0..min {
dst[i] = src[i];
}
min
}
fn from_slice(src: &[u8]) -> Self {
let mut r = Self::new();
r.clone_from_slice(src);
r
}
fn copy_to(&self, dest: &mut[u8]) {
unsafe {
let min = ::std::cmp::min($size, dest.len());
::std::ptr::copy(self.0.as_ptr(), dest.as_mut_ptr(), min);
}
}
fn shift_bloomed<'a, T>(&'a mut self, b: &T) -> &'a mut Self where T: FixedHash {
let bp: Self = b.bloom_part($size);
let new_self = &bp | self;
// impl |= instead
// TODO: that's done now!
unsafe {
use std::{mem, ptr};
ptr::copy(new_self.0.as_ptr(), self.0.as_mut_ptr(), mem::size_of::<Self>());
}
self
}
fn bloom_part<T>(&self, m: usize) -> T where T: FixedHash {
// number of bits
// TODO: move it to some constant
let p = 3;
let bloom_bits = m * 8;
let mask = bloom_bits - 1;
let bloom_bytes = (log2(bloom_bits) + 7) / 8;
//println!("bb: {}", bloom_bytes);
// must be a power of 2
assert_eq!(m & (m - 1), 0);
// out of range
assert!(p * bloom_bytes <= $size);
// return type
let mut ret = T::new();
// 'ptr' to out slice
let mut ptr = 0;
// set p bits,
// p is equal to 3 according to the yellowpaper
for _ in 0..p {
let mut index = 0 as usize;
for _ in 0..bloom_bytes {
index = (index << 8) | self.0[ptr] as usize;
ptr += 1;
}
index &= mask;
ret.as_slice_mut()[m - 1 - index / 8] |= 1 << (index % 8);
}
ret
}
fn contains_bloomed<T>(&self, b: &T) -> bool where T: FixedHash {
let bp: Self = b.bloom_part($size);
self.contains(&bp)
}
fn contains<'a>(&'a self, b: &'a Self) -> bool {
&(b & self) == b
}
fn is_zero(&self) -> bool {
self.eq(&Self::new())
}
}
impl FromStr for $from {
type Err = UtilError;
fn from_str(s: &str) -> Result<$from, UtilError> {
let a = try!(s.from_hex());
if a.len() != $size { return Err(UtilError::BadSize); }
let mut ret = $from([0;$size]);
for i in 0..$size {
ret.0[i] = a[i];
}
Ok(ret)
}
}
impl FromJson for $from {
fn from_json(json: &Json) -> Self {
match json {
&Json::String(ref s) => {
match s.len() % 2 {
0 => FromStr::from_str(clean_0x(s)).unwrap(),
_ => FromStr::from_str(&("0".to_string() + &(clean_0x(s).to_string()))[..]).unwrap()
}
},
_ => Default::default(),
}
}
}
impl fmt::Debug for $from {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for i in self.0.iter() {
try!(write!(f, "{:02x}", i));
}
Ok(())
}
}
impl fmt::Display for $from {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for i in self.0[0..3].iter() {
try!(write!(f, "{:02x}", i));
}
write!(f, "…{:02x}", self.0.last().unwrap())
}
}
impl Clone for $from {
fn clone(&self) -> $from {
unsafe {
use std::{mem, ptr};
let mut ret: $from = mem::uninitialized();
ptr::copy(self.0.as_ptr(), ret.0.as_mut_ptr(), mem::size_of::<$from>());
ret
}
}
}
impl PartialEq for $from {
fn eq(&self, other: &Self) -> bool {
for i in 0..$size {
if self.0[i] != other.0[i] {
return false;
}
}
true
}
}
impl Ord for $from {
fn cmp(&self, other: &Self) -> Ordering {
for i in 0..$size {
if self.0[i] > other.0[i] {
return Ordering::Greater;
} else if self.0[i] < other.0[i] {
return Ordering::Less;
}
}
Ordering::Equal
}
}
impl PartialOrd for $from {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Hash for $from {
fn hash<H>(&self, state: &mut H) where H: Hasher {
state.write(&self.0);
state.finish();
}
}
impl Index<usize> for $from {
type Output = u8;
fn index<'a>(&'a self, index: usize) -> &'a u8 {
&self.0[index]
}
}
impl IndexMut<usize> for $from {
fn index_mut<'a>(&'a mut self, index: usize) -> &'a mut u8 {
&mut self.0[index]
}
}
impl Index<ops::Range<usize>> for $from {
type Output = [u8];
fn index<'a>(&'a self, index: ops::Range<usize>) -> &'a [u8] {
&self.0[index]
}
}
impl IndexMut<ops::Range<usize>> for $from {
fn index_mut<'a>(&'a mut self, index: ops::Range<usize>) -> &'a mut [u8] {
&mut self.0[index]
}
}
impl Index<ops::RangeFull> for $from {
type Output = [u8];
fn index<'a>(&'a self, _index: ops::RangeFull) -> &'a [u8] {
&self.0
}
}
impl IndexMut<ops::RangeFull> for $from {
fn index_mut<'a>(&'a mut self, _index: ops::RangeFull) -> &'a mut [u8] {
&mut self.0
}
}
/// BitOr on references
impl<'a> BitOr for &'a $from {
type Output = $from;
fn bitor(self, rhs: Self) -> Self::Output {
unsafe {
use std::mem;
let mut ret: $from = mem::uninitialized();
for i in 0..$size {
ret.0[i] = self.0[i] | rhs.0[i];
}
ret
}
}
}
/// Moving BitOr
impl BitOr for $from {
type Output = $from;
fn bitor(self, rhs: Self) -> Self::Output {
&self | &rhs
}
}
/// Moving BitOrAssign
impl<'a> BitOrAssign<&'a $from> for $from {
fn bitor_assign(&mut self, rhs: &'a Self) {
for i in 0..$size {
self.0[i] = self.0[i] | rhs.0[i];
}
}
}
/// BitAnd on references
impl <'a> BitAnd for &'a $from {
type Output = $from;
fn bitand(self, rhs: Self) -> Self::Output {
unsafe {
use std::mem;
let mut ret: $from = mem::uninitialized();
for i in 0..$size {
ret.0[i] = self.0[i] & rhs.0[i];
}
ret
}
}
}
/// Moving BitAnd
impl BitAnd for $from {
type Output = $from;
fn bitand(self, rhs: Self) -> Self::Output {
&self & &rhs
}
}
/// BitXor on references
impl <'a> BitXor for &'a $from {
type Output = $from;
fn bitxor(self, rhs: Self) -> Self::Output {
unsafe {
use std::mem;
let mut ret: $from = mem::uninitialized();
for i in 0..$size {
ret.0[i] = self.0[i] ^ rhs.0[i];
}
ret
}
}
}
/// Moving BitXor
impl BitXor for $from {
type Output = $from;
fn bitxor(self, rhs: Self) -> Self::Output {
&self ^ &rhs
}
}
impl $from {
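/// Return a hex string representation of the hash.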
pub fn hex(&self) -> String {
format!("{:?}", self)
}
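/// Construct a bloom of this size from the given hash.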
pub fn from_bloomed<T>(b: &T) -> Self where T: FixedHash { b.bloom_part($size) }
}
impl Default for $from {
fn default() -> Self { $from::new() }
}
impl From<u64> for $from {
fn from(mut value: u64) -> $from {
let mut ret = $from::new();
for i in 0..8 {
if i < $size {
ret.0[$size - i - 1] = (value & 0xff) as u8;
value >>= 8;
}
}
ret
}
}
impl<'_> From<&'_ str> for $from {
fn from(s: &'_ str) -> $from {
use std::str::FromStr;
if s.len() % 2 == 1 {
$from::from_str(&("0".to_string() + &(clean_0x(s).to_string()))[..]).unwrap_or($from::new())
} else {
$from::from_str(clean_0x(s)).unwrap_or($from::new())
}
}
}
}
}
impl From<U256> for H256 {
fn from(value: U256) -> H256 {
unsafe {
let mut ret: H256 = ::std::mem::uninitialized();
value.to_bytes(&mut ret);
ret
}
}
}
impl<'_> From<&'_ U256> for H256 {
fn from(value: &'_ U256) -> H256 {
unsafe {
let mut ret: H256 = ::std::mem::uninitialized();
value.to_bytes(&mut ret);
ret
}
}
}
impl From<H256> for Address {
fn from(value: H256) -> Address {
unsafe {
let mut ret: Address = ::std::mem::uninitialized();
::std::ptr::copy(value.as_ptr().offset(12), ret.as_mut_ptr(), 20);
ret
}
}
}
impl From<H256> for H64 {
fn from(value: H256) -> H64 {
unsafe {
let mut ret: H64 = ::std::mem::uninitialized();
::std::ptr::copy(value.as_ptr().offset(20), ret.as_mut_ptr(), 8);
ret
}
}
}
/*
impl<'_> From<&'_ H256> for Address {
fn from(value: &'_ H256) -> Address {
unsafe {
let mut ret: Address = ::std::mem::uninitialized();
::std::ptr::copy(value.as_ptr().offset(12), ret.as_mut_ptr(), 20);
ret
}
}
}
*/
impl From<Address> for H256 {
fn from(value: Address) -> H256 {
unsafe {
let mut ret = H256::new();
::std::ptr::copy(value.as_ptr(), ret.as_mut_ptr().offset(12), 20);
ret
}
}
}
impl<'_> From<&'_ Address> for H256 {
fn from(value: &'_ Address) -> H256 {
unsafe {
let mut ret = H256::new();
::std::ptr::copy(value.as_ptr(), ret.as_mut_ptr().offset(12), 20);
ret
}
}
}
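/// Convert a hex string into an `H256`. Panics on invalid input.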
pub fn h256_from_hex(s: &str) -> H256 {
use std::str::FromStr;
H256::from_str(s).unwrap()
}
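/// Convert a `u64` into an `H256`, placing the value in the low-order (rightmost) bytes.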
pub fn h256_from_u64(n: u64) -> H256 {
use uint::U256;
H256::from(&U256::from(n))
}
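/// Convert a hex string into an `Address`. Panics on invalid input.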
pub fn address_from_hex(s: &str) -> Address {
use std::str::FromStr;
Address::from_str(s).unwrap()
}
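/// Convert a `u64` into an `Address`, placing the value in the low-order (rightmost) bytes.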
pub fn address_from_u64(n: u64) -> Address {
let h256 = h256_from_u64(n);
From::from(h256)
}
impl_hash!(H32, 4);
impl_hash!(H64, 8);
impl_hash!(H128, 16);
impl_hash!(Address, 20);
impl_hash!(H256, 32);
impl_hash!(H264, 33);
impl_hash!(H512, 64);
impl_hash!(H520, 65);
impl_hash!(H1024, 128);
impl_hash!(H2048, 256);
/// Constant address for point 0. Often used as a default.
pub static ZERO_ADDRESS: Address = Address([0x00; 20]);
/// Constant 256-bit datum for 0. Often used as a default.
pub static ZERO_H256: H256 = H256([0x00; 32]);
#[cfg(test)]
mod tests {
use hash::*;
use std::str::FromStr;
#[test]
fn hash() {
let h = H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]);
assert_eq!(H64::from_str("0123456789abcdef").unwrap(), h);
assert_eq!(format!("{}", h), "012345…ef");
assert_eq!(format!("{:?}", h), "0123456789abcdef");
assert_eq!(h.hex(), "0123456789abcdef");
assert!(h == h);
assert!(h != H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xee]));
assert!(h != H64([0; 8]));
}
#[test]
fn hash_bitor() {
let a = H64([1; 8]);
let b = H64([2; 8]);
let c = H64([3; 8]);
// borrow
assert_eq!(&a | &b, c);
// move
assert_eq!(a | b, c);
}
#[test]
fn shift_bloomed() {
use sha3::Hashable;
let bloom = H2048::from_str("00000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002020000000000000000000000000000000000000000000008000000001000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap();
let address = Address::from_str("ef2d6d194084c2de36e0dabfce45d046b37d1106").unwrap();
let topic = H256::from_str("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap();
let mut my_bloom = H2048::new();
assert!(!my_bloom.contains_bloomed(&address.sha3()));
assert!(!my_bloom.contains_bloomed(&topic.sha3()));
my_bloom.shift_bloomed(&address.sha3());
assert!(my_bloom.contains_bloomed(&address.sha3()));
assert!(!my_bloom.contains_bloomed(&topic.sha3()));
my_bloom.shift_bloomed(&topic.sha3());
assert_eq!(my_bloom, bloom);
assert!(my_bloom.contains_bloomed(&address.sha3()));
assert!(my_bloom.contains_bloomed(&topic.sha3()));
}
#[test]
fn from_and_to_address() {
let address = Address::from_str("ef2d6d194084c2de36e0dabfce45d046b37d1106").unwrap();
let h = H256::from(address.clone());
let a = Address::from(h);
assert_eq!(address, a);
}
#[test]
fn from_u64() {
assert_eq!(H128::from(0x1234567890abcdef), H128::from_str("00000000000000001234567890abcdef").unwrap());
assert_eq!(H64::from(0x1234567890abcdef), H64::from_str("1234567890abcdef").unwrap());
assert_eq!(H32::from(0x1234567890abcdef), H32::from_str("90abcdef").unwrap());
}
#[test]
fn from_str() {
assert_eq!(H64::from(0x1234567890abcdef), H64::from("0x1234567890abcdef"));
assert_eq!(H64::from(0x1234567890abcdef), H64::from("1234567890abcdef"));
assert_eq!(H64::from(0x234567890abcdef), H64::from("0x234567890abcdef"));
// too short.
assert_eq!(H64::from(0), H64::from("0x34567890abcdef"));
}
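// Illustrative addition (not part of the original commit): the free helper
// functions above compose with the `From<u64>` conversions generated by
// `impl_hash!`, so converting via `U256` and converting directly should agree.
#[test]
fn helpers_from_u64() {
assert_eq!(h256_from_u64(0x12), H256::from(0x12u64));
assert_eq!(address_from_u64(0x12), Address::from(0x12u64));
}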
}

97
src/hashdb.rs Normal file

@ -0,0 +1,97 @@
//! Database of byte-slices keyed to their Keccak hash.
use hash::*;
use bytes::*;
use std::collections::HashMap;
/// Trait modelling datastore keyed by a 32-byte Keccak hash.
pub trait HashDB {
/// Get the keys in the database together with number of underlying references.
fn keys(&self) -> HashMap<H256, i32>;
/// Deprecated. Use `get`.
fn lookup(&self, key: &H256) -> Option<&[u8]>; // TODO: rename to get.
/// Look up a given hash, returning the bytes that hash to it, or `None` if the
/// hash is not known.
///
/// # Examples
/// ```rust
/// extern crate ethcore_util;
/// use ethcore_util::hashdb::*;
/// use ethcore_util::memorydb::*;
/// fn main() {
/// let mut m = MemoryDB::new();
/// let hello_bytes = "Hello world!".as_bytes();
/// let hash = m.insert(hello_bytes);
/// assert_eq!(m.lookup(&hash).unwrap(), hello_bytes);
/// }
/// ```
fn get(&self, key: &H256) -> Option<&[u8]> { self.lookup(key) }
/// Deprecated. Use `contains`.
fn exists(&self, key: &H256) -> bool; // TODO: rename to contains.
/// Check for the existence of a hash-key.
///
/// # Examples
/// ```rust
/// extern crate ethcore_util;
/// use ethcore_util::hashdb::*;
/// use ethcore_util::memorydb::*;
/// use ethcore_util::sha3::*;
/// fn main() {
/// let mut m = MemoryDB::new();
/// let hello_bytes = "Hello world!".as_bytes();
/// assert!(!m.exists(&hello_bytes.sha3()));
/// let key = m.insert(hello_bytes);
/// assert!(m.exists(&key));
/// m.kill(&key);
/// assert!(!m.exists(&key));
/// }
/// ```
fn contains(&self, key: &H256) -> bool { self.exists(key) }
/// Insert a datum item into the DB and return the datum's hash for a later lookup. Insertions
/// are counted and the equivalent number of `kill()`s must be performed before the data
/// is considered dead.
///
/// # Examples
/// ```rust
/// extern crate ethcore_util;
/// use ethcore_util::hashdb::*;
/// use ethcore_util::memorydb::*;
/// use ethcore_util::hash::*;
/// fn main() {
/// let mut m = MemoryDB::new();
/// let key = m.insert("Hello world!".as_bytes());
/// assert!(m.exists(&key));
/// }
/// ```
fn insert(&mut self, value: &[u8]) -> H256;
/// Like `insert()`, except you provide the key and the data is moved.
fn emplace(&mut self, key: H256, value: Bytes);
/// Deprecated - use `remove`.
fn kill(&mut self, key: &H256); // TODO: rename to remove.
/// Remove a datum previously inserted. Insertions can be "owed" such that the same number of `insert()`s may
/// happen without the data eventually being inserted into the DB.
///
/// # Examples
/// ```rust
/// extern crate ethcore_util;
/// use ethcore_util::hashdb::*;
/// use ethcore_util::memorydb::*;
/// use ethcore_util::sha3::*;
/// fn main() {
/// let mut m = MemoryDB::new();
/// let d = "Hello world!".as_bytes();
/// let key = &d.sha3();
/// m.kill(key); // OK - we now owe an insertion.
/// assert!(!m.exists(key));
/// m.insert(d); // OK - now it's "empty" again.
/// assert!(!m.exists(key));
/// m.insert(d); // OK - now it's in the DB again.
/// assert_eq!(m.lookup(key).unwrap(), d);
/// }
/// ```
fn remove(&mut self, key: &H256) { self.kill(key) }
}

5
src/heapsizeof.rs Normal file

@ -0,0 +1,5 @@
use uint::*;
use hash::*;
known_heap_size!(0, H32, H64, H128, Address, H256, H264, H512, H520, H1024, H2048);
known_heap_size!(0, U128, U256);

107
src/io/mod.rs Normal file

@ -0,0 +1,107 @@
/// General IO module.
///
/// Example usage for creating a network service and adding an IO handler:
///
/// ```rust
/// extern crate ethcore_util;
/// use ethcore_util::*;
///
/// struct MyHandler;
///
/// struct MyMessage {
/// data: u32
/// }
///
/// impl IoHandler<MyMessage> for MyHandler {
/// fn initialize(&mut self, io: &mut IoContext<MyMessage>) {
/// io.register_timer(1000).unwrap();
/// }
///
/// fn timeout(&mut self, _io: &mut IoContext<MyMessage>, timer: TimerToken) {
/// println!("Timeout {}", timer);
/// }
///
/// fn message(&mut self, _io: &mut IoContext<MyMessage>, message: &mut MyMessage) {
/// println!("Message {}", message.data);
/// }
/// }
///
/// fn main () {
/// let mut service = IoService::<MyMessage>::start().expect("Error creating network service");
/// service.register_handler(Box::new(MyHandler)).unwrap();
///
/// // Wait for quit condition
/// // ...
/// // Drop the service
/// }
/// ```
mod service;
#[derive(Debug)]
pub enum IoError {
Mio(::std::io::Error),
}
impl<Message> From<::mio::NotifyError<service::IoMessage<Message>>> for IoError where Message: Send {
fn from(_err: ::mio::NotifyError<service::IoMessage<Message>>) -> IoError {
IoError::Mio(::std::io::Error::new(::std::io::ErrorKind::ConnectionAborted, "Network IO notification error"))
}
}
/// Generic IO handler.
/// All the handler functions are called from within the IO event loop.
/// `Message` type is used as notification data
pub trait IoHandler<Message>: Send where Message: Send + 'static {
/// Initialize the handler
fn initialize<'s>(&'s mut self, _io: &mut IoContext<'s, Message>) {}
/// Timer function called after a timeout registered with `IoContext::register_timer` expires.
fn timeout<'s>(&'s mut self, _io: &mut IoContext<'s, Message>, _timer: TimerToken) {}
/// Called when a broadcasted message is received. The message can only be sent from a different IO handler.
fn message<'s>(&'s mut self, _io: &mut IoContext<'s, Message>, _message: &'s mut Message) {} // TODO: make message immutable and provide internal channel for adding network handler
/// Called when an IO stream gets closed
fn stream_hup<'s>(&'s mut self, _io: &mut IoContext<'s, Message>, _stream: StreamToken) {}
/// Called when an IO stream can be read from
fn stream_readable<'s>(&'s mut self, _io: &mut IoContext<'s, Message>, _stream: StreamToken) {}
/// Called when an IO stream can be written to
fn stream_writable<'s>(&'s mut self, _io: &mut IoContext<'s, Message>, _stream: StreamToken) {}
}
pub type TimerToken = service::TimerToken;
pub type StreamToken = service::StreamToken;
pub type IoContext<'s, M> = service::IoContext<'s, M>;
pub type IoService<M> = service::IoService<M>;
pub type IoChannel<M> = service::IoChannel<M>;
//pub const USER_TOKEN_START: usize = service::USER_TOKEN; // TODO: ICE in rustc 1.7.0-nightly (49c382779 2016-01-12)
#[cfg(test)]
mod tests {
use io::*;
struct MyHandler;
struct MyMessage {
data: u32
}
impl IoHandler<MyMessage> for MyHandler {
fn initialize(&mut self, io: &mut IoContext<MyMessage>) {
io.register_timer(1000).unwrap();
}
fn timeout(&mut self, _io: &mut IoContext<MyMessage>, timer: TimerToken) {
println!("Timeout {}", timer);
}
fn message(&mut self, _io: &mut IoContext<MyMessage>, message: &mut MyMessage) {
println!("Message {}", message.data);
}
}
#[test]
fn test_service_register_handler () {
let mut service = IoService::<MyMessage>::start().expect("Error creating network service");
service.register_handler(Box::new(MyHandler)).unwrap();
}
}

211
src/io/service.rs Normal file

@ -0,0 +1,211 @@
use std::thread::{self, JoinHandle};
use mio::*;
use mio::util::{Slab};
use hash::*;
use rlp::*;
use error::*;
use io::{IoError, IoHandler};
pub type TimerToken = usize;
pub type StreamToken = usize;
// Tokens
const MAX_USER_TIMERS: usize = 32;
const USER_TIMER: usize = 0;
const LAST_USER_TIMER: usize = USER_TIMER + MAX_USER_TIMERS - 1;
//const USER_TOKEN: usize = LAST_USER_TIMER + 1;
/// Messages used to communicate with the event loop from other threads.
pub enum IoMessage<Message> where Message: Send + Sized {
/// Shutdown the event loop
Shutdown,
/// Register a new protocol handler.
AddHandler {
handler: Box<IoHandler<Message>+Send>,
},
/// Broadcast a message across all protocol handlers.
UserMessage(Message)
}
/// IO access point. This is passed to all IO handlers and provides an interface to the IO subsystem.
pub struct IoContext<'s, Message> where Message: Send + 'static {
timers: &'s mut Slab<UserTimer>,
/// Low level MIO event loop for custom handler registration.
pub event_loop: &'s mut EventLoop<IoManager<Message>>,
}
impl<'s, Message> IoContext<'s, Message> where Message: Send + 'static {
/// Create a new IO access point. Takes references to all the data that can be updated within the IO handler.
fn new(event_loop: &'s mut EventLoop<IoManager<Message>>, timers: &'s mut Slab<UserTimer>) -> IoContext<'s, Message> {
IoContext {
event_loop: event_loop,
timers: timers,
}
}
/// Register a new IO timer. Returns a new timer token. 'IoHandler::timeout' will be called with the token.
pub fn register_timer(&mut self, ms: u64) -> Result<TimerToken, UtilError> {
match self.timers.insert(UserTimer {
delay: ms,
}) {
Ok(token) => {
self.event_loop.timeout_ms(token, ms).expect("Error registering user timer");
Ok(token.as_usize())
},
_ => { panic!("Max timers reached") }
}
}
/// Broadcast a message to other IO clients
pub fn message(&mut self, message: Message) {
match self.event_loop.channel().send(IoMessage::UserMessage(message)) {
Ok(_) => {}
Err(e) => { panic!("Error sending io message {:?}", e); }
}
}
}
struct UserTimer {
delay: u64,
}
/// Root IO handler. Manages user handlers, messages and IO timers.
pub struct IoManager<Message> where Message: Send {
timers: Slab<UserTimer>,
handlers: Vec<Box<IoHandler<Message>>>,
}
impl<Message> IoManager<Message> where Message: Send + 'static {
/// Creates a new instance and registers it with the event loop.
pub fn start(event_loop: &mut EventLoop<IoManager<Message>>) -> Result<(), UtilError> {
let mut io = IoManager {
timers: Slab::new_starting_at(Token(USER_TIMER), MAX_USER_TIMERS),
handlers: Vec::new(),
};
try!(event_loop.run(&mut io));
Ok(())
}
}
impl<Message> Handler for IoManager<Message> where Message: Send + 'static {
type Timeout = Token;
type Message = IoMessage<Message>;
fn ready(&mut self, event_loop: &mut EventLoop<Self>, token: Token, events: EventSet) {
if events.is_hup() {
for h in self.handlers.iter_mut() {
h.stream_hup(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize());
}
}
else if events.is_readable() {
for h in self.handlers.iter_mut() {
h.stream_readable(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize());
}
}
else if events.is_writable() {
for h in self.handlers.iter_mut() {
h.stream_writable(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize());
}
}
}
fn timeout(&mut self, event_loop: &mut EventLoop<Self>, token: Token) {
match token.as_usize() {
USER_TIMER ... LAST_USER_TIMER => {
let delay = {
let timer = self.timers.get_mut(token).expect("Unknown user timer token");
timer.delay
};
for h in self.handlers.iter_mut() {
h.timeout(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize());
}
event_loop.timeout_ms(token, delay).expect("Error re-registering user timer");
}
_ => { // Just pass the event down. IoHandler is supposed to re-register it if required.
for h in self.handlers.iter_mut() {
h.timeout(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize());
}
}
}
}
fn notify(&mut self, event_loop: &mut EventLoop<Self>, msg: Self::Message) {
let mut m = msg;
match m {
IoMessage::Shutdown => event_loop.shutdown(),
IoMessage::AddHandler {
handler,
} => {
self.handlers.push(handler);
self.handlers.last_mut().unwrap().initialize(&mut IoContext::new(event_loop, &mut self.timers));
},
IoMessage::UserMessage(ref mut data) => {
for h in self.handlers.iter_mut() {
h.message(&mut IoContext::new(event_loop, &mut self.timers), data);
}
}
}
}
}
/// Allows sending messages into the event loop. All the IO handlers will get the message
/// in the `message` callback.
pub struct IoChannel<Message> where Message: Send {
channel: Sender<IoMessage<Message>>
}
impl<Message> IoChannel<Message> where Message: Send {
pub fn send(&mut self, message: Message) -> Result<(), IoError> {
try!(self.channel.send(IoMessage::UserMessage(message)));
Ok(())
}
}
/// General IO Service. Starts an event loop and dispatches IO requests.
/// 'Message' is a notification message type
pub struct IoService<Message> where Message: Send + 'static {
thread: Option<JoinHandle<()>>,
host_channel: Sender<IoMessage<Message>>
}
impl<Message> IoService<Message> where Message: Send + 'static {
/// Starts IO event loop
pub fn start() -> Result<IoService<Message>, UtilError> {
let mut event_loop = EventLoop::new().unwrap();
let channel = event_loop.channel();
let thread = thread::spawn(move || {
IoManager::<Message>::start(&mut event_loop).unwrap(); //TODO:
});
Ok(IoService {
thread: Some(thread),
host_channel: channel
})
}
/// Register an IO handler with the event loop.
pub fn register_handler(&mut self, handler: Box<IoHandler<Message>+Send>) -> Result<(), IoError> {
try!(self.host_channel.send(IoMessage::AddHandler {
handler: handler,
}));
Ok(())
}
/// Send a message over the network. Normally `HostIo::send` should be used. This can be used from non-io threads.
pub fn send_message(&mut self, message: Message) -> Result<(), IoError> {
try!(self.host_channel.send(IoMessage::UserMessage(message)));
Ok(())
}
/// Create a new message channel
pub fn channel(&mut self) -> IoChannel<Message> {
IoChannel { channel: self.host_channel.clone() }
}
}
impl<Message> Drop for IoService<Message> where Message: Send {
fn drop(&mut self) {
self.host_channel.send(IoMessage::Shutdown).unwrap();
self.thread.take().unwrap().join().unwrap();
}
}

137
src/json_aid.rs Normal file

@ -0,0 +1,137 @@
use common::*;
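/// Remove the `0x` prefix from a hex string, if present.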
pub fn clean(s: &str) -> &str {
if s.len() >= 2 && &s[0..2] == "0x" {
&s[2..]
} else {
s
}
}
fn u256_from_str(s: &str) -> U256 {
if s.len() >= 2 && &s[0..2] == "0x" {
U256::from_str(&s[2..]).unwrap_or(U256::from(0))
} else {
U256::from_dec_str(s).unwrap_or(U256::from(0))
}
}
impl FromJson for Bytes {
fn from_json(json: &Json) -> Self {
match json {
&Json::String(ref s) => match s.len() % 2 {
0 => FromHex::from_hex(clean(s)).unwrap_or(vec![]),
_ => FromHex::from_hex(&("0".to_string() + &(clean(s).to_string()))[..]).unwrap_or(vec![]),
},
_ => vec![],
}
}
}
impl FromJson for BTreeMap<H256, H256> {
fn from_json(json: &Json) -> Self {
match json {
&Json::Object(ref o) => o.iter().map(|(key, value)| (x!(&u256_from_str(key)), x!(&U256::from_json(value)))).collect(),
_ => BTreeMap::new(),
}
}
}
impl<T> FromJson for Vec<T> where T: FromJson {
fn from_json(json: &Json) -> Self {
match json {
&Json::Array(ref o) => o.iter().map(|x|T::from_json(x)).collect(),
_ => Vec::new(),
}
}
}
impl<T> FromJson for Option<T> where T: FromJson {
fn from_json(json: &Json) -> Self {
match json {
&Json::String(ref o) if o.is_empty() => None,
&Json::Null => None,
_ => Some(FromJson::from_json(json)),
}
}
}
impl FromJson for u64 {
fn from_json(json: &Json) -> Self {
U256::from_json(json).low_u64()
}
}
impl FromJson for u32 {
fn from_json(json: &Json) -> Self {
U256::from_json(json).low_u64() as u32
}
}
impl FromJson for u16 {
fn from_json(json: &Json) -> Self {
U256::from_json(json).low_u64() as u16
}
}
#[test]
fn u256_from_json() {
let j = Json::from_str("{ \"dec\": \"10\", \"hex\": \"0x0a\", \"int\": 10 }").unwrap();
let v: U256 = xjson!(&j["dec"]);
assert_eq!(U256::from(10), v);
let v: U256 = xjson!(&j["hex"]);
assert_eq!(U256::from(10), v);
let v: U256 = xjson!(&j["int"]);
assert_eq!(U256::from(10), v);
}
#[test]
fn h256_from_json() {
let j = Json::from_str("{ \"with\": \"0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef\", \"without\": \"0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef\" }").unwrap();
let v: H256 = xjson!(&j["with"]);
assert_eq!(H256::from_str("1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef").unwrap(), v);
let v: H256 = xjson!(&j["without"]);
assert_eq!(H256::from_str("1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef").unwrap(), v);
}
#[test]
fn vec_u256_from_json() {
let j = Json::from_str("{ \"array\": [ \"10\", \"0x0a\", 10] }").unwrap();
let v: Vec<U256> = xjson!(&j["array"]);
assert_eq!(vec![U256::from(10); 3], v);
}
#[test]
fn vec_h256_from_json() {
let j = Json::from_str("{ \"array\": [ \"1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef\", \"0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef\"] }").unwrap();
let v: Vec<H256> = xjson!(&j["array"]);
assert_eq!(vec![H256::from_str("1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef").unwrap(); 2], v);
}
#[test]
fn simple_types() {
let j = Json::from_str("{ \"null\": null, \"empty\": \"\", \"int\": 42, \"dec\": \"42\", \"hex\": \"0x2a\" }").unwrap();
let v: u16 = xjson!(&j["int"]);
assert_eq!(42u16, v);
let v: u32 = xjson!(&j["dec"]);
assert_eq!(42u32, v);
let v: u64 = xjson!(&j["hex"]);
assert_eq!(42u64, v);
}
#[test]
fn option_types() {
let j = Json::from_str("{ \"null\": null, \"empty\": \"\", \"int\": 42, \"dec\": \"42\", \"hex\": \"0x2a\" }").unwrap();
let v: Option<u16> = xjson!(&j["int"]);
assert_eq!(Some(42u16), v);
let v: Option<u16> = xjson!(&j["dec"]);
assert_eq!(Some(42u16), v);
let v: Option<u16> = xjson!(&j["null"]);
assert_eq!(None, v);
let v: Option<u16> = xjson!(&j["empty"]);
assert_eq!(None, v);
}
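// Illustrative addition (not part of the original commit): `clean` strips an
// optional "0x" prefix, and `u256_from_str` accepts hex (0x-prefixed) or
// decimal input, falling back to zero when parsing fails.
#[test]
fn clean_and_u256_from_str() {
assert_eq!(clean("0x2a"), "2a");
assert_eq!(clean("2a"), "2a");
assert_eq!(u256_from_str("42"), U256::from(42));
assert_eq!(u256_from_str("0x2a"), U256::from(42));
assert_eq!(u256_from_str("not a number"), U256::from(0));
}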

101
src/lib.rs Normal file

@ -0,0 +1,101 @@
#![feature(op_assign_traits)]
#![feature(augmented_assignments)]
#![feature(associated_consts)]
#![feature(wrapping)]
//! Ethcore-util library
//!
//! ### Rust version:
//! - beta
//! - nightly
//!
//! ### Supported platforms:
//! - OSX
//! - Linux
//!
//! ### Dependencies:
//! - RocksDB 3.13
//!
//! ### Dependencies Installation:
//!
//! - OSX:
//!
//! ```bash
//! brew install rocksdb
//! ```
//!
//! - From source:
//!
//! ```bash
//! wget https://github.com/facebook/rocksdb/archive/rocksdb-3.13.tar.gz
//! tar xvf rocksdb-3.13.tar.gz && cd rocksdb-rocksdb-3.13 && make shared_lib
//! sudo make install
//! ```
extern crate slab;
extern crate rustc_serialize;
extern crate mio;
extern crate rand;
extern crate rocksdb;
extern crate tiny_keccak;
#[macro_use]
extern crate heapsize;
#[macro_use]
extern crate log;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate itertools;
extern crate env_logger;
extern crate time;
extern crate crypto as rcrypto;
extern crate secp256k1;
extern crate arrayvec;
extern crate elastic_array;
pub mod standard;
#[macro_use]
pub mod from_json;
#[macro_use]
pub mod common;
pub mod error;
pub mod hash;
pub mod uint;
pub mod bytes;
pub mod rlp;
pub mod misc;
pub mod json_aid;
pub mod vector;
pub mod sha3;
pub mod hashdb;
pub mod memorydb;
pub mod overlaydb;
pub mod math;
pub mod chainfilter;
pub mod crypto;
pub mod triehash;
pub mod trie;
pub mod nibbleslice;
pub mod heapsizeof;
pub mod squeeze;
pub mod semantic_version;
pub mod io;
pub mod network;
pub use common::*;
pub use misc::*;
pub use json_aid::*;
pub use rlp::*;
pub use hashdb::*;
pub use memorydb::*;
pub use overlaydb::*;
pub use math::*;
pub use chainfilter::*;
pub use crypto::*;
pub use triehash::*;
pub use trie::*;
pub use nibbleslice::*;
pub use heapsizeof::*;
pub use squeeze::*;
pub use semantic_version::*;
pub use network::*;
pub use io::*;

9
src/math.rs Normal file

@ -0,0 +1,9 @@
/// Integer log2 helper: returns the 1-based position of the highest set bit of `x`, or 0 for `x <= 1`.
pub fn log2(x: usize) -> u32 {
if x <= 1 {
return 0;
}
let n = x.leading_zeros();
::std::mem::size_of::<usize>() as u32 * 8 - n
}
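// Illustrative addition (not part of the original commit): for x > 1 the
// function returns the 1-based position of the highest set bit (i.e. the
// bit length of x), and 0 for x <= 1.
#[test]
fn test_log2() {
assert_eq!(log2(0), 0);
assert_eq!(log2(1), 0);
assert_eq!(log2(2), 2);
assert_eq!(log2(3), 2);
assert_eq!(log2(4), 3);
assert_eq!(log2(256), 9);
}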

219
src/memorydb.rs Normal file

@ -0,0 +1,219 @@
//! Reference-counted memory-based HashDB implementation.
use hash::*;
use bytes::*;
use rlp::*;
use sha3::*;
use hashdb::*;
use std::mem;
use std::collections::HashMap;
#[derive(Debug,Clone)]
/// Reference-counted memory-based HashDB implementation.
///
/// Use `new()` to create a new database. Insert items with `insert()`, remove items
/// with `kill()`, check for existence with `exists()` and look up a hash to derive
/// the data with `lookup()`. Clear with `clear()` and purge the portions of the data
/// that have no references with `purge()`.
///
/// # Example
/// ```rust
/// extern crate ethcore_util;
/// use ethcore_util::hashdb::*;
/// use ethcore_util::memorydb::*;
/// fn main() {
/// let mut m = MemoryDB::new();
/// let d = "Hello world!".as_bytes();
///
/// let k = m.insert(d);
/// assert!(m.exists(&k));
/// assert_eq!(m.lookup(&k).unwrap(), d);
///
/// m.insert(d);
/// assert!(m.exists(&k));
///
/// m.kill(&k);
/// assert!(m.exists(&k));
///
/// m.kill(&k);
/// assert!(!m.exists(&k));
///
/// m.kill(&k);
/// assert!(!m.exists(&k));
///
/// m.insert(d);
/// assert!(!m.exists(&k));
/// m.insert(d);
/// assert!(m.exists(&k));
/// assert_eq!(m.lookup(&k).unwrap(), d);
///
/// m.kill(&k);
/// assert!(!m.exists(&k));
/// }
/// ```
pub struct MemoryDB {
data: HashMap<H256, (Bytes, i32)>,
static_null_rlp: (Bytes, i32),
}
impl MemoryDB {
/// Create a new instance of the memory DB.
pub fn new() -> MemoryDB {
MemoryDB {
data: HashMap::new(),
static_null_rlp: (vec![0x80u8; 1], 1),
}
}
/// Clear all data from the database.
///
/// # Examples
/// ```rust
/// extern crate ethcore_util;
/// use ethcore_util::hashdb::*;
/// use ethcore_util::memorydb::*;
/// fn main() {
/// let mut m = MemoryDB::new();
/// let hello_bytes = "Hello world!".as_bytes();
/// let hash = m.insert(hello_bytes);
/// assert!(m.exists(&hash));
/// m.clear();
/// assert!(!m.exists(&hash));
/// }
/// ```
pub fn clear(&mut self) {
self.data.clear();
}
/// Purge all zero-referenced data from the database.
pub fn purge(&mut self) {
let empties: Vec<_> = self.data.iter()
.filter(|&(_, &(_, rc))| rc == 0)
.map(|(k, _)| k.clone())
.collect();
for empty in empties { self.data.remove(&empty); }
}
/// Grab the raw information associated with a key. Returns None if the key
/// doesn't exist.
///
/// Even when Some is returned, the data is only guaranteed to be useful
/// when the refs > 0.
pub fn raw(&self, key: &H256) -> Option<&(Bytes, i32)> {
if key == &SHA3_NULL_RLP {
return Some(&self.static_null_rlp);
}
self.data.get(key)
}
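/// Return the internal map of all data in this DB, clearing `self` in the process.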
pub fn drain(&mut self) -> HashMap<H256, (Bytes, i32)> {
let mut data = HashMap::new();
mem::swap(&mut self.data, &mut data);
data
}
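/// Denote that `key` maps to `value`: if the key is not already present it is
/// inserted with a reference count of zero (via interior mutation), and the
/// raw entry is returned.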
pub fn denote(&self, key: &H256, value: Bytes) -> &(Bytes, i32) {
if self.raw(key) == None {
unsafe {
let p = &self.data as *const HashMap<H256, (Bytes, i32)> as *mut HashMap<H256, (Bytes, i32)>;
(*p).insert(key.clone(), (value, 0));
}
}
self.raw(key).unwrap()
}
}
static NULL_RLP_STATIC: [u8; 1] = [0x80; 1];
impl HashDB for MemoryDB {
fn lookup(&self, key: &H256) -> Option<&[u8]> {
if key == &SHA3_NULL_RLP {
return Some(&NULL_RLP_STATIC);
}
match self.data.get(key) {
Some(&(ref d, rc)) if rc > 0 => Some(d),
_ => None
}
}
fn keys(&self) -> HashMap<H256, i32> {
self.data.iter().filter_map(|(k, v)| if v.1 != 0 {Some((k.clone(), v.1))} else {None}).collect()
}
fn exists(&self, key: &H256) -> bool {
if key == &SHA3_NULL_RLP {
return true;
}
match self.data.get(key) {
Some(&(_, x)) if x > 0 => true,
_ => false
}
}
fn insert(&mut self, value: &[u8]) -> H256 {
if value == &NULL_RLP {
return SHA3_NULL_RLP.clone();
}
let key = value.sha3();
if match self.data.get_mut(&key) {
Some(&mut (ref mut old_value, ref mut rc @ -0x80000000i32 ... 0)) => {
*old_value = From::from(value.bytes());
*rc += 1;
false
},
Some(&mut (_, ref mut x)) => { *x += 1; false } ,
None => true,
}{ // ... None falls through into...
self.data.insert(key.clone(), (From::from(value.bytes()), 1));
}
key
}
fn emplace(&mut self, key: H256, value: Bytes) {
if value == &NULL_RLP {
return;
}
match self.data.get_mut(&key) {
Some(&mut (ref mut old_value, ref mut rc @ -0x80000000i32 ... 0)) => {
*old_value = value;
*rc += 1;
return;
},
Some(&mut (_, ref mut x)) => { *x += 1; return; } ,
None => {},
}
// ... None falls through into...
self.data.insert(key, (value, 1));
}
fn kill(&mut self, key: &H256) {
if key == &SHA3_NULL_RLP {
return;
}
if match self.data.get_mut(key) {
Some(&mut (_, ref mut x)) => { *x -= 1; false }
None => true
}{ // ... None falls through into...
self.data.insert(key.clone(), (Bytes::new(), -1));
}
}
}
#[test]
fn memorydb_denote() {
let mut m = MemoryDB::new();
let hello_bytes = b"Hello world!";
let hash = m.insert(hello_bytes);
assert_eq!(m.lookup(&hash).unwrap(), b"Hello world!");
for _ in 0..1000 {
let r = H256::random();
let k = r.sha3();
let &(ref v, ref rc) = m.denote(&k, r.bytes().to_vec());
assert_eq!(v, &r.bytes());
assert_eq!(*rc, 0);
}
assert_eq!(m.lookup(&hash).unwrap(), b"Hello world!");
}

31
src/misc.rs Normal file

@ -0,0 +1,31 @@
use common::*;
#[derive(Debug,Clone,PartialEq,Eq)]
/// Diff type for specifying a change (or not).
pub enum Diff<T> where T: Eq {
Same,
Born(T),
Changed(T, T),
Died(T),
}
impl<T> Diff<T> where T: Eq {
/// Construct new object with given `pre` and `post`.
pub fn new(pre: T, post: T) -> Self { if pre == post { Diff::Same } else { Diff::Changed(pre, post) } }
/// Get the before value, if there is one.
pub fn pre(&self) -> Option<&T> { match self { &Diff::Died(ref x) | &Diff::Changed(ref x, _) => Some(x), _ => None } }
/// Get the after value, if there is one.
pub fn post(&self) -> Option<&T> { match self { &Diff::Born(ref x) | &Diff::Changed(_, ref x) => Some(x), _ => None } }
/// Determine whether there was a change or not.
pub fn is_same(&self) -> bool { match self { &Diff::Same => true, _ => false }}
}
#[derive(PartialEq,Eq,Clone,Copy)]
/// Boolean type for clean/dirty status.
pub enum Filth {
Clean,
Dirty,
}
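// Illustrative addition (not part of the original commit): basic behaviour of `Diff`.
#[test]
fn diff_basic() {
assert!(Diff::new(1u32, 1u32).is_same());
let d = Diff::new(1u32, 2u32);
assert_eq!(d.pre(), Some(&1u32));
assert_eq!(d.post(), Some(&2u32));
assert!(!d.is_same());
}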

410
src/network/connection.rs Normal file

@ -0,0 +1,410 @@
use std::collections::VecDeque;
use mio::{Handler, Token, EventSet, EventLoop, Timeout, PollOpt, TryRead, TryWrite};
use mio::tcp::*;
use hash::*;
use sha3::*;
use bytes::*;
use rlp::*;
use std::io::{self, Cursor, Read};
use error::*;
use network::error::NetworkError;
use network::handshake::Handshake;
use crypto;
use rcrypto::blockmodes::*;
use rcrypto::aessafe::*;
use rcrypto::symmetriccipher::*;
use rcrypto::buffer::*;
use tiny_keccak::Keccak;
const ENCRYPTED_HEADER_LEN: usize = 32;
/// Low level tcp connection
pub struct Connection {
/// Connection id (token)
pub token: Token,
/// Network socket
pub socket: TcpStream,
/// Receive buffer
rec_buf: Bytes,
/// Expected size
rec_size: usize,
/// Send out packets FIFO
send_queue: VecDeque<Cursor<Bytes>>,
/// Event flags this connection expects
interest: EventSet,
}
/// Connection write status.
#[derive(PartialEq, Eq)]
pub enum WriteStatus {
/// Some data is still pending for current packet
Ongoing,
/// All data sent.
Complete
}
impl Connection {
/// Create a new connection with given id and socket.
pub fn new(token: Token, socket: TcpStream) -> Connection {
Connection {
token: token,
socket: socket,
send_queue: VecDeque::new(),
rec_buf: Bytes::new(),
rec_size: 0,
interest: EventSet::hup(),
}
}
/// Put the connection into read mode, expecting `size` bytes of data.
pub fn expect(&mut self, size: usize) {
if self.rec_size != self.rec_buf.len() {
warn!(target:"net", "Unexpected connection read start");
}
unsafe { self.rec_buf.set_len(0) }
self.rec_size = size;
}
/// Readable IO handler. Called when there is some data to be read.
//TODO: return a slice
pub fn readable(&mut self) -> io::Result<Option<Bytes>> {
if self.rec_size == 0 || self.rec_buf.len() >= self.rec_size {
warn!(target:"net", "Unexpected connection read");
}
let max = self.rec_size - self.rec_buf.len();
// resolve "multiple applicable items in scope [E0034]" error
let sock_ref = <TcpStream as Read>::by_ref(&mut self.socket);
match sock_ref.take(max as u64).try_read_buf(&mut self.rec_buf) {
Ok(Some(_)) if self.rec_buf.len() == self.rec_size => {
self.rec_size = 0;
Ok(Some(::std::mem::replace(&mut self.rec_buf, Bytes::new())))
},
Ok(_) => Ok(None),
Err(e) => Err(e),
}
}
/// Add a packet to send queue.
pub fn send(&mut self, data: Bytes) {
if data.len() != 0 {
self.send_queue.push_back(Cursor::new(data));
}
if !self.interest.is_writable() {
self.interest.insert(EventSet::writable());
}
}
/// Writable IO handler. Called when the socket is ready to send.
pub fn writable(&mut self) -> io::Result<WriteStatus> {
if self.send_queue.is_empty() {
return Ok(WriteStatus::Complete)
}
{
let buf = self.send_queue.front_mut().unwrap();
let send_size = buf.get_ref().len();
if (buf.position() as usize) >= send_size {
warn!(target:"net", "Unexpected connection data");
return Ok(WriteStatus::Complete)
}
match self.socket.try_write_buf(buf) {
Ok(_) if (buf.position() as usize) < send_size => {
self.interest.insert(EventSet::writable());
Ok(WriteStatus::Ongoing)
},
Ok(_) if (buf.position() as usize) == send_size => {
Ok(WriteStatus::Complete)
},
Ok(_) => { panic!("Wrote past buffer");},
Err(e) => Err(e)
}
}.and_then(|r| {
if r == WriteStatus::Complete {
self.send_queue.pop_front();
}
if self.send_queue.is_empty() {
self.interest.remove(EventSet::writable());
}
else {
self.interest.insert(EventSet::writable());
}
Ok(r)
})
}
/// Register this connection with the IO event loop.
pub fn register<Host: Handler>(&mut self, event_loop: &mut EventLoop<Host>) -> io::Result<()> {
trace!(target: "net", "connection register; token={:?}", self.token);
self.interest.insert(EventSet::readable());
event_loop.register(&self.socket, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()).or_else(|e| {
error!("Failed to register {:?}, {:?}", self.token, e);
Err(e)
})
}
/// Update connection registration. Should be called at the end of the IO handler.
pub fn reregister<Host: Handler>(&mut self, event_loop: &mut EventLoop<Host>) -> io::Result<()> {
trace!(target: "net", "connection reregister; token={:?}", self.token);
event_loop.reregister( &self.socket, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()).or_else(|e| {
error!("Failed to reregister {:?}, {:?}", self.token, e);
Err(e)
})
}
}
/// RLPx packet
pub struct Packet {
pub protocol: u16,
pub data: Bytes,
}
/// Encrypted connection receiving state.
enum EncryptedConnectionState {
/// Reading a header.
Header,
/// Reading the rest of the packet.
Payload,
}
/// Connection implementing RLPx framing
/// https://github.com/ethereum/devp2p/blob/master/rlpx.md#framing
pub struct EncryptedConnection {
/// Underlying tcp connection
connection: Connection,
/// Egress data encryptor
encoder: CtrMode<AesSafe256Encryptor>,
/// Ingress data decryptor
decoder: CtrMode<AesSafe256Encryptor>,
/// Encryptor used for updating the egress/ingress MACs
mac_encoder: EcbEncryptor<AesSafe256Encryptor, EncPadding<NoPadding>>,
/// MAC for egress data
egress_mac: Keccak,
/// MAC for ingress data
ingress_mac: Keccak,
/// Read state
read_state: EncryptedConnectionState,
/// Disconnect timeout
idle_timeout: Option<Timeout>,
/// Protocol id for the last received packet
protocol_id: u16,
/// Payload expected to be received for the last header.
payload_len: usize,
}
impl EncryptedConnection {
/// Create an encrypted connection out of the handshake. Consumes a handshake object.
pub fn new(handshake: Handshake) -> Result<EncryptedConnection, UtilError> {
let shared = try!(crypto::ecdh::agree(handshake.ecdhe.secret(), &handshake.remote_public));
let mut nonce_material = H512::new();
if handshake.originated {
handshake.remote_nonce.copy_to(&mut nonce_material[0..32]);
handshake.nonce.copy_to(&mut nonce_material[32..64]);
}
else {
handshake.nonce.copy_to(&mut nonce_material[0..32]);
handshake.remote_nonce.copy_to(&mut nonce_material[32..64]);
}
let mut key_material = H512::new();
shared.copy_to(&mut key_material[0..32]);
nonce_material.sha3_into(&mut key_material[32..64]);
key_material.sha3().copy_to(&mut key_material[32..64]);
key_material.sha3().copy_to(&mut key_material[32..64]);
let iv = vec![0u8; 16];
let encoder = CtrMode::new(AesSafe256Encryptor::new(&key_material[32..64]), iv);
let iv = vec![0u8; 16];
let decoder = CtrMode::new(AesSafe256Encryptor::new(&key_material[32..64]), iv);
key_material.sha3().copy_to(&mut key_material[32..64]);
let mac_encoder = EcbEncryptor::new(AesSafe256Encryptor::new(&key_material[32..64]), NoPadding);
let mut egress_mac = Keccak::new_keccak256();
let mut mac_material = &H256::from_slice(&key_material[32..64]) ^ &handshake.remote_nonce;
egress_mac.update(&mac_material);
egress_mac.update(if handshake.originated { &handshake.auth_cipher } else { &handshake.ack_cipher });
let mut ingress_mac = Keccak::new_keccak256();
mac_material = &H256::from_slice(&key_material[32..64]) ^ &handshake.nonce;
ingress_mac.update(&mac_material);
ingress_mac.update(if handshake.originated { &handshake.ack_cipher } else { &handshake.auth_cipher });
Ok(EncryptedConnection {
connection: handshake.connection,
encoder: encoder,
decoder: decoder,
mac_encoder: mac_encoder,
egress_mac: egress_mac,
ingress_mac: ingress_mac,
read_state: EncryptedConnectionState::Header,
idle_timeout: None,
protocol_id: 0,
payload_len: 0
})
}
/// Send a packet
pub fn send_packet(&mut self, payload: &[u8]) -> Result<(), UtilError> {
let mut header = RlpStream::new();
let len = payload.len() as usize;
header.append_raw(&[(len >> 16) as u8, (len >> 8) as u8, len as u8], 1);
header.append_raw(&[0xc2u8, 0x80u8, 0x80u8], 1);
//TODO: get rid of vectors here
let mut header = header.out();
let padding = (16 - (payload.len() % 16)) % 16;
header.resize(16, 0u8);
let mut packet = vec![0u8; (32 + payload.len() + padding + 16)];
self.encoder.encrypt(&mut RefReadBuffer::new(&header), &mut RefWriteBuffer::new(&mut packet), false).expect("Invalid length or padding");
EncryptedConnection::update_mac(&mut self.egress_mac, &mut self.mac_encoder, &packet[0..16]);
self.egress_mac.clone().finalize(&mut packet[16..32]);
self.encoder.encrypt(&mut RefReadBuffer::new(&payload), &mut RefWriteBuffer::new(&mut packet[32..(32 + len)]), padding == 0).expect("Invalid length or padding");
if padding != 0 {
let pad = [0u8; 16];
self.encoder.encrypt(&mut RefReadBuffer::new(&pad[0..padding]), &mut RefWriteBuffer::new(&mut packet[(32 + len)..(32 + len + padding)]), true).expect("Invalid length or padding");
}
self.egress_mac.update(&packet[32..(32 + len + padding)]);
EncryptedConnection::update_mac(&mut self.egress_mac, &mut self.mac_encoder, &[0u8; 0]);
self.egress_mac.clone().finalize(&mut packet[(32 + len + padding)..]);
self.connection.send(packet);
Ok(())
}
/// Decrypt and authenticate an incoming packet header. Prepare for receiving payload.
fn read_header(&mut self, header: &[u8]) -> Result<(), UtilError> {
if header.len() != ENCRYPTED_HEADER_LEN {
return Err(From::from(NetworkError::Auth));
}
EncryptedConnection::update_mac(&mut self.ingress_mac, &mut self.mac_encoder, &header[0..16]);
let mac = &header[16..];
let mut expected = H256::new();
self.ingress_mac.clone().finalize(&mut expected);
if mac != &expected[0..16] {
return Err(From::from(NetworkError::Auth));
}
let mut hdec = H128::new();
self.decoder.decrypt(&mut RefReadBuffer::new(&header[0..16]), &mut RefWriteBuffer::new(&mut hdec), false).expect("Invalid length or padding");
let length = ((((hdec[0] as u32) << 8) + (hdec[1] as u32)) << 8) + (hdec[2] as u32);
let header_rlp = UntrustedRlp::new(&hdec[3..6]);
let protocol_id = try!(header_rlp.val_at::<u16>(0));
self.payload_len = length as usize;
self.protocol_id = protocol_id;
self.read_state = EncryptedConnectionState::Payload;
let padding = (16 - (length % 16)) % 16;
let full_length = length + padding + 16;
self.connection.expect(full_length as usize);
Ok(())
}
/// Decrypt and authenticate packet payload.
fn read_payload(&mut self, payload: &[u8]) -> Result<Packet, UtilError> {
let padding = (16 - (self.payload_len % 16)) % 16;
let full_length = self.payload_len + padding + 16;
if payload.len() != full_length {
return Err(From::from(NetworkError::Auth));
}
self.ingress_mac.update(&payload[0..payload.len() - 16]);
EncryptedConnection::update_mac(&mut self.ingress_mac, &mut self.mac_encoder, &[0u8; 0]);
let mac = &payload[(payload.len() - 16)..];
let mut expected = H128::new();
self.ingress_mac.clone().finalize(&mut expected);
if mac != &expected[..] {
return Err(From::from(NetworkError::Auth));
}
let mut packet = vec![0u8; self.payload_len];
self.decoder.decrypt(&mut RefReadBuffer::new(&payload[0..self.payload_len]), &mut RefWriteBuffer::new(&mut packet), false).expect("Invalid length or padding");
let mut pad_buf = [0u8; 16];
self.decoder.decrypt(&mut RefReadBuffer::new(&payload[self.payload_len..(payload.len() - 16)]), &mut RefWriteBuffer::new(&mut pad_buf), false).expect("Invalid length or padding");
Ok(Packet {
protocol: self.protocol_id,
data: packet
})
}
/// Update MAC after reading or writing any data.
fn update_mac(mac: &mut Keccak, mac_encoder: &mut EcbEncryptor<AesSafe256Encryptor, EncPadding<NoPadding>>, seed: &[u8]) {
let mut prev = H128::new();
mac.clone().finalize(&mut prev);
let mut enc = H128::new();
mac_encoder.encrypt(&mut RefReadBuffer::new(&prev), &mut RefWriteBuffer::new(&mut enc), true).unwrap();
mac_encoder.reset();
enc = enc ^ if seed.is_empty() { prev } else { H128::from_slice(seed) };
mac.update(&enc);
}
/// Readable IO handler. Tracks the receive state and returns a decoded packet if available.
pub fn readable<Host:Handler>(&mut self, event_loop: &mut EventLoop<Host>) -> Result<Option<Packet>, UtilError> {
self.idle_timeout.map(|t| event_loop.clear_timeout(t));
match self.read_state {
EncryptedConnectionState::Header => {
match try!(self.connection.readable()) {
Some(data) => {
try!(self.read_header(&data));
},
None => {}
};
Ok(None)
},
EncryptedConnectionState::Payload => {
match try!(self.connection.readable()) {
Some(data) => {
self.read_state = EncryptedConnectionState::Header;
self.connection.expect(ENCRYPTED_HEADER_LEN);
Ok(Some(try!(self.read_payload(&data))))
},
None => Ok(None)
}
}
}
}
/// Writable IO handler. Processes the send queue.
pub fn writable<Host:Handler>(&mut self, event_loop: &mut EventLoop<Host>) -> Result<(), UtilError> {
self.idle_timeout.map(|t| event_loop.clear_timeout(t));
try!(self.connection.writable());
Ok(())
}
/// Register this connection with the event handler.
pub fn register<Host:Handler<Timeout=Token>>(&mut self, event_loop: &mut EventLoop<Host>) -> Result<(), UtilError> {
self.connection.expect(ENCRYPTED_HEADER_LEN);
self.idle_timeout.map(|t| event_loop.clear_timeout(t));
self.idle_timeout = event_loop.timeout_ms(self.connection.token, 1800).ok();
try!(self.connection.reregister(event_loop));
Ok(())
}
/// Update connection registration. This should be called at the end of the event loop.
pub fn reregister<Host:Handler>(&mut self, event_loop: &mut EventLoop<Host>) -> Result<(), UtilError> {
try!(self.connection.reregister(event_loop));
Ok(())
}
}
#[test]
pub fn test_encryption() {
use hash::*;
use std::str::FromStr;
let key = H256::from_str("2212767d793a7a3d66f869ae324dd11bd17044b82c9f463b8a541a4d089efec5").unwrap();
let before = H128::from_str("12532abaec065082a3cf1da7d0136f15").unwrap();
let before2 = H128::from_str("7e99f682356fdfbc6b67a9562787b18a").unwrap();
let after = H128::from_str("89464c6b04e7c99e555c81d3f7266a05").unwrap();
let after2 = H128::from_str("85c070030589ef9c7a2879b3a8489316").unwrap();
let mut got = H128::new();
let mut encoder = EcbEncryptor::new(AesSafe256Encryptor::new(&key), NoPadding);
encoder.encrypt(&mut RefReadBuffer::new(&before), &mut RefWriteBuffer::new(&mut got), true).unwrap();
encoder.reset();
assert_eq!(got, after);
got = H128::new();
encoder.encrypt(&mut RefReadBuffer::new(&before2), &mut RefWriteBuffer::new(&mut got), true).unwrap();
encoder.reset();
assert_eq!(got, after2);
}

206
src/network/discovery.rs Normal file

@ -0,0 +1,206 @@
// This module is a work in progress
#![allow(dead_code)] //TODO: remove this after everything is done
use std::collections::{HashSet, BTreeMap};
use std::cell::{RefCell};
use std::ops::{DerefMut};
use mio::*;
use mio::udp::*;
use hash::*;
use sha3::Hashable;
use crypto::*;
use network::node::*;
const ADDRESS_BYTES_SIZE: u32 = 32; // Size of address type in bytes.
const ADDRESS_BITS: u32 = 8 * ADDRESS_BYTES_SIZE; // Denoted by n in [Kademlia].
const NODE_BINS: u32 = ADDRESS_BITS - 1; // Size of m_state (excludes root, which is us).
const DISCOVERY_MAX_STEPS: u16 = 8; // Max iterations of discovery. (discover)
const BUCKET_SIZE: u32 = 16; // Denoted by k in [Kademlia]. Number of nodes stored in each bucket.
const ALPHA: usize = 3; // Denoted by \alpha in [Kademlia]. Number of concurrent FindNode requests.
struct NodeBucket {
distance: u32,
nodes: Vec<NodeId>
}
impl NodeBucket {
fn new(distance: u32) -> NodeBucket {
NodeBucket {
distance: distance,
nodes: Vec::new()
}
}
}
struct Discovery {
id: NodeId,
discovery_round: u16,
discovery_id: NodeId,
discovery_nodes: HashSet<NodeId>,
node_buckets: Vec<NodeBucket>,
}
struct FindNodePacket;
impl FindNodePacket {
fn new(_endpoint: &NodeEndpoint, _id: &NodeId) -> FindNodePacket {
FindNodePacket
}
fn sign(&mut self, _secret: &Secret) {
}
fn send(& self, _socket: &mut UdpSocket) {
}
}
impl Discovery {
pub fn new(id: &NodeId) -> Discovery {
Discovery {
id: id.clone(),
discovery_round: 0,
discovery_id: NodeId::new(),
discovery_nodes: HashSet::new(),
node_buckets: (0..NODE_BINS).map(|x| NodeBucket::new(x)).collect(),
}
}
pub fn add_node(&mut self, id: &NodeId) {
self.node_buckets[Discovery::distance(&self.id, &id) as usize].nodes.push(id.clone());
}
fn start_node_discovery<Host:Handler>(&mut self, event_loop: &mut EventLoop<Host>) {
self.discovery_round = 0;
self.discovery_id.randomize();
self.discovery_nodes.clear();
self.discover(event_loop);
}
fn discover<Host:Handler>(&mut self, event_loop: &mut EventLoop<Host>) {
if self.discovery_round == DISCOVERY_MAX_STEPS
{
debug!("Restarting discovery");
self.start_node_discovery(event_loop);
return;
}
let mut tried_count = 0;
{
let nearest = Discovery::nearest_node_entries(&self.id, &self.discovery_id, &self.node_buckets).into_iter();
let nodes = RefCell::new(&mut self.discovery_nodes);
let nearest = nearest.filter(|x| !nodes.borrow().contains(&x)).take(ALPHA); // only query nodes we have not tried yet
for r in nearest {
//let mut p = FindNodePacket::new(&r.endpoint, &self.discovery_id);
//p.sign(&self.secret);
//p.send(&mut self.udp_socket);
let mut borrowed = nodes.borrow_mut();
borrowed.deref_mut().insert(r.clone());
tried_count += 1;
}
}
if tried_count == 0
{
debug!("Restarting discovery");
self.start_node_discovery(event_loop);
return;
}
self.discovery_round += 1;
//event_loop.timeout_ms(Token(NODETABLE_DISCOVERY), 1200).unwrap();
}
fn distance(a: &NodeId, b: &NodeId) -> u32 {
let d = a.sha3() ^ b.sha3();
let mut ret:u32 = 0;
for i in 0..32 {
let mut v: u8 = d[i];
while v != 0 {
v >>= 1;
ret += 1;
}
}
ret
}
fn nearest_node_entries<'b>(source: &NodeId, target: &NodeId, buckets: &'b Vec<NodeBucket>) -> Vec<&'b NodeId>
{
// send ALPHA FindNode packets to nodes we know, closest to target
const LAST_BIN: u32 = NODE_BINS - 1;
let mut head = Discovery::distance(source, target);
let mut tail = if head == 0 { LAST_BIN } else { (head - 1) % NODE_BINS };
let mut found: BTreeMap<u32, Vec<&'b NodeId>> = BTreeMap::new();
let mut count = 0;
// if the distance is 0 we only look forward; if it is the last bin we walk backwards; otherwise we spread out from the distance bin in both directions
if head > 1 && tail != LAST_BIN {
while head != tail && head < NODE_BINS && count < BUCKET_SIZE
{
for n in buckets[head as usize].nodes.iter()
{
if count < BUCKET_SIZE {
count += 1;
found.entry(Discovery::distance(target, &n)).or_insert(Vec::new()).push(n);
}
else {
break;
}
}
if count < BUCKET_SIZE && tail != 0 {
for n in buckets[tail as usize].nodes.iter() {
if count < BUCKET_SIZE {
count += 1;
found.entry(Discovery::distance(target, &n)).or_insert(Vec::new()).push(n);
}
else {
break;
}
}
}
head += 1;
if tail > 0 {
tail -= 1;
}
}
}
else if head < 2 {
while head < NODE_BINS && count < BUCKET_SIZE {
for n in buckets[head as usize].nodes.iter() {
if count < BUCKET_SIZE {
count += 1;
found.entry(Discovery::distance(target, &n)).or_insert(Vec::new()).push(n);
}
else {
break;
}
}
head += 1;
}
}
else {
while tail > 0 && count < BUCKET_SIZE {
for n in buckets[tail as usize].nodes.iter() {
if count < BUCKET_SIZE {
count += 1;
found.entry(Discovery::distance(target, &n)).or_insert(Vec::new()).push(n);
}
else {
break;
}
}
tail -= 1;
}
}
let mut ret:Vec<&NodeId> = Vec::new();
for (_, nodes) in found {
for n in nodes {
if ret.len() < BUCKET_SIZE as usize /* && n->endpoint && n->endpoint.isAllowed() */ {
ret.push(n);
}
}
}
ret
}
}
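// Illustrative addition (not part of the original commit): the XOR-popcount
// distance of a node id to itself is zero.
#[test]
fn discovery_distance_to_self_is_zero() {
let id = NodeId::new();
assert_eq!(Discovery::distance(&id, &id), 0);
}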

41
src/network/error.rs Normal file

@ -0,0 +1,41 @@
use io::IoError;
use rlp::*;
#[derive(Debug, Copy, Clone)]
pub enum DisconnectReason
{
DisconnectRequested,
//TCPError,
//BadProtocol,
UselessPeer,
//TooManyPeers,
//DuplicatePeer,
//IncompatibleProtocol,
//NullIdentity,
//ClientQuit,
//UnexpectedIdentity,
//LocalIdentity,
//PingTimeout,
}
#[derive(Debug)]
pub enum NetworkError {
Auth,
BadProtocol,
PeerNotFound,
Disconnect(DisconnectReason),
Io(IoError),
}
impl From<DecoderError> for NetworkError {
fn from(_err: DecoderError) -> NetworkError {
NetworkError::Auth
}
}
impl From<IoError> for NetworkError {
fn from(err: IoError) -> NetworkError {
NetworkError::Io(err)
}
}

217
src/network/handshake.rs Normal file

@ -0,0 +1,217 @@
use mio::*;
use mio::tcp::*;
use hash::*;
use sha3::Hashable;
use bytes::Bytes;
use crypto::*;
use crypto;
use network::connection::{Connection};
use network::host::{HostInfo};
use network::node::NodeId;
use error::*;
use network::error::NetworkError;
#[derive(PartialEq, Eq, Debug)]
enum HandshakeState {
/// Just created
New,
/// Waiting for auth packet
ReadingAuth,
/// Waiting for ack packet
ReadingAck,
/// Ready to start a session
StartSession,
}
/// RLPx protocol handshake. See https://github.com/ethereum/devp2p/blob/master/rlpx.md#encrypted-handshake
pub struct Handshake {
/// Remote node public key
pub id: NodeId,
/// Underlying connection
pub connection: Connection,
/// Handshake state
state: HandshakeState,
/// Outgoing or incoming connection
pub originated: bool,
/// Disconnect timeout
idle_timeout: Option<Timeout>,
/// ECDH ephemeral
pub ecdhe: KeyPair,
/// Connection nonce
pub nonce: H256,
/// Handshake public key
pub remote_public: Public,
/// Remote connection nonce.
pub remote_nonce: H256,
/// A copy of the received encrypted auth packet
pub auth_cipher: Bytes,
/// A copy of the received encrypted ack packet
pub ack_cipher: Bytes
}
const AUTH_PACKET_SIZE: usize = 307;
const ACK_PACKET_SIZE: usize = 210;
impl Handshake {
/// Create a new handshake object
pub fn new(token: Token, id: &NodeId, socket: TcpStream, nonce: &H256) -> Result<Handshake, UtilError> {
Ok(Handshake {
id: id.clone(),
connection: Connection::new(token, socket),
originated: false,
state: HandshakeState::New,
idle_timeout: None,
ecdhe: try!(KeyPair::create()),
nonce: nonce.clone(),
remote_public: Public::new(),
remote_nonce: H256::new(),
auth_cipher: Bytes::new(),
ack_cipher: Bytes::new(),
})
}
/// Start a handshake
pub fn start(&mut self, host: &HostInfo, originated: bool) -> Result<(), UtilError> {
self.originated = originated;
if originated {
try!(self.write_auth(host));
}
else {
self.state = HandshakeState::ReadingAuth;
self.connection.expect(AUTH_PACKET_SIZE);
};
Ok(())
}
/// Check if handshake is complete
pub fn done(&self) -> bool {
self.state == HandshakeState::StartSession
}
/// Readable IO handler. Drives the state change.
pub fn readable<Host:Handler>(&mut self, event_loop: &mut EventLoop<Host>, host: &HostInfo) -> Result<(), UtilError> {
self.idle_timeout.map(|t| event_loop.clear_timeout(t));
match self.state {
HandshakeState::ReadingAuth => {
match try!(self.connection.readable()) {
Some(data) => {
try!(self.read_auth(host, &data));
try!(self.write_ack());
},
None => {}
};
},
HandshakeState::ReadingAck => {
match try!(self.connection.readable()) {
Some(data) => {
try!(self.read_ack(host, &data));
self.state = HandshakeState::StartSession;
},
None => {}
};
},
_ => { panic!("Unexpected state"); }
}
if self.state != HandshakeState::StartSession {
try!(self.connection.reregister(event_loop));
}
Ok(())
}
/// Writable IO handler.
pub fn writable<Host:Handler>(&mut self, event_loop: &mut EventLoop<Host>, _host: &HostInfo) -> Result<(), UtilError> {
self.idle_timeout.map(|t| event_loop.clear_timeout(t));
try!(self.connection.writable());
if self.state != HandshakeState::StartSession {
try!(self.connection.reregister(event_loop));
}
Ok(())
}
/// Register the IO handler with the event loop
pub fn register<Host:Handler<Timeout=Token>>(&mut self, event_loop: &mut EventLoop<Host>) -> Result<(), UtilError> {
self.idle_timeout.map(|t| event_loop.clear_timeout(t));
self.idle_timeout = event_loop.timeout_ms(self.connection.token, 1800).ok();
try!(self.connection.register(event_loop));
Ok(())
}
/// Parse, validate and confirm auth message
fn read_auth(&mut self, host: &HostInfo, data: &[u8]) -> Result<(), UtilError> {
trace!(target:"net", "Received handshake auth to {:?}", self.connection.socket.peer_addr());
assert!(data.len() == AUTH_PACKET_SIZE);
self.auth_cipher = data.to_vec();
let auth = try!(ecies::decrypt(host.secret(), data));
let (sig, rest) = auth.split_at(65);
let (hepubk, rest) = rest.split_at(32);
let (pubk, rest) = rest.split_at(64);
let (nonce, _) = rest.split_at(32);
self.remote_public.clone_from_slice(pubk);
self.remote_nonce.clone_from_slice(nonce);
let shared = try!(ecdh::agree(host.secret(), &self.remote_public));
let signature = Signature::from_slice(sig);
let spub = try!(ec::recover(&signature, &(&shared ^ &self.remote_nonce)));
if &spub.sha3()[..] != hepubk {
trace!(target:"net", "Handshake hash mismath with {:?}", self.connection.socket.peer_addr());
return Err(From::from(NetworkError::Auth));
};
self.write_ack()
}
/// Parse and validate ack message
fn read_ack(&mut self, host: &HostInfo, data: &[u8]) -> Result<(), UtilError> {
trace!(target:"net", "Received handshake auth to {:?}", self.connection.socket.peer_addr());
assert!(data.len() == ACK_PACKET_SIZE);
self.ack_cipher = data.to_vec();
let ack = try!(ecies::decrypt(host.secret(), data));
self.remote_public.clone_from_slice(&ack[0..64]);
self.remote_nonce.clone_from_slice(&ack[64..(64+32)]);
Ok(())
}
/// Sends auth message
fn write_auth(&mut self, host: &HostInfo) -> Result<(), UtilError> {
trace!(target:"net", "Sending handshake auth to {:?}", self.connection.socket.peer_addr());
let mut data = [0u8; /*Signature::SIZE*/ 65 + /*H256::SIZE*/ 32 + /*Public::SIZE*/ 64 + /*H256::SIZE*/ 32 + 1]; //TODO: use associated constants
let len = data.len();
{
data[len - 1] = 0x0;
let (sig, rest) = data.split_at_mut(65);
let (hepubk, rest) = rest.split_at_mut(32);
let (pubk, rest) = rest.split_at_mut(64);
let (nonce, _) = rest.split_at_mut(32);
// E(remote-pubk, S(ecdhe-random, ecdh-shared-secret^nonce) || H(ecdhe-random-pubk) || pubk || nonce || 0x0)
let shared = try!(crypto::ecdh::agree(host.secret(), &self.id));
try!(crypto::ec::sign(self.ecdhe.secret(), &(&shared ^ &self.nonce))).copy_to(sig);
self.ecdhe.public().sha3_into(hepubk);
host.id().copy_to(pubk);
self.nonce.copy_to(nonce);
}
let message = try!(crypto::ecies::encrypt(&self.id, &data));
self.auth_cipher = message.clone();
self.connection.send(message);
self.connection.expect(ACK_PACKET_SIZE);
self.state = HandshakeState::ReadingAck;
Ok(())
}
/// Sends ack message
fn write_ack(&mut self) -> Result<(), UtilError> {
trace!(target:"net", "Sending handshake ack to {:?}", self.connection.socket.peer_addr());
let mut data = [0u8; 1 + /*Public::SIZE*/ 64 + /*H256::SIZE*/ 32]; //TODO: use associated constants
let len = data.len();
{
data[len - 1] = 0x0;
let (epubk, rest) = data.split_at_mut(64);
let (nonce, _) = rest.split_at_mut(32);
self.ecdhe.public().copy_to(epubk);
self.nonce.copy_to(nonce);
}
let message = try!(crypto::ecies::encrypt(&self.id, &data));
self.ack_cipher = message.clone();
self.connection.send(message);
self.state = HandshakeState::StartSession;
Ok(())
}
}

651
src/network/host.rs Normal file
View File

@ -0,0 +1,651 @@
use std::mem;
use std::net::{SocketAddr};
use std::collections::{HashMap};
use std::hash::{Hasher};
use std::str::{FromStr};
use mio::*;
use mio::tcp::*;
use mio::udp::*;
use hash::*;
use crypto::*;
use sha3::Hashable;
use rlp::*;
use network::handshake::Handshake;
use network::session::{Session, SessionData};
use error::*;
use io::*;
use network::NetworkProtocolHandler;
use network::node::*;
type Slab<T> = ::slab::Slab<T, usize>;
const _DEFAULT_PORT: u16 = 30304;
const MAX_CONNECTIONS: usize = 1024;
const IDEAL_PEERS: u32 = 10;
const MAINTENANCE_TIMEOUT: u64 = 1000;
#[derive(Debug)]
struct NetworkConfiguration {
listen_address: SocketAddr,
public_address: SocketAddr,
nat_enabled: bool,
discovery_enabled: bool,
pin: bool,
}
impl NetworkConfiguration {
fn new() -> NetworkConfiguration {
NetworkConfiguration {
listen_address: SocketAddr::from_str("0.0.0.0:30304").unwrap(),
public_address: SocketAddr::from_str("0.0.0.0:30304").unwrap(),
nat_enabled: true,
discovery_enabled: true,
pin: false,
}
}
}
// Tokens
//const TOKEN_BEGIN: usize = USER_TOKEN_START; // TODO: ICE in rustc 1.7.0-nightly (49c382779 2016-01-12)
const TOKEN_BEGIN: usize = 32;
const TCP_ACCEPT: usize = TOKEN_BEGIN + 1;
const IDLE: usize = TOKEN_BEGIN + 2;
const NODETABLE_RECEIVE: usize = TOKEN_BEGIN + 3;
const NODETABLE_MAINTAIN: usize = TOKEN_BEGIN + 4;
const NODETABLE_DISCOVERY: usize = TOKEN_BEGIN + 5;
const FIRST_CONNECTION: usize = TOKEN_BEGIN + 16;
const LAST_CONNECTION: usize = FIRST_CONNECTION + MAX_CONNECTIONS - 1;
/// Protocol handler level packet id
pub type PacketId = u8;
/// Protocol / handler id
pub type ProtocolId = &'static str;
/// Messages used to communicate with the event loop from other threads.
pub enum NetworkIoMessage<Message> where Message: Send {
/// Register a new protocol handler.
AddHandler {
handler: Option<Box<NetworkProtocolHandler<Message>+Send>>,
protocol: ProtocolId,
versions: Vec<u8>,
},
/// Send data over the network.
Send {
peer: PeerId,
packet_id: PacketId,
protocol: ProtocolId,
data: Vec<u8>,
},
/// User message
User(Message),
}
/// Local (temporary) peer session ID.
pub type PeerId = usize;
#[derive(Debug, PartialEq, Eq)]
/// Protocol info
pub struct CapabilityInfo {
pub protocol: ProtocolId,
pub version: u8,
/// Total number of packet IDs this protocol supports.
pub packet_count: u8,
}
impl Encodable for CapabilityInfo {
fn encode<E>(&self, encoder: &mut E) -> () where E: Encoder {
encoder.emit_list(|e| {
self.protocol.encode(e);
(self.version as u32).encode(e);
});
}
}
/// IO access point. This is passed to all IO handlers and provides an interface to the IO subsystem.
pub struct NetworkContext<'s, 'io, Message> where Message: Send + 'static, 'io: 's {
io: &'s mut IoContext<'io, NetworkIoMessage<Message>>,
protocol: ProtocolId,
connections: &'s mut Slab<ConnectionEntry>,
timers: &'s mut HashMap<TimerToken, ProtocolId>,
session: Option<StreamToken>,
}
impl<'s, 'io, Message> NetworkContext<'s, 'io, Message> where Message: Send + 'static, {
/// Create a new network IO access point. Takes references to all the data that can be updated within the IO handler.
fn new(io: &'s mut IoContext<'io, NetworkIoMessage<Message>>,
protocol: ProtocolId,
session: Option<StreamToken>, connections: &'s mut Slab<ConnectionEntry>,
timers: &'s mut HashMap<TimerToken, ProtocolId>) -> NetworkContext<'s, 'io, Message> {
NetworkContext {
io: io,
protocol: protocol,
session: session,
connections: connections,
timers: timers,
}
}
/// Send a packet over the network to another peer.
pub fn send(&mut self, peer: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError> {
match self.connections.get_mut(peer) {
Some(&mut ConnectionEntry::Session(ref mut s)) => {
s.send_packet(self.protocol, packet_id as u8, &data).unwrap_or_else(|e| {
warn!(target: "net", "Send error: {:?}", e);
}); //TODO: don't copy vector data
},
_ => {
warn!(target: "net", "Send: Peer does not exist");
}
}
Ok(())
}
/// Respond to the current network message. Panics if there is no packet in the context.
pub fn respond(&mut self, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError> {
match self.session {
Some(session) => self.send(session, packet_id, data),
None => {
panic!("Respond: Session does not exist")
}
}
}
/// Disable the current protocol capability for the given peer. If no capabilities are left, the peer gets disconnected.
pub fn disable_peer(&mut self, _peer: PeerId) {
//TODO: remove capability, disconnect if no capabilities left
}
/// Register a new IO timer. Returns a new timer token. 'NetworkProtocolHandler::timeout' will be called with the token.
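/// The token is recorded against the current protocol, so the timeout is dispatched back to this handler.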
pub fn register_timer(&mut self, ms: u64) -> Result<TimerToken, UtilError>{
match self.io.register_timer(ms) {
Ok(token) => {
self.timers.insert(token, self.protocol);
Ok(token)
},
e => e,
}
}
/// Returns peer identification string
pub fn peer_info(&self, peer: PeerId) -> String {
match self.connections.get(peer) {
Some(&ConnectionEntry::Session(ref s)) => {
s.info.client_version.clone()
},
_ => {
"unknown".to_string()
}
}
}
}
/// Shared host information
pub struct HostInfo {
/// Our private and public keys.
keys: KeyPair,
/// Current network configuration
config: NetworkConfiguration,
/// Connection nonce.
nonce: H256,
/// RLPx protocol version
pub protocol_version: u32,
/// Client identifier
pub client_version: String,
/// TCP connection port.
pub listen_port: u16,
/// Registered capabilities (handlers)
pub capabilities: Vec<CapabilityInfo>
}
impl HostInfo {
/// Returns public key
pub fn id(&self) -> &NodeId {
self.keys.public()
}
/// Returns secret key
pub fn secret(&self) -> &Secret {
self.keys.secret()
}
/// Increments and returns connection nonce.
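/// Each call replaces the nonce with its `sha3` hash, so every new connection gets a fresh value.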
pub fn next_nonce(&mut self) -> H256 {
self.nonce = self.nonce.sha3();
return self.nonce.clone();
}
}
enum ConnectionEntry {
Handshake(Handshake),
Session(Session)
}
/// Root IO handler. Manages protocol handlers, IO timers and network connections.
pub struct Host<Message> where Message: Send {
pub info: HostInfo,
udp_socket: UdpSocket,
listener: TcpListener,
connections: Slab<ConnectionEntry>,
timers: HashMap<TimerToken, ProtocolId>,
nodes: HashMap<NodeId, Node>,
handlers: HashMap<ProtocolId, Box<NetworkProtocolHandler<Message>>>,
}
impl<Message> Host<Message> where Message: Send {
pub fn new() -> Host<Message> {
let config = NetworkConfiguration::new();
let addr = config.listen_address;
// Setup the server socket
let listener = TcpListener::bind(&addr).unwrap();
let udp_socket = UdpSocket::bound(&addr).unwrap();
Host::<Message> {
info: HostInfo {
keys: KeyPair::create().unwrap(),
config: config,
nonce: H256::random(),
protocol_version: 4,
client_version: "parity".to_string(),
listen_port: 0,
capabilities: Vec::new(),
},
udp_socket: udp_socket,
listener: listener,
connections: Slab::new_starting_at(FIRST_CONNECTION, MAX_CONNECTIONS),
timers: HashMap::new(),
nodes: HashMap::new(),
handlers: HashMap::new(),
}
}
fn add_node(&mut self, id: &str) {
match Node::from_str(id) {
Err(e) => { warn!("Could not add node: {:?}", e); },
Ok(n) => {
self.nodes.insert(n.id.clone(), n);
}
}
}
fn maintain_network(&mut self, io: &mut IoContext<NetworkIoMessage<Message>>) {
self.connect_peers(io);
io.event_loop.timeout_ms(Token(IDLE), MAINTENANCE_TIMEOUT).unwrap();
}
fn have_session(&self, id: &NodeId) -> bool {
self.connections.iter().any(|e| match e { &ConnectionEntry::Session(ref s) => s.info.id.eq(&id), _ => false })
}
fn connecting_to(&self, id: &NodeId) -> bool {
self.connections.iter().any(|e| match e { &ConnectionEntry::Handshake(ref h) => h.id.eq(&id), _ => false })
}
fn connect_peers(&mut self, io: &mut IoContext<NetworkIoMessage<Message>>) {
struct NodeInfo {
id: NodeId,
peer_type: PeerType
}
let mut to_connect: Vec<NodeInfo> = Vec::new();
let mut req_conn = 0;
//TODO: use nodes from discovery here
//for n in self.node_buckets.iter().flat_map(|n| &n.nodes).map(|id| NodeInfo { id: id.clone(), peer_type: self.nodes.get(id).unwrap().peer_type}) {
for n in self.nodes.values().map(|n| NodeInfo { id: n.id.clone(), peer_type: n.peer_type }) {
let connected = self.have_session(&n.id) || self.connecting_to(&n.id);
let required = n.peer_type == PeerType::Required;
if connected && required {
req_conn += 1;
}
else if !connected && (!self.info.config.pin || required) {
to_connect.push(n);
}
}
for n in to_connect.iter() {
if n.peer_type == PeerType::Required {
if req_conn < IDEAL_PEERS {
self.connect_peer(&n.id, io);
}
req_conn += 1;
}
}
if !self.info.config.pin
{
let pending_count = 0; //TODO:
let peer_count = 0;
let mut open_slots = IDEAL_PEERS - peer_count - pending_count + req_conn;
if open_slots > 0 {
for n in to_connect.iter() {
if n.peer_type == PeerType::Optional && open_slots > 0 {
open_slots -= 1;
self.connect_peer(&n.id, io);
}
}
}
}
}
fn connect_peer(&mut self, id: &NodeId, io: &mut IoContext<NetworkIoMessage<Message>>) {
if self.have_session(id)
{
warn!("Aborted connect. Node already connected.");
return;
}
if self.connecting_to(id)
{
warn!("Aborted connect. Node already connecting.");
return;
}
let socket = {
let node = self.nodes.get_mut(id).unwrap();
node.last_attempted = Some(::time::now());
match TcpStream::connect(&node.endpoint.address) {
Ok(socket) => socket,
Err(_) => {
warn!("Cannot connect to node");
return;
}
}
};
let nonce = self.info.next_nonce();
match self.connections.insert_with(|token| ConnectionEntry::Handshake(Handshake::new(Token(token), id, socket, &nonce).expect("Can't create handshake"))) {
Some(token) => {
match self.connections.get_mut(token) {
Some(&mut ConnectionEntry::Handshake(ref mut h)) => {
h.start(&self.info, true)
.and_then(|_| h.register(io.event_loop))
.unwrap_or_else (|e| {
debug!(target: "net", "Handshake create error: {:?}", e);
});
},
_ => {}
}
},
None => { warn!("Max connections reached") }
}
}
fn accept(&mut self, _io: &mut IoContext<NetworkIoMessage<Message>>) {
trace!(target: "net", "accept");
}
fn connection_writable<'s>(&'s mut self, token: StreamToken, io: &mut IoContext<'s, NetworkIoMessage<Message>>) {
let mut kill = false;
let mut create_session = false;
match self.connections.get_mut(token) {
Some(&mut ConnectionEntry::Handshake(ref mut h)) => {
h.writable(io.event_loop, &self.info).unwrap_or_else(|e| {
debug!(target: "net", "Handshake write error: {:?}", e);
kill = true;
});
create_session = h.done();
},
Some(&mut ConnectionEntry::Session(ref mut s)) => {
s.writable(io.event_loop, &self.info).unwrap_or_else(|e| {
debug!(target: "net", "Session write error: {:?}", e);
kill = true;
});
}
_ => {
warn!(target: "net", "Received event for unknown connection");
}
}
if kill {
self.kill_connection(token, io);
return;
} else if create_session {
self.start_session(token, io);
}
match self.connections.get_mut(token) {
Some(&mut ConnectionEntry::Session(ref mut s)) => {
s.reregister(io.event_loop).unwrap_or_else(|e| debug!(target: "net", "Session registration error: {:?}", e));
},
_ => (),
}
}
fn connection_closed<'s>(&'s mut self, token: TimerToken, io: &mut IoContext<'s, NetworkIoMessage<Message>>) {
self.kill_connection(token, io);
}
fn connection_readable<'s>(&'s mut self, token: StreamToken, io: &mut IoContext<'s, NetworkIoMessage<Message>>) {
let mut kill = false;
let mut create_session = false;
let mut ready_data: Vec<ProtocolId> = Vec::new();
let mut packet_data: Option<(ProtocolId, PacketId, Vec<u8>)> = None;
match self.connections.get_mut(token) {
Some(&mut ConnectionEntry::Handshake(ref mut h)) => {
h.readable(io.event_loop, &self.info).unwrap_or_else(|e| {
debug!(target: "net", "Handshake read error: {:?}", e);
kill = true;
});
create_session = h.done();
},
Some(&mut ConnectionEntry::Session(ref mut s)) => {
let sd = { s.readable(io.event_loop, &self.info).unwrap_or_else(|e| {
debug!(target: "net", "Session read error: {:?}", e);
kill = true;
SessionData::None
}) };
match sd {
SessionData::Ready => {
for (p, _) in self.handlers.iter_mut() {
if s.have_capability(p) {
ready_data.push(p);
}
}
},
SessionData::Packet {
data,
protocol,
packet_id,
} => {
match self.handlers.get_mut(protocol) {
None => { warn!(target: "net", "No handler found for protocol: {:?}", protocol) },
Some(_) => packet_data = Some((protocol, packet_id, data)),
}
},
SessionData::None => {},
}
}
_ => {
warn!(target: "net", "Received event for unknown connection");
}
}
if kill {
self.kill_connection(token, io);
return;
}
if create_session {
self.start_session(token, io);
}
for p in ready_data {
let mut h = self.handlers.get_mut(p).unwrap();
h.connected(&mut NetworkContext::new(io, p, Some(token), &mut self.connections, &mut self.timers), &token);
}
if let Some((p, packet_id, data)) = packet_data {
let mut h = self.handlers.get_mut(p).unwrap();
h.read(&mut NetworkContext::new(io, p, Some(token), &mut self.connections, &mut self.timers), &token, packet_id, &data[1..]);
}
match self.connections.get_mut(token) {
Some(&mut ConnectionEntry::Session(ref mut s)) => {
s.reregister(io.event_loop).unwrap_or_else(|e| debug!(target: "net", "Session registration error: {:?}", e));
},
_ => (),
}
}
fn start_session(&mut self, token: StreamToken, io: &mut IoContext<NetworkIoMessage<Message>>) {
let info = &self.info;
// TODO: use slab::replace_with (currently broken)
/*
match self.connections.remove(token) {
Some(ConnectionEntry::Handshake(h)) => {
match Session::new(h, io.event_loop, info) {
Ok(session) => {
assert!(token == self.connections.insert(ConnectionEntry::Session(session)).ok().unwrap());
},
Err(e) => {
debug!(target: "net", "Session construction error: {:?}", e);
}
}
},
_ => panic!("Error updating slab with session")
}*/
self.connections.replace_with(token, |c| {
match c {
ConnectionEntry::Handshake(h) => Session::new(h, io.event_loop, info)
.map(|s| Some(ConnectionEntry::Session(s)))
.unwrap_or_else(|e| {
debug!(target: "net", "Session construction error: {:?}", e);
None
}),
_ => { panic!("No handshake to create a session from"); }
}
}).expect("Error updating slab with session");
}
fn connection_timeout<'s>(&'s mut self, token: StreamToken, io: &mut IoContext<'s, NetworkIoMessage<Message>>) {
self.kill_connection(token, io)
}
fn kill_connection<'s>(&'s mut self, token: StreamToken, io: &mut IoContext<'s, NetworkIoMessage<Message>>) {
let mut to_disconnect: Vec<ProtocolId> = Vec::new();
let mut remove = true;
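// Abandoned handshakes are dropped silently; only sessions that completed the Hello exchange
// notify their protocol handlers via `disconnected` below.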
match self.connections.get_mut(token) {
Some(&mut ConnectionEntry::Handshake(_)) => (), // just abandon handshake
Some(&mut ConnectionEntry::Session(ref mut s)) if s.is_ready() => {
for (p, _) in self.handlers.iter_mut() {
if s.have_capability(p) {
to_disconnect.push(p);
}
}
},
_ => {
remove = false;
},
}
for p in to_disconnect {
let mut h = self.handlers.get_mut(p).unwrap();
h.disconnected(&mut NetworkContext::new(io, p, Some(token), &mut self.connections, &mut self.timers), &token);
}
if remove {
self.connections.remove(token);
}
}
}
impl<Message> IoHandler<NetworkIoMessage<Message>> for Host<Message> where Message: Send + 'static {
/// Initialize networking
fn initialize(&mut self, io: &mut IoContext<NetworkIoMessage<Message>>) {
/*
match ::ifaces::Interface::get_all().unwrap().into_iter().filter(|x| x.kind == ::ifaces::Kind::Packet && x.addr.is_some()).next() {
Some(iface) => config.public_address = iface.addr.unwrap(),
None => warn!("No public network interface"),
*/
// Start listening for incoming connections
io.event_loop.register(&self.listener, Token(TCP_ACCEPT), EventSet::readable(), PollOpt::edge()).unwrap();
io.event_loop.timeout_ms(Token(IDLE), MAINTENANCE_TIMEOUT).unwrap();
// open the udp socket
io.event_loop.register(&self.udp_socket, Token(NODETABLE_RECEIVE), EventSet::readable(), PollOpt::edge()).unwrap();
io.event_loop.timeout_ms(Token(NODETABLE_MAINTAIN), 7200).unwrap();
let port = self.info.config.listen_address.port();
self.info.listen_port = port;
// self.add_node("enode://a9a921de2ff09a9a4d38b623c67b2d6b477a8e654ae95d874750cbbcb31b33296496a7b4421934e2629269e180823e52c15c2b19fc59592ec51ffe4f2de76ed7@127.0.0.1:30303");
// GO bootnodes
self.add_node("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303"); // IE
self.add_node("enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303"); // BR
self.add_node("enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303"); // SG
// ETH/DEV cpp-ethereum (poc-9.ethdev.com)
self.add_node("enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303");
}
fn stream_hup<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage<Message>>, stream: StreamToken) {
trace!(target: "net", "Hup: {}", stream);
match stream {
FIRST_CONNECTION ... LAST_CONNECTION => self.connection_closed(stream, io),
_ => warn!(target: "net", "Unexpected hup"),
};
}
fn stream_readable<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage<Message>>, stream: StreamToken) {
match stream {
FIRST_CONNECTION ... LAST_CONNECTION => self.connection_readable(stream, io),
NODETABLE_RECEIVE => {},
TCP_ACCEPT => self.accept(io),
_ => panic!("Received unknown readable token"),
}
}
fn stream_writable<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage<Message>>, stream: StreamToken) {
match stream {
FIRST_CONNECTION ... LAST_CONNECTION => self.connection_writable(stream, io),
_ => panic!("Received unknown writable token"),
}
}
fn timeout<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage<Message>>, token: TimerToken) {
match token {
IDLE => self.maintain_network(io),
FIRST_CONNECTION ... LAST_CONNECTION => self.connection_timeout(token, io),
NODETABLE_DISCOVERY => {},
NODETABLE_MAINTAIN => {},
_ => match self.timers.get_mut(&token).map(|p| *p) {
Some(protocol) => match self.handlers.get_mut(protocol) {
None => { warn!(target: "net", "No handler found for protocol: {:?}", protocol) },
Some(h) => { h.timeout(&mut NetworkContext::new(io, protocol, Some(token), &mut self.connections, &mut self.timers), token); }
},
None => {} // timer not registered through us
}
}
}
fn message<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage<Message>>, message: &'s mut NetworkIoMessage<Message>) {
match message {
&mut NetworkIoMessage::AddHandler {
ref mut handler,
ref protocol,
ref versions
} => {
let mut h = mem::replace(handler, None).unwrap();
h.initialize(&mut NetworkContext::new(io, protocol, None, &mut self.connections, &mut self.timers));
self.handlers.insert(protocol, h);
for v in versions {
self.info.capabilities.push(CapabilityInfo { protocol: protocol, version: *v, packet_count:0 });
}
},
&mut NetworkIoMessage::Send {
ref peer,
ref packet_id,
ref protocol,
ref data,
} => {
match self.connections.get_mut(*peer as usize) {
Some(&mut ConnectionEntry::Session(ref mut s)) => {
s.send_packet(protocol, *packet_id as u8, &data).unwrap_or_else(|e| {
warn!(target: "net", "Send error: {:?}", e);
}); //TODO: don't copy vector data
},
_ => {
warn!(target: "net", "Send: Peer does not exist");
}
}
},
&mut NetworkIoMessage::User(ref message) => {
for (p, h) in self.handlers.iter_mut() {
h.message(&mut NetworkContext::new(io, p, None, &mut self.connections, &mut self.timers), &message);
}
}
}
}
}

86
src/network/mod.rs Normal file
View File

@ -0,0 +1,86 @@
/// Network and general IO module.
///
/// Example usage for creating a network service and adding an IO handler:
///
/// ```rust
/// extern crate ethcore_util as util;
/// use util::*;
///
/// struct MyHandler;
///
/// struct MyMessage {
/// data: u32
/// }
///
/// impl NetworkProtocolHandler<MyMessage> for MyHandler {
/// fn initialize(&mut self, io: &mut NetworkContext<MyMessage>) {
/// io.register_timer(1000);
/// }
///
/// fn read(&mut self, io: &mut NetworkContext<MyMessage>, peer: &PeerId, packet_id: u8, data: &[u8]) {
/// println!("Received {} ({} bytes) from {}", packet_id, data.len(), peer);
/// }
///
/// fn connected(&mut self, io: &mut NetworkContext<MyMessage>, peer: &PeerId) {
/// println!("Connected {}", peer);
/// }
///
/// fn disconnected(&mut self, io: &mut NetworkContext<MyMessage>, peer: &PeerId) {
/// println!("Disconnected {}", peer);
/// }
///
/// fn timeout(&mut self, io: &mut NetworkContext<MyMessage>, timer: TimerToken) {
/// println!("Timeout {}", timer);
/// }
///
/// fn message(&mut self, io: &mut NetworkContext<MyMessage>, message: &MyMessage) {
/// println!("Message {}", message.data);
/// }
/// }
///
/// fn main () {
/// let mut service = NetworkService::<MyMessage>::start().expect("Error creating network service");
/// service.register_protocol(Box::new(MyHandler), "myproto", &[1u8]);
///
/// // Wait for quit condition
/// // ...
/// // Drop the service
/// }
/// ```
mod host;
mod connection;
mod handshake;
mod session;
mod discovery;
mod service;
mod error;
mod node;
pub type PeerId = host::PeerId;
pub type PacketId = host::PacketId;
pub type NetworkContext<'s,'io, Message> = host::NetworkContext<'s, 'io, Message>;
pub type NetworkService<Message> = service::NetworkService<Message>;
pub type NetworkIoMessage<Message> = host::NetworkIoMessage<Message>;
pub use network::host::NetworkIoMessage::User as UserMessage;
pub type NetworkError = error::NetworkError;
use io::*;
/// Network IO protocol handler. This needs to be implemented for each new subprotocol.
/// All the handler functions are called from within the IO event loop.
/// `Message` is the type for message data.
pub trait NetworkProtocolHandler<Message>: Send where Message: Send {
/// Initialize the handler
fn initialize(&mut self, _io: &mut NetworkContext<Message>) {}
/// Called when a new network packet is received.
fn read(&mut self, io: &mut NetworkContext<Message>, peer: &PeerId, packet_id: u8, data: &[u8]);
/// Called when a new peer connects. Only called when the peer supports the same protocol.
fn connected(&mut self, io: &mut NetworkContext<Message>, peer: &PeerId);
/// Called when a previously connected peer disconnects.
fn disconnected(&mut self, io: &mut NetworkContext<Message>, peer: &PeerId);
/// Timer function called after a timeout created with `NetworkContext::register_timer`.
fn timeout(&mut self, _io: &mut NetworkContext<Message>, _timer: TimerToken) {}
/// Called when a broadcast message is received. The message can only be sent from a different IO handler.
fn message(&mut self, _io: &mut NetworkContext<Message>, _message: &Message) {}
}

83
src/network/node.rs Normal file
View File

@ -0,0 +1,83 @@
use std::net::{SocketAddr, ToSocketAddrs};
use std::hash::{Hash, Hasher};
use std::str::{FromStr};
use hash::*;
use rlp::*;
use time::Tm;
use error::*;
/// Node public key
pub type NodeId = H512;
#[derive(Debug)]
/// Node address info
pub struct NodeEndpoint {
/// IP(V4 or V6) address
pub address: SocketAddr,
/// Address as string (can be host name).
pub address_str: String,
/// Connection port.
pub udp_port: u16
}
impl NodeEndpoint {
/// Create endpoint from string. Performs name resolution if given a host name.
fn from_str(s: &str) -> Result<NodeEndpoint, UtilError> {
let address = s.to_socket_addrs().map(|mut i| i.next());
match address {
Ok(Some(a)) => Ok(NodeEndpoint {
address: a,
address_str: s.to_string(),
udp_port: a.port()
}),
Ok(_) => Err(UtilError::AddressResolve(None)),
Err(e) => Err(UtilError::AddressResolve(Some(e)))
}
}
}
#[derive(PartialEq, Eq, Copy, Clone)]
pub enum PeerType {
Required,
Optional
}
pub struct Node {
pub id: NodeId,
pub endpoint: NodeEndpoint,
pub peer_type: PeerType,
pub last_attempted: Option<Tm>,
}
impl FromStr for Node {
type Err = UtilError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let (id, endpoint) = if &s[0..8] == "enode://" && s.len() > 136 && &s[136..137] == "@" {
(try!(NodeId::from_str(&s[8..136])), try!(NodeEndpoint::from_str(&s[137..])))
}
else {
(NodeId::new(), try!(NodeEndpoint::from_str(s)))
};
Ok(Node {
id: id,
endpoint: endpoint,
peer_type: PeerType::Optional,
last_attempted: None,
})
}
}
impl PartialEq for Node {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
impl Eq for Node { }
impl Hash for Node {
fn hash<H>(&self, state: &mut H) where H: Hasher {
self.id.hash(state)
}
}

61
src/network/service.rs Normal file
View File

@ -0,0 +1,61 @@
use error::*;
use network::{NetworkProtocolHandler};
use network::error::{NetworkError};
use network::host::{Host, NetworkIoMessage, PeerId, PacketId, ProtocolId};
use io::*;
/// IO Service with networking
/// `Message` defines a notification data type.
pub struct NetworkService<Message> where Message: Send + 'static {
io_service: IoService<NetworkIoMessage<Message>>,
host_info: String,
}
impl<Message> NetworkService<Message> where Message: Send + 'static {
/// Starts IO event loop
pub fn start() -> Result<NetworkService<Message>, UtilError> {
let mut io_service = try!(IoService::<NetworkIoMessage<Message>>::start());
let host = Box::new(Host::new());
let host_info = host.info.client_version.clone();
info!("NetworkService::start(): id={:?}", host.info.id());
try!(io_service.register_handler(host));
Ok(NetworkService {
io_service: io_service,
host_info: host_info,
})
}
/// Send a message over the network. Normally `HostIo::send` should be used. This can be used from non-io threads.
pub fn send(&mut self, peer: &PeerId, packet_id: PacketId, protocol: ProtocolId, data: &[u8]) -> Result<(), NetworkError> {
try!(self.io_service.send_message(NetworkIoMessage::Send {
peer: *peer,
packet_id: packet_id,
protocol: protocol,
data: data.to_vec()
}));
Ok(())
}
/// Register a new protocol handler with the event loop.
pub fn register_protocol(&mut self, handler: Box<NetworkProtocolHandler<Message>+Send>, protocol: ProtocolId, versions: &[u8]) -> Result<(), NetworkError> {
try!(self.io_service.send_message(NetworkIoMessage::AddHandler {
handler: Some(handler),
protocol: protocol,
versions: versions.to_vec(),
}));
Ok(())
}
/// Returns host identifier string as advertised to other peers
pub fn host_info(&self) -> String {
self.host_info.clone()
}
/// Returns underlying io service.
pub fn io(&mut self) -> &mut IoService<NetworkIoMessage<Message>> {
&mut self.io_service
}
}

282
src/network/session.rs Normal file
View File

@ -0,0 +1,282 @@
use mio::*;
use hash::*;
use rlp::*;
use network::connection::{EncryptedConnection, Packet};
use network::handshake::Handshake;
use error::*;
use network::error::{NetworkError, DisconnectReason};
use network::host::*;
use network::node::NodeId;
/// Peer session over encrypted connection.
/// When created waits for Hello packet exchange and signals ready state.
/// Sends and receives protocol packets and handles basic packets such as ping/pong and disconnect.
pub struct Session {
/// Shared session information
pub info: SessionInfo,
/// Underlying connection
connection: EncryptedConnection,
/// Session ready flag. Set after successful Hello packet exchange
had_hello: bool,
}
/// Structure used to report various session events.
pub enum SessionData {
None,
/// Session is ready to send/receive packets.
Ready,
/// A packet has been received
Packet {
/// Packet data
data: Vec<u8>,
/// Packet protocol ID
protocol: &'static str,
/// Zero based packet ID
packet_id: u8,
},
}
/// Shared session information
pub struct SessionInfo {
/// Peer public key
pub id: NodeId,
/// Peer client ID
pub client_version: String,
/// Peer RLPx protocol version
pub protocol_version: u32,
/// Peer protocol capabilities
capabilities: Vec<SessionCapabilityInfo>,
}
#[derive(Debug, PartialEq, Eq)]
pub struct PeerCapabilityInfo {
pub protocol: String,
pub version: u8,
}
impl Decodable for PeerCapabilityInfo {
fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
let c = try!(decoder.as_list());
let v: u32 = try!(Decodable::decode(&c[1]));
Ok(PeerCapabilityInfo {
protocol: try!(Decodable::decode(&c[0])),
version: v as u8,
})
}
}
#[derive(Debug, PartialEq, Eq)]
struct SessionCapabilityInfo {
pub protocol: &'static str,
pub version: u8,
pub packet_count: u8,
pub id_offset: u8,
}
const PACKET_HELLO: u8 = 0x80;
const PACKET_DISCONNECT: u8 = 0x01;
const PACKET_PING: u8 = 0x02;
const PACKET_PONG: u8 = 0x03;
const PACKET_GET_PEERS: u8 = 0x04;
const PACKET_PEERS: u8 = 0x05;
const PACKET_USER: u8 = 0x10;
const PACKET_LAST: u8 = 0x7f;
impl Session {
/// Create a new session out of a completed handshake. Consumes the handshake object.
pub fn new<Host:Handler<Timeout=Token>>(h: Handshake, event_loop: &mut EventLoop<Host>, host: &HostInfo) -> Result<Session, UtilError> {
let id = h.id.clone();
let connection = try!(EncryptedConnection::new(h));
let mut session = Session {
connection: connection,
had_hello: false,
info: SessionInfo {
id: id,
client_version: String::new(),
protocol_version: 0,
capabilities: Vec::new(),
},
};
try!(session.write_hello(host));
try!(session.write_ping());
try!(session.connection.register(event_loop));
Ok(session)
}
/// Check if session is ready to send/receive data
pub fn is_ready(&self) -> bool {
self.had_hello
}
/// Readable IO handler. Returns packet data if available.
pub fn readable<Host:Handler>(&mut self, event_loop: &mut EventLoop<Host>, host: &HostInfo) -> Result<SessionData, UtilError> {
match try!(self.connection.readable(event_loop)) {
Some(data) => Ok(try!(self.read_packet(data, host))),
None => Ok(SessionData::None)
}
}
/// Writable IO handler. Sends pending packets.
pub fn writable<Host:Handler>(&mut self, event_loop: &mut EventLoop<Host>, _host: &HostInfo) -> Result<(), UtilError> {
self.connection.writable(event_loop)
}
/// Checks if peer supports given capability
pub fn have_capability(&self, protocol: &str) -> bool {
self.info.capabilities.iter().any(|c| c.protocol == protocol)
}
/// Update registration with the event loop. Should be called at the end of the IO handler.
pub fn reregister<Host:Handler>(&mut self, event_loop: &mut EventLoop<Host>) -> Result<(), UtilError> {
self.connection.reregister(event_loop)
}
/// Send a protocol packet to peer.
pub fn send_packet(&mut self, protocol: &str, packet_id: u8, data: &[u8]) -> Result<(), UtilError> {
let mut i = 0usize;
while protocol != self.info.capabilities[i].protocol {
i += 1;
if i == self.info.capabilities.len() {
debug!(target: "net", "Unknown protocol: {:?}", protocol);
return Ok(())
}
}
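// Map the protocol-local packet id onto the wire packet id using this capability's
// negotiated id offset (assigned in `read_hello`).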
let pid = self.info.capabilities[i].id_offset + packet_id;
let mut rlp = RlpStream::new();
rlp.append(&(pid as u32));
rlp.append_raw(data, 1);
self.connection.send_packet(&rlp.out())
}
fn read_packet(&mut self, packet: Packet, host: &HostInfo) -> Result<SessionData, UtilError> {
if packet.data.len() < 2 {
return Err(From::from(NetworkError::BadProtocol));
}
let packet_id = packet.data[0];
if packet_id != PACKET_HELLO && packet_id != PACKET_DISCONNECT && !self.had_hello {
return Err(From::from(NetworkError::BadProtocol));
}
match packet_id {
PACKET_HELLO => {
let rlp = UntrustedRlp::new(&packet.data[1..]); //TODO: validate rlp expected size
try!(self.read_hello(&rlp, host));
Ok(SessionData::Ready)
},
PACKET_DISCONNECT => Err(From::from(NetworkError::Disconnect(DisconnectReason::DisconnectRequested))),
PACKET_PING => {
try!(self.write_pong());
Ok(SessionData::None)
},
PACKET_GET_PEERS => Ok(SessionData::None), //TODO;
PACKET_PEERS => Ok(SessionData::None),
PACKET_USER ... PACKET_LAST => {
let mut i = 0usize;
while packet_id < self.info.capabilities[i].id_offset {
i += 1;
if i == self.info.capabilities.len() {
debug!(target: "net", "Unknown packet: {:?}", packet_id);
return Ok(SessionData::None)
}
}
// map to protocol
let protocol = self.info.capabilities[i].protocol;
let pid = packet_id - self.info.capabilities[i].id_offset;
return Ok(SessionData::Packet { data: packet.data, protocol: protocol, packet_id: pid } )
},
_ => {
debug!(target: "net", "Unknown packet: {:?}", packet_id);
Ok(SessionData::None)
}
}
}
fn write_hello(&mut self, host: &HostInfo) -> Result<(), UtilError> {
let mut rlp = RlpStream::new();
rlp.append_raw(&[PACKET_HELLO as u8], 0);
rlp.append_list(5)
.append(&host.protocol_version)
.append(&host.client_version)
.append(&host.capabilities)
.append(&host.listen_port)
.append(host.id());
self.connection.send_packet(&rlp.out())
}
fn read_hello(&mut self, rlp: &UntrustedRlp, host: &HostInfo) -> Result<(), UtilError> {
let protocol = try!(rlp.val_at::<u32>(0));
let client_version = try!(rlp.val_at::<String>(1));
let peer_caps = try!(rlp.val_at::<Vec<PeerCapabilityInfo>>(2));
let id = try!(rlp.val_at::<NodeId>(4));
// Intersect with host capabilities
// Leave only the highest mutually supported capability version
let mut caps: Vec<SessionCapabilityInfo> = Vec::new();
for hc in host.capabilities.iter() {
if peer_caps.iter().any(|c| c.protocol == hc.protocol && c.version == hc.version) {
caps.push(SessionCapabilityInfo {
protocol: hc.protocol,
version: hc.version,
id_offset: 0,
packet_count: hc.packet_count,
});
}
}
caps.retain(|c| host.capabilities.iter().any(|hc| hc.protocol == c.protocol && hc.version == c.version));
let mut i = 0;
while i < caps.len() {
if caps.iter().any(|c| c.protocol == caps[i].protocol && c.version > caps[i].version) {
caps.remove(i);
}
else {
i += 1;
}
}
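// Assign consecutive packet id ranges to the remaining capabilities, starting
// right after the reserved base packet ids (PACKET_USER).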
i = 0;
let mut offset: u8 = PACKET_USER;
while i < caps.len() {
caps[i].id_offset = offset;
offset += caps[i].packet_count;
i += 1;
}
trace!(target: "net", "Hello: {} v{} {} {:?}", client_version, protocol, id, caps);
self.info.client_version = client_version;
self.info.capabilities = caps;
if protocol != host.protocol_version {
return Err(From::from(self.disconnect(DisconnectReason::UselessPeer)));
}
self.had_hello = true;
Ok(())
}
fn write_ping(&mut self) -> Result<(), UtilError> {
self.send(try!(Session::prepare(PACKET_PING)))
}
fn write_pong(&mut self) -> Result<(), UtilError> {
self.send(try!(Session::prepare(PACKET_PONG)))
}
fn disconnect(&mut self, reason: DisconnectReason) -> NetworkError {
let mut rlp = RlpStream::new();
rlp.append(&(PACKET_DISCONNECT as u32));
rlp.append_list(1);
rlp.append(&(reason.clone() as u32));
self.connection.send_packet(&rlp.out()).ok();
NetworkError::Disconnect(reason)
}
fn prepare(packet_id: u8) -> Result<RlpStream, UtilError> {
let mut rlp = RlpStream::new();
rlp.append(&(packet_id as u32));
rlp.append_list(0);
Ok(rlp)
}
fn send(&mut self, rlp: RlpStream) -> Result<(), UtilError> {
self.connection.send_packet(&rlp.out())
}
}

253
src/nibbleslice.rs Normal file
View File

@ -0,0 +1,253 @@
//! Nibble-orientated view onto byte-slice, allowing nibble-precision offsets.
use std::cmp::*;
use std::fmt;
use bytes::*;
/// Nibble-orientated view onto byte-slice, allowing nibble-precision offsets.
///
/// This is an immutable struct. No operations actually change it.
///
/// # Example
/// ```rust
/// extern crate ethcore_util;
/// use ethcore_util::nibbleslice::*;
/// fn main() {
/// let d1 = &[0x01u8, 0x23, 0x45];
/// let d2 = &[0x34u8, 0x50, 0x12];
/// let d3 = &[0x00u8, 0x12];
/// let n1 = NibbleSlice::new(d1); // 0,1,2,3,4,5
/// let n2 = NibbleSlice::new(d2); // 3,4,5,0,1,2
/// let n3 = NibbleSlice::new_offset(d3, 1); // 0,1,2
/// assert!(n1 > n3); // 0,1,2,... > 0,1,2
/// assert!(n1 < n2); // 0,... < 3,...
/// assert!(n2.mid(3) == n3); // 0,1,2 == 0,1,2
/// assert!(n1.starts_with(&n3));
/// assert_eq!(n1.common_prefix(&n3), 3);
/// assert_eq!(n2.mid(3).common_prefix(&n1), 3);
/// }
/// ```
#[derive(Copy, Clone, Eq, Ord)]
pub struct NibbleSlice<'a> {
data: &'a [u8],
offset: usize,
data_encode_suffix: &'a [u8],
offset_encode_suffix: usize,
}
impl<'a, 'view> NibbleSlice<'a> where 'a: 'view {
/// Create a new nibble slice with the given byte-slice.
pub fn new(data: &[u8]) -> NibbleSlice { NibbleSlice::new_offset(data, 0) }
/// Create a new nibble slice with the given byte-slice with a nibble offset.
pub fn new_offset(data: &'a [u8], offset: usize) -> NibbleSlice { NibbleSlice{data: data, offset: offset, data_encode_suffix: &b""[..], offset_encode_suffix: 0} }
/// Create a nibble slice composed of two slices; `b` is treated as a suffix appended to `a`.
pub fn new_composed(a: &'a NibbleSlice, b: &'a NibbleSlice) -> NibbleSlice<'a> { NibbleSlice{data: a.data, offset: a.offset, data_encode_suffix: b.data, offset_encode_suffix: b.offset} }
/*pub fn new_composed_bytes_offset(a: &NibbleSlice, b: &NibbleSlice) -> (Bytes, usize) {
let r: Vec<u8>::with_capacity((a.len() + b.len() + 1) / 2);
let mut i = (a.len() + b.len()) % 2;
while i < a.len() {
match i % 2 {
0 => ,
1 => ,
}
i += 1;
}
while i < a.len() + b.len() {
i += 1;
}
(r, a.len() + b.len())
}*/
/// Create a new nibble slice from the given HPE encoded data (e.g. output of `encoded()`).
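/// The first byte carries the hex-prefix flags produced by `encoded()`: 0x10 marks an odd
/// nibble count (and holds the first nibble), 0x20 marks a leaf.
///
/// A minimal sketch of the round trip (mirroring the tests below):
///
/// ```rust
/// extern crate ethcore_util;
/// use ethcore_util::nibbleslice::*;
/// fn main() {
/// let n = NibbleSlice::new(&[0x01u8, 0x23, 0x45]);
/// let (decoded, is_leaf) = NibbleSlice::from_encoded(&[0x20, 0x01, 0x23, 0x45]);
/// assert!(decoded == n);
/// assert!(is_leaf);
/// }
/// ```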
pub fn from_encoded(data: &'a [u8]) -> (NibbleSlice, bool) {
(Self::new_offset(data, if data[0] & 16 == 16 {1} else {2}), data[0] & 32 == 32)
}
/// Is this an empty slice?
pub fn is_empty(&self) -> bool { self.len() == 0 }
/// Get the length (in nibbles, naturally) of this slice.
pub fn len(&self) -> usize { (self.data.len() + self.data_encode_suffix.len()) * 2 - self.offset - self.offset_encode_suffix }
/// Get the nibble at position `i`.
pub fn at(&self, i: usize) -> u8 {
let l = self.data.len() * 2 - self.offset;
if i < l {
if (self.offset + i) & 1 == 1 {
self.data[(self.offset + i) / 2] & 15u8
}
else {
self.data[(self.offset + i) / 2] >> 4
}
}
else {
let i = i - l;
if (self.offset_encode_suffix + i) & 1 == 1 {
self.data_encode_suffix[(self.offset_encode_suffix + i) / 2] & 15u8
}
else {
self.data_encode_suffix[(self.offset_encode_suffix + i) / 2] >> 4
}
}
}
/// Return object which represents a view on to this slice (further) offset by `i` nibbles.
pub fn mid(&'view self, i: usize) -> NibbleSlice<'a> { NibbleSlice{ data: self.data, offset: self.offset + i, data_encode_suffix: &b""[..], offset_encode_suffix: 0 } }
/// Do we start with the same nibbles as the whole of `them`?
pub fn starts_with(&self, them: &Self) -> bool { self.common_prefix(them) == them.len() }
/// How many of the same nibbles at the beginning do we match with `them`?
pub fn common_prefix(&self, them: &Self) -> usize {
let s = min(self.len(), them.len());
let mut i = 0usize;
while i < s {
if self.at(i) != them.at(i) { break; }
i += 1;
}
i
}
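/// Encode this slice with the compact hex-prefix encoding: the first byte holds the odd-length
/// flag (0x10 plus the leading nibble) and the leaf flag (0x20); the remaining nibbles are
/// packed two per byte.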
pub fn encoded(&self, is_leaf: bool) -> Bytes {
let l = self.len();
let mut r = Bytes::with_capacity(l / 2 + 1);
let mut i = l % 2;
r.push(if i == 1 {0x10 + self.at(0)} else {0} + if is_leaf {0x20} else {0});
while i < l {
r.push(self.at(i) * 16 + self.at(i + 1));
i += 2;
}
r
}
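/// Encode only the first `n` nibbles of this slice, using the same hex-prefix scheme as `encoded()`.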
pub fn encoded_leftmost(&self, n: usize, is_leaf: bool) -> Bytes {
let l = min(self.len(), n);
let mut r = Bytes::with_capacity(l / 2 + 1);
let mut i = l % 2;
r.push(if i == 1 {0x10 + self.at(0)} else {0} + if is_leaf {0x20} else {0});
while i < l {
r.push(self.at(i) * 16 + self.at(i + 1));
i += 2;
}
r
}
}
impl<'a> PartialEq for NibbleSlice<'a> {
fn eq(&self, them: &Self) -> bool {
self.len() == them.len() && self.starts_with(them)
}
}
impl<'a> PartialOrd for NibbleSlice<'a> {
fn partial_cmp(&self, them: &Self) -> Option<Ordering> {
let s = min(self.len(), them.len());
let mut i = 0usize;
while i < s {
match self.at(i).partial_cmp(&them.at(i)).unwrap() {
Ordering::Less => return Some(Ordering::Less),
Ordering::Greater => return Some(Ordering::Greater),
_ => i += 1,
}
}
self.len().partial_cmp(&them.len())
}
}
impl<'a> fmt::Debug for NibbleSlice<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for i in 0..self.len() {
match i {
0 => try!(write!(f, "{:01x}", self.at(i))),
_ => try!(write!(f, "'{:01x}", self.at(i))),
}
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::NibbleSlice;
static D: &'static [u8;3] = &[0x01u8, 0x23, 0x45];
#[test]
fn basics() {
let n = NibbleSlice::new(D);
assert_eq!(n.len(), 6);
assert!(!n.is_empty());
let n = NibbleSlice::new_offset(D, 6);
assert!(n.is_empty());
let n = NibbleSlice::new_offset(D, 3);
assert_eq!(n.len(), 3);
for i in 0..3 {
assert_eq!(n.at(i), i as u8 + 3);
}
}
#[test]
fn mid() {
let n = NibbleSlice::new(D);
let m = n.mid(2);
for i in 0..4 {
assert_eq!(m.at(i), i as u8 + 2);
}
let m = n.mid(3);
for i in 0..3 {
assert_eq!(m.at(i), i as u8 + 3);
}
}
#[test]
fn encoded() {
let n = NibbleSlice::new(D);
assert_eq!(n.encoded(false), &[0x00, 0x01, 0x23, 0x45]);
assert_eq!(n.encoded(true), &[0x20, 0x01, 0x23, 0x45]);
assert_eq!(n.mid(1).encoded(false), &[0x11, 0x23, 0x45]);
assert_eq!(n.mid(1).encoded(true), &[0x31, 0x23, 0x45]);
}
#[test]
fn from_encoded() {
let n = NibbleSlice::new(D);
assert_eq!((n, false), NibbleSlice::from_encoded(&[0x00, 0x01, 0x23, 0x45]));
assert_eq!((n, true), NibbleSlice::from_encoded(&[0x20, 0x01, 0x23, 0x45]));
assert_eq!((n.mid(1), false), NibbleSlice::from_encoded(&[0x11, 0x23, 0x45]));
assert_eq!((n.mid(1), true), NibbleSlice::from_encoded(&[0x31, 0x23, 0x45]));
}
#[test]
fn shared() {
let n = NibbleSlice::new(D);
let other = &[0x01u8, 0x23, 0x01, 0x23, 0x45, 0x67];
let m = NibbleSlice::new(other);
assert_eq!(n.common_prefix(&m), 4);
assert_eq!(m.common_prefix(&n), 4);
assert_eq!(n.mid(1).common_prefix(&m.mid(1)), 3);
assert_eq!(n.mid(1).common_prefix(&m.mid(2)), 0);
assert_eq!(n.common_prefix(&m.mid(4)), 6);
assert!(!n.starts_with(&m.mid(4)));
assert!(m.mid(4).starts_with(&n));
}
#[test]
fn compare() {
let other = &[0x01u8, 0x23, 0x01, 0x23, 0x45];
let n = NibbleSlice::new(D);
let m = NibbleSlice::new(other);
assert!(n != m);
assert!(n > m);
assert!(m < n);
assert!(n == m.mid(4));
assert!(n >= m.mid(4));
assert!(n <= m.mid(4));
}
}

310
src/overlaydb.rs Normal file
View File

@ -0,0 +1,310 @@
//! Disk-backed HashDB implementation.
use error::*;
use hash::*;
use bytes::*;
use rlp::*;
use hashdb::*;
use memorydb::*;
use std::ops::*;
use std::sync::*;
use std::env;
use std::collections::HashMap;
use rocksdb::{DB, Writable, IteratorMode};
#[derive(Clone)]
/// Implementation of the HashDB trait for a disk-backed database with a memory overlay.
///
/// The operations `insert()` and `kill()` take place on the memory overlay; batches of
/// such operations may be flushed to the disk-backed DB with `commit()` or discarded with
/// `revert()`.
///
/// `lookup()` and `exists()` maintain normal behaviour - all `insert()` and `kill()`
/// queries have an immediate effect in terms of these functions.
pub struct OverlayDB {
overlay: MemoryDB,
backing: Arc<DB>,
}
impl OverlayDB {
/// Create a new instance of OverlayDB given a `backing` database.
pub fn new(backing: DB) -> OverlayDB {
OverlayDB{ overlay: MemoryDB::new(), backing: Arc::new(backing) }
}
/// Create a new instance of OverlayDB with an anonymous temporary database.
pub fn new_temp() -> OverlayDB {
let mut dir = env::temp_dir();
dir.push(H32::random().hex());
Self::new(DB::open_default(dir.to_str().unwrap()).unwrap())
}
/// Commit all memory operations to the backing database.
///
/// Returns either an error or the number of items changed in the backing database.
///
/// Will return an error if the number of `kill()`s ever exceeds the number of
/// `insert()`s for any key. This will leave the database in an indeterminate
/// state. Don't ever let it happen.
///
/// # Example
/// ```
/// extern crate ethcore_util;
/// use ethcore_util::hashdb::*;
/// use ethcore_util::overlaydb::*;
/// fn main() {
/// let mut m = OverlayDB::new_temp();
/// let key = m.insert(b"foo"); // insert item.
/// assert!(m.exists(&key)); // key exists (in memory).
/// assert_eq!(m.commit().unwrap(), 1); // 1 item changed.
/// assert!(m.exists(&key)); // key still exists (in backing).
/// m.kill(&key); // delete item.
/// assert!(!m.exists(&key)); // key "doesn't exist" (though still does in backing).
/// m.kill(&key); // oh dear... more kills than inserts for the key...
/// //m.commit().unwrap(); // this commit/unwrap would cause a panic.
/// m.revert(); // revert both kills.
/// assert!(m.exists(&key)); // key now still exists.
/// }
/// ```
pub fn commit(&mut self) -> Result<u32, UtilError> {
let mut ret = 0u32;
for i in self.overlay.drain().into_iter() {
let (key, (value, rc)) = i;
if rc != 0 {
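// Merge the in-memory reference-count delta with whatever is already stored on disk for this key.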
match self.payload(&key) {
Some(x) => {
let (back_value, back_rc) = x;
let total_rc: i32 = back_rc as i32 + rc;
if total_rc < 0 {
return Err(From::from(BaseDataError::NegativelyReferencedHash));
}
self.put_payload(&key, (back_value, total_rc as u32));
}
None => {
if rc < 0 {
return Err(From::from(BaseDataError::NegativelyReferencedHash));
}
self.put_payload(&key, (value, rc as u32));
}
};
ret += 1;
}
}
Ok(ret)
}
/// Revert all operations on this object (i.e. `insert()`s and `kill()`s) since the
/// last `commit()`.
///
/// # Example
/// ```
/// extern crate ethcore_util;
/// use ethcore_util::hashdb::*;
/// use ethcore_util::overlaydb::*;
/// fn main() {
/// let mut m = OverlayDB::new_temp();
/// let foo = m.insert(b"foo"); // insert foo.
/// m.commit().unwrap(); // commit - new operations begin here...
/// let bar = m.insert(b"bar"); // insert bar.
/// m.kill(&foo); // kill foo.
/// assert!(!m.exists(&foo)); // foo is gone.
/// assert!(m.exists(&bar)); // bar is here.
/// m.revert(); // revert the last two operations.
/// assert!(m.exists(&foo)); // foo is here.
/// assert!(!m.exists(&bar)); // bar is gone.
/// }
/// ```
pub fn revert(&mut self) { self.overlay.clear(); }
/// Get the refs and value of the given key.
fn payload(&self, key: &H256) -> Option<(Bytes, u32)> {
self.backing.get(&key.bytes())
.expect("Low-level database error. Some issue with your hard disk?")
.map(|d| {
let r = Rlp::new(d.deref());
(r.at(1).as_val(), r.at(0).as_val())
})
}
/// Put the refs and value for the given key.
fn put_payload(&self, key: &H256, payload: (Bytes, u32)) {
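// Stored on disk as a two-item RLP list: [reference count, value].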
let mut s = RlpStream::new_list(2);
s.append(&payload.1);
s.append(&payload.0);
self.backing.put(&key.bytes(), &s.out()).expect("Low-level database error. Some issue with your hard disk?");
}
}
impl HashDB for OverlayDB {
fn keys(&self) -> HashMap<H256, i32> {
let mut ret: HashMap<H256, i32> = HashMap::new();
for (key, _) in self.backing.iterator(IteratorMode::Start) {
let h = H256::from_slice(key.deref());
let r = self.payload(&h).unwrap().1;
ret.insert(h, r as i32);
}
for (key, refs) in self.overlay.keys().into_iter() {
let refs = *ret.get(&key).unwrap_or(&0) + refs;
ret.insert(key, refs);
}
ret
}
fn lookup(&self, key: &H256) -> Option<&[u8]> {
// return ok if positive; if negative, check backing - might be enough references there to make
// it positive again.
let k = self.overlay.raw(key);
match k {
Some(&(ref d, rc)) if rc > 0 => Some(d),
_ => {
let memrc = k.map(|&(_, rc)| rc).unwrap_or(0);
match self.payload(key) {
Some(x) => {
let (d, rc) = x;
if rc as i32 + memrc > 0 {
Some(&self.overlay.denote(key, d).0)
}
else {
None
}
}
// Replace above match arm with this once https://github.com/rust-lang/rust/issues/15287 is done.
//Some((d, rc)) if rc + memrc > 0 => Some(d),
_ => None,
}
}
}
}
fn exists(&self, key: &H256) -> bool {
// return ok if positive; if negative, check backing - might be enough references there to make
// it positive again.
let k = self.overlay.raw(key);
match k {
Some(&(_, rc)) if rc > 0 => true,
_ => {
let memrc = k.map(|&(_, rc)| rc).unwrap_or(0);
match self.payload(key) {
Some(x) => {
let (_, rc) = x;
if rc as i32 + memrc > 0 {
true
}
else {
false
}
}
// Replace above match arm with this once https://github.com/rust-lang/rust/issues/15287 is done.
//Some((d, rc)) if rc + memrc > 0 => true,
_ => false,
}
}
}
}
fn insert(&mut self, value: &[u8]) -> H256 { self.overlay.insert(value) }
fn emplace(&mut self, key: H256, value: Bytes) { self.overlay.emplace(key, value); }
fn kill(&mut self, key: &H256) { self.overlay.kill(key); }
}
#[test]
fn overlaydb_overlay_insert_and_kill() {
let mut trie = OverlayDB::new_temp();
let h = trie.insert(b"hello world");
assert_eq!(trie.lookup(&h).unwrap(), b"hello world");
trie.kill(&h);
assert_eq!(trie.lookup(&h), None);
}
#[test]
fn overlaydb_backing_insert_revert() {
let mut trie = OverlayDB::new_temp();
let h = trie.insert(b"hello world");
assert_eq!(trie.lookup(&h).unwrap(), b"hello world");
trie.commit().unwrap();
assert_eq!(trie.lookup(&h).unwrap(), b"hello world");
trie.revert();
assert_eq!(trie.lookup(&h).unwrap(), b"hello world");
}
#[test]
fn overlaydb_backing_kill() {
let mut trie = OverlayDB::new_temp();
let h = trie.insert(b"hello world");
trie.commit().unwrap();
trie.kill(&h);
assert_eq!(trie.lookup(&h), None);
trie.commit().unwrap();
assert_eq!(trie.lookup(&h), None);
trie.revert();
assert_eq!(trie.lookup(&h), None);
}
#[test]
fn overlaydb_backing_kill_revert() {
let mut trie = OverlayDB::new_temp();
let h = trie.insert(b"hello world");
trie.commit().unwrap();
trie.kill(&h);
assert_eq!(trie.lookup(&h), None);
trie.revert();
assert_eq!(trie.lookup(&h).unwrap(), b"hello world");
}
#[test]
fn overlaydb_negative() {
let mut trie = OverlayDB::new_temp();
let h = trie.insert(b"hello world");
trie.commit().unwrap();
trie.kill(&h);
trie.kill(&h); //bad - sends us into negative refs.
assert_eq!(trie.lookup(&h), None);
assert!(trie.commit().is_err());
}
#[test]
fn overlaydb_complex() {
let mut trie = OverlayDB::new_temp();
let hfoo = trie.insert(b"foo");
assert_eq!(trie.lookup(&hfoo).unwrap(), b"foo");
let hbar = trie.insert(b"bar");
assert_eq!(trie.lookup(&hbar).unwrap(), b"bar");
trie.commit().unwrap();
assert_eq!(trie.lookup(&hfoo).unwrap(), b"foo");
assert_eq!(trie.lookup(&hbar).unwrap(), b"bar");
trie.insert(b"foo"); // two refs
assert_eq!(trie.lookup(&hfoo).unwrap(), b"foo");
trie.commit().unwrap();
assert_eq!(trie.lookup(&hfoo).unwrap(), b"foo");
assert_eq!(trie.lookup(&hbar).unwrap(), b"bar");
trie.kill(&hbar); // zero refs - delete
assert_eq!(trie.lookup(&hbar), None);
trie.kill(&hfoo); // one ref - keep
assert_eq!(trie.lookup(&hfoo).unwrap(), b"foo");
trie.commit().unwrap();
assert_eq!(trie.lookup(&hfoo).unwrap(), b"foo");
trie.kill(&hfoo); // zero ref - would delete, but...
assert_eq!(trie.lookup(&hfoo), None);
trie.insert(b"foo"); // one ref - keep after all.
assert_eq!(trie.lookup(&hfoo).unwrap(), b"foo");
trie.commit().unwrap();
assert_eq!(trie.lookup(&hfoo).unwrap(), b"foo");
trie.kill(&hfoo); // zero ref - delete
assert_eq!(trie.lookup(&hfoo), None);
trie.commit().unwrap(); //
assert_eq!(trie.lookup(&hfoo), None);
}
#[test]
fn playpen() {
use std::fs;
{
let db: DB = DB::open_default("/tmp/test").unwrap();
db.put(b"test", b"test2").unwrap();
match db.get(b"test") {
Ok(Some(value)) => println!("Got value {:?}", value.deref()),
Ok(None) => println!("No value for that key"),
Err(..) => println!("Gah"),
}
db.delete(b"test").unwrap();
}
fs::remove_dir_all("/tmp/test").unwrap();
}

87
src/rlp/mod.rs Normal file
View File

@ -0,0 +1,87 @@
//! Rlp serialization module
//!
//! Allows encoding, decoding, and view onto rlp-slice
//!
//!# What should you use when?
//!
//!### Use `encode` function when:
//! * You want to encode something inline.
//! * You do not work on big set of data.
//! * You want to encode whole data structure at once.
//!
//!### Use `decode` function when:
//! * You want to decode something inline.
//! * You do not work on big set of data.
//! * You want to decode whole rlp at once.
//!
//!### Use `RlpStream` when:
//! * You want to encode something in portions.
//! * You encode a big set of data.
//!
//!### Use `Rlp` when:
//! * You are working on trusted data (not corrupted).
//! * You want to get view onto rlp-slice.
//! * You don't want to decode whole rlp at once.
//!
//!### Use `UntrustedRlp` when:
//! * You are working on untrusted data (possibly corrupted).
//! * You need to handle data corruption errors.
//! * You are working on input data.
//! * You want to get view onto rlp-slice.
//! * You don't want to decode whole rlp at once.
pub mod rlptraits;
pub mod rlperrors;
pub mod rlpin;
pub mod untrusted_rlp;
pub mod rlpstream;
#[cfg(test)]
mod tests;
pub use self::rlperrors::DecoderError;
pub use self::rlptraits::{Decoder, Decodable, View, Stream, Encodable, Encoder};
pub use self::untrusted_rlp::{UntrustedRlp, UntrustedRlpIterator, PayloadInfo, Prototype};
pub use self::rlpin::{Rlp, RlpIterator};
pub use self::rlpstream::{RlpStream,RlpStandard};
use super::hash::H256;
pub const NULL_RLP: [u8; 1] = [0x80; 1];
pub const EMPTY_LIST_RLP: [u8; 1] = [0xC0; 1];
pub const SHA3_NULL_RLP: H256 = H256( [0x56, 0xe8, 0x1f, 0x17, 0x1b, 0xcc, 0x55, 0xa6, 0xff, 0x83, 0x45, 0xe6, 0x92, 0xc0, 0xf8, 0x6e, 0x5b, 0x48, 0xe0, 0x1b, 0x99, 0x6c, 0xad, 0xc0, 0x01, 0x62, 0x2f, 0xb5, 0xe3, 0x63, 0xb4, 0x21] );
pub const SHA3_EMPTY_LIST_RLP: H256 = H256( [0x1d, 0xcc, 0x4d, 0xe8, 0xde, 0xc7, 0x5d, 0x7a, 0xab, 0x85, 0xb5, 0x67, 0xb6, 0xcc, 0xd4, 0x1a, 0xd3, 0x12, 0x45, 0x1b, 0x94, 0x8a, 0x74, 0x13, 0xf0, 0xa1, 0x42, 0xfd, 0x40, 0xd4, 0x93, 0x47] );
/// Shortcut function to decode trusted rlp
///
/// ```rust
/// extern crate ethcore_util as util;
/// use util::rlp::*;
///
/// fn main () {
/// let data = vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'];
/// let animals: Vec<String> = decode(&data);
/// assert_eq!(animals, vec!["cat".to_string(), "dog".to_string()]);
/// }
/// ```
pub fn decode<T>(bytes: &[u8]) -> T where T: Decodable {
let rlp = Rlp::new(bytes);
rlp.as_val()
}
/// Shortcut function to encode structure into rlp.
///
/// ```rust
/// extern crate ethcore_util as util;
/// use util::rlp::*;
///
/// fn main () {
/// let animals = vec!["cat", "dog"];
/// let out = encode(&animals);
/// assert_eq!(out, vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g']);
/// }
/// ```
pub fn encode<E>(object: &E) -> Vec<u8> where E: Encodable {
let mut stream = RlpStream::new();
stream.append(object);
stream.out()
}

33
src/rlp/rlperrors.rs Normal file
View File

@ -0,0 +1,33 @@
use std::fmt;
use std::error::Error as StdError;
use bytes::FromBytesError;
#[derive(Debug, PartialEq, Eq)]
pub enum DecoderError {
FromBytesError(FromBytesError),
RlpIsTooShort,
RlpExpectedToBeList,
RlpExpectedToBeData,
RlpIncorrectListLen,
RlpDataLenWithZeroPrefix,
RlpListLenWithZeroPrefix,
RlpInvalidIndirection,
}
impl StdError for DecoderError {
fn description(&self) -> &str {
"builder error"
}
}
impl fmt::Display for DecoderError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&self, f)
}
}
impl From<FromBytesError> for DecoderError {
fn from(err: FromBytesError) -> DecoderError {
DecoderError::FromBytesError(err)
}
}

151
src/rlp/rlpin.rs Normal file
View File

@ -0,0 +1,151 @@
use std::fmt;
use rlp::{View, Decodable, DecoderError, UntrustedRlp, PayloadInfo, Prototype};
impl<'a> From<UntrustedRlp<'a>> for Rlp<'a> {
fn from(rlp: UntrustedRlp<'a>) -> Rlp<'a> {
Rlp { rlp: rlp }
}
}
/// Data-oriented view onto trusted rlp-slice.
///
/// Unlike `UntrustedRlp`, it doesn't bother you with error
/// handling. It assumes that you know what you are doing.
#[derive(Debug)]
pub struct Rlp<'a> {
rlp: UntrustedRlp<'a>
}
impl<'a> fmt::Display for Rlp<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "{}", self.rlp)
}
}
impl<'a, 'view> View<'a, 'view> for Rlp<'a> where 'a: 'view {
type Prototype = Prototype;
type PayloadInfo = PayloadInfo;
type Data = &'a [u8];
type Item = Rlp<'a>;
type Iter = RlpIterator<'a, 'view>;
/// Create a new instance of `Rlp`
fn new(bytes: &'a [u8]) -> Rlp<'a> {
Rlp {
rlp: UntrustedRlp::new(bytes)
}
}
fn as_raw(&'view self) -> &'a [u8] {
self.rlp.as_raw()
}
fn prototype(&self) -> Self::Prototype {
self.rlp.prototype().unwrap()
}
fn payload_info(&self) -> Self::PayloadInfo {
self.rlp.payload_info().unwrap()
}
fn data(&'view self) -> Self::Data {
self.rlp.data().unwrap()
}
fn item_count(&self) -> usize {
self.rlp.item_count()
}
fn size(&self) -> usize {
self.rlp.size()
}
fn at(&'view self, index: usize) -> Self::Item {
From::from(self.rlp.at(index).unwrap())
}
fn is_null(&self) -> bool {
self.rlp.is_null()
}
fn is_empty(&self) -> bool {
self.rlp.is_empty()
}
fn is_list(&self) -> bool {
self.rlp.is_list()
}
fn is_data(&self) -> bool {
self.rlp.is_data()
}
fn is_int(&self) -> bool {
self.rlp.is_int()
}
fn iter(&'view self) -> Self::Iter {
self.into_iter()
}
fn as_val<T>(&self) -> Result<T, DecoderError> where T: Decodable {
self.rlp.as_val()
}
fn val_at<T>(&self, index: usize) -> Result<T, DecoderError> where T: Decodable {
self.at(index).rlp.as_val()
}
}
impl <'a, 'view> Rlp<'a> where 'a: 'view {
fn view_as_val<T, R>(r: &R) -> T where R: View<'a, 'view>, T: Decodable {
let res: Result<T, DecoderError> = r.as_val();
res.unwrap_or_else(|_| panic!())
}
pub fn as_val<T>(&self) -> T where T: Decodable {
Self::view_as_val(self)
}
pub fn val_at<T>(&self, index: usize) -> T where T: Decodable {
Self::view_as_val(&self.at(index))
}
}
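// Illustrative sketch (not part of the original source): the trusted `Rlp`
// view returns values directly and will panic, rather than return `Err`,
// if the slice turns out to be malformed.
#[test]
fn trusted_view_returns_values_directly() {
	let data = vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'];
	let rlp = Rlp::new(&data);
	let cat: String = rlp.val_at(0);
	let dog: String = rlp.val_at(1);
	assert_eq!(cat, "cat");
	assert_eq!(dog, "dog");
}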
/// Iterator over trusted rlp-slice list elements.
pub struct RlpIterator<'a, 'view> where 'a: 'view {
rlp: &'view Rlp<'a>,
index: usize
}
impl<'a, 'view> IntoIterator for &'view Rlp<'a> where 'a: 'view {
type Item = Rlp<'a>;
type IntoIter = RlpIterator<'a, 'view>;
fn into_iter(self) -> Self::IntoIter {
RlpIterator {
rlp: self,
index: 0,
}
}
}
impl<'a, 'view> Iterator for RlpIterator<'a, 'view> {
type Item = Rlp<'a>;
fn next(&mut self) -> Option<Rlp<'a>> {
let index = self.index;
let result = self.rlp.rlp.at(index).ok().map(| iter | { From::from(iter) });
self.index += 1;
result
}
}
#[test]
fn break_it() {
use common::*;
let h: Bytes = FromHex::from_hex("f84d0589010efbef67941f79b2a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").unwrap();
let r: Rlp = Rlp::new(&h);
let u: U256 = r.val_at(1);
assert_eq!(format!("{}", u), "19526463837540678066");
}

279
src/rlp/rlpstream.rs Normal file
View File

@ -0,0 +1,279 @@
use elastic_array::*;
use bytes::{Bytes, ToBytes};
use rlp::{Stream, Encoder, Encodable};
use hash::H256;
use sha3::*;
#[derive(Debug, Copy, Clone)]
struct ListInfo {
position: usize,
current: usize,
max: usize,
}
impl ListInfo {
fn new(position: usize, max: usize) -> ListInfo {
ListInfo {
position: position,
current: 0,
max: max,
}
}
}
/// Appendable rlp encoder.
pub struct RlpStream {
unfinished_lists: ElasticArray16<ListInfo>,
encoder: BasicEncoder,
}
impl Stream for RlpStream {
fn new() -> Self {
RlpStream {
unfinished_lists: ElasticArray16::new(),
encoder: BasicEncoder::new(),
}
}
fn new_list(len: usize) -> Self {
let mut stream = RlpStream::new();
stream.append_list(len);
stream
}
fn append<'a, E>(&'a mut self, object: &E) -> &'a mut RlpStream where E: Encodable {
// encode given value and add it at the end of the stream
object.encode(&mut self.encoder);
// if list is finished, prepend the length
self.note_appended(1);
// return chainable self
self
}
fn append_list<'a>(&'a mut self, len: usize) -> &'a mut RlpStream {
match len {
0 => {
// we may finish immediately if the appended list's length is 0
self.encoder.bytes.push(0xc0u8);
self.note_appended(1);
},
_ => {
let position = self.encoder.bytes.len();
self.unfinished_lists.push(ListInfo::new(position, len));
},
}
// return chainable self
self
}
fn append_empty_data<'a>(&'a mut self) -> &'a mut RlpStream {
// push the raw item
self.encoder.bytes.push(0x80);
// try to finish and prepend the length
self.note_appended(1);
// return chainable self
self
}
fn append_raw<'a>(&'a mut self, bytes: &[u8], item_count: usize) -> &'a mut RlpStream {
// push raw items
self.encoder.bytes.append_slice(bytes);
// try to finish and prepend the length
self.note_appended(item_count);
// return chainable self
self
}
fn clear(&mut self) {
// clear bytes
self.encoder.bytes.clear();
// clear lists
self.unfinished_lists.clear();
}
fn is_finished(&self) -> bool {
self.unfinished_lists.len() == 0
}
fn as_raw(&self) -> &[u8] {
&self.encoder.bytes
}
fn out(self) -> Vec<u8> {
match self.is_finished() {
true => self.encoder.out().to_vec(),
false => panic!()
}
}
}
impl RlpStream {
/// Try to finish lists
fn note_appended(&mut self, inserted_items: usize) -> () {
if self.unfinished_lists.len() == 0 {
return;
}
let back = self.unfinished_lists.len() - 1;
let should_finish = match self.unfinished_lists.get_mut(back) {
None => false,
Some(ref mut x) => {
x.current += inserted_items;
if x.current > x.max {
panic!("You cannot append more items then you expect!");
}
x.current == x.max
}
};
if should_finish {
let x = self.unfinished_lists.pop().unwrap();
let len = self.encoder.bytes.len() - x.position;
self.encoder.insert_list_len_at_pos(len, x.position);
self.note_appended(1);
}
}
}
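// Illustrative sketch (not part of the original source): `note_appended`
// closes a nested list as soon as its declared item count is reached and
// then counts the finished list as a single item of its parent.
#[test]
fn nested_list_closes_automatically() {
	let mut stream = RlpStream::new_list(2);
	stream.append_list(2).append(&"cat").append(&"dog");
	stream.append(&"");
	assert!(stream.is_finished());
	assert_eq!(stream.out(), vec![0xca, 0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g', 0x80]);
}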
struct BasicEncoder {
bytes: ElasticArray1024<u8>,
}
impl BasicEncoder {
fn new() -> BasicEncoder {
BasicEncoder { bytes: ElasticArray1024::new() }
}
/// inserts list prefix at given position
/// TODO: optimise it further?
fn insert_list_len_at_pos(&mut self, len: usize, pos: usize) -> () {
let mut res = vec![];
match len {
0...55 => res.push(0xc0u8 + len as u8),
_ => {
res.push(0xf7u8 + len.to_bytes_len() as u8);
res.extend(len.to_bytes());
}
};
self.bytes.insert_slice(pos, &res);
}
/// get encoded value
fn out(self) -> ElasticArray1024<u8> {
self.bytes
}
}
impl Encoder for BasicEncoder {
fn emit_value(&mut self, bytes: &[u8]) -> () {
match bytes.len() {
// just 0
0 => self.bytes.push(0x80u8),
// byte is its own encoding
1 if bytes[0] < 0x80 => self.bytes.append_slice(bytes),
// (prefix + length), followed by the string
len @ 1 ... 55 => {
self.bytes.push(0x80u8 + len as u8);
self.bytes.append_slice(bytes);
}
// (prefix + length of length), followed by the length, followed by the string
len => {
self.bytes.push(0xb7 + len.to_bytes_len() as u8);
self.bytes.append_slice(&len.to_bytes());
self.bytes.append_slice(bytes);
}
}
}
fn emit_raw(&mut self, bytes: &[u8]) -> () {
self.bytes.append_slice(bytes);
}
fn emit_list<F>(&mut self, f: F) -> () where F: FnOnce(&mut Self) -> () {
// get len before inserting a list
let before_len = self.bytes.len();
// insert all list elements
f(self);
// get len after inserting a list
let after_len = self.bytes.len();
// diff is list len
let list_len = after_len - before_len;
self.insert_list_len_at_pos(list_len, before_len);
}
}
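// Illustrative sketch (not part of the original source): strings longer than
// 55 bytes take the 0xb7 + length-of-length prefix form described in
// `emit_value` above (0xb8, followed by the one-byte length 0x38 = 56).
#[test]
fn long_string_gets_length_of_length_prefix() {
	let mut stream = RlpStream::new();
	stream.append(&"Lorem ipsum dolor sit amet, consectetur adipisicing elit");
	let out = stream.out();
	assert_eq!(out[0], 0xb8);
	assert_eq!(out[1], 0x38);
	assert_eq!(out.len(), 2 + 0x38);
}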
pub trait RlpStandard {
fn rlp_append(&self, s: &mut RlpStream);
fn rlp_bytes(&self) -> Bytes {
let mut s = RlpStream::new();
self.rlp_append(&mut s);
s.out()
}
fn rlp_sha3(&self) -> H256 { self.rlp_bytes().sha3() }
}
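// Illustrative sketch (not part of the original source; `Cat` is a
// hypothetical type): an implementor only has to provide `rlp_append`;
// `rlp_bytes` and `rlp_sha3` then follow from the default methods above.
#[test]
fn rlp_standard_sketch() {
	struct Cat;
	impl RlpStandard for Cat {
		fn rlp_append(&self, s: &mut RlpStream) {
			s.append_list(2);
			s.append(&"cat");
			s.append(&"dog");
		}
	}
	assert_eq!(Cat.rlp_bytes(), vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g']);
}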
// @debris TODO: implement Encoder for RlpStandard.
impl<T> Encodable for T where T: ToBytes {
fn encode<E>(&self, encoder: &mut E) where E: Encoder {
encoder.emit_value(&self.to_bytes())
}
}
impl<'a, T> Encodable for &'a [T] where T: Encodable + 'a {
fn encode<E>(&self, encoder: &mut E) where E: Encoder {
encoder.emit_list(|e| {
// insert all list elements
for el in self.iter() {
el.encode(e);
}
})
}
}
impl<T> Encodable for Vec<T> where T: Encodable {
fn encode<E>(&self, encoder: &mut E) where E: Encoder {
let r: &[T] = self.as_ref();
r.encode(encoder)
}
}
/// Let's treat bytes differently from other lists:
/// they are a single value.
impl<'a> Encodable for &'a [u8] {
fn encode<E>(&self, encoder: &mut E) where E: Encoder {
encoder.emit_value(self)
}
}
/// Let's treat bytes differently from other lists:
/// they are a single value.
impl Encodable for Vec<u8> {
fn encode<E>(&self, encoder: &mut E) where E: Encoder {
encoder.emit_value(self)
}
}
impl<T> Encodable for Option<T> where T: Encodable {
fn encode<E>(&self, encoder: &mut E) where E: Encoder {
match *self {
Some(ref x) => x.encode(encoder),
None => encoder.emit_value(&[])
}
}
}

293
src/rlp/rlptraits.rs Normal file
View File

@ -0,0 +1,293 @@
use rlp::{DecoderError, UntrustedRlp};
pub trait Decoder: Sized {
fn read_value<T, F>(&self, f: F) -> Result<T, DecoderError>
where F: FnOnce(&[u8]) -> Result<T, DecoderError>;
fn as_list(&self) -> Result<Vec<Self>, DecoderError>;
fn as_rlp<'a>(&'a self) -> &'a UntrustedRlp<'a>;
fn as_raw(&self) -> &[u8];
}
pub trait Decodable: Sized {
fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder;
}
pub trait View<'a, 'view>: Sized {
type Prototype;
type PayloadInfo;
type Data;
type Item;
type Iter;
/// Creates a new instance of `Rlp` reader
fn new(bytes: &'a [u8]) -> Self;
/// The raw data of the RLP.
///
/// ```rust
/// extern crate ethcore_util as util;
/// use util::rlp::*;
///
/// fn main () {
/// let data = vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'];
/// let rlp = Rlp::new(&data);
/// let dog = rlp.at(1).as_raw();
/// assert_eq!(dog, &[0x83, b'd', b'o', b'g']);
/// }
/// ```
fn as_raw(&'view self) -> &'a [u8];
/// Get the prototype of the RLP.
fn prototype(&self) -> Self::Prototype;
fn payload_info(&self) -> Self::PayloadInfo;
fn data(&'view self) -> Self::Data;
/// Returns number of RLP items.
///
/// ```rust
/// extern crate ethcore_util as util;
/// use util::rlp::*;
///
/// fn main () {
/// let data = vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'];
/// let rlp = Rlp::new(&data);
/// assert_eq!(rlp.item_count(), 2);
/// let view = rlp.at(1);
/// assert_eq!(view.item_count(), 0);
/// }
/// ```
fn item_count(&self) -> usize;
/// Returns the number of bytes in the data, or zero if it isn't data.
///
/// ```rust
/// extern crate ethcore_util as util;
/// use util::rlp::*;
///
/// fn main () {
/// let data = vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'];
/// let rlp = Rlp::new(&data);
/// assert_eq!(rlp.size(), 0);
/// let view = rlp.at(1);
/// assert_eq!(view.size(), 3);
/// }
/// ```
fn size(&self) -> usize;
/// Get view onto RLP-slice at index.
///
/// Caches offset to given index, so access to successive
/// slices is faster.
///
/// ```rust
/// extern crate ethcore_util as util;
/// use util::rlp::*;
///
/// fn main () {
/// let data = vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'];
/// let rlp = Rlp::new(&data);
/// let dog: String = rlp.at(1).as_val();
/// assert_eq!(dog, "dog".to_string());
/// }
/// ```
fn at(&'view self, index: usize) -> Self::Item;
/// No value
///
/// ```rust
/// extern crate ethcore_util as util;
/// use util::rlp::*;
///
/// fn main () {
/// let data = vec![];
/// let rlp = Rlp::new(&data);
/// assert!(rlp.is_null());
/// }
/// ```
fn is_null(&self) -> bool;
/// Contains a zero-length string or zero-length list.
///
/// ```rust
/// extern crate ethcore_util as util;
/// use util::rlp::*;
///
/// fn main () {
/// let data = vec![0xc0];
/// let rlp = Rlp::new(&data);
/// assert!(rlp.is_empty());
/// }
/// ```
fn is_empty(&self) -> bool;
/// List value
///
/// ```rust
/// extern crate ethcore_util as util;
/// use util::rlp::*;
///
/// fn main () {
/// let data = vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'];
/// let rlp = Rlp::new(&data);
/// assert!(rlp.is_list());
/// }
/// ```
fn is_list(&self) -> bool;
/// String value
///
/// ```rust
/// extern crate ethcore_util as util;
/// use util::rlp::*;
///
/// fn main () {
/// let data = vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'];
/// let rlp = Rlp::new(&data);
/// assert!(rlp.at(1).is_data());
/// }
/// ```
fn is_data(&self) -> bool;
/// Int value
///
/// ```rust
/// extern crate ethcore_util as util;
/// use util::rlp::*;
///
/// fn main () {
/// let data = vec![0xc1, 0x10];
/// let rlp = Rlp::new(&data);
/// assert_eq!(rlp.is_int(), false);
/// assert_eq!(rlp.at(0).is_int(), true);
/// }
/// ```
fn is_int(&self) -> bool;
/// Get iterator over rlp-slices
///
/// ```rust
/// extern crate ethcore_util as util;
/// use util::rlp::*;
///
/// fn main () {
/// let data = vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'];
/// let rlp = Rlp::new(&data);
/// let strings: Vec<String> = rlp.iter().map(| i | i.as_val()).collect();
/// }
/// ```
fn iter(&'view self) -> Self::Iter;
fn as_val<T>(&self) -> Result<T, DecoderError> where T: Decodable;
fn val_at<T>(&self, index: usize) -> Result<T, DecoderError> where T: Decodable;
}
pub trait Encoder {
fn emit_value(&mut self, bytes: &[u8]) -> ();
fn emit_list<F>(&mut self, f: F) -> () where F: FnOnce(&mut Self) -> ();
fn emit_raw(&mut self, bytes: &[u8]) -> ();
}
pub trait Encodable {
fn encode<E>(&self, encoder: &mut E) -> () where E: Encoder;
}
pub trait Stream: Sized {
/// Initializes instance of empty `Stream`.
fn new() -> Self;
/// Initializes the `Stream` as a list.
fn new_list(len: usize) -> Self;
/// Appends value to the end of stream, chainable.
///
/// ```rust
/// extern crate ethcore_util as util;
/// use util::rlp::*;
///
/// fn main () {
/// let mut stream = RlpStream::new_list(2);
/// stream.append(&"cat").append(&"dog");
/// let out = stream.out();
/// assert_eq!(out, vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g']);
/// }
/// ```
fn append<'a, E>(&'a mut self, object: &E) -> &'a mut Self where E: Encodable;
/// Declare appending the list of given size, chainable.
///
/// ```rust
/// extern crate ethcore_util as util;
/// use util::rlp::*;
///
/// fn main () {
/// let mut stream = RlpStream::new_list(2);
/// stream.append_list(2).append(&"cat").append(&"dog");
/// stream.append(&"");
/// let out = stream.out();
/// assert_eq!(out, vec![0xca, 0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g', 0x80]);
/// }
/// ```
fn append_list<'a>(&'a mut self, len: usize) -> &'a mut Self;
/// Appends null to the end of stream, chainable.
///
/// ```rust
/// extern crate ethcore_util as util;
/// use util::rlp::*;
///
/// fn main () {
/// let mut stream = RlpStream::new_list(2);
/// stream.append_empty_data().append_empty_data();
/// let out = stream.out();
/// assert_eq!(out, vec![0xc2, 0x80, 0x80]);
/// }
/// ```
fn append_empty_data<'a>(&'a mut self) -> &'a mut Self;
/// Appends raw (pre-serialised) RLP data. Use with caution. Chainable.
fn append_raw<'a>(&'a mut self, bytes: &[u8], item_count: usize) -> &'a mut Self;
/// Clear the output stream so far.
///
/// ```rust
/// extern crate ethcore_util as util;
/// use util::rlp::*;
///
/// fn main () {
/// let mut stream = RlpStream::new_list(3);
/// stream.append(&"cat");
/// stream.clear();
/// stream.append(&"dog");
/// let out = stream.out();
/// assert_eq!(out, vec![0x83, b'd', b'o', b'g']);
/// }
/// ```
fn clear(&mut self);
/// Returns true if stream doesn't expect any more items.
///
/// ```rust
/// extern crate ethcore_util as util;
/// use util::rlp::*;
///
/// fn main () {
/// let mut stream = RlpStream::new_list(2);
/// stream.append(&"cat");
/// assert_eq!(stream.is_finished(), false);
/// stream.append(&"dog");
/// assert_eq!(stream.is_finished(), true);
/// let out = stream.out();
/// assert_eq!(out, vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g']);
/// }
/// ```
fn is_finished(&self) -> bool;
fn as_raw(&self) -> &[u8];
/// Streams out encoded bytes.
///
/// Panics if the stream is not finished.
fn out(self) -> Vec<u8>;
}

353
src/rlp/tests.rs Normal file
View File

@ -0,0 +1,353 @@
extern crate json_tests;
use self::json_tests::execute_tests_from_directory;
use self::json_tests::rlp as rlptest;
use std::{fmt, cmp};
use std::str::FromStr;
use rlp;
use rlp::{UntrustedRlp, RlpStream, View, Stream};
use uint::U256;
#[test]
fn rlp_at() {
let data = vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'];
{
let rlp = UntrustedRlp::new(&data);
assert!(rlp.is_list());
//let animals = <Vec<String> as rlp::Decodable>::decode_untrusted(&rlp).unwrap();
let animals: Vec<String> = rlp.as_val().unwrap();
assert_eq!(animals, vec!["cat".to_string(), "dog".to_string()]);
let cat = rlp.at(0).unwrap();
assert!(cat.is_data());
assert_eq!(cat.as_raw(), &[0x83, b'c', b'a', b't']);
//assert_eq!(String::decode_untrusted(&cat).unwrap(), "cat".to_string());
assert_eq!(cat.as_val::<String>().unwrap(), "cat".to_string());
let dog = rlp.at(1).unwrap();
assert!(dog.is_data());
assert_eq!(dog.as_raw(), &[0x83, b'd', b'o', b'g']);
//assert_eq!(String::decode_untrusted(&dog).unwrap(), "dog".to_string());
assert_eq!(dog.as_val::<String>().unwrap(), "dog".to_string());
let cat_again = rlp.at(0).unwrap();
assert!(cat_again.is_data());
assert_eq!(cat_again.as_raw(), &[0x83, b'c', b'a', b't']);
//assert_eq!(String::decode_untrusted(&cat_again).unwrap(), "cat".to_string());
assert_eq!(cat_again.as_val::<String>().unwrap(), "cat".to_string());
}
}
#[test]
fn rlp_at_err() {
let data = vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o'];
{
let rlp = UntrustedRlp::new(&data);
assert!(rlp.is_list());
let cat_err = rlp.at(0).unwrap_err();
assert_eq!(cat_err, rlp::DecoderError::RlpIsTooShort);
let dog_err = rlp.at(1).unwrap_err();
assert_eq!(dog_err, rlp::DecoderError::RlpIsTooShort);
}
}
#[test]
fn rlp_iter() {
let data = vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'];
{
let rlp = UntrustedRlp::new(&data);
let mut iter = rlp.iter();
let cat = iter.next().unwrap();
assert!(cat.is_data());
assert_eq!(cat.as_raw(), &[0x83, b'c', b'a', b't']);
let dog = iter.next().unwrap();
assert!(dog.is_data());
assert_eq!(dog.as_raw(), &[0x83, b'd', b'o', b'g']);
let none = iter.next();
assert!(none.is_none());
let cat_again = rlp.at(0).unwrap();
assert!(cat_again.is_data());
assert_eq!(cat_again.as_raw(), &[0x83, b'c', b'a', b't']);
}
}
struct ETestPair<T>(T, Vec<u8>) where T: rlp::Encodable;
fn run_encode_tests<T>(tests: Vec<ETestPair<T>>)
where T: rlp::Encodable
{
for t in &tests {
let res = rlp::encode(&t.0);
assert_eq!(res, &t.1[..]);
}
}
#[test]
fn encode_u16() {
let tests = vec![
ETestPair(0u16, vec![0x80u8]),
ETestPair(0x100, vec![0x82, 0x01, 0x00]),
ETestPair(0xffff, vec![0x82, 0xff, 0xff]),
];
run_encode_tests(tests);
}
#[test]
fn encode_u32() {
let tests = vec![
ETestPair(0u32, vec![0x80u8]),
ETestPair(0x10000, vec![0x83, 0x01, 0x00, 0x00]),
ETestPair(0xffffff, vec![0x83, 0xff, 0xff, 0xff]),
];
run_encode_tests(tests);
}
#[test]
fn encode_u64() {
let tests = vec![
ETestPair(0u64, vec![0x80u8]),
ETestPair(0x1000000, vec![0x84, 0x01, 0x00, 0x00, 0x00]),
ETestPair(0xFFFFFFFF, vec![0x84, 0xff, 0xff, 0xff, 0xff]),
];
run_encode_tests(tests);
}
#[test]
fn encode_u256() {
let tests = vec![ETestPair(U256::from(0u64), vec![0x80u8]),
ETestPair(U256::from(0x1000000u64), vec![0x84, 0x01, 0x00, 0x00, 0x00]),
ETestPair(U256::from(0xffffffffu64),
vec![0x84, 0xff, 0xff, 0xff, 0xff]),
ETestPair(U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000\
000100000000000012f0")
.unwrap(),
vec![0xa0, 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0,
0x09, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x77, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x12, 0xf0])];
run_encode_tests(tests);
}
#[test]
fn encode_str() {
let tests = vec![ETestPair("cat", vec![0x83, b'c', b'a', b't']),
ETestPair("dog", vec![0x83, b'd', b'o', b'g']),
ETestPair("Marek", vec![0x85, b'M', b'a', b'r', b'e', b'k']),
ETestPair("", vec![0x80]),
ETestPair("Lorem ipsum dolor sit amet, consectetur adipisicing elit",
vec![0xb8, 0x38, b'L', b'o', b'r', b'e', b'm', b' ', b'i',
b'p', b's', b'u', b'm', b' ', b'd', b'o', b'l', b'o',
b'r', b' ', b's', b'i', b't', b' ', b'a', b'm', b'e',
b't', b',', b' ', b'c', b'o', b'n', b's', b'e', b'c',
b't', b'e', b't', b'u', b'r', b' ', b'a', b'd', b'i',
b'p', b'i', b's', b'i', b'c', b'i', b'n', b'g', b' ',
b'e', b'l', b'i', b't'])];
run_encode_tests(tests);
}
#[test]
fn encode_address() {
use hash::*;
let tests = vec![
ETestPair(Address::from_str("ef2d6d194084c2de36e0dabfce45d046b37d1106").unwrap(),
vec![0x94, 0xef, 0x2d, 0x6d, 0x19, 0x40, 0x84, 0xc2, 0xde,
0x36, 0xe0, 0xda, 0xbf, 0xce, 0x45, 0xd0, 0x46,
0xb3, 0x7d, 0x11, 0x06])
];
run_encode_tests(tests);
}
/// Vec<u8> (Bytes) is treated as a single value
#[test]
fn encode_vector_u8() {
let tests = vec![
ETestPair(vec![], vec![0x80]),
ETestPair(vec![0u8], vec![0]),
ETestPair(vec![0x15], vec![0x15]),
ETestPair(vec![0x40, 0x00], vec![0x82, 0x40, 0x00]),
];
run_encode_tests(tests);
}
#[test]
fn encode_vector_u64() {
let tests = vec![
ETestPair(vec![], vec![0xc0]),
ETestPair(vec![15u64], vec![0xc1, 0x0f]),
ETestPair(vec![1, 2, 3, 7, 0xff], vec![0xc6, 1, 2, 3, 7, 0x81, 0xff]),
ETestPair(vec![0xffffffff, 1, 2, 3, 7, 0xff], vec![0xcb, 0x84, 0xff, 0xff, 0xff, 0xff, 1, 2, 3, 7, 0x81, 0xff]),
];
run_encode_tests(tests);
}
#[test]
fn encode_vector_str() {
let tests = vec![ETestPair(vec!["cat", "dog"],
vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'])];
run_encode_tests(tests);
}
#[test]
fn encode_vector_of_vectors_str() {
let tests = vec![ETestPair(vec![vec!["cat"]], vec![0xc5, 0xc4, 0x83, b'c', b'a', b't'])];
run_encode_tests(tests);
}
struct DTestPair<T>(T, Vec<u8>) where T: rlp::Decodable + fmt::Debug + cmp::Eq;
fn run_decode_tests<T>(tests: Vec<DTestPair<T>>) where T: rlp::Decodable + fmt::Debug + cmp::Eq {
for t in &tests {
let res: T = rlp::decode(&t.1);
assert_eq!(res, t.0);
}
}
/// Vec<u8> (Bytes) is treated as a single value
#[test]
fn decode_vector_u8() {
let tests = vec![
DTestPair(vec![], vec![0x80]),
DTestPair(vec![0u8], vec![0]),
DTestPair(vec![0x15], vec![0x15]),
DTestPair(vec![0x40, 0x00], vec![0x82, 0x40, 0x00]),
];
run_decode_tests(tests);
}
#[test]
fn decode_untrusted_u16() {
let tests = vec![
DTestPair(0u16, vec![0u8]),
DTestPair(0x100, vec![0x82, 0x01, 0x00]),
DTestPair(0xffff, vec![0x82, 0xff, 0xff]),
];
run_decode_tests(tests);
}
#[test]
fn decode_untrusted_u32() {
let tests = vec![
DTestPair(0u32, vec![0u8]),
DTestPair(0x10000, vec![0x83, 0x01, 0x00, 0x00]),
DTestPair(0xffffff, vec![0x83, 0xff, 0xff, 0xff]),
];
run_decode_tests(tests);
}
#[test]
fn decode_untrusted_u64() {
let tests = vec![
DTestPair(0u64, vec![0u8]),
DTestPair(0x1000000, vec![0x84, 0x01, 0x00, 0x00, 0x00]),
DTestPair(0xFFFFFFFF, vec![0x84, 0xff, 0xff, 0xff, 0xff]),
];
run_decode_tests(tests);
}
#[test]
fn decode_untrusted_u256() {
let tests = vec![DTestPair(U256::from(0u64), vec![0x80u8]),
DTestPair(U256::from(0x1000000u64), vec![0x84, 0x01, 0x00, 0x00, 0x00]),
DTestPair(U256::from(0xffffffffu64),
vec![0x84, 0xff, 0xff, 0xff, 0xff]),
DTestPair(U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000\
000100000000000012f0")
.unwrap(),
vec![0xa0, 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0,
0x09, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x77, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x12, 0xf0])];
run_decode_tests(tests);
}
#[test]
fn decode_untrusted_str() {
let tests = vec![DTestPair("cat".to_string(), vec![0x83, b'c', b'a', b't']),
DTestPair("dog".to_string(), vec![0x83, b'd', b'o', b'g']),
DTestPair("Marek".to_string(),
vec![0x85, b'M', b'a', b'r', b'e', b'k']),
DTestPair("".to_string(), vec![0x80]),
DTestPair("Lorem ipsum dolor sit amet, consectetur adipisicing elit"
.to_string(),
vec![0xb8, 0x38, b'L', b'o', b'r', b'e', b'm', b' ', b'i',
b'p', b's', b'u', b'm', b' ', b'd', b'o', b'l', b'o',
b'r', b' ', b's', b'i', b't', b' ', b'a', b'm', b'e',
b't', b',', b' ', b'c', b'o', b'n', b's', b'e', b'c',
b't', b'e', b't', b'u', b'r', b' ', b'a', b'd', b'i',
b'p', b'i', b's', b'i', b'c', b'i', b'n', b'g', b' ',
b'e', b'l', b'i', b't'])];
run_decode_tests(tests);
}
#[test]
fn decode_untrusted_address() {
use hash::*;
let tests = vec![
DTestPair(Address::from_str("ef2d6d194084c2de36e0dabfce45d046b37d1106").unwrap(),
vec![0x94, 0xef, 0x2d, 0x6d, 0x19, 0x40, 0x84, 0xc2, 0xde,
0x36, 0xe0, 0xda, 0xbf, 0xce, 0x45, 0xd0, 0x46,
0xb3, 0x7d, 0x11, 0x06])
];
run_decode_tests(tests);
}
#[test]
fn decode_untrusted_vector_u64() {
let tests = vec![
DTestPair(vec![], vec![0xc0]),
DTestPair(vec![15u64], vec![0xc1, 0x0f]),
DTestPair(vec![1, 2, 3, 7, 0xff], vec![0xc6, 1, 2, 3, 7, 0x81, 0xff]),
DTestPair(vec![0xffffffff, 1, 2, 3, 7, 0xff], vec![0xcb, 0x84, 0xff, 0xff, 0xff, 0xff, 1, 2, 3, 7, 0x81, 0xff]),
];
run_decode_tests(tests);
}
#[test]
fn decode_untrusted_vector_str() {
let tests = vec![DTestPair(vec!["cat".to_string(), "dog".to_string()],
vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'])];
run_decode_tests(tests);
}
#[test]
fn decode_untrusted_vector_of_vectors_str() {
let tests = vec![DTestPair(vec![vec!["cat".to_string()]],
vec![0xc5, 0xc4, 0x83, b'c', b'a', b't'])];
run_decode_tests(tests);
}
#[test]
fn test_rlp_json() {
println!("Json rlp test: ");
execute_tests_from_directory::<rlptest::RlpStreamTest, _>("json-tests/json/rlp/stream/*.json", &mut | file, input, output | {
println!("file: {}", file);
let mut stream = RlpStream::new();
for operation in input.into_iter() {
match operation {
rlptest::Operation::Append(ref v) => stream.append(v),
rlptest::Operation::AppendList(len) => stream.append_list(len),
rlptest::Operation::AppendRaw(ref raw, len) => stream.append_raw(raw, len),
rlptest::Operation::AppendEmpty => stream.append_empty_data()
};
}
assert_eq!(stream.out(), output);
});
}
#[test]
fn test_decoding_array() {
let v = vec![5u16, 2u16];
let res = rlp::encode(&v);
let arr: [u16; 2] = rlp::decode(&res);
assert_eq!(arr[0], 5);
assert_eq!(arr[1], 2);
}

441
src/rlp/untrusted_rlp.rs Normal file
View File

@ -0,0 +1,441 @@
use std::cell::Cell;
use std::fmt;
use rustc_serialize::hex::ToHex;
use bytes::{FromBytes};
use rlp::{View, Decoder, Decodable, DecoderError};
/// rlp offset
#[derive(Copy, Clone, Debug)]
struct OffsetCache {
index: usize,
offset: usize,
}
impl OffsetCache {
fn new(index: usize, offset: usize) -> OffsetCache {
OffsetCache {
index: index,
offset: offset,
}
}
}
#[derive(Debug)]
pub enum Prototype {
Null,
Data(usize),
List(usize),
}
/// Stores basic information about item
pub struct PayloadInfo {
pub header_len: usize,
pub value_len: usize,
}
impl PayloadInfo {
fn new(header_len: usize, value_len: usize) -> PayloadInfo {
PayloadInfo {
header_len: header_len,
value_len: value_len,
}
}
}
/// Data-oriented view onto rlp-slice.
///
/// This is an immutable structure. No operations change it.
///
/// Should be used in places where error handling is required,
/// e.g. on input.
#[derive(Debug)]
pub struct UntrustedRlp<'a> {
bytes: &'a [u8],
offset_cache: Cell<OffsetCache>,
count_cache: Cell<Option<usize>>,
}
impl<'a> Clone for UntrustedRlp<'a> {
fn clone(&self) -> UntrustedRlp<'a> {
UntrustedRlp {
bytes: self.bytes,
offset_cache: self.offset_cache.clone(),
count_cache: self.count_cache.clone(),
}
}
}
impl<'a> fmt::Display for UntrustedRlp<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match self.prototype() {
Ok(Prototype::Null) => write!(f, "null"),
Ok(Prototype::Data(_)) => write!(f, "\"0x{}\"", self.data().unwrap().to_hex()),
Ok(Prototype::List(len)) => {
try!(write!(f, "["));
for i in 0..len-1 {
try!(write!(f, "{}, ", self.at(i).unwrap()));
}
try!(write!(f, "{}", self.at(len - 1).unwrap()));
write!(f, "]")
},
Err(err) => write!(f, "{:?}", err)
}
}
}
impl<'a, 'view> View<'a, 'view> for UntrustedRlp<'a> where 'a: 'view {
type Prototype = Result<Prototype, DecoderError>;
type PayloadInfo = Result<PayloadInfo, DecoderError>;
type Data = Result<&'a [u8], DecoderError>;
type Item = Result<UntrustedRlp<'a>, DecoderError>;
type Iter = UntrustedRlpIterator<'a, 'view>;
// returns a new instance of `UntrustedRlp`
fn new(bytes: &'a [u8]) -> UntrustedRlp<'a> {
UntrustedRlp {
bytes: bytes,
offset_cache: Cell::new(OffsetCache::new(usize::max_value(), 0)),
count_cache: Cell::new(None)
}
}
fn as_raw(&'view self) -> &'a [u8] {
self.bytes
}
fn prototype(&self) -> Self::Prototype {
// TODO: optimize and return appropriate errors
if self.is_data() {
Ok(Prototype::Data(self.size()))
} else if self.is_list() {
Ok(Prototype::List(self.item_count()))
} else {
Ok(Prototype::Null)
}
}
fn payload_info(&self) -> Self::PayloadInfo {
BasicDecoder::payload_info(self.bytes)
}
fn data(&'view self) -> Self::Data {
let pi = try!(BasicDecoder::payload_info(self.bytes));
Ok(&self.bytes[pi.header_len..(pi.header_len + pi.value_len)])
}
fn item_count(&self) -> usize {
match self.is_list() {
true => match self.count_cache.get() {
Some(c) => c,
None => {
let c = self.iter().count();
self.count_cache.set(Some(c));
c
}
},
false => 0
}
}
fn size(&self) -> usize {
match self.is_data() {
// we can safely unwrap here because it's data
true => BasicDecoder::payload_info(self.bytes).unwrap().value_len,
false => 0
}
}
fn at(&'view self, index: usize) -> Self::Item {
if !self.is_list() {
return Err(DecoderError::RlpExpectedToBeList);
}
// move to cached position if its index is less than or equal to
// the current search index, otherwise move to beginning of list
let c = self.offset_cache.get();
let (mut bytes, to_skip) = match c.index <= index {
true => (try!(UntrustedRlp::consume(self.bytes, c.offset)), index - c.index),
false => (try!(self.consume_list_prefix()), index),
};
// skip up to x items
bytes = try!(UntrustedRlp::consume_items(bytes, to_skip));
// update the cache
self.offset_cache.set(OffsetCache::new(index, self.bytes.len() - bytes.len()));
// construct new rlp
let found = try!(BasicDecoder::payload_info(bytes));
Ok(UntrustedRlp::new(&bytes[0..found.header_len + found.value_len]))
}
fn is_null(&self) -> bool {
self.bytes.len() == 0
}
fn is_empty(&self) -> bool {
!self.is_null() && (self.bytes[0] == 0xc0 || self.bytes[0] == 0x80)
}
fn is_list(&self) -> bool {
!self.is_null() && self.bytes[0] >= 0xc0
}
fn is_data(&self) -> bool {
!self.is_null() && self.bytes[0] < 0xc0
}
fn is_int(&self) -> bool {
if self.is_null() {
return false;
}
match self.bytes[0] {
0...0x80 => true,
0x81...0xb7 => self.bytes[1] != 0,
b @ 0xb8...0xbf => self.bytes[1 + b as usize - 0xb7] != 0,
_ => false
}
}
fn iter(&'view self) -> Self::Iter {
self.into_iter()
}
fn as_val<T>(&self) -> Result<T, DecoderError> where T: Decodable {
// TODO: optimize so it doesn't use clone (although this clone is cheap)
T::decode(&BasicDecoder::new(self.clone()))
}
fn val_at<T>(&self, index: usize) -> Result<T, DecoderError> where T: Decodable {
try!(self.at(index)).as_val()
}
}
impl<'a> UntrustedRlp<'a> {
/// consumes first found prefix
fn consume_list_prefix(&self) -> Result<&'a [u8], DecoderError> {
let item = try!(BasicDecoder::payload_info(self.bytes));
let bytes = try!(UntrustedRlp::consume(self.bytes, item.header_len));
Ok(bytes)
}
/// consumes fixed number of items
fn consume_items(bytes: &'a [u8], items: usize) -> Result<&'a [u8], DecoderError> {
let mut result = bytes;
for _ in 0..items {
let i = try!(BasicDecoder::payload_info(result));
result = try!(UntrustedRlp::consume(result, (i.header_len + i.value_len)));
}
Ok(result)
}
/// consumes slice prefix of length `len`
fn consume(bytes: &'a [u8], len: usize) -> Result<&'a [u8], DecoderError> {
match bytes.len() >= len {
true => Ok(&bytes[len..]),
false => Err(DecoderError::RlpIsTooShort),
}
}
}
/// Iterator over rlp-slice list elements.
pub struct UntrustedRlpIterator<'a, 'view> where 'a: 'view {
rlp: &'view UntrustedRlp<'a>,
index: usize,
}
impl<'a, 'view> IntoIterator for &'view UntrustedRlp<'a> where 'a: 'view {
type Item = UntrustedRlp<'a>;
type IntoIter = UntrustedRlpIterator<'a, 'view>;
fn into_iter(self) -> Self::IntoIter {
UntrustedRlpIterator {
rlp: self,
index: 0,
}
}
}
impl<'a, 'view> Iterator for UntrustedRlpIterator<'a, 'view> {
type Item = UntrustedRlp<'a>;
fn next(&mut self) -> Option<UntrustedRlp<'a>> {
let index = self.index;
let result = self.rlp.at(index).ok();
self.index += 1;
result
}
}
struct BasicDecoder<'a> {
rlp: UntrustedRlp<'a>
}
impl<'a> BasicDecoder<'a> {
pub fn new(rlp: UntrustedRlp<'a>) -> BasicDecoder<'a> {
BasicDecoder {
rlp: rlp
}
}
/// Return first item info
fn payload_info(bytes: &[u8]) -> Result<PayloadInfo, DecoderError> {
let item = match bytes.first().map(|&x| x) {
None => return Err(DecoderError::RlpIsTooShort),
Some(0...0x7f) => PayloadInfo::new(0, 1),
Some(l @ 0x80...0xb7) => PayloadInfo::new(1, l as usize - 0x80),
Some(l @ 0xb8...0xbf) => {
let len_of_len = l as usize - 0xb7;
let header_len = 1 + len_of_len;
if bytes[1] == 0 { return Err(DecoderError::RlpDataLenWithZeroPrefix); }
let value_len = try!(usize::from_bytes(&bytes[1..header_len]));
PayloadInfo::new(header_len, value_len)
}
Some(l @ 0xc0...0xf7) => PayloadInfo::new(1, l as usize - 0xc0),
Some(l @ 0xf8...0xff) => {
let len_of_len = l as usize - 0xf7;
let header_len = 1 + len_of_len;
let value_len = try!(usize::from_bytes(&bytes[1..header_len]));
if bytes[1] == 0 { return Err(DecoderError::RlpListLenWithZeroPrefix); }
PayloadInfo::new(header_len, value_len)
},
// we can't reach this arm, but rust requires `_` to be covered
_ => { unreachable!(); }
};
match item.header_len + item.value_len <= bytes.len() {
true => Ok(item),
false => Err(DecoderError::RlpIsTooShort),
}
}
}
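// Illustrative sketch (not part of the original source): for a 0xc8-prefixed
// short list the header is a single byte and the payload is the remaining
// eight bytes, exactly as `payload_info` above computes it.
#[test]
fn payload_info_of_short_list() {
	let data = vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'];
	let info = UntrustedRlp::new(&data).payload_info().unwrap();
	assert_eq!(info.header_len, 1);
	assert_eq!(info.value_len, 8);
}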
impl<'a> Decoder for BasicDecoder<'a> {
fn read_value<T, F>(&self, f: F) -> Result<T, DecoderError>
where F: FnOnce(&[u8]) -> Result<T, DecoderError> {
let bytes = self.rlp.as_raw();
match bytes.first().map(|&x| x) {
// rlp is too short
None => Err(DecoderError::RlpIsTooShort),
// single byte value
Some(l @ 0...0x7f) => Ok(try!(f(&[l]))),
// 0-55 bytes
Some(l @ 0x80...0xb7) => {
let d = &bytes[1..(1 + l as usize - 0x80)];
if l == 0x81 && d[0] < 0x80 {
return Err(DecoderError::RlpInvalidIndirection);
}
Ok(try!(f(d)))
},
// longer than 55 bytes
Some(l @ 0xb8...0xbf) => {
let len_of_len = l as usize - 0xb7;
let begin_of_value = 1 as usize + len_of_len;
let len = try!(usize::from_bytes(&bytes[1..begin_of_value]));
Ok(try!(f(&bytes[begin_of_value..begin_of_value + len])))
}
// we are reading value, not a list!
_ => Err(DecoderError::RlpExpectedToBeData)
}
}
fn as_raw(&self) -> &[u8] {
self.rlp.as_raw()
}
fn as_list(&self) -> Result<Vec<Self>, DecoderError> {
let v: Vec<BasicDecoder<'a>> = self.rlp.iter()
.map(| i | BasicDecoder::new(i))
.collect();
Ok(v)
}
fn as_rlp<'s>(&'s self) -> &'s UntrustedRlp<'s> {
&self.rlp
}
}
impl<T> Decodable for T where T: FromBytes {
fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
decoder.read_value(| bytes | {
Ok(try!(T::from_bytes(bytes)))
})
}
}
impl<T> Decodable for Vec<T> where T: Decodable {
fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
let decoders = try!(decoder.as_list());
decoders.iter().map(|d| T::decode(d)).collect()
}
}
impl Decodable for Vec<u8> {
fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
decoder.read_value(| bytes | {
let mut res = vec![];
res.extend(bytes);
Ok(res)
})
}
}
impl<T> Decodable for Option<T> where T: Decodable {
fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
decoder.read_value(| bytes | {
let res = match bytes.len() {
0 => None,
_ => Some(try!(T::decode(decoder)))
};
Ok(res)
})
}
}
macro_rules! impl_array_decodable {
($index_type:ty, $len:expr ) => (
impl<T> Decodable for [T; $len] where T: Decodable {
fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
let decoders = try!(decoder.as_list());
let mut result: [T; $len] = unsafe { ::std::mem::uninitialized() };
if decoders.len() != $len {
return Err(DecoderError::RlpIncorrectListLen);
}
for i in 0..decoders.len() {
result[i] = try!(T::decode(&decoders[i]));
}
Ok(result)
}
}
)
}
macro_rules! impl_array_decodable_recursive {
($index_type:ty, ) => ();
($index_type:ty, $len:expr, $($more:expr,)*) => (
impl_array_decodable!($index_type, $len);
impl_array_decodable_recursive!($index_type, $($more,)*);
);
}
impl_array_decodable_recursive!(
u8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 40, 48, 56, 64, 72, 96, 128, 160, 192, 224,
);
#[test]
fn test_rlp_display() {
use rustc_serialize::hex::FromHex;
let data = "f84d0589010efbef67941f79b2a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470".from_hex().unwrap();
let rlp = UntrustedRlp::new(&data);
assert_eq!(format!("{}", rlp), "[\"0x05\", \"0x010efbef67941f79b2\", \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\", \"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470\"]");
}

29
src/semantic_version.rs Normal file
View File

@ -0,0 +1,29 @@
/// A version value with strict meaning. Use `to_u32` to convert to a simple integer.
///
/// # Example
/// ```
/// extern crate ethcore_util as util;
/// use util::semantic_version::*;
///
/// fn main() {
/// assert_eq!(SemanticVersion::new(1, 2, 3).as_u32(), 0x010203);
/// }
/// ```
pub struct SemanticVersion {
/// Major version - API/feature removals & breaking changes.
pub major: u8,
/// Minor version - API/feature additions.
pub minor: u8,
/// Tiny version - bug fixes.
pub tiny: u8,
}
impl SemanticVersion {
/// Create a new object.
pub fn new(major: u8, minor: u8, tiny: u8) -> SemanticVersion { SemanticVersion{major: major, minor: minor, tiny: tiny} }
/// Convert to a `u32` representation.
pub fn as_u32(&self) -> u32 { ((self.major as u32) << 16) + ((self.minor as u32) << 8) + self.tiny as u32 }
}
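// Illustrative sketch (not part of the original source): each component
// occupies one byte of the packed value, so 1.2.3 becomes 0x010203.
#[test]
fn semantic_version_packs_bytes() {
	assert_eq!(SemanticVersion::new(1, 2, 3).as_u32(), 0x010203);
}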
// TODO: implement Eq, Comparison and Debug/Display for SemanticVersion.

59
src/sha3.rs Normal file
View File

@ -0,0 +1,59 @@
//! Wrapper around tiny-keccak crate.
use std::mem::uninitialized;
use bytes::{BytesConvertable, Populatable};
use hash::{H256, FixedHash};
pub const SHA3_EMPTY: H256 = H256( [0xc5, 0xd2, 0x46, 0x01, 0x86, 0xf7, 0x23, 0x3c, 0x92, 0x7e, 0x7d, 0xb2, 0xdc, 0xc7, 0x03, 0xc0, 0xe5, 0x00, 0xb6, 0x53, 0xca, 0x82, 0x27, 0x3b, 0x7b, 0xfa, 0xd8, 0x04, 0x5d, 0x85, 0xa4, 0x70] );
extern {
fn sha3_256(out: *mut u8, outlen: usize, input: *const u8, inputlen: usize) -> i32;
}
/// Types implementing this trait are sha3able.
///
/// ```
/// extern crate ethcore_util as util;
/// use std::str::FromStr;
/// use util::sha3::*;
/// use util::hash::*;
///
/// fn main() {
/// assert_eq!([0u8; 0].sha3(), H256::from_str("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").unwrap());
/// }
/// ```
pub trait Hashable {
/// Calculate SHA3 of this object.
fn sha3(&self) -> H256;
/// Calculate SHA3 of this object and place result into dest.
fn sha3_into(&self, dest: &mut [u8]) {
self.sha3().copy_to(dest);
}
}
impl<T> Hashable for T where T: BytesConvertable {
fn sha3(&self) -> H256 {
unsafe {
let mut ret: H256 = uninitialized();
self.sha3_into(ret.as_slice_mut());
ret
}
}
fn sha3_into(&self, dest: &mut [u8]) {
unsafe {
let input: &[u8] = self.bytes();
sha3_256(dest.as_mut_ptr(), dest.len(), input.as_ptr(), input.len());
}
}
}
#[test]
fn sha3_empty() {
assert_eq!([0u8; 0].sha3(), SHA3_EMPTY);
}
#[test]
fn sha3_as() {
assert_eq!([0x41u8; 32].sha3(), From::from("59cad5948673622c1d64e2322488bf01619f7ff45789741b15a9f782ce9290a8"));
}

67
src/squeeze.rs Normal file
View File

@ -0,0 +1,67 @@
//! Helper module that should be used to randomly squeeze
//! caches to a given size in bytes
//!
//! ```
//! extern crate heapsize;
//! extern crate ethcore_util as util;
//! use std::collections::HashMap;
//! use std::mem::size_of;
//! use heapsize::HeapSizeOf;
//! use util::squeeze::Squeeze;
//!
//! fn main() {
//! let initial_size = 60;
//! let mut map: HashMap<u8, u8> = HashMap::with_capacity(initial_size);
//! assert!(map.capacity() >= initial_size);
//! for i in 0..initial_size {
//! map.insert(i as u8, i as u8);
//! }
//!
//! assert_eq!(map.heap_size_of_children(), map.capacity() * 2 * size_of::<u8>());
//! assert_eq!(map.len(), initial_size);
//! let initial_heap_size = map.heap_size_of_children();
//!
//! // squeeze it to size of key and value
//! map.squeeze(2 * size_of::<u8>());
//! assert_eq!(map.len(), 1);
//!
//! // it's likely that heap size was reduced, but we can't be 100% sure
//! assert!(initial_heap_size >= map.heap_size_of_children());
//! }
//! ```
use std::collections::HashMap;
use std::hash::Hash;
use heapsize::HeapSizeOf;
/// Should be used to squeeze collections to a certain size in bytes
pub trait Squeeze {
fn squeeze(&mut self, size: usize);
}
impl<K, T> Squeeze for HashMap<K, T> where K: Eq + Hash + Clone + HeapSizeOf, T: HeapSizeOf {
fn squeeze(&mut self, size: usize) {
if self.len() == 0 {
return
}
let size_of_entry = self.heap_size_of_children() / self.capacity();
let all_entries = size_of_entry * self.len();
let mut shrinked_size = all_entries;
while self.len() > 0 && shrinked_size > size {
// could be optimized
let key = self.keys().next().unwrap().clone();
self.remove(&key);
shrinked_size -= size_of_entry;
}
self.shrink_to_fit();
// if we squeezed something, but not enough, squeeze again
if all_entries != shrinked_size && self.heap_size_of_children() > size {
self.squeeze(size);
}
}
}

29
src/standard.rs Normal file
View File

@ -0,0 +1,29 @@
pub use std::io;
pub use std::str;
pub use std::fmt;
pub use std::slice;
pub use std::cmp;
pub use std::ptr;
pub use std::result;
pub use std::option;
pub use std::mem;
pub use std::ops;
pub use std::path::Path;
pub use std::str::{FromStr};
pub use std::io::{Read,Write};
pub use std::hash::{Hash, Hasher};
pub use std::error::Error as StdError;
pub use std::sync::*;
pub use std::ops::*;
pub use std::cmp::*;
pub use std::cell::*;
pub use std::collections::*;
pub use rustc_serialize::json::Json;
pub use rustc_serialize::base64::FromBase64;
pub use rustc_serialize::hex::{FromHex, FromHexError};
pub use heapsize::HeapSizeOf;
pub use itertools::Itertools;

177
src/tinykeccak.c Normal file
View File

@ -0,0 +1,177 @@
#include <stdint.h>
#include <string.h>
/** libkeccak-tiny
*
* A single-file implementation of SHA-3 and SHAKE.
*
* Implementor: David Leon Gil
* License: CC0, attribution kindly requested. Blame taken too,
* but not liability.
*/
#define decshake(bits) \
int shake##bits(uint8_t*, size_t, const uint8_t*, size_t);
#define decsha3(bits) \
int sha3_##bits(uint8_t*, size_t, const uint8_t*, size_t);
decshake(128)
decshake(256)
decsha3(224)
decsha3(256)
decsha3(384)
decsha3(512)
/******** The Keccak-f[1600] permutation ********/
/*** Constants. ***/
static const uint8_t rho[24] = \
{ 1, 3, 6, 10, 15, 21,
28, 36, 45, 55, 2, 14,
27, 41, 56, 8, 25, 43,
62, 18, 39, 61, 20, 44};
static const uint8_t pi[24] = \
{10, 7, 11, 17, 18, 3,
5, 16, 8, 21, 24, 4,
15, 23, 19, 13, 12, 2,
20, 14, 22, 9, 6, 1};
static const uint64_t RC[24] = \
{1ULL, 0x8082ULL, 0x800000000000808aULL, 0x8000000080008000ULL,
0x808bULL, 0x80000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL,
0x8aULL, 0x88ULL, 0x80008009ULL, 0x8000000aULL,
0x8000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, 0x8000000000008003ULL,
0x8000000000008002ULL, 0x8000000000000080ULL, 0x800aULL, 0x800000008000000aULL,
0x8000000080008081ULL, 0x8000000000008080ULL, 0x80000001ULL, 0x8000000080008008ULL};
/*** Helper macros to unroll the permutation. ***/
#define rol(x, s) (((x) << s) | ((x) >> (64 - s)))
#define REPEAT6(e) e e e e e e
#define REPEAT24(e) REPEAT6(e e e e)
#define REPEAT5(e) e e e e e
#define FOR5(v, s, e) \
v = 0; \
REPEAT5(e; v += s;)
/*** Keccak-f[1600] ***/
static inline void keccakf(void* state) {
uint64_t* a = (uint64_t*)state;
uint64_t b[5] = {0};
uint64_t t = 0;
uint8_t x, y;
int i;
for (i = 0; i < 24; i++) {
// Theta
FOR5(x, 1,
b[x] = 0;
FOR5(y, 5,
b[x] ^= a[x + y]; ))
FOR5(x, 1,
FOR5(y, 5,
a[y + x] ^= b[(x + 4) % 5] ^ rol(b[(x + 1) % 5], 1); ))
// Rho and pi
t = a[1];
x = 0;
REPEAT24(b[0] = a[pi[x]];
a[pi[x]] = rol(t, rho[x]);
t = b[0];
x++; )
// Chi
FOR5(y,
5,
FOR5(x, 1,
b[x] = a[y + x];)
FOR5(x, 1,
a[y + x] = b[x] ^ ((~b[(x + 1) % 5]) & b[(x + 2) % 5]); ))
// Iota
a[0] ^= RC[i];
}
}
/******** The FIPS202-defined functions. ********/
/*** Some helper macros. ***/
#define _(S) do { S } while (0)
#define FOR(i, ST, L, S) \
_({size_t i; for (i = 0; i < L; i += ST) { S; }})
#define mkapply_ds(NAME, S) \
static inline void NAME(uint8_t* dst, \
const uint8_t* src, \
size_t len) { \
FOR(i, 1, len, S); \
}
#define mkapply_sd(NAME, S) \
static inline void NAME(const uint8_t* src, \
uint8_t* dst, \
size_t len) { \
FOR(i, 1, len, S); \
}
mkapply_ds(xorin, dst[i] ^= src[i]) // xorin
mkapply_sd(setout, dst[i] = src[i]) // setout
#define P keccakf
#define Plen 200
// Fold P*F over the full blocks of an input.
#define foldP(I, L, F) \
while (L >= rate) { \
F(a, I, rate); \
P(a); \
I += rate; \
L -= rate; \
}
/** The sponge-based hash construction. **/
static inline int hash(uint8_t* out, size_t outlen,
const uint8_t* in, size_t inlen,
size_t rate, uint8_t delim) {
if ((out == NULL) || ((in == NULL) && inlen != 0) || (rate >= Plen)) {
return -1;
}
uint8_t a[Plen] = {0};
// Absorb input.
foldP(in, inlen, xorin);
// Xor in the DS and pad frame.
a[inlen] ^= delim;
a[rate - 1] ^= 0x80;
// Xor in the last block.
xorin(a, in, inlen);
// Apply P
P(a);
// Squeeze output.
foldP(out, outlen, setout);
setout(a, out, outlen);
memset(a, 0, 200);
return 0;
}
/*** Helper macros to define SHA3 and SHAKE instances. ***/
#define defshake(bits) \
int shake##bits(uint8_t* out, size_t outlen, \
const uint8_t* in, size_t inlen) { \
return hash(out, outlen, in, inlen, 200 - (bits / 4), 0x1f); \
}
#define defsha3(bits) \
int sha3_##bits(uint8_t* out, size_t outlen, \
const uint8_t* in, size_t inlen) { \
if (outlen > (bits/8)) { \
return -1; \
} \
return hash(out, outlen, in, inlen, 200 - (bits / 4), 0x01); \
}
/*** FIPS202 SHAKE VOFs ***/
defshake(128)
defshake(256)
/*** FIPS202 SHA3 FOFs ***/
defsha3(224)
defsha3(256)
defsha3(384)
defsha3(512)

80
src/trie/journal.rs Normal file
View File

@ -0,0 +1,80 @@
use sha3::*;
use hash::H256;
use bytes::*;
use rlp::*;
use hashdb::*;
/// Type of operation for the backing database - either a new node or a node deletion.
#[derive(Debug)]
enum Operation {
New(H256, Bytes),
Delete(H256),
}
/// How many insertions and removals were done in an `apply` operation.
pub struct Score {
/// Number of insertions.
pub inserts: usize,
/// Number of removals.
pub removes: usize,
}
/// A journal of operations on the backing database.
#[derive(Debug)]
pub struct Journal (Vec<Operation>);
impl Journal {
/// Create a new, empty, object.
pub fn new() -> Journal { Journal(vec![]) }
/// Given the RLP that encodes a node, append a reference to that node to `out` and leave the journal
/// in such a state that the reference is valid once applied.
pub fn new_node(&mut self, rlp: Bytes, out: &mut RlpStream) {
if rlp.len() >= 32 {
let rlp_sha3 = rlp.sha3();
trace!("new_node: reference node {:?} => {:?}", rlp_sha3, rlp.pretty());
out.append(&rlp_sha3);
self.0.push(Operation::New(rlp_sha3, rlp));
}
else {
trace!("new_node: inline node {:?}", rlp.pretty());
out.append_raw(&rlp, 1);
}
}
/// Note the SHA3 of a now-unused node so that it is deleted when the journal is applied.
pub fn delete_node_sha3(&mut self, old_sha3: H256) {
trace!("delete_node: {:?}", old_sha3);
self.0.push(Operation::Delete(old_sha3));
}
/// Register an RLP-encoded node for deletion (given a slice), if it needs to be deleted.
pub fn delete_node(&mut self, old: &[u8]) {
let r = Rlp::new(old);
if r.is_data() && r.size() == 32 {
self.delete_node_sha3(r.as_val());
}
}
/// Apply this journal to the HashDB `db` and return the number of insertions and removals done.
pub fn apply(self, db: &mut HashDB) -> Score {
trace!("applying {:?} changes", self.0.len());
let mut ret = Score{inserts: 0, removes: 0};
for d in self.0.into_iter() {
match d {
Operation::Delete(h) => {
trace!("TrieDBMut::apply --- {:?}", &h);
db.remove(&h);
ret.removes += 1;
},
Operation::New(h, d) => {
trace!("TrieDBMut::apply +++ {:?} -> {:?}", &h, d.pretty());
db.emplace(h, d);
ret.inserts += 1;
}
}
}
ret
}
}
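// Illustrative sketch (not part of the original source): node RLP shorter
// than 32 bytes is inlined straight into the parent stream, so no hash
// reference is emitted and nothing is queued in the journal.
#[test]
fn journal_inlines_short_nodes() {
	let mut journal = Journal::new();
	let mut out = RlpStream::new_list(1);
	journal.new_node(vec![0x83, b'c', b'a', b't'], &mut out);
	assert_eq!(out.out(), vec![0xc4, 0x83, b'c', b'a', b't']);
}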

15
src/trie/mod.rs Normal file
View File

@ -0,0 +1,15 @@
pub mod trietraits;
pub mod standardmap;
pub mod journal;
pub mod node;
pub mod triedb;
pub mod triedbmut;
pub mod sectriedb;
pub mod sectriedbmut;
pub use self::trietraits::*;
pub use self::standardmap::*;
pub use self::triedbmut::*;
pub use self::triedb::*;
pub use self::sectriedbmut::*;
pub use self::sectriedb::*;

121
src/trie/node.rs Normal file
View File

@ -0,0 +1,121 @@
use hash::*;
use nibbleslice::*;
use bytes::*;
use rlp::*;
use super::journal::*;
/// Type of node in the trie and essential information thereof.
#[derive(Eq, PartialEq, Debug)]
pub enum Node<'a> {
Empty,
Leaf(NibbleSlice<'a>, &'a[u8]),
Extension(NibbleSlice<'a>, &'a[u8]),
Branch([&'a[u8]; 16], Option<&'a [u8]>)
}
impl<'a> Node<'a> {
/// Decode the `node_rlp` and return the Node.
pub fn decoded(node_rlp: &'a [u8]) -> Node<'a> {
let r = Rlp::new(node_rlp);
match r.prototype() {
// either leaf or extension - decode first item with NibbleSlice::???
// and use is_leaf return to figure out which.
// if leaf, second item is a value (is_data())
// if extension, second item is a node (either SHA3 to be looked up and
// fed back into this function or inline RLP which can be fed back into this function).
Prototype::List(2) => match NibbleSlice::from_encoded(r.at(0).data()) {
(slice, true) => Node::Leaf(slice, r.at(1).data()),
(slice, false) => Node::Extension(slice, r.at(1).as_raw()),
},
// branch - first 16 are nodes, 17th is a value (or empty).
Prototype::List(17) => {
let mut nodes: [&'a [u8]; 16] = unsafe { ::std::mem::uninitialized() };
for i in 0..16 {
nodes[i] = r.at(i).as_raw();
}
Node::Branch(nodes, if r.at(16).is_empty() { None } else { Some(r.at(16).data()) })
},
// an empty branch index.
Prototype::Data(0) => Node::Empty,
// something went wrong.
_ => panic!("Rlp is not valid.")
}
}
/// Encode the node into RLP.
///
/// Will always return the direct node RLP even if it's 32 or more bytes. To get the
/// RLP which would be valid for use in another node, use `encoded_and_added()`.
pub fn encoded(&self) -> Bytes {
match *self {
Node::Leaf(ref slice, ref value) => {
let mut stream = RlpStream::new_list(2);
stream.append(&slice.encoded(true));
stream.append(value);
stream.out()
},
Node::Extension(ref slice, ref raw_rlp) => {
let mut stream = RlpStream::new_list(2);
stream.append(&slice.encoded(false));
stream.append_raw(raw_rlp, 1);
stream.out()
},
Node::Branch(ref nodes, ref value) => {
let mut stream = RlpStream::new_list(17);
for i in 0..16 {
stream.append_raw(nodes[i], 1);
}
match *value {
Some(n) => { stream.append(&n); },
None => { stream.append_empty_data(); },
}
stream.out()
},
Node::Empty => {
let mut stream = RlpStream::new();
stream.append_empty_data();
stream.out()
}
}
}
/// Encode the node, adding it to `journal` if necessary and return the RLP valid for
/// insertion into a parent node.
pub fn encoded_and_added(&self, journal: &mut Journal) -> Bytes {
let mut stream = RlpStream::new();
match *self {
Node::Leaf(ref slice, ref value) => {
stream.append_list(2);
stream.append(&slice.encoded(true));
stream.append(value);
},
Node::Extension(ref slice, ref raw_rlp) => {
stream.append_list(2);
stream.append(&slice.encoded(false));
stream.append_raw(raw_rlp, 1);
},
Node::Branch(ref nodes, ref value) => {
stream.append_list(17);
for i in 0..16 {
stream.append_raw(nodes[i], 1);
}
match *value {
Some(n) => { stream.append(&n); },
None => { stream.append_empty_data(); },
}
},
Node::Empty => {
stream.append_empty_data();
}
}
let node = stream.out();
match node.len() {
0 ... 31 => node,
_ => {
let mut stream = RlpStream::new();
journal.new_node(node, &mut stream);
stream.out()
}
}
}
}
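// Illustrative sketch (not part of the original source): `encoded` and
// `decoded` are inverses for the trivial empty node, which is just the
// empty data item 0x80.
#[test]
fn empty_node_roundtrip() {
	let rlp = Node::Empty.encoded();
	assert_eq!(rlp, vec![0x80]);
	assert_eq!(Node::decoded(&rlp), Node::Empty);
}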

59
src/trie/sectriedb.rs Normal file
View File

@ -0,0 +1,59 @@
use hash::*;
use sha3::*;
use hashdb::*;
use rlp::*;
use super::triedb::*;
use super::trietraits::*;
/// A `Trie` implementation which hashes keys and uses a generic `HashDB` backing database.
///
/// Use it as a `Trie` trait object. You can use `raw()` to get the backing TrieDB object.
pub struct SecTrieDB<'db> {
raw: TrieDB<'db>
}
impl<'db> SecTrieDB<'db> {
/// Create a new trie with the backing database `db` and empty `root`
/// Initialise to the state entailed by the genesis block.
/// This guarantees the trie is built correctly.
pub fn new(db: &'db HashDB, root: &'db H256) -> Self {
SecTrieDB { raw: TrieDB::new(db, root) }
}
/// Get a reference to the underlying raw TrieDB struct.
pub fn raw(&self) -> &TrieDB {
&self.raw
}
/// Get a mutable reference to the underlying raw TrieDB struct.
pub fn raw_mut(&mut self) -> &TrieDB {
&mut self.raw
}
}
impl<'db> Trie for SecTrieDB<'db> {
fn root(&self) -> &H256 { self.raw.root() }
fn contains(&self, key: &[u8]) -> bool {
self.raw.contains(&key.sha3())
}
fn get<'a, 'key>(&'a self, key: &'key [u8]) -> Option<&'a [u8]> where 'a: 'key {
self.raw.get(&key.sha3())
}
}
#[test]
fn trie_to_sectrie() {
use memorydb::*;
use super::triedbmut::*;
let mut memdb = MemoryDB::new();
let mut root = H256::new();
{
let mut t = TrieDBMut::new(&mut memdb, &mut root);
t.insert(&(&[0x01u8, 0x23]).sha3(), &[0x01u8, 0x23]);
}
let t = SecTrieDB::new(&memdb, &root);
assert_eq!(t.get(&[0x01u8, 0x23]).unwrap(), &[0x01u8, 0x23]);
}

65
src/trie/sectriedbmut.rs Normal file
View File

@ -0,0 +1,65 @@
use hash::*;
use sha3::*;
use hashdb::*;
use rlp::*;
use super::triedbmut::*;
use super::trietraits::*;
/// A mutable `Trie` implementation which hashes keys and uses a generic `HashDB` backing database.
///
/// Use it as a `Trie` or `TrieMut` trait object. You can use `raw()` to get the backing TrieDBMut object.
pub struct SecTrieDBMut<'db> {
raw: TrieDBMut<'db>
}
impl<'db> SecTrieDBMut<'db> {
/// Create a new trie with the backing database `db` and empty `root`
/// Initialise to the state entailed by the genesis block.
/// This guarantees the trie is built correctly.
pub fn new(db: &'db mut HashDB, root: &'db mut H256) -> Self {
SecTrieDBMut { raw: TrieDBMut::new(db, root) }
}
/// Create a new trie with the backing database `db` and `root`
/// Panics if `root` does not exist
pub fn from_existing(db: &'db mut HashDB, root: &'db mut H256) -> Self {
SecTrieDBMut { raw: TrieDBMut::from_existing(db, root) }
}
}
impl<'db> Trie for SecTrieDBMut<'db> {
fn root(&self) -> &H256 { self.raw.root() }
fn contains(&self, key: &[u8]) -> bool {
self.raw.contains(&key.sha3())
}
fn get<'a, 'key>(&'a self, key: &'key [u8]) -> Option<&'a [u8]> where 'a: 'key {
self.raw.get(&key.sha3())
}
}
impl<'db> TrieMut for SecTrieDBMut<'db> {
fn insert(&mut self, key: &[u8], value: &[u8]) {
self.raw.insert(&key.sha3(), value);
}
fn remove(&mut self, key: &[u8]) {
self.raw.remove(&key.sha3());
}
}
#[test]
fn sectrie_to_trie() {
use memorydb::*;
use super::triedb::*;
let mut memdb = MemoryDB::new();
let mut root = H256::new();
{
let mut t = SecTrieDBMut::new(&mut memdb, &mut root);
t.insert(&[0x01u8, 0x23], &[0x01u8, 0x23]);
}
let t = TrieDB::new(&memdb, &root);
assert_eq!(t.get(&(&[0x01u8, 0x23]).sha3()).unwrap(), &[0x01u8, 0x23]);
}

75
src/trie/standardmap.rs Normal file
View File

@ -0,0 +1,75 @@
//! Key-value datastore with a modified Merkle tree.
extern crate rand;
use bytes::*;
use sha3::*;
use hash::*;
/// Alphabet to use when creating words for insertion into tries.
pub enum Alphabet {
All,
Low,
Mid,
Custom(Bytes),
}
/// Standard test map for profiling tries.
pub struct StandardMap {
alphabet: Alphabet,
min_key: usize,
journal_key: usize,
count: usize,
}
impl StandardMap {
/// Get a bunch of random bytes, at least `min_count` bytes, at most `min_count` + `journal_count` bytes.
/// `seed` is mutated pseudorandomly and used.
fn random_bytes(min_count: usize, journal_count: usize, seed: &mut H256) -> Vec<u8> {
assert!(min_count + journal_count <= 32);
*seed = seed.sha3();
let r = min_count + (seed.bytes()[31] as usize % (journal_count + 1));
seed.bytes()[0..r].to_vec()
}
/// Get a random value, with an equal chance of being 1 byte or 32 bytes. `seed` is mutated pseudorandomly and used.
fn random_value(seed: &mut H256) -> Bytes {
*seed = seed.sha3();
match seed.bytes()[0] % 2 {
1 => vec![seed.bytes()[31];1],
_ => seed.bytes().to_vec(),
}
}
/// Get a random word of, at least `min_count` bytes, at most `min_count` + `journal_count` bytes.
/// Each byte is an item from `alphabet`. `seed` is mutated pseudorandomly and used.
fn random_word(alphabet: &[u8], min_count: usize, journal_count: usize, seed: &mut H256) -> Vec<u8> {
assert!(min_count + journal_count <= 32);
*seed = seed.sha3();
let r = min_count + (seed.bytes()[31] as usize % (journal_count + 1));
let mut ret: Vec<u8> = Vec::with_capacity(r);
for i in 0..r {
ret.push(alphabet[seed.bytes()[i] as usize % alphabet.len()]);
}
ret
}
/// Create the standard map (set of keys and values) for the object's fields.
pub fn make(&self) -> Vec<(Bytes, Bytes)> {
let low = b"abcdef";
let mid = b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_";
let mut d: Vec<(Bytes, Bytes)> = Vec::new();
let mut seed = H256::new();
for _ in 0..self.count {
let k = match self.alphabet {
Alphabet::All => Self::random_bytes(self.min_key, self.journal_key, &mut seed),
Alphabet::Low => Self::random_word(low, self.min_key, self.journal_key, &mut seed),
Alphabet::Mid => Self::random_word(mid, self.min_key, self.journal_key, &mut seed),
Alphabet::Custom(ref a) => Self::random_word(&a, self.min_key, self.journal_key, &mut seed),
};
let v = Self::random_value(&mut seed);
d.push((k, v))
}
d
}
}
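// Illustrative sketch (not part of the original source; the field values are
// arbitrary): with a `journal_key` of zero every generated key has exactly
// `min_key` bytes, and `make` returns exactly `count` pairs.
#[test]
fn standard_map_respects_count_and_key_length() {
	let map = StandardMap { alphabet: Alphabet::Low, min_key: 5, journal_key: 0, count: 100 };
	let pairs = map.make();
	assert_eq!(pairs.len(), 100);
	assert!(pairs.iter().all(|&(ref k, _)| k.len() == 5));
}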

220
src/trie/triedb.rs Normal file
View File

@ -0,0 +1,220 @@
use common::*;
use hashdb::*;
use nibbleslice::*;
use rlp::*;
use super::trietraits::*;
use super::node::*;
/// A `Trie` implementation using a generic `HashDB` backing database.
///
/// Use it as a `Trie` trait object. You can use `db()` to get the backing database object, `keys`
/// to get the keys belonging to the trie in the backing database, and `db_items_remaining()` to get
/// which items in the backing database do not belong to this trie. If this is the only trie in the
/// backing database, then `db_items_remaining()` should be empty.
///
/// # Example
/// ```
/// extern crate ethcore_util as util;
/// use util::trie::*;
/// use util::hashdb::*;
/// use util::memorydb::*;
/// use util::hash::*;
/// use util::rlp::*;
///
/// fn main() {
/// let mut memdb = MemoryDB::new();
/// let mut root = H256::new();
/// TrieDBMut::new(&mut memdb, &mut root).insert(b"foo", b"bar");
/// let t = TrieDB::new(&memdb, &root);
/// assert!(t.contains(b"foo"));
/// assert_eq!(t.get(b"foo").unwrap(), b"bar");
/// assert!(t.db_items_remaining().is_empty());
/// }
/// ```
pub struct TrieDB<'db> {
db: &'db HashDB,
root: &'db H256,
pub hash_count: usize,
}
impl<'db> TrieDB<'db> {
/// Create a new trie with the backing database `db` and `root`.
/// Panics if `root` does not exist.
pub fn new(db: &'db HashDB, root: &'db H256) -> Self {
if !db.exists(root) {
flush(format!("Trie root not found {}", root));
panic!("Trie root not found!");
}
TrieDB {
db: db,
root: root,
hash_count: 0
}
}
/// Get the backing database.
pub fn db(&'db self) -> &'db HashDB {
self.db
}
/// Determine all the keys in the backing database that belong to the trie.
pub fn keys(&self) -> Vec<H256> {
let mut ret: Vec<H256> = Vec::new();
ret.push(self.root.clone());
self.accumulate_keys(self.root_node(), &mut ret);
ret
}
/// Convert a vector of hashes to a hashmap of hash to occurrences.
pub fn to_map(hashes: Vec<H256>) -> HashMap<H256, u32> {
let mut r: HashMap<H256, u32> = HashMap::new();
for h in hashes.into_iter() {
let c = *r.get(&h).unwrap_or(&0);
r.insert(h, c + 1);
}
r
}
/// Determine occurrences of items in the backing database which are not related to this
/// trie.
pub fn db_items_remaining(&self) -> HashMap<H256, i32> {
let mut ret = self.db.keys();
for (k, v) in Self::to_map(self.keys()).into_iter() {
let keycount = *ret.get(&k).unwrap_or(&0);
match keycount <= v as i32 {
true => ret.remove(&k),
_ => ret.insert(k, keycount - v as i32),
};
}
ret
}
/// Recursion helper for `keys`.
fn accumulate_keys(&self, node: Node, acc: &mut Vec<H256>) {
let mut handle_payload = |payload| {
let p = Rlp::new(payload);
if p.is_data() && p.size() == 32 {
acc.push(p.as_val());
}
self.accumulate_keys(self.get_node(payload), acc);
};
match node {
Node::Extension(_, payload) => handle_payload(payload),
Node::Branch(payloads, _) => for payload in payloads.iter() { handle_payload(payload) },
_ => {},
}
}
/// Get the root node as a `Node`.
fn root_node(&self) -> Node {
Node::decoded(self.db.lookup(&self.root).expect("Trie root not found!"))
}
/// Decode node-describing data into a `Node`, doing a database lookup if necessary.
fn get_node<'a>(&'a self, node: &'a [u8]) -> Node {
Node::decoded(self.get_raw_or_lookup(node))
}
/// Indentation helper for `fmt_all`.
fn fmt_indent(&self, f: &mut fmt::Formatter, size: usize) -> fmt::Result {
for _ in 0..size {
try!(write!(f, " "));
}
Ok(())
}
/// Recursion helper for implementation of formatting trait.
fn fmt_all(&self, node: Node, f: &mut fmt::Formatter, deepness: usize) -> fmt::Result {
match node {
Node::Leaf(slice, value) => try!(writeln!(f, "'{:?}: {:?}.", slice, value.pretty())),
Node::Extension(ref slice, ref item) => {
try!(write!(f, "'{:?} ", slice));
try!(self.fmt_all(self.get_node(item), f, deepness));
},
Node::Branch(ref nodes, ref value) => {
try!(writeln!(f, ""));
match value {
&Some(v) => {
try!(self.fmt_indent(f, deepness + 1));
try!(writeln!(f, "=: {:?}", v.pretty()))
},
&None => {}
}
for i in 0..16 {
match self.get_node(nodes[i]) {
Node::Empty => {},
n => {
try!(self.fmt_indent(f, deepness + 1));
try!(write!(f, "'{:x} ", i));
try!(self.fmt_all(n, f, deepness + 1));
}
}
}
},
// empty
Node::Empty => {
try!(writeln!(f, "<empty>"));
}
};
Ok(())
}
/// Return optional data for a key given as a `NibbleSlice`. Returns `None` if no data exists.
fn do_lookup<'a, 'key>(&'a self, key: &NibbleSlice<'key>) -> Option<&'a [u8]> where 'a: 'key {
let root_rlp = self.db.lookup(&self.root).expect("Trie root not found!");
self.get_from_node(&root_rlp, key)
}
/// Recursive function to retrieve the value given a `node` and a partial `key`. `None` if no
/// value exists for the key.
///
/// Note: Not a public API; use Trie trait functions.
fn get_from_node<'a, 'key>(&'a self, node: &'a [u8], key: &NibbleSlice<'key>) -> Option<&'a [u8]> where 'a: 'key {
match Node::decoded(node) {
Node::Leaf(ref slice, ref value) if key == slice => Some(value),
Node::Extension(ref slice, ref item) if key.starts_with(slice) => {
self.get_from_node(self.get_raw_or_lookup(item), &key.mid(slice.len()))
},
Node::Branch(ref nodes, value) => match key.is_empty() {
true => value,
false => self.get_from_node(self.get_raw_or_lookup(nodes[key.at(0) as usize]), &key.mid(1))
},
_ => None
}
}
/// Given some node-describing data `node`, return the actual node RLP.
/// This could be a simple identity operation in the case that the node is sufficiently small, but
/// may require a database lookup.
fn get_raw_or_lookup<'a>(&'a self, node: &'a [u8]) -> &'a [u8] {
// check if it's a sha3 hash: a data item of length 32
let r = Rlp::new(node);
match r.is_data() && r.size() == 32 {
true => self.db.lookup(&r.as_val::<H256>()).expect("Not found!"),
false => node
}
}
}
impl<'db> Trie for TrieDB<'db> {
fn root(&self) -> &H256 { &self.root }
fn contains(&self, key: &[u8]) -> bool {
self.get(key).is_some()
}
fn get<'a, 'key>(&'a self, key: &'key [u8]) -> Option<&'a [u8]> where 'a: 'key {
self.do_lookup(&NibbleSlice::new(key))
}
}
impl<'db> fmt::Debug for TrieDB<'db> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(writeln!(f, "c={:?} [", self.hash_count));
let root_rlp = self.db.lookup(&self.root).expect("Trie root not found!");
try!(self.fmt_all(Node::decoded(root_rlp), f, 0));
writeln!(f, "]")
}
}
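// A hedged usage sketch (not part of the original diff), mirroring the doc example above but
// with crate-internal paths; the `trie::*` and `memorydb::*` re-exports are assumptions based
// on the crate layout. After inserting one pair through `TrieDBMut`, the root hash shows up in
// `keys()` and nothing in the backing database is left unaccounted for.
#[cfg(test)]
mod usage_sketch {
	use trie::*;
	use memorydb::*;
	use hash::*;

	#[test]
	fn keys_and_remaining() {
		let mut memdb = MemoryDB::new();
		let mut root = H256::new();
		TrieDBMut::new(&mut memdb, &mut root).insert(b"foo", b"bar");
		let t = TrieDB::new(&memdb, &root);
		// the root hash itself must be among this trie's keys in the backing database ...
		assert!(TrieDB::to_map(t.keys()).contains_key(&root));
		// ... and no other items should remain unaccounted for.
		assert!(t.db_items_remaining().is_empty());
	}
}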

1100
src/trie/triedbmut.rs Normal file

File diff suppressed because it is too large

29
src/trie/trietraits.rs Normal file

@ -0,0 +1,29 @@
use hash::H256;
use rlp::SHA3_NULL_RLP;
/// A key-value datastore implemented as a database-backed modified Merkle tree.
pub trait Trie {
/// Return the root of the trie.
fn root(&self) -> &H256;
/// Is the trie empty?
fn is_empty(&self) -> bool { *self.root() == SHA3_NULL_RLP }
/// Does the trie contain a given key?
fn contains(&self, key: &[u8]) -> bool;
/// What is the value of the given key in this trie?
fn get<'a, 'key>(&'a self, key: &'key [u8]) -> Option<&'a [u8]> where 'a: 'key;
}
/// A key-value datastore implemented as a database-backed modified Merkle tree.
pub trait TrieMut: Trie {
/// Insert a `key`/`value` pair into the trie. An `empty` value is equivalent to removing
/// `key` from the trie.
fn insert(&mut self, key: &[u8], value: &[u8]);
/// Remove a `key` from the trie. Equivalent to making it equal to the empty
/// value.
fn remove(&mut self, key: &[u8]);
}
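// A minimal generic sketch (not part of the original diff): code written against only the
// `Trie` trait above, independent of the backing implementation. `contains_all` is a
// hypothetical helper, not part of the crate.
pub fn contains_all<T: Trie>(trie: &T, keys: &[&[u8]]) -> bool {
	// every key must resolve to some value in the trie
	keys.iter().all(|&key| trie.contains(key))
}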

337
src/triehash.rs Normal file

@ -0,0 +1,337 @@
//! Generates trie root.
//!
//! This module should be used to generate the trie root hash.
use std::collections::BTreeMap;
use std::cmp;
use hash::*;
use sha3::*;
use rlp;
use rlp::{RlpStream, Stream};
use vector::SharedPrefix;
/// Generates a trie root hash for a vector of values
///
/// ```rust
/// extern crate ethcore_util as util;
/// use std::str::FromStr;
/// use util::triehash::*;
/// use util::hash::*;
///
/// fn main() {
/// let v = vec![From::from("doe"), From::from("reindeer")];
/// let root = "e766d5d51b89dc39d981b41bda63248d7abce4f0225eefd023792a540bcffee3";
/// assert_eq!(ordered_trie_root(v), H256::from_str(root).unwrap());
/// }
/// ```
pub fn ordered_trie_root(input: Vec<Vec<u8>>) -> H256 {
let gen_input = input
// first put elements into btree to sort them by nibbles
// optimize it later
.into_iter()
.enumerate()
.fold(BTreeMap::new(), | mut acc, (i, vec) | { acc.insert(rlp::encode(&i), vec); acc })
// then move them to a vector
.into_iter()
.map(|(k, v)| (as_nibbles(&k), v) )
.collect();
gen_trie_root(gen_input)
}
/// Generates a trie root hash for a vector of key-values
///
/// ```rust
/// extern crate ethcore_util as util;
/// use std::str::FromStr;
/// use util::triehash::*;
/// use util::hash::*;
///
/// fn main() {
/// let v = vec![
/// (From::from("doe"), From::from("reindeer")),
/// (From::from("dog"), From::from("puppy")),
/// (From::from("dogglesworth"), From::from("cat")),
/// ];
///
/// let root = "8aad789dff2f538bca5d8ea56e8abe10f4c7ba3a5dea95fea4cd6e7c3a1168d3";
/// assert_eq!(trie_root(v), H256::from_str(root).unwrap());
/// }
/// ```
pub fn trie_root(input: Vec<(Vec<u8>, Vec<u8>)>) -> H256 {
let gen_input = input
// first put elements into btree to sort them and to remove duplicates
.into_iter()
.fold(BTreeMap::new(), | mut acc, (k, v) | {
acc.insert(k, v);
acc
})
// then move them to a vector
.into_iter()
.map(|(k, v)| (as_nibbles(&k), v) )
.collect();
gen_trie_root(gen_input)
}
/// Generates a key-hashed (secure) trie root hash for a vector of key-values.
///
/// ```rust
/// extern crate ethcore_util as util;
/// use std::str::FromStr;
/// use util::triehash::*;
/// use util::hash::*;
///
/// fn main() {
/// let v = vec![
/// (From::from("doe"), From::from("reindeer")),
/// (From::from("dog"), From::from("puppy")),
/// (From::from("dogglesworth"), From::from("cat")),
/// ];
///
/// let root = "d4cd937e4a4368d7931a9cf51686b7e10abb3dce38a39000fd7902a092b64585";
/// assert_eq!(sec_trie_root(v), H256::from_str(root).unwrap());
/// }
/// ```
pub fn sec_trie_root(input: Vec<(Vec<u8>, Vec<u8>)>) -> H256 {
let gen_input = input
// first put elements into btree to sort them and to remove duplicates
.into_iter()
.fold(BTreeMap::new(), | mut acc, (k, v) | {
acc.insert(k.sha3().to_vec(), v);
acc
})
// then move them to a vector
.into_iter()
.map(|(k, v)| (as_nibbles(&k), v) )
.collect();
gen_trie_root(gen_input)
}
fn gen_trie_root(input: Vec<(Vec<u8>, Vec<u8>)>) -> H256 {
let mut stream = RlpStream::new();
hash256rlp(&input, 0, &mut stream);
stream.out().sha3()
}
/// Hex-prefix Notation. First nibble has flags: oddness = 2^0 & termination = 2^1.
///
/// The "termination marker" and "leaf-node" specifier are completely equivalent.
///
/// Input values are in range `[0, 0xf]`.
///
/// ```markdown
/// [0,0,1,2,3,4,5] 0x10012345 // 7 > 4
/// [0,1,2,3,4,5] 0x00012345 // 6 > 4
/// [1,2,3,4,5] 0x112345 // 5 > 3
/// [0,0,1,2,3,4] 0x00001234 // 6 > 4
/// [0,1,2,3,4] 0x101234 // 5 > 3
/// [1,2,3,4] 0x001234 // 4 > 3
/// [0,0,1,2,3,4,5,T] 0x30012345 // 7 > 4
/// [0,0,1,2,3,4,T] 0x20001234 // 6 > 4
/// [0,1,2,3,4,5,T] 0x20012345 // 6 > 4
/// [1,2,3,4,5,T] 0x312345 // 5 > 3
/// [1,2,3,4,T] 0x201234 // 4 > 3
/// ```
fn hex_prefix_encode(nibbles: &[u8], leaf: bool) -> Vec<u8> {
let inlen = nibbles.len();
let oddness_factor = inlen % 2;
// next even number divided by two
let reslen = (inlen + 2) >> 1;
let mut res = vec![];
res.reserve(reslen);
let first_byte = {
let mut bits = ((inlen as u8 & 1) + (2 * leaf as u8)) << 4;
if oddness_factor == 1 {
bits += nibbles[0];
}
bits
};
res.push(first_byte);
let mut offset = oddness_factor;
while offset < inlen {
let byte = (nibbles[offset] << 4) + nibbles[offset + 1];
res.push(byte);
offset += 2;
}
res
}
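// A worked example (not part of the original diff) of the flag encoding described above:
// an odd-length, terminated path [1, 2, 3] gets first-nibble flags 0b11 = 3.
#[test]
fn test_hex_prefix_encode_odd_leaf() {
	// 3 nibbles, leaf: first byte = (0x3 << 4) | first nibble = 0x31, then (2 << 4) | 3 = 0x23
	assert_eq!(hex_prefix_encode(&[1, 2, 3], true), vec![0x31, 0x23]);
}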
/// Converts slice of bytes to nibbles.
fn as_nibbles(bytes: &[u8]) -> Vec<u8> {
let mut res = vec![];
res.reserve(bytes.len() * 2);
for i in 0..bytes.len() {
res.push(bytes[i] >> 4);
res.push((bytes[i] << 4) >> 4);
}
res
}
fn hash256rlp(input: &[(Vec<u8>, Vec<u8>)], pre_len: usize, stream: &mut RlpStream) {
let inlen = input.len();
// in case of empty slice, just append empty data
if inlen == 0 {
stream.append_empty_data();
return;
}
// take slices
let key: &Vec<u8> = &input[0].0;
let value: &[u8] = &input[0].1;
// if the slice contains just one item, append the suffix of the key
// and then append value
if inlen == 1 {
stream.append_list(2);
stream.append(&hex_prefix_encode(&key[pre_len..], true));
stream.append(&value);
return;
}
// get length of the longest shared prefix in slice keys
let shared_prefix = input.iter()
// skip first element
.skip(1)
// get minimum number of shared nibbles between the first key and each successive key
.fold(key.len(), | acc, &(ref k, _) | {
cmp::min(key.shared_prefix_len(&k), acc)
});
// if the shared prefix is longer than the current prefix, append the
// new part of the key to the stream,
// then recursively append the suffixes of all items sharing this prefix
if shared_prefix > pre_len {
stream.append_list(2);
stream.append(&hex_prefix_encode(&key[pre_len..shared_prefix], false));
hash256aux(input, shared_prefix, stream);
return;
}
// an item for every possible nibble/suffix
// + 1 for data
stream.append_list(17);
// if first key len is equal to prefix_len, move to next element
let mut begin = match pre_len == key.len() {
true => 1,
false => 0
};
// iterate over all possible nibbles
for i in 0..16 {
// count how many successive elements have the same next nibble
let len = match begin < input.len() {
true => input[begin..].iter()
.take_while(| pair | pair.0[pre_len] == i )
.count(),
false => 0
};
// if at least 1 successive element has the same nibble
// append their suffixes
match len {
0 => { stream.append_empty_data(); },
_ => hash256aux(&input[begin..(begin + len)], pre_len + 1, stream)
}
begin += len;
}
// if the first key's length equals the prefix length, append its value
match pre_len == key.len() {
true => { stream.append(&value); },
false => { stream.append_empty_data(); }
};
}
fn hash256aux(input: &[(Vec<u8>, Vec<u8>)], pre_len: usize, stream: &mut RlpStream) {
let mut s = RlpStream::new();
hash256rlp(input, pre_len, &mut s);
let out = s.out();
match out.len() {
0...31 => stream.append_raw(&out, 1),
_ => stream.append(&out.sha3())
};
}
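// A hedged sketch (not part of the original diff): with no input, `hash256rlp` appends the
// empty data item (RLP 0x80), so the root is sha3(0x80) - the well-known empty-trie root.
#[test]
fn test_empty_trie_root() {
	use std::str::FromStr;
	let expected = H256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap();
	assert_eq!(trie_root(vec![]), expected);
}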
#[test]
fn test_nibbles() {
let v = vec![0x31, 0x23, 0x45];
let e = vec![3, 1, 2, 3, 4, 5];
assert_eq!(as_nibbles(&v), e);
// A => 65 => 0x41 => [4, 1]
let v: Vec<u8> = From::from("A");
let e = vec![4, 1];
assert_eq!(as_nibbles(&v), e);
}
#[test]
fn test_hex_prefix_encode() {
let v = vec![0, 0, 1, 2, 3, 4, 5];
let e = vec![0x10, 0x01, 0x23, 0x45];
let h = hex_prefix_encode(&v, false);
assert_eq!(h, e);
let v = vec![0, 1, 2, 3, 4, 5];
let e = vec![0x00, 0x01, 0x23, 0x45];
let h = hex_prefix_encode(&v, false);
assert_eq!(h, e);
let v = vec![0, 1, 2, 3, 4, 5];
let e = vec![0x20, 0x01, 0x23, 0x45];
let h = hex_prefix_encode(&v, true);
assert_eq!(h, e);
let v = vec![1, 2, 3, 4, 5];
let e = vec![0x31, 0x23, 0x45];
let h = hex_prefix_encode(&v, true);
assert_eq!(h, e);
let v = vec![1, 2, 3, 4];
let e = vec![0x00, 0x12, 0x34];
let h = hex_prefix_encode(&v, false);
assert_eq!(h, e);
let v = vec![4, 1];
let e = vec![0x20, 0x41];
let h = hex_prefix_encode(&v, true);
assert_eq!(h, e);
}
#[cfg(test)]
mod tests {
extern crate json_tests;
use self::json_tests::*;
use hash::*;
use triehash::*;
#[test]
fn test_triehash_out_of_order() {
assert!(trie_root(vec![
(vec![0x01u8, 0x23], vec![0x01u8, 0x23]),
(vec![0x81u8, 0x23], vec![0x81u8, 0x23]),
(vec![0xf1u8, 0x23], vec![0xf1u8, 0x23]),
]) ==
trie_root(vec![
(vec![0x01u8, 0x23], vec![0x01u8, 0x23]),
(vec![0xf1u8, 0x23], vec![0xf1u8, 0x23]),
(vec![0x81u8, 0x23], vec![0x81u8, 0x23]),
]));
}
#[test]
fn test_triehash_json() {
execute_tests_from_directory::<trie::TriehashTest, _>("json-tests/json/trie/*.json", &mut | file, input, output | {
println!("file: {}, output: {:?}", file, output);
assert_eq!(trie_root(input), H256::from_slice(&output));
});
}
}

1362
src/uint.rs Normal file

File diff suppressed because it is too large

85
src/vector.rs Normal file

@ -0,0 +1,85 @@
//! vector util functions
use std::ptr;
pub trait InsertSlice<T> {
fn insert_slice(&mut self, index: usize, elements: &[T]);
}
/// Based on the `insert` function implementation from the standard library.
impl<T> InsertSlice<T> for Vec<T> {
fn insert_slice(&mut self, index: usize, elements: &[T]) {
let e_len = elements.len();
if e_len == 0 {
return;
}
let len = self.len();
assert!(index <= len);
// space for the new elements
self.reserve(e_len);
unsafe {
{
let p = self.as_mut_ptr().offset(index as isize);
let ep = elements.as_ptr().offset(0);
// shift everything by e_len, to make space
ptr::copy(p, p.offset(e_len as isize), len - index);
// write the new elements
ptr::copy(ep, p, e_len);
}
self.set_len(len + e_len);
}
}
}
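// A minimal usage sketch (not part of the original diff): `insert_slice` splices the given
// elements into the vector at `index`, shifting the tail to the right.
#[test]
fn test_insert_slice_sketch() {
	let mut v = vec![1u8, 2, 5, 6];
	v.insert_slice(2, &[3, 4]);
	assert_eq!(v, vec![1, 2, 3, 4, 5, 6]);
}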
/// Returns the length of the prefix shared with `elem`.
///
/// ```rust
/// extern crate ethcore_util as util;
/// use util::vector::SharedPrefix;
///
/// fn main () {
/// let a = vec![1,2,3,3,5];
/// let b = vec![1,2,3];
/// assert_eq!(a.shared_prefix_len(&b), 3);
/// }
/// ```
pub trait SharedPrefix <T> {
fn shared_prefix_len(&self, elem: &[T]) -> usize;
}
impl <T> SharedPrefix<T> for Vec<T> where T: Eq {
fn shared_prefix_len(&self, elem: &[T]) -> usize {
use std::cmp;
let len = cmp::min(self.len(), elem.len());
(0..len).take_while(|&i| self[i] == elem[i]).count()
}
}
#[cfg(test)]
mod test {
use vector::SharedPrefix;
#[test]
fn test_shared_prefix() {
let a = vec![1,2,3,4,5,6];
let b = vec![4,2,3,4,5,6];
assert_eq!(a.shared_prefix_len(&b), 0);
}
#[test]
fn test_shared_prefix2() {
let a = vec![1,2,3,3,5];
let b = vec![1,2,3];
assert_eq!(a.shared_prefix_len(&b), 3);
}
#[test]
fn test_shared_prefix3() {
let a = vec![1,2,3,4,5,6];
let b = vec![1,2,3,4,5,6];
assert_eq!(a.shared_prefix_len(&b), 6);
}
}