commit 81e339a77a
Author: arkpar
Date:   2016-02-02 17:00:32 +01:00

26 changed files with 350 additions and 222 deletions

View File

@ -4,11 +4,13 @@ language: rust
branches: branches:
only: only:
- master - master
- /^beta-.*$/
- /^stable-.*$/
matrix: matrix:
fast_finish: true fast_finish: true
include: include:
- rust: nightly - rust: nightly
env: FEATURES="--features rpc" env: FEATURES="--features ethcore/json-tests" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}"
cache: cache:
apt: true apt: true
directories: directories:
@ -27,30 +29,24 @@ before_script: |
sudo apt-get update && sudo apt-get update &&
sudo apt-get install -y --force-yes librocksdb sudo apt-get install -y --force-yes librocksdb
script: script:
- cargo build --release --verbose - cargo build --release --verbose ${FEATURES}
- cargo test --release -p ethash --verbose - cargo test --release --verbose ${FEATURES} ${TARGETS}
- cargo test --release -p ethcore-util --verbose - cargo bench --no-run ${FEATURES} ${TARGETS}
- cargo test --release -p ethcore --verbose - tar cvzf parity${ARCHIVE_SUFFIX}.tar.gz -C target/release parity
- cargo test --release -p ethcore-rpc --verbose ${FEATURES}
- cargo test --release --verbose ${FEATURES}
- cargo bench --no-run ${FEATURES}
after_success: | after_success: |
wget https://github.com/SimonKagstrom/kcov/archive/master.tar.gz && wget https://github.com/SimonKagstrom/kcov/archive/master.tar.gz &&
tar xzf master.tar.gz && mkdir kcov-master/build && cd kcov-master/build && cmake .. && make && make install DESTDIR=../tmp && cd ../.. && tar xzf master.tar.gz && mkdir kcov-master/build && cd kcov-master/build && cmake .. && make && make install DESTDIR=../tmp && cd ../.. &&
cargo test --no-run -p ethcore-util && cargo test --no-run ${KCOV_FEATURES} ${TARGETS} &&
./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /.cargo,/root/.multirust target/kcov target/debug/deps/ethcore_util-* && ./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /.cargo,/root/.multirust target/kcov target/debug/deps/ethcore_util-* &&
cargo test --no-run -p ethash &&
./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /.cargo,/root/.multirust target/kcov target/debug/deps/ethash-* && ./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /.cargo,/root/.multirust target/kcov target/debug/deps/ethash-* &&
cargo test --no-run -p ethcore --no-default-features &&
./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /.cargo,/root/.multirust target/kcov target/debug/deps/ethcore-* && ./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /.cargo,/root/.multirust target/kcov target/debug/deps/ethcore-* &&
cargo test --no-run -p ethcore-rpc ${FEATURES} && ./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /.cargo,/root/.multirust target/kcov target/debug/deps/ethsync-* &&
./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /.cargo,/root/.multirust target/kcov target/debug/deps/ethcore_rpc-* && ./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /.cargo,/root/.multirust target/kcov target/debug/deps/ethcore_rpc-* &&
cargo test --no-run ${FEATURES} &&
./kcov-master/tmp/usr/local/bin/kcov --coveralls-id=${COVERALLS_TOKEN} --exclude-pattern /.cargo,/root/.multirust target/kcov target/debug/parity-* && ./kcov-master/tmp/usr/local/bin/kcov --coveralls-id=${COVERALLS_TOKEN} --exclude-pattern /.cargo,/root/.multirust target/kcov target/debug/parity-* &&
[ $TRAVIS_BRANCH = master ] && [ $TRAVIS_BRANCH = master ] &&
[ $TRAVIS_PULL_REQUEST = false ] && [ $TRAVIS_PULL_REQUEST = false ] &&
[ $TRAVIS_RUST_VERSION = nightly ] && [ $TRAVIS_RUST_VERSION = nightly ] &&
cargo doc ${FEATURES} --no-deps --verbose -p ethcore -p ethcore-util -p ethcore-rpc -p parity -p ethash -p ethsync && cargo doc --no-deps --verbose ${KCOV_FEATURES} ${TARGETS} &&
echo '<meta http-equiv=refresh content=0;url=ethcore/index.html>' > target/doc/index.html && echo '<meta http-equiv=refresh content=0;url=ethcore/index.html>' > target/doc/index.html &&
pip install --user ghp-import && pip install --user ghp-import &&
/home/travis/.local/bin/ghp-import -n target/doc /home/travis/.local/bin/ghp-import -n target/doc
@ -58,5 +54,14 @@ after_success: |
#git push -fq https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages #git push -fq https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages
env: env:
global: global:
secure: 3sUjNi9mhdL5h1GTm8LONnDN/SYvUHT+WSkMl93h3nYiLCQXk8eZaPS98AS7oOaTsfW4UvnwckVFCFl49ttInsv4cd/TkAxmrJHe6kPyS9/4NWUdmP8BjicbBvL/ioSdXMECMEYzPDLV+I3KhtC2LcB6ceDEl/XwMOJlzbGf7RbtcXGVQgMLqSYY1YKjQA4vbT5nFgIS/sZu3Z9yFgN0GafnihKcizqoHhdJjs/zxmX+qJepnC6o3V6KcFnS7QHhM1JOr85twE6S422UlvNaEb5ovwLPqmOl5+fA+6shbx4AxFTY6E9Iors+OVY/JliFhrqOdCt0i2P1FUHN4kbGZQkf0rphN/ZOI2uKNFTOyXiPvppfo/ZemKmcqkwkqP9+lf5QqYmtE6hsAYagxn49xJZILl8tAYbdqxF5gxa+TEVrfsBFtz/Sv3q8QhKQNPAmjEcKyMatyEreLUIFEpFTGIco8jN4eXeSoLRdJ+Z75ihttfQWhNfUDgNL30iQLy0AgFSsh/cyb5M8y9lxrGDzDTogvaiKGwr/V45sPkcXWCkmOgMdINqBB6ZtdL3bGHdyjmYj+y3btjf3aP11k++BL0fXIaKn25aS/p/9iyGb1FyGCM03o4ZRQ3YhTOvfMRfRGf6nWbaMx9upv8o5ShSdysewhrnh3082r7u896ny1Ho= - secure: 3sUjNi9mhdL5h1GTm8LONnDN/SYvUHT+WSkMl93h3nYiLCQXk8eZaPS98AS7oOaTsfW4UvnwckVFCFl49ttInsv4cd/TkAxmrJHe6kPyS9/4NWUdmP8BjicbBvL/ioSdXMECMEYzPDLV+I3KhtC2LcB6ceDEl/XwMOJlzbGf7RbtcXGVQgMLqSYY1YKjQA4vbT5nFgIS/sZu3Z9yFgN0GafnihKcizqoHhdJjs/zxmX+qJepnC6o3V6KcFnS7QHhM1JOr85twE6S422UlvNaEb5ovwLPqmOl5+fA+6shbx4AxFTY6E9Iors+OVY/JliFhrqOdCt0i2P1FUHN4kbGZQkf0rphN/ZOI2uKNFTOyXiPvppfo/ZemKmcqkwkqP9+lf5QqYmtE6hsAYagxn49xJZILl8tAYbdqxF5gxa+TEVrfsBFtz/Sv3q8QhKQNPAmjEcKyMatyEreLUIFEpFTGIco8jN4eXeSoLRdJ+Z75ihttfQWhNfUDgNL30iQLy0AgFSsh/cyb5M8y9lxrGDzDTogvaiKGwr/V45sPkcXWCkmOgMdINqBB6ZtdL3bGHdyjmYj+y3btjf3aP11k++BL0fXIaKn25aS/p/9iyGb1FyGCM03o4ZRQ3YhTOvfMRfRGf6nWbaMx9upv8o5ShSdysewhrnh3082r7u896ny1Ho=
secure: 0/FeVvFl3AhBW0TCPoujY9zOAYoUNMlAz3XjC04vlc4Ksfx0lGU3KFi97LlALxMWV0lfwQc7ixSe2vTgQVQuLVSU9XEW40fQgEjJlmLca2RcRx1kfzJDypuWSiCME7MWmLPH0ac4COdTDS1z5WGggv5YB7GQPCzFvcmOOaPYtF29ngCtkyB2HmNkY/W3omHFEk7Si6bsmOSHZiOAhivPl6ixnGpFyTEKPyraMMqPIj5rbEGkzgeLTiXf2ur143n/tnSr8tmP1MfQi9yS8/ONidMqnxUeuLkeNnb82zj9pVJhVXq0xF44WXJ8Za1jm0ByiTakgqpm8Juk822qjvtNulJ1XZW/fyZQZaN1dy3uq5Ud3W8wS9M7VIVl8CoXozzDpIsdPeUAtkAxeHBsZqL1vAH2yC1YJA7HPySMYzCjYqkJ2r62xYk0gXmNXphfU+F/X/rHzHsTMJPONJ54HQwu12m7zVlKIYBGHgEXg/HAM/g4ljUzl6WWR/nHH/tQM8ND/8FpHluJSZJWacq/1QNhVdTq2x6cqws2fs5A7nVpccR9+6RRgYgv6+YS2LxvFzByuZveGGoKif+uMECXN876j40araUqU528Yz9i8bHJlnM3coRBndaLNWByLcUyXCB9r9IUosUu41rr+L2mVzkSDm0GicuNCzqvzYQ9Q6QY4uQ= - secure: 0/FeVvFl3AhBW0TCPoujY9zOAYoUNMlAz3XjC04vlc4Ksfx0lGU3KFi97LlALxMWV0lfwQc7ixSe2vTgQVQuLVSU9XEW40fQgEjJlmLca2RcRx1kfzJDypuWSiCME7MWmLPH0ac4COdTDS1z5WGggv5YB7GQPCzFvcmOOaPYtF29ngCtkyB2HmNkY/W3omHFEk7Si6bsmOSHZiOAhivPl6ixnGpFyTEKPyraMMqPIj5rbEGkzgeLTiXf2ur143n/tnSr8tmP1MfQi9yS8/ONidMqnxUeuLkeNnb82zj9pVJhVXq0xF44WXJ8Za1jm0ByiTakgqpm8Juk822qjvtNulJ1XZW/fyZQZaN1dy3uq5Ud3W8wS9M7VIVl8CoXozzDpIsdPeUAtkAxeHBsZqL1vAH2yC1YJA7HPySMYzCjYqkJ2r62xYk0gXmNXphfU+F/X/rHzHsTMJPONJ54HQwu12m7zVlKIYBGHgEXg/HAM/g4ljUzl6WWR/nHH/tQM8ND/8FpHluJSZJWacq/1QNhVdTq2x6cqws2fs5A7nVpccR9+6RRgYgv6+YS2LxvFzByuZveGGoKif+uMECXN876j40araUqU528Yz9i8bHJlnM3coRBndaLNWByLcUyXCB9r9IUosUu41rr+L2mVzkSDm0GicuNCzqvzYQ9Q6QY4uQ=
deploy:
provider: releases
api_key:
secure: ATorsRujvWN9y4bZlUdp2I0hvh3pKkpkrr/oyQyt8ZssE7ORx1+lYgTdYocHyx53uBrGFjRQbSCvdDsrs8c1v2Dh2872TmMQMWgLaeeS6bPiNw7WkJuH1hvvTNAiFCfuT9nnntFvMuKcUpBHQ1eeuEU
skip_cleanup: true
file: parity${ARCHIVE_SUFFIX}.tar.gz
on:
tags: true

View File

@ -19,6 +19,7 @@ ethsync = { path = "sync" }
ethcore-rpc = { path = "rpc", optional = true } ethcore-rpc = { path = "rpc", optional = true }
[features] [features]
default = ["rpc"]
rpc = ["ethcore-rpc"] rpc = ["ethcore-rpc"]
[[bin]] [[bin]]
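The new `default = ["rpc"]` entry makes the optional `ethcore-rpc` dependency part of the default build, while `cargo build --no-default-features` still produces a binary without it. A minimal sketch of how such a feature gate is typically consumed in code (illustrative function names, not from this commit; the real gate appears in the main binary's source further down):

```rust
// Sketch only: consuming an optional `rpc` cargo feature.
// With `default = ["rpc"]` the first definition is compiled by default;
// building with `--no-default-features` selects the stub instead.
#[cfg(feature = "rpc")]
fn start_rpc_server(url: &str) {
    println!("serving JSON-RPC on {}", url);
}

#[cfg(not(feature = "rpc"))]
fn start_rpc_server(_url: &str) {
    // RPC support compiled out.
}

fn main() {
    start_rpc_server("127.0.0.1:8545");
}
```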

cov.sh
View File

@ -15,7 +15,7 @@ if ! type kcov > /dev/null; then
exit 1 exit 1
fi fi
cargo test --no-run || exit $? cargo test -p ethcore --no-run || exit $?
mkdir -p target/coverage mkdir -p target/coverage
kcov --exclude-pattern ~/.multirust,rocksdb,secp256k1 --include-pattern src --verify target/coverage target/debug/ethcore* kcov --exclude-pattern ~/.multirust,rocksdb,secp256k1 --include-pattern src --verify target/coverage target/debug/deps/ethcore*
xdg-open target/coverage/index.html xdg-open target/coverage/index.html

View File

@ -3,7 +3,7 @@ description = "Ethcore library"
homepage = "http://ethcore.io" homepage = "http://ethcore.io"
license = "GPL-3.0" license = "GPL-3.0"
name = "ethcore" name = "ethcore"
version = "0.1.0" version = "0.9.0"
authors = ["Ethcore <admin@ethcore.io>"] authors = ["Ethcore <admin@ethcore.io>"]
[dependencies] [dependencies]
@ -25,7 +25,6 @@ crossbeam = "0.1.5"
lazy_static = "0.1" lazy_static = "0.1"
[features] [features]
default = ["json-tests"]
jit = ["evmjit"] jit = ["evmjit"]
evm-debug = [] evm-debug = []
json-tests = [] json-tests = []

View File

@ -129,6 +129,9 @@ struct CacheManager {
/// ///
/// **Does not do input data verification.** /// **Does not do input data verification.**
pub struct BlockChain { pub struct BlockChain {
pref_cache_size: usize,
max_cache_size: usize,
best_block: RwLock<BestBlock>, best_block: RwLock<BestBlock>,
// block cache // block cache
@ -190,9 +193,7 @@ impl BlockProvider for BlockChain {
} }
} }
const COLLECTION_QUEUE_SIZE: usize = 2; const COLLECTION_QUEUE_SIZE: usize = 8;
const MIN_CACHE_SIZE: usize = 1;
const MAX_CACHE_SIZE: usize = 1024 * 1024;
impl BlockChain { impl BlockChain {
/// Create new instance of blockchain from given Genesis /// Create new instance of blockchain from given Genesis
@ -237,6 +238,8 @@ impl BlockChain {
(0..COLLECTION_QUEUE_SIZE).foreach(|_| cache_man.cache_usage.push_back(HashSet::new())); (0..COLLECTION_QUEUE_SIZE).foreach(|_| cache_man.cache_usage.push_back(HashSet::new()));
let bc = BlockChain { let bc = BlockChain {
pref_cache_size: 1 << 14,
max_cache_size: 1 << 20,
best_block: RwLock::new(BestBlock::new()), best_block: RwLock::new(BestBlock::new()),
blocks: RwLock::new(HashMap::new()), blocks: RwLock::new(HashMap::new()),
block_details: RwLock::new(HashMap::new()), block_details: RwLock::new(HashMap::new()),
@ -288,6 +291,12 @@ impl BlockChain {
bc bc
} }
/// Set the cache configuration.
pub fn configure_cache(&mut self, pref_cache_size: usize, max_cache_size: usize) {
self.pref_cache_size = pref_cache_size;
self.max_cache_size = max_cache_size;
}
/// Returns a tree route between `from` and `to`, which is a tuple of: /// Returns a tree route between `from` and `to`, which is a tuple of:
/// ///
/// - a vector of hashes of all blocks, ordered from `from` to `to`. /// - a vector of hashes of all blocks, ordered from `from` to `to`.
@ -339,12 +348,12 @@ impl BlockChain {
Some(h) => h, Some(h) => h,
None => return None, None => return None,
}; };
Some(self._tree_route((&from_details, &from), (&to_details, &to))) Some(self.tree_route_aux((&from_details, &from), (&to_details, &to)))
} }
/// Similar to `tree_route` function, but can be used to return a route /// Similar to `tree_route` function, but can be used to return a route
/// between blocks which may not be in database yet. /// between blocks which may not be in database yet.
fn _tree_route(&self, from: (&BlockDetails, &H256), to: (&BlockDetails, &H256)) -> TreeRoute { fn tree_route_aux(&self, from: (&BlockDetails, &H256), to: (&BlockDetails, &H256)) -> TreeRoute {
let mut from_branch = vec![]; let mut from_branch = vec![];
let mut to_branch = vec![]; let mut to_branch = vec![];
@ -465,7 +474,7 @@ impl BlockChain {
// find the route between old best block and the new one // find the route between old best block and the new one
let best_hash = self.best_block_hash(); let best_hash = self.best_block_hash();
let best_details = self.block_details(&best_hash).expect("best block hash is invalid!"); let best_details = self.block_details(&best_hash).expect("best block hash is invalid!");
let route = self._tree_route((&best_details, &best_hash), (&details, &hash)); let route = self.tree_route_aux((&best_details, &best_hash), (&details, &hash));
match route.blocks.len() { match route.blocks.len() {
// its our parent // its our parent
@ -581,36 +590,37 @@ impl BlockChain {
} }
/// Ticks our cache system and throws out any old data. /// Ticks our cache system and throws out any old data.
pub fn collect_garbage(&self, force: bool) { pub fn collect_garbage(&self) {
// TODO: check time. if self.cache_size().total() < self.pref_cache_size { return; }
let timeout = true;
let t = self.cache_size().total(); for _ in 0..COLLECTION_QUEUE_SIZE {
if t < MIN_CACHE_SIZE || (!timeout && (!force || t < MAX_CACHE_SIZE)) { return; } {
let mut cache_man = self.cache_man.write().unwrap();
let mut blocks = self.blocks.write().unwrap();
let mut block_details = self.block_details.write().unwrap();
let mut block_hashes = self.block_hashes.write().unwrap();
let mut transaction_addresses = self.transaction_addresses.write().unwrap();
let mut block_logs = self.block_logs.write().unwrap();
let mut blocks_blooms = self.blocks_blooms.write().unwrap();
let mut cache_man = self.cache_man.write().unwrap(); for id in cache_man.cache_usage.pop_back().unwrap().into_iter() {
let mut blocks = self.blocks.write().unwrap(); cache_man.in_use.remove(&id);
let mut block_details = self.block_details.write().unwrap(); match id {
let mut block_hashes = self.block_hashes.write().unwrap(); CacheID::Block(h) => { blocks.remove(&h); },
let mut transaction_addresses = self.transaction_addresses.write().unwrap(); CacheID::Extras(ExtrasIndex::BlockDetails, h) => { block_details.remove(&h); },
let mut block_logs = self.block_logs.write().unwrap(); CacheID::Extras(ExtrasIndex::TransactionAddress, h) => { transaction_addresses.remove(&h); },
let mut blocks_blooms = self.blocks_blooms.write().unwrap(); CacheID::Extras(ExtrasIndex::BlockLogBlooms, h) => { block_logs.remove(&h); },
CacheID::Extras(ExtrasIndex::BlocksBlooms, h) => { blocks_blooms.remove(&h); },
_ => panic!(),
}
}
cache_man.cache_usage.push_front(HashSet::new());
for id in cache_man.cache_usage.pop_back().unwrap().into_iter() { // TODO: handle block_hashes properly.
cache_man.in_use.remove(&id); block_hashes.clear();
match id {
CacheID::Block(h) => { blocks.remove(&h); },
CacheID::Extras(ExtrasIndex::BlockDetails, h) => { block_details.remove(&h); },
CacheID::Extras(ExtrasIndex::TransactionAddress, h) => { transaction_addresses.remove(&h); },
CacheID::Extras(ExtrasIndex::BlockLogBlooms, h) => { block_logs.remove(&h); },
CacheID::Extras(ExtrasIndex::BlocksBlooms, h) => { blocks_blooms.remove(&h); },
_ => panic!(),
} }
if self.cache_size().total() < self.max_cache_size { break; }
} }
cache_man.cache_usage.push_front(HashSet::new());
// TODO: handle block_hashes properly.
block_hashes.clear();
// TODO: m_lastCollection = chrono::system_clock::now(); // TODO: m_lastCollection = chrono::system_clock::now();
} }
@ -786,7 +796,7 @@ mod tests {
assert!(bc.cache_size().blocks > 1024 * 1024); assert!(bc.cache_size().blocks > 1024 * 1024);
for _ in 0..2 { for _ in 0..2 {
bc.collect_garbage(true); bc.collect_garbage();
} }
assert!(bc.cache_size().blocks < 1024 * 1024); assert!(bc.cache_size().blocks < 1024 * 1024);
} }

View File

@ -298,7 +298,12 @@ impl Client {
/// Tick the client. /// Tick the client.
pub fn tick(&self) { pub fn tick(&self) {
self.chain.read().unwrap().collect_garbage(false); self.chain.read().unwrap().collect_garbage();
}
/// Set up the cache behaviour.
pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) {
self.chain.write().unwrap().configure_cache(pref_cache_size, max_cache_size);
} }
} }

View File

@ -35,9 +35,15 @@ Usage:
parity [options] <enode>... parity [options] <enode>...
Options: Options:
-l --logging LOGGING Specify the logging level -l --logging LOGGING Specify the logging level.
-h --help Show this screen. -j --jsonrpc Enable the JSON-RPC API sever.
"); --jsonrpc-url URL Specify URL for JSON-RPC API server (default: 127.0.0.1:8545).
--cache-pref-size BYTES Specify the prefered size of the blockchain cache in bytes (default: 16384).
--cache-max-size BYTES Specify the maximum size of the blockchain cache in bytes (default: 262144).
-h --help Show this screen.
", flag_cache_pref_size: usize, flag_cache_max_size: usize);
fn setup_log(init: &str) { fn setup_log(init: &str) {
let mut builder = LogBuilder::new(); let mut builder = LogBuilder::new();
@ -54,7 +60,7 @@ fn setup_log(init: &str) {
#[cfg(feature = "rpc")] #[cfg(feature = "rpc")]
fn setup_rpc_server(client: Arc<Client>, sync: Arc<EthSync>) { fn setup_rpc_server(client: Arc<Client>, sync: Arc<EthSync>, url: &str) {
use rpc::v1::*; use rpc::v1::*;
let mut server = rpc::HttpServer::new(1); let mut server = rpc::HttpServer::new(1);
@ -62,11 +68,11 @@ fn setup_rpc_server(client: Arc<Client>, sync: Arc<EthSync>) {
server.add_delegate(EthClient::new(client.clone()).to_delegate()); server.add_delegate(EthClient::new(client.clone()).to_delegate());
server.add_delegate(EthFilterClient::new(client).to_delegate()); server.add_delegate(EthFilterClient::new(client).to_delegate());
server.add_delegate(NetClient::new(sync).to_delegate()); server.add_delegate(NetClient::new(sync).to_delegate());
server.start_async("127.0.0.1:3030"); server.start_async(url);
} }
#[cfg(not(feature = "rpc"))] #[cfg(not(feature = "rpc"))]
fn setup_rpc_server(_client: Arc<Client>, _sync: Arc<EthSync>) { fn setup_rpc_server(_client: Arc<Client>, _sync: Arc<EthSync>, _url: &str) {
} }
fn main() { fn main() {
@ -83,8 +89,11 @@ fn main() {
net_settings.boot_nodes = init_nodes; net_settings.boot_nodes = init_nodes;
let mut service = ClientService::start(spec, net_settings).unwrap(); let mut service = ClientService::start(spec, net_settings).unwrap();
let client = service.client().clone(); let client = service.client().clone();
client.configure_cache(args.flag_cache_pref_size, args.flag_cache_max_size);
let sync = EthSync::register(service.network(), client); let sync = EthSync::register(service.network(), client);
setup_rpc_server(service.client(), sync.clone()); if args.flag_jsonrpc {
setup_rpc_server(service.client(), sync.clone(), &args.flag_jsonrpc_url);
}
let io_handler = Arc::new(ClientIoHandler { client: service.client(), info: Default::default(), sync: sync }); let io_handler = Arc::new(ClientIoHandler { client: service.client(), info: Default::default(), sync: sync });
service.io().register_handler(io_handler).expect("Error registering IO handler"); service.io().register_handler(io_handler).expect("Error registering IO handler");
@ -169,3 +178,7 @@ impl IoHandler<NetSyncMessage> for ClientIoHandler {
} }
} }
/// Parity needs at least 1 test to generate coverage reports correctly.
#[test]
fn if_works() {
}
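The usage string gains `--jsonrpc`, `--jsonrpc-url`, `--cache-pref-size` and `--cache-max-size`, and the `docopt!` invocation now declares the two cache flags as `usize`. A hedged sketch of how flags like these decode with the plain `docopt` crate of that era (paired with `rustc_serialize`; field names follow docopt's `flag_*`/`arg_*` convention, and the `[default: ...]` markers below use the crate's own syntax rather than the exact wording of the diff):

```rust
extern crate docopt;
extern crate rustc_serialize;

use docopt::Docopt;

// Sketch of a docopt usage string mirroring the options added above.
const USAGE: &'static str = "
Parity. Ethereum Client.

Usage:
  parity [options] <enode>...

Options:
  -j --jsonrpc            Enable the JSON-RPC API server.
  --jsonrpc-url URL        URL for the JSON-RPC server [default: 127.0.0.1:8545].
  --cache-pref-size BYTES  Preferred blockchain cache size [default: 16384].
  --cache-max-size BYTES   Maximum blockchain cache size [default: 262144].
  -h --help                Show this screen.
";

#[derive(Debug, RustcDecodable)]
struct Args {
    arg_enode: Vec<String>,
    flag_jsonrpc: bool,
    flag_jsonrpc_url: String,
    flag_cache_pref_size: usize,
    flag_cache_max_size: usize,
}

fn main() {
    let args: Args = Docopt::new(USAGE)
        .and_then(|d| d.decode())
        .unwrap_or_else(|e| e.exit());
    // The decoded cache flags would then feed Client::configure_cache(...).
    println!("{:?}", args);
}
```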

View File

@ -42,3 +42,8 @@ impl HttpServer {
server.start_async(addr) server.start_async(addr)
} }
} }
/// Lib needs at least 1 test to generate coverage reports correctly.
#[test]
fn if_works() {
}

View File

@ -3,7 +3,7 @@ description = "Ethcore utility library"
homepage = "http://ethcore.io" homepage = "http://ethcore.io"
license = "GPL-3.0" license = "GPL-3.0"
name = "ethcore-util" name = "ethcore-util"
version = "0.1.0" version = "0.9.0"
authors = ["Ethcore <admin@ethcore.io>"] authors = ["Ethcore <admin@ethcore.io>"]
[dependencies] [dependencies]
@ -28,3 +28,4 @@ sha3 = { path = "sha3" }
serde = "0.6.7" serde = "0.6.7"
clippy = "0.0.37" clippy = "0.0.37"
json-tests = { path = "json-tests" } json-tests = { path = "json-tests" }
target_info = "0.1.0"

View File

@ -63,9 +63,9 @@ fn bench_stream_nested_empty_lists(b: &mut Bencher) {
b.iter(|| { b.iter(|| {
// [ [], [[]], [ [], [[]] ] ] // [ [], [[]], [ [], [[]] ] ]
let mut stream = RlpStream::new_list(3); let mut stream = RlpStream::new_list(3);
stream.append_list(0); stream.begin_list(0);
stream.append_list(1).append_list(0); stream.begin_list(1).begin_list(0);
stream.append_list(2).append_list(0).append_list(1).append_list(0); stream.begin_list(2).begin_list(0).begin_list(1).begin_list(0);
let _ = stream.out(); let _ = stream.out();
}); });
} }
@ -89,7 +89,7 @@ fn bench_stream_1000_empty_lists(b: &mut Bencher) {
b.iter(|| { b.iter(|| {
let mut stream = RlpStream::new_list(1000); let mut stream = RlpStream::new_list(1000);
for _ in 0..1000 { for _ in 0..1000 {
stream.append_list(0); stream.begin_list(0);
} }
let _ = stream.out(); let _ = stream.out();
}); });

View File

@ -42,6 +42,84 @@ fn random_value(seed: &mut H256) -> Bytes {
} }
} }
#[bench]
fn trie_insertions_32_mir_1k(b: &mut Bencher) {
let st = StandardMap {
alphabet: Alphabet::All,
min_key: 32,
journal_key: 0,
value_mode: ValueMode::Mirror,
count: 1000,
};
let d = st.make();
let mut hash_count = 0usize;
b.iter(&mut ||{
let mut memdb = MemoryDB::new();
let mut root = H256::new();
let mut t = TrieDBMut::new(&mut memdb, &mut root);
for i in d.iter() {
t.insert(&i.0, &i.1);
}
hash_count = t.hash_count;
});
// println!("hash_count: {}", hash_count);
}
#[bench]
fn triehash_insertions_32_mir_1k(b: &mut Bencher) {
let st = StandardMap {
alphabet: Alphabet::All,
min_key: 32,
journal_key: 0,
value_mode: ValueMode::Mirror,
count: 1000,
};
let d = st.make();
b.iter(&mut ||{
trie_root(d.clone()).clone();
});
}
#[bench]
fn trie_insertions_32_ran_1k(b: &mut Bencher) {
let st = StandardMap {
alphabet: Alphabet::All,
min_key: 32,
journal_key: 0,
value_mode: ValueMode::Random,
count: 1000,
};
let d = st.make();
let mut hash_count = 0usize;
let mut r = H256::new();
b.iter(&mut ||{
let mut memdb = MemoryDB::new();
let mut root = H256::new();
let mut t = TrieDBMut::new(&mut memdb, &mut root);
for i in d.iter() {
t.insert(&i.0, &i.1);
}
hash_count = t.hash_count;
r = t.root().clone();
});
// println!("result: {}", hash_count);
}
#[bench]
fn triehash_insertions_32_ran_1k(b: &mut Bencher) {
let st = StandardMap {
alphabet: Alphabet::All,
min_key: 32,
journal_key: 0,
value_mode: ValueMode::Random,
count: 1000,
};
let d = st.make();
b.iter(&mut ||{
trie_root(d.clone()).clone();
});
}
#[bench] #[bench]
fn trie_insertions_six_high(b: &mut Bencher) { fn trie_insertions_six_high(b: &mut Bencher) {
let mut d: Vec<(Bytes, Bytes)> = Vec::new(); let mut d: Vec<(Bytes, Bytes)> = Vec::new();

View File

@ -1,3 +1,5 @@
//! Utils common types and macros global reexport.
pub use standard::*; pub use standard::*;
pub use from_json::*; pub use from_json::*;
pub use error::*; pub use error::*;

View File

@ -1,3 +1,5 @@
//! Ethcore crypto.
use hash::*; use hash::*;
use bytes::*; use bytes::*;
use uint::*; use uint::*;

View File

@ -1,3 +1,5 @@
//! Coversion from json.
use standard::*; use standard::*;
#[macro_export] #[macro_export]

View File

@ -1,3 +1,5 @@
//! Calculates heapsize of util types.
use uint::*; use uint::*;
use hash::*; use hash::*;

View File

@ -1,41 +1,41 @@
/// General IO module. //! General IO module.
/// //!
/// Example usage for craeting a network service and adding an IO handler: //! Example usage for creating a network service and adding an IO handler:
/// //!
/// ```rust //! ```rust
/// extern crate ethcore_util; //! extern crate ethcore_util;
/// use ethcore_util::*; //! use ethcore_util::*;
/// //!
/// struct MyHandler; //! struct MyHandler;
/// //!
/// #[derive(Clone)] //! #[derive(Clone)]
/// struct MyMessage { //! struct MyMessage {
/// data: u32 //! data: u32
/// } //! }
/// //!
/// impl IoHandler<MyMessage> for MyHandler { //! impl IoHandler<MyMessage> for MyHandler {
/// fn initialize(&self, io: &IoContext<MyMessage>) { //! fn initialize(&self, io: &IoContext<MyMessage>) {
/// io.register_timer(0, 1000).unwrap(); //! io.register_timer(0, 1000).unwrap();
/// } //! }
/// //!
/// fn timeout(&self, _io: &IoContext<MyMessage>, timer: TimerToken) { //! fn timeout(&self, _io: &IoContext<MyMessage>, timer: TimerToken) {
/// println!("Timeout {}", timer); //! println!("Timeout {}", timer);
/// } //! }
/// //!
/// fn message(&self, _io: &IoContext<MyMessage>, message: &MyMessage) { //! fn message(&self, _io: &IoContext<MyMessage>, message: &MyMessage) {
/// println!("Message {}", message.data); //! println!("Message {}", message.data);
/// } //! }
/// } //! }
/// //!
/// fn main () { //! fn main () {
/// let mut service = IoService::<MyMessage>::start().expect("Error creating network service"); //! let mut service = IoService::<MyMessage>::start().expect("Error creating network service");
/// service.register_handler(Arc::new(MyHandler)).unwrap(); //! service.register_handler(Arc::new(MyHandler)).unwrap();
/// //!
/// // Wait for quit condition //! // Wait for quit condition
/// // ... //! // ...
/// // Drop the service //! // Drop the service
/// } //! }
/// ``` //! ```
mod service; mod service;
mod worker; mod worker;

View File

@ -8,32 +8,58 @@
//! Ethcore-util library //! Ethcore-util library
//! //!
//! ### Rust version: //! ### Rust version:
//! - beta
//! - nightly //! - nightly
//! //!
//! ### Supported platforms: //! ### Supported platforms:
//! - OSX //! - OSX
//! - Linux //! - Linux
//! //!
//! ### Dependencies: //! ### Building:
//! - RocksDB 3.13
//! //!
//! ### Dependencies Installation: //! - Ubuntu 14.04 and later:
//! //!
//! ```bash
//! # install rocksdb
//! add-apt-repository "deb http://ppa.launchpad.net/giskou/librocksdb/ubuntu trusty main"
//! apt-get update
//! apt-get install -y --force-yes librocksdb
//!
//! # install multirust
//! curl -sf https://raw.githubusercontent.com/brson/multirust/master/blastoff.sh | sh -s -- --yes
//!
//! # install nightly and make it default
//! multirust update nightly && multirust default nightly
//!
//! # export rust LIBRARY_PATH
//! export LIBRARY_PATH=/usr/local/lib
//!
//! # download and build parity
//! git clone https://github.com/ethcore/parity
//! cd parity
//! cargo build --release
//! ```
//!
//! - OSX: //! - OSX:
//! //!
//! ```bash //! ```bash
//! # install rocksdb && multirust
//! brew update
//! brew install rocksdb //! brew install rocksdb
//! ``` //! brew install multirust
//! //!
//! - From source: //! # install nightly and make it default
//! multirust update nightly && multirust default nightly
//! //!
//! ```bash //! # export rust LIBRARY_PATH
//! wget https://github.com/facebook/rocksdb/archive/rocksdb-3.13.tar.gz //! export LIBRARY_PATH=/usr/local/lib
//! tar xvf rocksdb-3.13.tar.gz && cd rocksdb-rocksdb-3.13 && make shared_lib //!
//! sudo make install //! # download and build parity
//! git clone https://github.com/ethcore/parity
//! cd parity
//! cargo build --release
//! ``` //! ```
extern crate target_info;
extern crate slab; extern crate slab;
extern crate rustc_serialize; extern crate rustc_serialize;
extern crate mio; extern crate mio;
@ -57,46 +83,34 @@ extern crate serde;
#[macro_use] #[macro_use]
extern crate log as rlog; extern crate log as rlog;
/// TODO [Gav Wood] Please document me
pub mod standard; pub mod standard;
#[macro_use] #[macro_use]
/// TODO [Gav Wood] Please document me
pub mod from_json; pub mod from_json;
#[macro_use] #[macro_use]
/// TODO [Gav Wood] Please document me
pub mod common; pub mod common;
pub mod error; pub mod error;
pub mod hash; pub mod hash;
pub mod uint; pub mod uint;
pub mod bytes; pub mod bytes;
pub mod rlp; pub mod rlp;
/// TODO [Gav Wood] Please document me
pub mod misc; pub mod misc;
/// TODO [Gav Wood] Please document me mod json_aid;
pub mod json_aid;
pub mod vector; pub mod vector;
pub mod sha3; pub mod sha3;
pub mod hashdb; pub mod hashdb;
pub mod memorydb; pub mod memorydb;
pub mod overlaydb; pub mod overlaydb;
pub mod journaldb; pub mod journaldb;
/// TODO [Gav Wood] Please document me mod math;
pub mod math;
pub mod chainfilter; pub mod chainfilter;
/// TODO [Gav Wood] Please document me
pub mod crypto; pub mod crypto;
pub mod triehash; pub mod triehash;
/// TODO [Gav Wood] Please document me
pub mod trie; pub mod trie;
pub mod nibbleslice; pub mod nibbleslice;
/// TODO [Gav Wood] Please document me mod heapsizeof;
pub mod heapsizeof;
pub mod squeeze; pub mod squeeze;
/// TODO [Gav Wood] Please document me
pub mod semantic_version; pub mod semantic_version;
/// TODO [Gav Wood] Please document me
pub mod io; pub mod io;
/// TODO [Gav Wood] Please document me
pub mod network; pub mod network;
pub mod log; pub mod log;
@ -114,7 +128,6 @@ pub use crypto::*;
pub use triehash::*; pub use triehash::*;
pub use trie::*; pub use trie::*;
pub use nibbleslice::*; pub use nibbleslice::*;
pub use heapsizeof::*;
pub use squeeze::*; pub use squeeze::*;
pub use semantic_version::*; pub use semantic_version::*;
pub use network::*; pub use network::*;
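Besides documenting the top-level modules, this file demotes `json_aid`, `math` and `heapsizeof` from `pub mod` to private `mod` and drops the `pub use heapsizeof::*;` re-export, so those items leave the crate's public API. A minimal sketch of the visibility difference (illustrative module and function names, not the actual crate contents):

```rust
// Public module: downstream crates can call `hashdb::lookup(...)` through this crate.
pub mod hashdb {
    pub fn lookup(key: u64) -> Option<u64> {
        Some(key)
    }
}

// Crate-private module: only code inside this crate sees it directly.
mod math {
    pub fn log2_floor(x: u64) -> u32 {
        63 - x.max(1).leading_zeros()
    }
}

// A selective re-export keeps one item public without exposing the module.
pub use math::log2_floor;
```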

View File

@ -1,4 +1,6 @@
/// log2 //! Common math functions.
/// Returns log2.
pub fn log2(x: usize) -> u32 { pub fn log2(x: usize) -> u32 {
if x <= 1 { if x <= 1 {
return 0; return 0;

View File

@ -1,3 +1,5 @@
//! Diff misc.
use common::*; use common::*;
#[derive(Debug,Clone,PartialEq,Eq)] #[derive(Debug,Clone,PartialEq,Eq)]

View File

@ -7,6 +7,7 @@ use std::ops::*;
use mio::*; use mio::*;
use mio::tcp::*; use mio::tcp::*;
use mio::udp::*; use mio::udp::*;
use target_info::Target;
use hash::*; use hash::*;
use crypto::*; use crypto::*;
use sha3::Hashable; use sha3::Hashable;
@ -294,7 +295,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
config: config, config: config,
nonce: H256::random(), nonce: H256::random(),
protocol_version: 4, protocol_version: 4,
client_version: "parity".to_owned(), client_version: format!("Parity/{}/{}-{}-{}", env!("CARGO_PKG_VERSION"), Target::arch(), Target::env(), Target::os()),
listen_port: 0, listen_port: 0,
capabilities: Vec::new(), capabilities: Vec::new(),
}), }),
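The hard-coded `"parity"` client identifier is replaced by a version string built from the crate version and the `target_info` crate's description of the build target. A hedged sketch of the same construction in isolation (the output shown is only an example; actual values depend on the build):

```rust
extern crate target_info;

use target_info::Target;

fn client_version() -> String {
    // Produces something like "Parity/0.9.0/x86_64-gnu-linux".
    format!(
        "Parity/{}/{}-{}-{}",
        env!("CARGO_PKG_VERSION"),
        Target::arch(),
        Target::env(),
        Target::os()
    )
}

fn main() {
    println!("{}", client_version());
}
```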

View File

@ -1,52 +1,53 @@
/// Network and general IO module. //! Network and general IO module.
/// Example usage for craeting a network service and adding an IO handler: //!
/// //! Example usage for craeting a network service and adding an IO handler:
/// ```rust //!
/// extern crate ethcore_util as util; //! ```rust
/// use util::*; //! extern crate ethcore_util as util;
/// //! use util::*;
/// struct MyHandler; //!
/// //! struct MyHandler;
/// #[derive(Clone)] //!
/// struct MyMessage { //! #[derive(Clone)]
/// data: u32 //! struct MyMessage {
/// } //! data: u32
/// //! }
/// impl NetworkProtocolHandler<MyMessage> for MyHandler { //!
/// fn initialize(&self, io: &NetworkContext<MyMessage>) { //! impl NetworkProtocolHandler<MyMessage> for MyHandler {
/// io.register_timer(0, 1000); //! fn initialize(&self, io: &NetworkContext<MyMessage>) {
/// } //! io.register_timer(0, 1000);
/// //! }
/// fn read(&self, io: &NetworkContext<MyMessage>, peer: &PeerId, packet_id: u8, data: &[u8]) { //!
/// println!("Received {} ({} bytes) from {}", packet_id, data.len(), peer); //! fn read(&self, io: &NetworkContext<MyMessage>, peer: &PeerId, packet_id: u8, data: &[u8]) {
/// } //! println!("Received {} ({} bytes) from {}", packet_id, data.len(), peer);
/// //! }
/// fn connected(&self, io: &NetworkContext<MyMessage>, peer: &PeerId) { //!
/// println!("Connected {}", peer); //! fn connected(&self, io: &NetworkContext<MyMessage>, peer: &PeerId) {
/// } //! println!("Connected {}", peer);
/// //! }
/// fn disconnected(&self, io: &NetworkContext<MyMessage>, peer: &PeerId) { //!
/// println!("Disconnected {}", peer); //! fn disconnected(&self, io: &NetworkContext<MyMessage>, peer: &PeerId) {
/// } //! println!("Disconnected {}", peer);
/// //! }
/// fn timeout(&self, io: &NetworkContext<MyMessage>, timer: TimerToken) { //!
/// println!("Timeout {}", timer); //! fn timeout(&self, io: &NetworkContext<MyMessage>, timer: TimerToken) {
/// } //! println!("Timeout {}", timer);
/// //! }
/// fn message(&self, io: &NetworkContext<MyMessage>, message: &MyMessage) { //!
/// println!("Message {}", message.data); //! fn message(&self, io: &NetworkContext<MyMessage>, message: &MyMessage) {
/// } //! println!("Message {}", message.data);
/// } //! }
/// //! }
/// fn main () { //!
/// let mut service = NetworkService::<MyMessage>::start(NetworkConfiguration::new()).expect("Error creating network service"); //! fn main () {
/// service.register_protocol(Arc::new(MyHandler), "myproto", &[1u8]); //! let mut service = NetworkService::<MyMessage>::start(NetworkConfiguration::new()).expect("Error creating network service");
/// //! service.register_protocol(Arc::new(MyHandler), "myproto", &[1u8]);
/// // Wait for quit condition //!
/// // ... //! // Wait for quit condition
/// // Drop the service //! // ...
/// } //! // Drop the service
/// ``` //! }
//! ```
mod host; mod host;
mod connection; mod connection;
mod handshake; mod handshake;

View File

@ -1,3 +1,5 @@
//! Semantic version formatting and comparing.
/// A version value with strict meaning. Use `to_u32` to convert to a simple integer. /// A version value with strict meaning. Use `to_u32` to convert to a simple integer.
/// ///
/// # Example /// # Example

View File

@ -1,3 +1,5 @@
//! Std lib global reexports.
pub use std::io; pub use std::io;
pub use std::fs; pub use std::fs;
pub use std::str; pub use std::str;

View File

@ -1,3 +1,5 @@
//! Trie interface and implementation.
/// TODO [Gav Wood] Please document me /// TODO [Gav Wood] Please document me
pub mod trietraits; pub mod trietraits;
pub mod standardmap; pub mod standardmap;

View File

@ -17,12 +17,26 @@ pub enum Alphabet {
Custom(Bytes), Custom(Bytes),
} }
/// Means of determining the value.
pub enum ValueMode {
/// Same as the key.
Mirror,
/// Randomly (50:50) 1 or 32 byte randomly string.
Random,
}
/// Standard test map for profiling tries. /// Standard test map for profiling tries.
pub struct StandardMap { pub struct StandardMap {
alphabet: Alphabet, /// The alphabet to use for keys.
min_key: usize, pub alphabet: Alphabet,
journal_key: usize, /// Minimum size of key.
count: usize, pub min_key: usize,
/// Delta size of key.
pub journal_key: usize,
/// Mode of value generation.
pub value_mode: ValueMode,
/// Number of keys.
pub count: usize,
} }
impl StandardMap { impl StandardMap {
@ -71,7 +85,7 @@ impl StandardMap {
Alphabet::Mid => Self::random_word(mid, self.min_key, self.journal_key, &mut seed), Alphabet::Mid => Self::random_word(mid, self.min_key, self.journal_key, &mut seed),
Alphabet::Custom(ref a) => Self::random_word(&a, self.min_key, self.journal_key, &mut seed), Alphabet::Custom(ref a) => Self::random_word(&a, self.min_key, self.journal_key, &mut seed),
}; };
let v = Self::random_value(&mut seed); let v = match self.value_mode { ValueMode::Mirror => k.clone(), ValueMode::Random => Self::random_value(&mut seed) };
d.push((k, v)) d.push((k, v))
} }
d d

View File

@ -1,40 +1,4 @@
//! vector util functions //! Vector extensions.
use std::ptr;
/// TODO [debris] Please document me
pub trait InsertSlice<T> {
/// TODO [debris] Please document me
fn insert_slice(&mut self, index: usize, elements: &[T]);
}
/// based on `insert` function implementation from standard library
impl<T> InsertSlice<T> for Vec<T> {
fn insert_slice(&mut self, index: usize, elements: &[T]) {
let e_len = elements.len();
if e_len == 0 {
return;
}
let len = self.len();
assert!(index <= len);
// space for the new element
self.reserve(e_len);
unsafe {
{
let p = self.as_mut_ptr().offset(index as isize);
let ep = elements.as_ptr().offset(0);
// shift everything by e_len, to make space
ptr::copy(p, p.offset(e_len as isize), len - index);
// write new element
ptr::copy(ep, p, e_len);
}
self.set_len(len + e_len);
}
}
}
/// Returns len of prefix shared with elem /// Returns len of prefix shared with elem
/// ///