Merge pull request #516 from ethcore/sync

Better memory management
Gav Wood 2016-02-27 15:54:19 +01:00
commit b3f09596b1
16 changed files with 297 additions and 122 deletions

Cargo.lock generated

@ -14,6 +14,7 @@ dependencies = [
"ethsync 0.9.99",
"fdlimit 0.1.0",
"log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -255,6 +256,7 @@ dependencies = [
"env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"ethcore 0.9.99",
"ethcore-util 0.9.99",
"heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
@ -541,6 +543,14 @@ dependencies = [
"libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "number_prefix"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"num 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "odds"
version = "0.2.12"


@ -20,6 +20,7 @@ ethcore-rpc = { path = "rpc", optional = true }
fdlimit = { path = "util/fdlimit" }
daemonize = "0.2"
ethcore-devtools = { path = "devtools" }
number_prefix = "0.2"
[features]
default = ["rpc"]


@ -28,6 +28,31 @@ use service::*;
use client::BlockStatus;
use util::panics::*;
known_heap_size!(0, UnVerifiedBlock, VerifyingBlock, PreVerifiedBlock);
const MIN_MEM_LIMIT: usize = 16384;
const MIN_QUEUE_LIMIT: usize = 512;
/// Block queue configuration
#[derive(Debug)]
pub struct BlockQueueConfig {
/// Maximum number of blocks to keep in unverified queue.
/// When the limit is reached, is_full returns true.
pub max_queue_size: usize,
/// Maximum heap memory to use.
/// When the limit is reached, is_full returns true.
pub max_mem_use: usize,
}
impl Default for BlockQueueConfig {
fn default() -> Self {
BlockQueueConfig {
max_queue_size: 30000,
max_mem_use: 50 * 1024 * 1024,
}
}
}
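A minimal usage sketch (not part of this diff), assuming an engine and a message_channel are already in scope; BlockQueue::new raises any values below MIN_QUEUE_LIMIT / MIN_MEM_LIMIT up to those minimums:
let mut config = BlockQueueConfig::default();
config.max_queue_size = 10_000; // default is 30_000 blocks
config.max_mem_use = 64 * 1024 * 1024; // default is 50 MiB
let queue = BlockQueue::new(config, engine.clone(), message_channel);
assert!(!queue.queue_info().is_full());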
/// Block queue status
#[derive(Debug)]
pub struct BlockQueueInfo {
@ -37,6 +62,12 @@ pub struct BlockQueueInfo {
pub verified_queue_size: usize,
/// Number of blocks being verified
pub verifying_queue_size: usize,
/// Configured maximum number of blocks in the queue
pub max_queue_size: usize,
/// Configured maximum number of bytes to use
pub max_mem_use: usize,
/// Heap memory used in bytes
pub mem_used: usize,
}
impl BlockQueueInfo {
@ -48,7 +79,8 @@ impl BlockQueueInfo {
/// Indicates that queue is full
pub fn is_full(&self) -> bool {
self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size > MAX_UNVERIFIED_QUEUE_SIZE
self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size > self.max_queue_size ||
self.mem_used > self.max_mem_use
}
/// Indicates that queue is empty
@ -68,7 +100,9 @@ pub struct BlockQueue {
deleting: Arc<AtomicBool>,
ready_signal: Arc<QueueSignal>,
empty: Arc<Condvar>,
processing: RwLock<HashSet<H256>>
processing: RwLock<HashSet<H256>>,
max_queue_size: usize,
max_mem_use: usize,
}
struct UnVerifiedBlock {
@ -106,11 +140,9 @@ struct Verification {
bad: HashSet<H256>,
}
const MAX_UNVERIFIED_QUEUE_SIZE: usize = 50000;
impl BlockQueue {
/// Creates a new queue instance.
pub fn new(engine: Arc<Box<Engine>>, message_channel: IoChannel<NetSyncMessage>) -> BlockQueue {
pub fn new(config: BlockQueueConfig, engine: Arc<Box<Engine>>, message_channel: IoChannel<NetSyncMessage>) -> BlockQueue {
let verification = Arc::new(Mutex::new(Verification::default()));
let more_to_verify = Arc::new(Condvar::new());
let ready_signal = Arc::new(QueueSignal { signalled: AtomicBool::new(false), message_channel: message_channel });
@ -133,7 +165,7 @@ impl BlockQueue {
.name(format!("Verifier #{}", i))
.spawn(move || {
panic_handler.catch_panic(move || {
BlockQueue::verify(verification, engine, more_to_verify, ready_signal, deleting, empty)
BlockQueue::verify(verification, engine, more_to_verify, ready_signal, deleting, empty)
}).unwrap()
})
.expect("Error starting block verification thread")
@ -149,6 +181,8 @@ impl BlockQueue {
deleting: deleting.clone(),
processing: RwLock::new(HashSet::new()),
empty: empty.clone(),
max_queue_size: max(config.max_queue_size, MIN_QUEUE_LIMIT),
max_mem_use: max(config.max_mem_use, MIN_MEM_LIMIT),
}
}
@ -340,8 +374,26 @@ impl BlockQueue {
verified_queue_size: verification.verified.len(),
unverified_queue_size: verification.unverified.len(),
verifying_queue_size: verification.verifying.len(),
max_queue_size: self.max_queue_size,
max_mem_use: self.max_mem_use,
mem_used:
verification.unverified.heap_size_of_children()
+ verification.verifying.heap_size_of_children()
+ verification.verified.heap_size_of_children(),
// TODO: https://github.com/servo/heapsize/pull/50
//+ self.processing.read().unwrap().heap_size_of_children(),
}
}
pub fn collect_garbage(&self) {
{
let mut verification = self.verification.lock().unwrap();
verification.unverified.shrink_to_fit();
verification.verifying.shrink_to_fit();
verification.verified.shrink_to_fit();
}
self.processing.write().unwrap().shrink_to_fit();
}
}
impl MayPanic for BlockQueue {
@ -373,7 +425,7 @@ mod tests {
fn get_test_queue() -> BlockQueue {
let spec = get_test_spec();
let engine = spec.to_engine().unwrap();
BlockQueue::new(Arc::new(engine), IoChannel::disconnected())
BlockQueue::new(BlockQueueConfig::default(), Arc::new(engine), IoChannel::disconnected())
}
#[test]
@ -381,7 +433,7 @@ mod tests {
// TODO better test
let spec = Spec::new_test();
let engine = spec.to_engine().unwrap();
let _ = BlockQueue::new(Arc::new(engine), IoChannel::disconnected());
let _ = BlockQueue::new(BlockQueueConfig::default(), Arc::new(engine), IoChannel::disconnected());
}
#[test]
@ -437,4 +489,19 @@ mod tests {
assert!(queue.queue_info().is_empty());
}
#[test]
fn test_mem_limit() {
let spec = get_test_spec();
let engine = spec.to_engine().unwrap();
let mut config = BlockQueueConfig::default();
config.max_mem_use = super::MIN_MEM_LIMIT; // empty queue uses about 15000
let mut queue = BlockQueue::new(config, Arc::new(engine), IoChannel::disconnected());
assert!(!queue.queue_info().is_full());
let mut blocks = get_good_dummy_block_seq(50);
for b in blocks.drain(..) {
queue.import_block(b).unwrap();
}
assert!(queue.queue_info().is_full());
}
}


@ -27,6 +27,24 @@ use chainfilter::{ChainFilter, BloomIndex, FilterDataSource};
const BLOOM_INDEX_SIZE: usize = 16;
const BLOOM_LEVELS: u8 = 3;
/// Blockchain configuration.
#[derive(Debug)]
pub struct BlockChainConfig {
/// Preferred cache size in bytes.
pub pref_cache_size: usize,
/// Maximum cache size in bytes.
pub max_cache_size: usize,
}
impl Default for BlockChainConfig {
fn default() -> Self {
BlockChainConfig {
pref_cache_size: 1 << 14,
max_cache_size: 1 << 20,
}
}
}
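A similar sketch (again not part of this diff) for overriding the cache limits when opening a chain; genesis (a byte slice) and path (a &Path) are assumed to be in scope:
let mut config = BlockChainConfig::default();
config.pref_cache_size = 1 << 15; // preferred cache: 32 KiB
config.max_cache_size = 1 << 21; // hard ceiling: 2 MiB
let bc = BlockChain::new(config, &genesis, path);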
/// Represents a tree route between `from` block and `to` block:
pub struct TreeRoute {
/// A vector of hashes of all blocks, ordered from `from` to `to`.
@ -110,7 +128,7 @@ struct ExtrasUpdate {
impl CacheSize {
/// Total amount used by the cache.
fn total(&self) -> usize { self.blocks + self.block_details + self.transaction_addresses + self.block_logs + self.blocks_blooms }
pub fn total(&self) -> usize { self.blocks + self.block_details + self.transaction_addresses + self.block_logs + self.blocks_blooms }
}
/// Information about best block gathered together
@ -308,33 +326,7 @@ const COLLECTION_QUEUE_SIZE: usize = 8;
impl BlockChain {
/// Create new instance of blockchain from given Genesis
///
/// ```rust
/// extern crate ethcore_util as util;
/// extern crate ethcore;
/// use std::env;
/// use std::str::FromStr;
/// use ethcore::spec::*;
/// use ethcore::blockchain::*;
/// use ethcore::ethereum;
/// use util::hash::*;
/// use util::uint::*;
///
/// fn main() {
/// let spec = ethereum::new_frontier();
///
/// let mut dir = env::temp_dir();
/// dir.push(H32::random().hex());
///
/// let bc = BlockChain::new(&spec.genesis_block(), &dir);
///
/// let genesis_hash = "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3";
/// assert_eq!(bc.genesis_hash(), H256::from_str(genesis_hash).unwrap());
/// assert!(bc.is_known(&bc.genesis_hash()));
/// assert_eq!(bc.genesis_hash(), bc.block_hash(0).unwrap());
/// }
/// ```
pub fn new(genesis: &[u8], path: &Path) -> BlockChain {
pub fn new(config: BlockChainConfig, genesis: &[u8], path: &Path) -> BlockChain {
// open extras db
let mut extras_path = path.to_path_buf();
extras_path.push("extras");
@ -349,8 +341,8 @@ impl BlockChain {
(0..COLLECTION_QUEUE_SIZE).foreach(|_| cache_man.cache_usage.push_back(HashSet::new()));
let bc = BlockChain {
pref_cache_size: 1 << 14,
max_cache_size: 1 << 20,
pref_cache_size: config.pref_cache_size,
max_cache_size: config.max_cache_size,
best_block: RwLock::new(BestBlock::new()),
blocks: RwLock::new(HashMap::new()),
block_details: RwLock::new(HashMap::new()),
@ -813,7 +805,7 @@ mod tests {
use std::str::FromStr;
use rustc_serialize::hex::FromHex;
use util::hash::*;
use blockchain::{BlockProvider, BlockChain};
use blockchain::{BlockProvider, BlockChain, BlockChainConfig};
use tests::helpers::*;
use devtools::*;
@ -822,7 +814,7 @@ mod tests {
let genesis = "f901fcf901f7a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0925002c3260b44e44c3edebad1cc442142b03020209df1ab8bb86752edbd2cd7a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000080832fefd8808454c98c8142a0363659b251bf8b819179874c8cce7b9b983d7f3704cbb58a3b334431f7032871889032d09c281e1236c0c0".from_hex().unwrap();
let temp = RandomTempPath::new();
let bc = BlockChain::new(&genesis, temp.as_path());
let bc = BlockChain::new(BlockChainConfig::default(), &genesis, temp.as_path());
let genesis_hash = H256::from_str("3caa2203f3d7c136c0295ed128a7d31cea520b1ca5e27afe17d0853331798942").unwrap();
@ -867,7 +859,7 @@ mod tests {
let best_block_hash = H256::from_str("c208f88c9f5bf7e00840439742c12e5226d9752981f3ec0521bdcb6dd08af277").unwrap();
let temp = RandomTempPath::new();
let bc = BlockChain::new(&genesis, temp.as_path());
let bc = BlockChain::new(BlockChainConfig::default(), &genesis, temp.as_path());
bc.insert_block(&b1, vec![]);
bc.insert_block(&b2, vec![]);
bc.insert_block(&b3a, vec![]);
@ -946,14 +938,14 @@ mod tests {
let temp = RandomTempPath::new();
{
let bc = BlockChain::new(&genesis, temp.as_path());
let bc = BlockChain::new(BlockChainConfig::default(), &genesis, temp.as_path());
assert_eq!(bc.best_block_hash(), genesis_hash);
bc.insert_block(&b1, vec![]);
assert_eq!(bc.best_block_hash(), b1_hash);
}
{
let bc = BlockChain::new(&genesis, temp.as_path());
let bc = BlockChain::new(BlockChainConfig::default(), &genesis, temp.as_path());
assert_eq!(bc.best_block_hash(), b1_hash);
}
}
@ -1006,7 +998,7 @@ mod tests {
let b1_hash = H256::from_str("f53f268d23a71e85c7d6d83a9504298712b84c1a2ba220441c86eeda0bf0b6e3").unwrap();
let temp = RandomTempPath::new();
let bc = BlockChain::new(&genesis, temp.as_path());
let bc = BlockChain::new(BlockChainConfig::default(), &genesis, temp.as_path());
bc.insert_block(&b1, vec![]);
let transactions = bc.transactions(&b1_hash).unwrap();
@ -1042,7 +1034,7 @@ mod tests {
let bloom_ba = H2048::from_str("00000000000000000000000000000000000000000000020000000800000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000008000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap();
let temp = RandomTempPath::new();
let bc = BlockChain::new(&genesis, temp.as_path());
let bc = BlockChain::new(BlockChainConfig::default(), &genesis, temp.as_path());
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);


@ -18,7 +18,7 @@
use util::*;
use util::panics::*;
use blockchain::{BlockChain, BlockProvider, CacheSize};
use blockchain::{BlockChain, BlockProvider};
use views::BlockView;
use error::*;
use header::{BlockNumber, Header};
@ -26,7 +26,7 @@ use state::State;
use spec::Spec;
use engine::Engine;
use views::HeaderView;
use block_queue::{BlockQueue, BlockQueueInfo};
use block_queue::BlockQueue;
use service::{NetSyncMessage, SyncMessage};
use env_info::LastHashes;
use verification::*;
@ -35,7 +35,8 @@ use transaction::LocalizedTransaction;
use extras::TransactionAddress;
use filter::Filter;
use log_entry::LocalizedLogEntry;
pub use blockchain::TreeRoute;
pub use block_queue::{BlockQueueConfig, BlockQueueInfo};
pub use blockchain::{TreeRoute, BlockChainConfig, CacheSize as BlockChainCacheSize};
/// Uniquely identifies block.
#[derive(Debug, PartialEq, Clone)]
@ -74,7 +75,16 @@ pub enum BlockStatus {
Unknown,
}
/// Information about the blockchain gthered together.
/// Client configuration. Includes configs for all sub-systems.
#[derive(Debug, Default)]
pub struct ClientConfig {
/// Block queue configuration.
pub queue: BlockQueueConfig,
/// Blockchain configuration.
pub blockchain: BlockChainConfig,
}
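How the configs compose at the top level, sketched after the parity/main.rs changes further down in this PR (spec, net_settings and db_path are assumed to be prepared already):
let mut client_config = ClientConfig::default();
client_config.blockchain.pref_cache_size = 16_384; // --cache-pref-size
client_config.blockchain.max_cache_size = 262_144; // --cache-max-size
client_config.queue.max_mem_use = 52_428_800; // --queue-max-size
let service = ClientService::start(client_config, spec, net_settings, db_path).unwrap();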
/// Information about the blockchain gathered together.
#[derive(Debug)]
pub struct BlockChainInfo {
/// Blockchain difficulty.
@ -190,14 +200,14 @@ const CLIENT_DB_VER_STR: &'static str = "4.0";
impl Client {
/// Create a new client with given spec and DB path.
pub fn new(spec: Spec, path: &Path, message_channel: IoChannel<NetSyncMessage> ) -> Result<Arc<Client>, Error> {
pub fn new(config: ClientConfig, spec: Spec, path: &Path, message_channel: IoChannel<NetSyncMessage> ) -> Result<Arc<Client>, Error> {
let mut dir = path.to_path_buf();
dir.push(H64::from(spec.genesis_header().hash()).hex());
//TODO: sec/fat: pruned/full versioning
dir.push(format!("v{}-sec-pruned", CLIENT_DB_VER_STR));
let path = dir.as_path();
let gb = spec.genesis_block();
let chain = Arc::new(RwLock::new(BlockChain::new(&gb, path)));
let chain = Arc::new(RwLock::new(BlockChain::new(config.blockchain, &gb, path)));
let mut state_path = path.to_path_buf();
state_path.push("state");
@ -207,7 +217,7 @@ impl Client {
state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error committing genesis state to state DB");
}
let block_queue = BlockQueue::new(engine.clone(), message_channel);
let block_queue = BlockQueue::new(config.queue, engine.clone(), message_channel);
let panic_handler = PanicHandler::new_in_arc();
panic_handler.forward_from(&block_queue);
@ -356,7 +366,7 @@ impl Client {
}
/// Get info on the cache.
pub fn cache_info(&self) -> CacheSize {
pub fn blockchain_cache_info(&self) -> BlockChainCacheSize {
self.chain.read().unwrap().cache_size()
}
@ -368,6 +378,7 @@ impl Client {
/// Tick the client.
pub fn tick(&self) {
self.chain.read().unwrap().collect_garbage();
self.block_queue.read().unwrap().collect_garbage();
}
/// Set up the cache behaviour.


@ -15,7 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use super::test_common::*;
use client::{BlockChainClient,Client};
use client::{BlockChainClient, Client, ClientConfig};
use pod_state::*;
use block::Block;
use ethereum;
@ -53,7 +53,7 @@ pub fn json_chain_test(json_data: &[u8], era: ChainEra) -> Vec<String> {
let temp = RandomTempPath::new();
{
let client = Client::new(spec, temp.as_path(), IoChannel::disconnected()).unwrap();
let client = Client::new(ClientConfig::default(), spec, temp.as_path(), IoChannel::disconnected()).unwrap();
assert_eq!(client.chain_info().best_block_hash, genesis_hash);
for (b, is_valid) in blocks.into_iter() {
if Block::is_good(&b) {


@ -75,7 +75,7 @@
#[macro_use] extern crate ethcore_util as util;
#[macro_use] extern crate lazy_static;
extern crate rustc_serialize;
extern crate heapsize;
#[macro_use] extern crate heapsize;
extern crate crypto;
extern crate time;
extern crate env_logger;
@ -86,8 +86,6 @@ extern crate crossbeam;
#[cfg(feature = "jit" )] extern crate evmjit;
pub mod block;
pub mod blockchain;
pub mod block_queue;
pub mod client;
pub mod error;
pub mod ethereum;
@ -121,6 +119,8 @@ mod substate;
mod executive;
mod externalities;
mod verification;
mod block_queue;
mod blockchain;
#[cfg(test)]
mod tests;


@ -20,7 +20,7 @@ use util::*;
use util::panics::*;
use spec::Spec;
use error::*;
use client::Client;
use client::{Client, ClientConfig};
/// Message type for external and internal events
#[derive(Clone)]
@ -48,14 +48,14 @@ pub struct ClientService {
impl ClientService {
/// Start the service in a separate thread.
pub fn start(spec: Spec, net_config: NetworkConfiguration, db_path: &Path) -> Result<ClientService, Error> {
pub fn start(config: ClientConfig, spec: Spec, net_config: NetworkConfiguration, db_path: &Path) -> Result<ClientService, Error> {
let panic_handler = PanicHandler::new_in_arc();
let mut net_service = try!(NetworkService::start(net_config));
panic_handler.forward_from(&net_service);
info!("Starting {}", net_service.host_info());
info!("Configured for {} using {} engine", spec.name, spec.engine_name);
let client = try!(Client::new(spec, db_path, net_service.io().channel()));
let client = try!(Client::new(config, spec, db_path, net_service.io().channel()));
panic_handler.forward_from(client.deref());
let client_io = Arc::new(ClientIoHandler {
client: client.clone()
@ -135,12 +135,13 @@ mod tests {
use tests::helpers::*;
use util::network::*;
use devtools::*;
use client::ClientConfig;
#[test]
fn it_can_be_started() {
let spec = get_test_spec();
let temp_path = RandomTempPath::new();
let service = ClientService::start(spec, NetworkConfiguration::new_with_port(40456), &temp_path.as_path());
let service = ClientService::start(ClientConfig::default(), spec, NetworkConfiguration::new_with_port(40456), &temp_path.as_path());
assert!(service.is_ok());
}
}


@ -58,6 +58,8 @@ pub struct Spec {
/// Known nodes on the network in enode format.
pub nodes: Vec<String>,
/// Network ID
pub network_id: U256,
/// Parameters concerning operation of the specific engine we're using.
/// Maps the parameter name to an RLP-encoded value.
@ -120,6 +122,9 @@ impl Spec {
/// Get the known nodes of the network in enode format.
pub fn nodes(&self) -> &Vec<String> { &self.nodes }
/// Get the configured Network ID.
pub fn network_id(&self) -> U256 { self.network_id }
/// Get the header of the genesis block.
pub fn genesis_header(&self) -> Header {
Header {
@ -250,6 +255,7 @@ impl FromJson for Spec {
engine_name: json["engineName"].as_string().unwrap().to_owned(),
engine_params: json_to_rlp_map(&json["params"]),
nodes: nodes,
network_id: U256::from_str(&json["params"]["networkID"].as_string().unwrap()[2..]).unwrap(),
builtins: builtins,
parent_hash: H256::from_str(&genesis["parentHash"].as_string().unwrap()[2..]).unwrap(),
author: Address::from_str(&genesis["author"].as_string().unwrap()[2..]).unwrap(),


@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use client::{BlockChainClient, Client, BlockId};
use client::{BlockChainClient, Client, ClientConfig, BlockId};
use tests::helpers::*;
use common::*;
use devtools::*;
@ -22,14 +22,14 @@ use devtools::*;
#[test]
fn created() {
let dir = RandomTempPath::new();
let client_result = Client::new(get_test_spec(), dir.as_path(), IoChannel::disconnected());
let client_result = Client::new(ClientConfig::default(), get_test_spec(), dir.as_path(), IoChannel::disconnected());
assert!(client_result.is_ok());
}
#[test]
fn imports_from_empty() {
let dir = RandomTempPath::new();
let client = Client::new(get_test_spec(), dir.as_path(), IoChannel::disconnected()).unwrap();
let client = Client::new(ClientConfig::default(), get_test_spec(), dir.as_path(), IoChannel::disconnected()).unwrap();
client.import_verified_blocks(&IoChannel::disconnected());
client.flush_queue();
}
@ -37,7 +37,7 @@ fn imports_from_empty() {
#[test]
fn imports_good_block() {
let dir = RandomTempPath::new();
let client = Client::new(get_test_spec(), dir.as_path(), IoChannel::disconnected()).unwrap();
let client = Client::new(ClientConfig::default(), get_test_spec(), dir.as_path(), IoChannel::disconnected()).unwrap();
let good_block = get_good_dummy_block();
if let Err(_) = client.import_block(good_block) {
panic!("error importing block being good by definition");
@ -52,7 +52,7 @@ fn imports_good_block() {
#[test]
fn query_none_block() {
let dir = RandomTempPath::new();
let client = Client::new(get_test_spec(), dir.as_path(), IoChannel::disconnected()).unwrap();
let client = Client::new(ClientConfig::default(), get_test_spec(), dir.as_path(), IoChannel::disconnected()).unwrap();
let non_existant = client.block_header(BlockId::Number(188));
assert!(non_existant.is_none());
@ -104,5 +104,5 @@ fn can_collect_garbage() {
let client_result = generate_dummy_client(100);
let client = client_result.reference();
client.tick();
assert!(client.cache_info().blocks < 100 * 1024);
assert!(client.blockchain_cache_info().blocks < 100 * 1024);
}


@ -14,10 +14,10 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use client::{BlockChainClient, Client};
use client::{BlockChainClient, Client, ClientConfig};
use common::*;
use spec::*;
use blockchain::{BlockChain};
use blockchain::{BlockChain, BlockChainConfig};
use state::*;
use evm::{Schedule, Factory};
use engine::*;
@ -134,7 +134,7 @@ pub fn create_test_block_with_data(header: &Header, transactions: &[&SignedTrans
pub fn generate_dummy_client(block_number: u32) -> GuardedTempResult<Arc<Client>> {
let dir = RandomTempPath::new();
let client = Client::new(get_test_spec(), dir.as_path(), IoChannel::disconnected()).unwrap();
let client = Client::new(ClientConfig::default(), get_test_spec(), dir.as_path(), IoChannel::disconnected()).unwrap();
let test_spec = get_test_spec();
let test_engine = test_spec.to_engine().unwrap();
let state_root = test_engine.spec().genesis_header().state_root;
@ -172,7 +172,7 @@ pub fn generate_dummy_client(block_number: u32) -> GuardedTempResult<Arc<Client>
pub fn get_test_client_with_blocks(blocks: Vec<Bytes>) -> GuardedTempResult<Arc<Client>> {
let dir = RandomTempPath::new();
let client = Client::new(get_test_spec(), dir.as_path(), IoChannel::disconnected()).unwrap();
let client = Client::new(ClientConfig::default(), get_test_spec(), dir.as_path(), IoChannel::disconnected()).unwrap();
for block in &blocks {
if let Err(_) = client.import_block(block.clone()) {
panic!("panic importing block which is well-formed");
@ -189,7 +189,7 @@ pub fn get_test_client_with_blocks(blocks: Vec<Bytes>) -> GuardedTempResult<Arc<
pub fn generate_dummy_blockchain(block_number: u32) -> GuardedTempResult<BlockChain> {
let temp = RandomTempPath::new();
let bc = BlockChain::new(&create_unverifiable_block(0, H256::zero()), temp.as_path());
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), temp.as_path());
for block_order in 1..block_number {
bc.insert_block(&create_unverifiable_block(block_order, bc.best_block_hash()), vec![]);
}
@ -202,7 +202,7 @@ pub fn generate_dummy_blockchain(block_number: u32) -> GuardedTempResult<BlockCh
pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> GuardedTempResult<BlockChain> {
let temp = RandomTempPath::new();
let bc = BlockChain::new(&create_unverifiable_block(0, H256::zero()), temp.as_path());
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), temp.as_path());
for block_order in 1..block_number {
bc.insert_block(&create_unverifiable_block_with_extra(block_order, bc.best_block_hash(), None), vec![]);
}
@ -215,7 +215,7 @@ pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> GuardedTempRes
pub fn generate_dummy_empty_blockchain() -> GuardedTempResult<BlockChain> {
let temp = RandomTempPath::new();
let bc = BlockChain::new(&create_unverifiable_block(0, H256::zero()), temp.as_path());
let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), temp.as_path());
GuardedTempResult::<BlockChain> {
_temp: temp,
@ -250,6 +250,25 @@ pub fn get_temp_state_in(path: &Path) -> State {
State::new(journal_db, U256::from(0u8))
}
pub fn get_good_dummy_block_seq(count: usize) -> Vec<Bytes> {
let test_spec = get_test_spec();
let test_engine = test_spec.to_engine().unwrap();
let mut parent = test_engine.spec().genesis_header().hash();
let mut r = Vec::new();
for i in 1 .. count + 1 {
let mut block_header = Header::new();
block_header.gas_limit = decode(test_engine.spec().engine_params.get("minGasLimit").unwrap());
block_header.difficulty = decode(test_engine.spec().engine_params.get("minimumDifficulty").unwrap());
block_header.timestamp = i as u64;
block_header.number = i as u64;
block_header.parent_hash = parent;
block_header.state_root = test_engine.spec().genesis_header().state_root;
parent = block_header.hash();
r.push(create_test_block(&block_header));
}
r
}
pub fn get_good_dummy_block() -> Bytes {
let mut block_header = Header::new();
let test_spec = get_test_spec();


@ -31,6 +31,7 @@ extern crate ctrlc;
extern crate fdlimit;
extern crate daemonize;
extern crate time;
extern crate number_prefix;
#[cfg(feature = "rpc")]
extern crate ethcore_rpc as rpc;
@ -47,10 +48,10 @@ use ethcore::spec::*;
use ethcore::client::*;
use ethcore::service::{ClientService, NetSyncMessage};
use ethcore::ethereum;
use ethcore::blockchain::CacheSize;
use ethsync::EthSync;
use ethsync::{EthSync, SyncConfig};
use docopt::Docopt;
use daemonize::Daemonize;
use number_prefix::{binary_prefix, Standalone, Prefixed};
const USAGE: &'static str = r#"
Parity. Ethereum Client.
@ -78,6 +79,7 @@ Options:
--cache-pref-size BYTES Specify the preferred size of the blockchain cache in bytes [default: 16384].
--cache-max-size BYTES Specify the maximum size of the blockchain cache in bytes [default: 262144].
--queue-max-size BYTES Specify the maximum size of memory to use for block queue [default: 52428800].
-j --jsonrpc Enable the JSON-RPC API server.
--jsonrpc-url URL Specify URL for JSON-RPC API server [default: 127.0.0.1:8545].
@ -106,6 +108,7 @@ struct Args {
flag_node_key: Option<String>,
flag_cache_pref_size: usize,
flag_cache_max_size: usize,
flag_queue_max_size: usize,
flag_jsonrpc: bool,
flag_jsonrpc_url: String,
flag_jsonrpc_cors: String,
@ -283,14 +286,19 @@ impl Configuration {
let spec = self.spec();
let net_settings = self.net_settings(&spec);
let mut sync_config = SyncConfig::default();
sync_config.network_id = spec.network_id();
// Build client
let mut service = ClientService::start(spec, net_settings, &Path::new(&self.path())).unwrap();
let mut client_config = ClientConfig::default();
client_config.blockchain.pref_cache_size = self.args.flag_cache_pref_size;
client_config.blockchain.max_cache_size = self.args.flag_cache_max_size;
client_config.queue.max_mem_use = self.args.flag_queue_max_size;
let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap();
let client = service.client().clone();
client.configure_cache(self.args.flag_cache_pref_size, self.args.flag_cache_max_size);
// Sync
let sync = EthSync::register(service.network(), client);
let sync = EthSync::register(service.network(), sync_config, client);
// Setup rpc
if self.args.flag_jsonrpc {
@ -331,7 +339,7 @@ fn main() {
struct Informant {
chain_info: RwLock<Option<BlockChainInfo>>,
cache_info: RwLock<Option<CacheSize>>,
cache_info: RwLock<Option<BlockChainCacheSize>>,
report: RwLock<Option<ClientReport>>,
}
@ -346,18 +354,26 @@ impl Default for Informant {
}
impl Informant {
fn format_bytes(b: usize) -> String {
match binary_prefix(b as f64) {
Standalone(bytes) => format!("{} bytes", bytes),
Prefixed(prefix, n) => format!("{:.0} {}B", n, prefix),
}
}
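Expected output of format_bytes, sketched from number_prefix's documented behaviour (not asserted anywhere in this PR): values under 1024 take the Standalone arm, larger values are rounded to a whole binary prefix:
// format_bytes(512) -> "512 bytes"
// format_bytes(52_428_800) -> "50 MiB" (52_428_800 / 1024 / 1024 = 50)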
pub fn tick(&self, client: &Client, sync: &EthSync) {
// 5 seconds between calls. TODO: calculate this properly.
let dur = 5usize;
let chain_info = client.chain_info();
let queue_info = client.queue_info();
let cache_info = client.cache_info();
let cache_info = client.blockchain_cache_info();
let report = client.report();
let sync_info = sync.status();
if let (_, &Some(ref last_cache_info), &Some(ref last_report)) = (self.chain_info.read().unwrap().deref(), self.cache_info.read().unwrap().deref(), self.report.read().unwrap().deref()) {
println!("[ #{} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, #{}, {}+{} queued ···// {} ({}) bl {} ({}) ex ]",
if let (_, _, &Some(ref last_report)) = (self.chain_info.read().unwrap().deref(), self.cache_info.read().unwrap().deref(), self.report.read().unwrap().deref()) {
println!("[ #{} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, #{}, {}+{} queued ···// mem: {} chain, {} queue, {} sync ]",
chain_info.best_block_number,
chain_info.best_block_hash,
(report.blocks_imported - last_report.blocks_imported) / dur,
@ -370,10 +386,9 @@ impl Informant {
queue_info.unverified_queue_size,
queue_info.verified_queue_size,
cache_info.blocks,
cache_info.blocks as isize - last_cache_info.blocks as isize,
cache_info.block_details,
cache_info.block_details as isize - last_cache_info.block_details as isize
Informant::format_bytes(cache_info.total()),
Informant::format_bytes(queue_info.mem_used),
Informant::format_bytes(sync_info.mem_used),
);
}


@ -15,6 +15,7 @@ log = "0.3"
env_logger = "0.3"
time = "0.1.34"
rand = "0.3.13"
heapsize = "0.3"
[features]
default = []


@ -39,7 +39,9 @@ use ethcore::error::*;
use ethcore::block::Block;
use io::SyncIo;
use time;
use std::option::Option;
use super::SyncConfig;
known_heap_size!(0, PeerInfo, Header, HeaderId);
impl ToUsize for BlockNumber {
fn to_usize(&self) -> usize {
@ -80,9 +82,7 @@ const NODE_DATA_PACKET: u8 = 0x0e;
const GET_RECEIPTS_PACKET: u8 = 0x0f;
const RECEIPTS_PACKET: u8 = 0x10;
const NETWORK_ID: U256 = ONE_U256; //TODO: get this from parent
const CONNECTION_TIMEOUT_SEC: f64 = 10f64;
const CONNECTION_TIMEOUT_SEC: f64 = 5f64;
struct Header {
/// Header data
@ -135,6 +135,8 @@ pub struct SyncStatus {
pub num_peers: usize,
/// Total number of active peers
pub num_active_peers: usize,
/// Heap memory used in bytes
pub mem_used: usize,
}
#[derive(PartialEq, Eq, Debug, Clone)]
@ -203,13 +205,17 @@ pub struct ChainSync {
have_common_block: bool,
/// Last propagated block number
last_send_block_number: BlockNumber,
/// Max blocks to download ahead
max_download_ahead_blocks: usize,
/// Network ID
network_id: U256,
}
type RlpResponseResult = Result<Option<(PacketId, RlpStream)>, PacketDecodeError>;
impl ChainSync {
/// Create a new instance of syncing strategy.
pub fn new() -> ChainSync {
pub fn new(config: SyncConfig) -> ChainSync {
ChainSync {
state: SyncState::NotSynced,
starting_block: 0,
@ -226,6 +232,8 @@ impl ChainSync {
syncing_difficulty: U256::from(0u64),
have_common_block: false,
last_send_block_number: 0,
max_download_ahead_blocks: max(MAX_HEADERS_TO_REQUEST, config.max_download_ahead_blocks),
network_id: config.network_id,
}
}
@ -241,6 +249,15 @@ impl ChainSync {
blocks_total: match self.highest_block { None => 0, Some(x) => x - self.starting_block },
num_peers: self.peers.len(),
num_active_peers: self.peers.values().filter(|p| p.asking != PeerAsking::Nothing).count(),
mem_used:
// TODO: https://github.com/servo/heapsize/pull/50
// self.downloading_hashes.heap_size_of_children()
//+ self.downloading_bodies.heap_size_of_children()
//+ self.downloading_hashes.heap_size_of_children()
self.headers.heap_size_of_children()
+ self.bodies.heap_size_of_children()
+ self.peers.heap_size_of_children()
+ self.header_ids.heap_size_of_children(),
}
}
@ -275,7 +292,6 @@ impl ChainSync {
self.starting_block = 0;
self.highest_block = None;
self.have_common_block = false;
io.chain().clear_queue();
self.starting_block = io.chain().chain_info().best_block_number;
self.state = SyncState::NotSynced;
}
@ -307,7 +323,7 @@ impl ChainSync {
trace!(target: "sync", "Peer {} genesis hash not matched", peer_id);
return Ok(());
}
if peer.network_id != NETWORK_ID {
if peer.network_id != self.network_id {
io.disable_peer(peer_id);
trace!(target: "sync", "Peer {} network id not matched", peer_id);
return Ok(());
@ -436,7 +452,7 @@ impl ChainSync {
trace!(target: "sync", "Got body {}", n);
}
None => {
debug!(target: "sync", "Ignored unknown block body");
trace!(target: "sync", "Ignored unknown/stale block body");
}
}
}
@ -611,7 +627,7 @@ impl ChainSync {
self.request_headers_by_hash(io, peer_id, &peer_latest, 1, 0, false);
}
else if self.state == SyncState::Blocks && io.chain().block_status(BlockId::Hash(peer_latest)) == BlockStatus::Unknown {
self.request_blocks(io, peer_id);
self.request_blocks(io, peer_id, false);
}
}
@ -620,7 +636,7 @@ impl ChainSync {
}
/// Find some headers or blocks to download for a peer.
fn request_blocks(&mut self, io: &mut SyncIo, peer_id: PeerId) {
fn request_blocks(&mut self, io: &mut SyncIo, peer_id: PeerId, ignore_others: bool) {
self.clear_peer_download(peer_id);
if io.chain().queue_info().is_full() {
@ -640,28 +656,34 @@ impl ChainSync {
let mut index: BlockNumber = 0;
while index != items.len() as BlockNumber && needed_bodies.len() < MAX_BODIES_TO_REQUEST {
let block = start + index;
if !self.downloading_bodies.contains(&block) && !self.bodies.have_item(&block) {
if ignore_others || (!self.downloading_bodies.contains(&block) && !self.bodies.have_item(&block)) {
needed_bodies.push(items[index as usize].hash.clone());
needed_numbers.push(block);
self.downloading_bodies.insert(block);
}
index += 1;
}
}
}
if !needed_bodies.is_empty() {
let (head, _) = self.headers.range_iter().next().unwrap();
if needed_numbers.first().unwrap() - head > self.max_download_ahead_blocks as BlockNumber {
trace!(target: "sync", "{}: Stalled download ({} vs {}), helping with downloading block bodies", peer_id, needed_numbers.first().unwrap(), head);
self.request_blocks(io, peer_id, true);
return;
}
self.downloading_bodies.extend(needed_numbers.iter());
replace(&mut self.peers.get_mut(&peer_id).unwrap().asking_blocks, needed_numbers);
self.request_bodies(io, peer_id, needed_bodies);
}
else {
// check if need to download headers
let mut start = 0usize;
let mut start = 0;
if !self.have_common_block {
// download backwards until common block is found 1 header at a time
let chain_info = io.chain().chain_info();
start = chain_info.best_block_number as usize;
start = chain_info.best_block_number;
if !self.headers.is_empty() {
start = min(start, self.headers.range_iter().next().unwrap().0 as usize - 1);
start = min(start, self.headers.range_iter().next().unwrap().0 - 1);
}
if start == 0 {
self.have_common_block = true; //reached genesis
@ -672,6 +694,7 @@ impl ChainSync {
if self.have_common_block {
let mut headers: Vec<BlockNumber> = Vec::new();
let mut prev = self.current_base_block() + 1;
let head = self.headers.range_iter().next().map(|(h, _)| h);
for (next, ref items) in self.headers.range_iter() {
if !headers.is_empty() {
break;
@ -682,9 +705,8 @@ impl ChainSync {
}
let mut block = prev;
while block < next && headers.len() < MAX_HEADERS_TO_REQUEST {
if !self.downloading_headers.contains(&(block as BlockNumber)) {
if ignore_others || !self.downloading_headers.contains(&(block as BlockNumber)) {
headers.push(block as BlockNumber);
self.downloading_headers.insert(block as BlockNumber);
}
block += 1;
}
@ -692,17 +714,23 @@ impl ChainSync {
}
if !headers.is_empty() {
start = headers[0] as usize;
start = headers[0];
if head.is_some() && start > head.unwrap() && start - head.unwrap() > self.max_download_ahead_blocks as BlockNumber {
trace!(target: "sync", "{}: Stalled download ({} vs {}), helping with downloading headers", peer_id, start, head.unwrap());
self.request_blocks(io, peer_id, true);
return;
}
let count = headers.len();
self.downloading_headers.extend(headers.iter());
replace(&mut self.peers.get_mut(&peer_id).unwrap().asking_blocks, headers);
assert!(!self.headers.have_item(&(start as BlockNumber)));
self.request_headers_by_number(io, peer_id, start as BlockNumber, count, 0, false);
assert!(!self.headers.have_item(&start));
self.request_headers_by_number(io, peer_id, start, count, 0, false);
}
}
else {
// continue search for common block
self.downloading_headers.insert(start as BlockNumber);
self.request_headers_by_number(io, peer_id, start as BlockNumber, 1, 0, false);
self.downloading_headers.insert(start);
self.request_headers_by_number(io, peer_id, start, 1, 0, false);
}
}
}
@ -894,7 +922,7 @@ impl ChainSync {
let mut packet = RlpStream::new_list(5);
let chain = io.chain().chain_info();
packet.append(&(PROTOCOL_VERSION as u32));
packet.append(&NETWORK_ID); //TODO: network id
packet.append(&self.network_id);
packet.append(&chain.total_difficulty);
packet.append(&chain.best_block_hash);
packet.append(&chain.genesis_hash);
@ -1220,6 +1248,7 @@ impl ChainSync {
mod tests {
use tests::helpers::*;
use super::*;
use ::SyncConfig;
use util::*;
use super::{PeerInfo, PeerAsking};
use ethcore::header::*;
@ -1333,7 +1362,7 @@ mod tests {
}
fn dummy_sync_with_peer(peer_latest_hash: H256) -> ChainSync {
let mut sync = ChainSync::new();
let mut sync = ChainSync::new(SyncConfig::default());
sync.peers.insert(0,
PeerInfo {
protocol_version: 0,


@ -34,15 +34,15 @@
//! use std::env;
//! use std::sync::Arc;
//! use util::network::{NetworkService, NetworkConfiguration};
//! use ethcore::client::Client;
//! use ethsync::EthSync;
//! use ethcore::client::{Client, ClientConfig};
//! use ethsync::{EthSync, SyncConfig};
//! use ethcore::ethereum;
//!
//! fn main() {
//! let mut service = NetworkService::start(NetworkConfiguration::new()).unwrap();
//! let dir = env::temp_dir();
//! let client = Client::new(ethereum::new_frontier(), &dir, service.io().channel()).unwrap();
//! EthSync::register(&mut service, client);
//! let client = Client::new(ClientConfig::default(), ethereum::new_frontier(), &dir, service.io().channel()).unwrap();
//! EthSync::register(&mut service, SyncConfig::default(), client);
//! }
//! ```
@ -54,12 +54,15 @@ extern crate ethcore;
extern crate env_logger;
extern crate time;
extern crate rand;
#[macro_use]
extern crate heapsize;
use std::ops::*;
use std::sync::*;
use ethcore::client::Client;
use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId};
use util::TimerToken;
use util::{U256, ONE_U256};
use chain::ChainSync;
use ethcore::service::SyncMessage;
use io::NetSyncIo;
@ -71,6 +74,23 @@ mod range_collection;
#[cfg(test)]
mod tests;
/// Sync configuration
pub struct SyncConfig {
/// Max blocks to download ahead
pub max_download_ahead_blocks: usize,
/// Network ID
pub network_id: U256,
}
impl Default for SyncConfig {
fn default() -> SyncConfig {
SyncConfig {
max_download_ahead_blocks: 20000,
network_id: ONE_U256,
}
}
}
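A sketch (not part of this diff) of wiring the sync config to the chain spec, mirroring what parity/main.rs does in this PR; service and client are assumed to exist:
let mut sync_config = SyncConfig::default();
sync_config.network_id = spec.network_id(); // defaults to ONE_U256, i.e. the public Frontier/Homestead network
let sync = EthSync::register(service.network(), sync_config, client.clone());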
/// Ethereum network protocol handler
pub struct EthSync {
/// Shared blockchain client. TODO: this should eventually become an IPC endpoint
@ -83,10 +103,10 @@ pub use self::chain::{SyncStatus, SyncState};
impl EthSync {
/// Creates and register protocol with the network service
pub fn register(service: &mut NetworkService<SyncMessage>, chain: Arc<Client>) -> Arc<EthSync> {
pub fn register(service: &mut NetworkService<SyncMessage>, config: SyncConfig, chain: Arc<Client>) -> Arc<EthSync> {
let sync = Arc::new(EthSync {
chain: chain,
sync: RwLock::new(ChainSync::new()),
sync: RwLock::new(ChainSync::new(config)),
});
service.register_protocol(sync.clone(), "eth", &[62u8, 63u8]).expect("Error registering eth protocol handler");
sync


@ -15,12 +15,12 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::*;
use ethcore::client::{BlockChainClient, BlockStatus, TreeRoute, BlockChainInfo, TransactionId, BlockId};
use ethcore::block_queue::BlockQueueInfo;
use ethcore::client::{BlockChainClient, BlockStatus, TreeRoute, BlockChainInfo, TransactionId, BlockId, BlockQueueInfo};
use ethcore::header::{Header as BlockHeader, BlockNumber};
use ethcore::error::*;
use io::SyncIo;
use chain::{ChainSync};
use chain::ChainSync;
use ::SyncConfig;
use ethcore::receipt::Receipt;
use ethcore::transaction::LocalizedTransaction;
use ethcore::filter::Filter;
@ -251,6 +251,9 @@ impl BlockChainClient for TestBlockChainClient {
verified_queue_size: 0,
unverified_queue_size: 0,
verifying_queue_size: 0,
max_queue_size: 0,
max_mem_use: 0,
mem_used: 0,
}
}
@ -340,7 +343,7 @@ impl TestNet {
for _ in 0..n {
net.peers.push(TestPeer {
chain: TestBlockChainClient::new(),
sync: ChainSync::new(),
sync: ChainSync::new(SyncConfig::default()),
queue: VecDeque::new(),
});
}