Merge branch 'master' into rpc-signing-extend

Author: Nikolay Volf
Date:   2016-03-05 14:31:39 +03:00
Commit: 9e5bf94a5a
25 changed files with 525 additions and 177 deletions

View File

@@ -13,6 +13,8 @@ matrix:
   allow_failures:
     - rust: nightly
   include:
+    - rust: stable
+      env: FEATURES="--features travis-beta" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}"
     - rust: beta
       env: FEATURES="--features travis-beta" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}"
     - rust: nightly
@@ -52,7 +54,7 @@ after_success: |
   ./kcov-master/tmp/usr/local/bin/kcov --coveralls-id=${TRAVIS_JOB_ID} --exclude-pattern /usr/,/.cargo,/root/.multirust target/kcov target/debug/parity-* &&
   [ $TRAVIS_BRANCH = master ] &&
   [ $TRAVIS_PULL_REQUEST = false ] &&
-  [ $TRAVIS_RUST_VERSION = beta ] &&
+  [ $TRAVIS_RUST_VERSION = stable ] &&
   cargo doc --no-deps --verbose ${KCOV_FEATURES} ${TARGETS} &&
   echo '<meta http-equiv=refresh content=0;url=ethcore/index.html>' > target/doc/index.html &&
   pip install --user ghp-import &&

Cargo.lock generated
View File

@@ -146,6 +146,14 @@ dependencies = [
  "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "deque"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "docopt"
 version = "0.6.78"
@@ -285,6 +293,7 @@ dependencies = [
  "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -655,6 +664,16 @@ dependencies = [
  "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "rayon"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "regex"
 version = "0.1.54"

View File

@@ -220,8 +220,8 @@ impl<'x> OpenBlock<'x> {
 	/// NOTE Will check chain constraints and the uncle number but will NOT check
 	/// that the header itself is actually valid.
 	pub fn push_uncle(&mut self, valid_uncle_header: Header) -> Result<(), BlockError> {
-		if self.block.base.uncles.len() >= self.engine.maximum_uncle_count() {
-			return Err(BlockError::TooManyUncles(OutOfBounds{min: None, max: Some(self.engine.maximum_uncle_count()), found: self.block.base.uncles.len()}));
+		if self.block.base.uncles.len() + 1 > self.engine.maximum_uncle_count() {
+			return Err(BlockError::TooManyUncles(OutOfBounds{min: None, max: Some(self.engine.maximum_uncle_count()), found: self.block.base.uncles.len() + 1}));
 		}
 		// TODO: check number
 		// TODO: check not a direct ancestor (use last_hashes for that)
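Editor's note: for unsigned arithmetic the rewritten guard accepts and rejects exactly the same cases as the old `len() >= max` check; what changes is the error payload, which now reports the uncle count the block would have had after the push. A minimal sketch of that behaviour with stub types (not the ethcore definitions):

#[derive(Debug)]
struct OutOfBounds { max: usize, found: usize } // stub stand-in for ethcore's error payload

// Reject a push that would exceed `max` uncles, reporting the would-be count.
fn check_uncle_count(current: usize, max: usize) -> Result<(), OutOfBounds> {
    if current + 1 > max {
        return Err(OutOfBounds { max, found: current + 1 });
    }
    Ok(())
}

fn main() {
    assert!(check_uncle_count(1, 2).is_ok());       // a second uncle still fits
    let err = check_uncle_count(2, 2).unwrap_err(); // a third does not
    assert_eq!((err.max, err.found), (2, 3));       // `found` includes the rejected uncle
}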

View File

@@ -78,7 +78,7 @@ pub trait BlockProvider {
 	}
 
 	/// Get a list of uncles for a given block.
-	/// Returns None if block deos not exist.
+	/// Returns None if block does not exist.
 	fn uncles(&self, hash: &H256) -> Option<Vec<Header>> {
 		self.block(hash).map(|bytes| BlockView::new(&bytes).uncles())
 	}
@@ -227,6 +227,24 @@ impl BlockProvider for BlockChain {
 const COLLECTION_QUEUE_SIZE: usize = 8;
 
+pub struct AncestryIter<'a> {
+	current: H256,
+	chain: &'a BlockChain,
+}
+
+impl<'a> Iterator for AncestryIter<'a> {
+	type Item = H256;
+	fn next(&mut self) -> Option<H256> {
+		if self.current.is_zero() {
+			Option::None
+		} else {
+			let mut n = self.chain.block_details(&self.current).unwrap().parent;
+			mem::swap(&mut self.current, &mut n);
+			Some(n)
+		}
+	}
+}
+
 impl BlockChain {
 	/// Create new instance of blockchain from given Genesis
 	pub fn new(config: BlockChainConfig, genesis: &[u8], path: &Path) -> BlockChain {
@@ -474,10 +492,35 @@ impl BlockChain {
 		self.extras_db.write(batch).unwrap();
 	}
 
-	/// Given a block's `parent`, find every block header which represents a valid uncle.
-	pub fn find_uncle_headers(&self, _parent: &H256) -> Vec<Header> {
-		// TODO
-		Vec::new()
+	/// Iterator that lists `first` and then all of `first`'s ancestors, by hash.
+	pub fn ancestry_iter(&self, first: H256) -> Option<AncestryIter> {
+		if self.is_known(&first) {
+			Some(AncestryIter {
+				current: first,
+				chain: &self,
+			})
+		} else {
+			None
+		}
+	}
+
+	/// Given a block's `parent`, find every block header which represents a valid possible uncle.
+	pub fn find_uncle_headers(&self, parent: &H256, uncle_generations: usize) -> Option<Vec<Header>> {
+		if !self.is_known(parent) { return None; }
+
+		let mut excluded = HashSet::new();
+		for a in self.ancestry_iter(parent.clone()).unwrap().take(uncle_generations) {
+			excluded.extend(self.uncle_hashes(&a).unwrap().into_iter());
+			excluded.insert(a);
+		}
+
+		let mut ret = Vec::new();
+		for a in self.ancestry_iter(parent.clone()).unwrap().skip(1).take(uncle_generations) {
+			ret.extend(self.block_details(&a).unwrap().children.iter()
+				.filter_map(|h| if excluded.contains(h) { None } else { self.block_header(h) })
+			);
+		}
+		Some(ret)
 	}
 
 	/// Get inserted block info which is critical to prepare extras updates.
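The `mem::swap` in `AncestryIter::next` is what makes the iterator yield `first` itself before walking the parent links; the genesis block's zero parent hash then terminates the walk. A self-contained sketch of the same pattern, with a plain `HashMap` standing in for `BlockChain` and `u64` for `H256` (hypothetical stand-ins, not the real API):

use std::collections::HashMap;
use std::mem;

struct AncestryIter<'a> {
    current: u64,                  // 0 plays the role of H256::zero()
    parents: &'a HashMap<u64, u64>,
}

impl<'a> Iterator for AncestryIter<'a> {
    type Item = u64;
    fn next(&mut self) -> Option<u64> {
        if self.current == 0 {
            None
        } else {
            // Fetch the parent, then swap it into `current`, so the value we
            // started with is the one returned: "first, then its ancestors".
            let mut n = self.parents.get(&self.current).cloned().unwrap_or(0);
            mem::swap(&mut self.current, &mut n);
            Some(n)
        }
    }
}

fn main() {
    // 3 -> 2 -> 1 -> genesis(0)
    let parents: HashMap<u64, u64> = [(3, 2), (2, 1), (1, 0)].iter().cloned().collect();
    let iter = AncestryIter { current: 3, parents: &parents };
    assert_eq!(iter.collect::<Vec<_>>(), vec![3, 2, 1]);
}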
@@ -818,6 +861,66 @@ mod tests {
 		assert_eq!(bc.block_hash(2), None);
 	}
 
+	#[test]
+	fn check_ancestry_iter() {
+		let mut canon_chain = ChainGenerator::default();
+		let mut finalizer = BlockFinalizer::default();
+		let genesis = canon_chain.generate(&mut finalizer).unwrap();
+		let genesis_hash = BlockView::new(&genesis).header_view().sha3();
+
+		let temp = RandomTempPath::new();
+		let bc = BlockChain::new(BlockChainConfig::default(), &genesis, temp.as_path());
+
+		let mut block_hashes = vec![genesis_hash.clone()];
+		for _ in 0..10 {
+			let block = canon_chain.generate(&mut finalizer).unwrap();
+			block_hashes.push(BlockView::new(&block).header_view().sha3());
+			bc.insert_block(&block, vec![]);
+		}
+
+		block_hashes.reverse();
+
+		assert_eq!(bc.ancestry_iter(block_hashes[0].clone()).unwrap().collect::<Vec<_>>(), block_hashes)
+	}
+
+	#[test]
+	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
+	fn test_find_uncles() {
+		let mut canon_chain = ChainGenerator::default();
+		let mut finalizer = BlockFinalizer::default();
+		let genesis = canon_chain.generate(&mut finalizer).unwrap();
+		let b1b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
+		let b1a = canon_chain.generate(&mut finalizer).unwrap();
+		let b2b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
+		let b2a = canon_chain.generate(&mut finalizer).unwrap();
+		let b3b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
+		let b3a = canon_chain.generate(&mut finalizer).unwrap();
+		let b4b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
+		let b4a = canon_chain.generate(&mut finalizer).unwrap();
+		let b5b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
+		let b5a = canon_chain.generate(&mut finalizer).unwrap();
+
+		let temp = RandomTempPath::new();
+		let bc = BlockChain::new(BlockChainConfig::default(), &genesis, temp.as_path());
+		bc.insert_block(&b1a, vec![]);
+		bc.insert_block(&b1b, vec![]);
+		bc.insert_block(&b2a, vec![]);
+		bc.insert_block(&b2b, vec![]);
+		bc.insert_block(&b3a, vec![]);
+		bc.insert_block(&b3b, vec![]);
+		bc.insert_block(&b4a, vec![]);
+		bc.insert_block(&b4b, vec![]);
+		bc.insert_block(&b5a, vec![]);
+		bc.insert_block(&b5b, vec![]);
+
+		assert_eq!(
+			[&b4b, &b3b, &b2b].iter().map(|b| BlockView::new(b).header()).collect::<Vec<_>>(),
+			bc.find_uncle_headers(&BlockView::new(&b4a).header_view().sha3(), 3).unwrap()
+		);
+
+		// TODO: insert block that already includes one of them as an uncle to check it's not allowed.
+	}
+
 	#[test]
 	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
 	fn test_small_fork() {

View File

@@ -17,6 +17,7 @@
 //! Blockchain database client.
 
 use std::marker::PhantomData;
+use std::sync::atomic::AtomicBool;
 use util::*;
 use util::panics::*;
 use blockchain::{BlockChain, BlockProvider};
@@ -78,12 +79,24 @@ pub enum BlockStatus {
 }
 
 /// Client configuration. Includes configs for all sub-systems.
-#[derive(Debug, Default)]
+#[derive(Debug)]
 pub struct ClientConfig {
 	/// Block queue configuration.
 	pub queue: BlockQueueConfig,
 	/// Blockchain configuration.
 	pub blockchain: BlockChainConfig,
+	/// Prefer journal rather than archive.
+	pub prefer_journal: bool,
+}
+
+impl Default for ClientConfig {
+	fn default() -> ClientConfig {
+		ClientConfig {
+			queue: Default::default(),
+			blockchain: Default::default(),
+			prefer_journal: false,
+		}
+	}
 }
 
 /// Information about the blockchain gathered together.
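Since `bool::default()` is already `false`, deriving `Default` would behave identically today; spelling the impl out documents that archive mode is a deliberate default rather than an accident of the derive. A sketch with a stub struct (not the ethcore type):

#[derive(Debug)]
struct ClientConfig { prefer_journal: bool }

impl Default for ClientConfig {
    fn default() -> ClientConfig {
        ClientConfig { prefer_journal: false } // archive (full history) unless asked otherwise
    }
}

fn main() {
    let mut config = ClientConfig::default();
    assert!(!config.prefer_journal);
    config.prefer_journal = true; // opt in to the journal (pruned) state DB
    println!("{:?}", config);
}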
@@ -125,6 +138,9 @@ pub trait BlockChainClient : Sync + Send {
 	/// Get block total difficulty.
 	fn block_total_difficulty(&self, id: BlockId) -> Option<U256>;
 
+	/// Get address nonce.
+	fn nonce(&self, address: &Address) -> U256;
+
 	/// Get block hash.
 	fn block_hash(&self, id: BlockId) -> Option<H256>;
@@ -200,6 +216,7 @@ pub struct Client<V = CanonVerifier> where V: Verifier {
 	panic_handler: Arc<PanicHandler>,
 
 	// for sealing...
+	sealing_enabled: AtomicBool,
 	sealing_block: Mutex<Option<ClosedBlock>>,
 	author: RwLock<Address>,
 	extra_data: RwLock<Bytes>,
@@ -219,11 +236,11 @@ impl Client<CanonVerifier> {
 impl<V> Client<V> where V: Verifier {
 	/// Create a new client with given spec and DB path and custom verifier.
-	pub fn new_with_verifier(config: ClientConfig, spec: Spec, path: &Path, message_channel: IoChannel<NetSyncMessage> ) -> Result<Arc<Client>, Error> {
+	pub fn new_with_verifier(config: ClientConfig, spec: Spec, path: &Path, message_channel: IoChannel<NetSyncMessage> ) -> Result<Arc<Client<V>>, Error> {
 		let mut dir = path.to_path_buf();
 		dir.push(H64::from(spec.genesis_header().hash()).hex());
 		//TODO: sec/fat: pruned/full versioning
-		dir.push(format!("v{}-sec-pruned", CLIENT_DB_VER_STR));
+		dir.push(format!("v{}-sec-{}", CLIENT_DB_VER_STR, if config.prefer_journal { "pruned" } else { "archive" }));
 		let path = dir.as_path();
 		let gb = spec.genesis_block();
 		let chain = Arc::new(RwLock::new(BlockChain::new(config.blockchain, &gb, path)));
@@ -231,7 +248,7 @@ impl<V> Client<V> where V: Verifier {
 		state_path.push("state");
 
 		let engine = Arc::new(try!(spec.to_engine()));
-		let mut state_db = JournalDB::new(state_path.to_str().unwrap());
+		let mut state_db = JournalDB::from_prefs(state_path.to_str().unwrap(), config.prefer_journal);
 		if state_db.is_empty() && engine.spec().ensure_db_good(&mut state_db) {
 			state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB");
 		}
@@ -251,6 +268,7 @@ impl<V> Client<V> where V: Verifier {
 			report: RwLock::new(Default::default()),
 			import_lock: Mutex::new(()),
 			panic_handler: panic_handler,
+			sealing_enabled: AtomicBool::new(false),
 			sealing_block: Mutex::new(None),
 			author: RwLock::new(Address::new()),
 			extra_data: RwLock::new(Vec::new()),
@@ -297,7 +315,7 @@ impl<V> Client<V> where V: Verifier {
 		}
 
 		// Verify Block Family
-		let verify_family_result = verify_block_family(&header, &block.bytes, engine, self.chain.read().unwrap().deref());
+		let verify_family_result = V::verify_block_family(&header, &block.bytes, engine, self.chain.read().unwrap().deref());
 		if let Err(e) = verify_family_result {
 			warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
 			return Err(());
@@ -350,18 +368,14 @@ impl<V> Client<V> where V: Verifier {
 				bad_blocks.insert(header.hash());
 				continue;
 			}
 			let closed_block = self.check_and_close_block(&block);
 			if let Err(_) = closed_block {
 				bad_blocks.insert(header.hash());
 				break;
 			}
-
-			// Insert block
-			let closed_block = closed_block.unwrap();
-			self.chain.write().unwrap().insert_block(&block.bytes, closed_block.block().receipts().clone());
 			good_blocks.push(header.hash());
 
-			// Are we committing an era?
 			let ancient = if header.number() >= HISTORY {
 				let n = header.number() - HISTORY;
 				let chain = self.chain.read().unwrap();
@@ -371,10 +385,16 @@ impl<V> Client<V> where V: Verifier {
 			};
 
 			// Commit results
+			let closed_block = closed_block.unwrap();
+			let receipts = closed_block.block().receipts().clone();
 			closed_block.drain()
 				.commit(header.number(), &header.hash(), ancient)
 				.expect("State DB commit failed.");
+
+			// And update the chain
+			self.chain.write().unwrap()
+				.insert_block(&block.bytes, receipts);
+
 			self.report.write().unwrap().accrue_block(&block);
 			trace!(target: "client", "Imported #{} ({})", header.number(), header.hash());
 		}
@@ -393,12 +413,12 @@ impl<V> Client<V> where V: Verifier {
 			if !good_blocks.is_empty() && block_queue.queue_info().is_empty() {
 				io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks {
 					good: good_blocks,
-					bad: bad_blocks,
+					retracted: bad_blocks,
 				})).unwrap();
 			}
 		}
 
-		if self.chain_info().best_block_hash != original_best {
+		if self.chain_info().best_block_hash != original_best && self.sealing_enabled.load(atomic::Ordering::Relaxed) {
 			self.prepare_sealing();
 		}
@@ -481,7 +501,7 @@ impl<V> Client<V> where V: Verifier {
 			self.extra_data()
 		);
 
-		self.chain.read().unwrap().find_uncle_headers(&h).into_iter().foreach(|h| { b.push_uncle(h).unwrap(); });
+		self.chain.read().unwrap().find_uncle_headers(&h, self.engine.deref().deref().maximum_uncle_age()).unwrap().into_iter().take(self.engine.deref().deref().maximum_uncle_count()).foreach(|h| { b.push_uncle(h).unwrap(); });
 
 		// TODO: push transactions.
@@ -493,6 +513,8 @@ impl<V> Client<V> where V: Verifier {
 	/// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock.
 	pub fn sealing_block(&self) -> &Mutex<Option<ClosedBlock>> {
 		if self.sealing_block.lock().unwrap().is_none() {
+			self.sealing_enabled.store(true, atomic::Ordering::Relaxed);
+			// TODO: Above should be on a timer that resets after two blocks have arrived without being asked for.
 			self.prepare_sealing();
 		}
 		&self.sealing_block
@@ -564,6 +586,10 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
 		Self::block_hash(&chain, id).and_then(|hash| chain.block_details(&hash)).map(|d| d.total_difficulty)
 	}
 
+	fn nonce(&self, address: &Address) -> U256 {
+		self.state().nonce(address)
+	}
+
 	fn block_hash(&self, id: BlockId) -> Option<H256> {
 		let chain = self.chain.read().unwrap();
 		Self::block_hash(&chain, id)

View File

@@ -47,6 +47,8 @@ pub trait Engine : Sync + Send {
 	fn maximum_extra_data_size(&self) -> usize { decode(&self.spec().engine_params.get("maximumExtraDataSize").unwrap()) }
 	/// Maximum number of uncles a block is allowed to declare.
 	fn maximum_uncle_count(&self) -> usize { 2 }
+	/// The number of generations back that uncles can be.
+	fn maximum_uncle_age(&self) -> usize { 6 }
 	/// The nonce with which accounts begin.
 	fn account_start_nonce(&self) -> U256 { decode(&self.spec().engine_params.get("accountStartNonce").unwrap()) }
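`maximum_uncle_age` follows the same pattern as `maximum_uncle_count` above: a protocol constant exposed as an overridable trait default, which lets verification.rs drop its hard-coded `6` (see the verification changes below). A sketch of that trait-default pattern with stub types (`TestEngine` is hypothetical):

trait Engine {
    fn maximum_uncle_count(&self) -> usize { 2 } // uncles per block
    fn maximum_uncle_age(&self) -> usize { 6 }   // generations back an uncle may reach
}

struct TestEngine;
impl Engine for TestEngine {
    fn maximum_uncle_age(&self) -> usize { 1 }   // a chain spec may tighten the default
}

fn main() {
    assert_eq!(TestEngine.maximum_uncle_count(), 2); // inherited default
    assert_eq!(TestEngine.maximum_uncle_age(), 1);   // overridden
}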

View File

@@ -29,7 +29,7 @@ pub type BlockNumber = u64;
 /// which is non-specific.
 ///
 /// Doesn't do all that much on its own.
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Eq)]
 pub struct Header {
 	// TODO: make all private.
 	/// Parent hash.
@@ -70,6 +70,25 @@ pub struct Header {
 	pub bare_hash: RefCell<Option<H256>>,
 }
 
+impl PartialEq for Header {
+	fn eq(&self, c: &Header) -> bool {
+		self.parent_hash == c.parent_hash &&
+		self.timestamp == c.timestamp &&
+		self.number == c.number &&
+		self.author == c.author &&
+		self.transactions_root == c.transactions_root &&
+		self.uncles_hash == c.uncles_hash &&
+		self.extra_data == c.extra_data &&
+		self.state_root == c.state_root &&
+		self.receipts_root == c.receipts_root &&
+		self.log_bloom == c.log_bloom &&
+		self.gas_used == c.gas_used &&
+		self.gas_limit == c.gas_limit &&
+		self.difficulty == c.difficulty &&
+		self.seal == c.seal
+	}
+}
+
 impl Default for Header {
 	fn default() -> Self {
 		Header {
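The manual `PartialEq` exists because `Header` memoizes derived hashes in `RefCell` fields (such as `bare_hash` above): equality must depend only on the semantic fields, never on which caches happen to be populated. A reduced, stub-typed sketch of the idea:

use std::cell::RefCell;

#[derive(Debug, Clone, Eq)] // deriving Eq is sound once PartialEq ignores the cache
struct Header {
    number: u64,                     // stands in for all semantic fields
    bare_hash: RefCell<Option<u64>>, // memoized hash, excluded from eq()
}

impl PartialEq for Header {
    fn eq(&self, c: &Header) -> bool {
        self.number == c.number // compare semantic fields only
    }
}

fn main() {
    let a = Header { number: 1, bare_hash: RefCell::new(None) };
    let b = Header { number: 1, bare_hash: RefCell::new(Some(42)) };
    assert_eq!(a, b); // equal despite different cache states
}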

View File

@@ -30,7 +30,7 @@ pub enum SyncMessage {
 		/// Hashes of blocks imported to blockchain
 		good: Vec<H256>,
 		/// Hashes of blocks not imported to blockchain
-		bad: Vec<H256>,
+		retracted: Vec<H256>,
 	},
 	/// A block is ready
 	BlockVerified,

View File

@@ -14,6 +14,8 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity.  If not, see <http://www.gnu.org/licenses/>.
 
+use blockchain::BlockProvider;
+use engine::Engine;
 use error::Error;
 use header::Header;
 use super::Verifier;
@@ -22,6 +24,10 @@ use super::verification;
 pub struct CanonVerifier;
 
 impl Verifier for CanonVerifier {
+	fn verify_block_family(header: &Header, bytes: &[u8], engine: &Engine, bc: &BlockProvider) -> Result<(), Error> {
+		verification::verify_block_family(header, bytes, engine, bc)
+	}
+
 	fn verify_block_final(expected: &Header, got: &Header) -> Result<(), Error> {
 		verification::verify_block_final(expected, got)
 	}

View File

@@ -14,6 +14,8 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity.  If not, see <http://www.gnu.org/licenses/>.
 
+use blockchain::BlockProvider;
+use engine::Engine;
 use error::Error;
 use header::Header;
 use super::Verifier;
@@ -21,6 +23,10 @@ use super::Verifier;
 pub struct NoopVerifier;
 
 impl Verifier for NoopVerifier {
+	fn verify_block_family(_header: &Header, _bytes: &[u8], _engine: &Engine, _bc: &BlockProvider) -> Result<(), Error> {
+		Ok(())
+	}
+
 	fn verify_block_final(_expected: &Header, _got: &Header) -> Result<(), Error> {
 		Ok(())
 	}

View File

@@ -78,7 +78,7 @@ pub fn verify_block_unordered(header: Header, bytes: Bytes, engine: &Engine) ->
 }
 
 /// Phase 3 verification. Check block information against parent and uncles.
-pub fn verify_block_family<BC>(header: &Header, bytes: &[u8], engine: &Engine, bc: &BC) -> Result<(), Error> where BC: BlockProvider {
+pub fn verify_block_family(header: &Header, bytes: &[u8], engine: &Engine, bc: &BlockProvider) -> Result<(), Error> {
 	// TODO: verify timestamp
 	let parent = try!(bc.block_header(&header.parent_hash).ok_or_else(|| Error::from(BlockError::UnknownParent(header.parent_hash.clone()))));
 	try!(verify_parent(&header, &parent));
@@ -94,7 +94,7 @@ pub fn verify_block_family
 		excluded.insert(header.hash());
 		let mut hash = header.parent_hash.clone();
 		excluded.insert(hash.clone());
-		for _ in 0..6 {
+		for _ in 0..engine.maximum_uncle_age() {
 			match bc.block_details(&hash) {
 				Some(details) => {
 					excluded.insert(details.parent.clone());
@@ -121,7 +121,7 @@ pub fn verify_block_family
 			// (8 Invalid)
 			let depth = if header.number > uncle.number { header.number - uncle.number } else { 0 };
-			if depth > 6 {
+			if depth > engine.maximum_uncle_age() as u64 {
 				return Err(From::from(BlockError::UncleTooOld(OutOfBounds { min: Some(header.number - depth), max: Some(header.number - 1), found: uncle.number })));
 			}
 			else if depth < 1 {

View File

@@ -14,10 +14,13 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity.  If not, see <http://www.gnu.org/licenses/>.
 
+use blockchain::BlockProvider;
+use engine::Engine;
 use error::Error;
 use header::Header;
 
 /// Should be used to verify blocks.
 pub trait Verifier: Send + Sync {
+	fn verify_block_family(header: &Header, bytes: &[u8], engine: &Engine, bc: &BlockProvider) -> Result<(), Error>;
 	fn verify_block_final(expected: &Header, got: &Header) -> Result<(), Error>;
 }
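Note that the trait's methods take no `&self`: `Client<V>` calls `V::verify_block_family(...)` (see client.rs above), so the verifier is chosen statically at compile time and `NoopVerifier` lets tests skip family checks at zero runtime cost. A compressed sketch of the pattern with stub types:

#[derive(Debug)]
struct Error;

trait Verifier {
    fn verify_block_family(bytes: &[u8]) -> Result<(), Error>;
}

struct CanonVerifier;
impl Verifier for CanonVerifier {
    fn verify_block_family(bytes: &[u8]) -> Result<(), Error> {
        if bytes.is_empty() { Err(Error) } else { Ok(()) } // stand-in for real checks
    }
}

struct NoopVerifier;
impl Verifier for NoopVerifier {
    fn verify_block_family(_bytes: &[u8]) -> Result<(), Error> { Ok(()) }
}

fn import_block<V: Verifier>(bytes: &[u8]) -> bool {
    V::verify_block_family(bytes).is_ok() // static dispatch, no trait object needed
}

fn main() {
    assert!(!import_block::<CanonVerifier>(&[]));
    assert!(import_block::<NoopVerifier>(&[]));
}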

View File

@@ -65,6 +65,7 @@ Usage:
 Options:
   --chain CHAIN            Specify the blockchain type. CHAIN may be either a JSON chain specification file
                            or frontier, mainnet, morden, or testnet [default: frontier].
+  --archive                Client should not prune the state/storage trie.
   -d --db-path PATH        Specify the database & configuration directory path [default: $HOME/.parity]
   --keys-path PATH         Specify the path for JSON key files to be found [default: $HOME/.web3/keys]
@@ -102,6 +103,7 @@ struct Args {
 	flag_chain: String,
 	flag_db_path: String,
 	flag_keys_path: String,
+	flag_archive: bool,
 	flag_no_bootstrap: bool,
 	flag_listen_address: String,
 	flag_public_address: Option<String>,
@@ -312,6 +314,7 @@ impl Configuration {
 		let mut client_config = ClientConfig::default();
 		client_config.blockchain.pref_cache_size = self.args.flag_cache_pref_size;
 		client_config.blockchain.max_cache_size = self.args.flag_cache_max_size;
+		client_config.prefer_journal = !self.args.flag_archive;
 		client_config.queue.max_mem_use = self.args.flag_queue_max_size;
 		let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap();
 		let client = service.client().clone();
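The wiring inverts the flag: pruning (`prefer_journal`) stays on unless the user passes `--archive`. A dependency-free sketch of that inversion (hand-rolled argument scan, standing in for the docopt-parsed `Args` above):

#[derive(Default)]
struct ClientConfig { prefer_journal: bool }

fn configure(args: &[&str]) -> ClientConfig {
    let flag_archive = args.iter().any(|a| *a == "--archive");
    ClientConfig { prefer_journal: !flag_archive }
}

fn main() {
    assert!(configure(&["--chain", "frontier"]).prefer_journal); // journal DB by default
    assert!(!configure(&["--archive"]).prefer_journal);          // --archive opts out
}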

View File

@@ -17,6 +17,7 @@ time = "0.1.34"
 rand = "0.3.13"
 heapsize = "0.3"
 rustc-serialize = "0.3"
+rayon = "0.3.1"
 
 [features]
 default = []

View File

@@ -30,14 +30,17 @@
 ///
 
 use util::*;
+use rayon::prelude::*;
 use std::mem::{replace};
-use ethcore::views::{HeaderView};
+use ethcore::views::{HeaderView, BlockView};
 use ethcore::header::{BlockNumber, Header as BlockHeader};
-use ethcore::client::{BlockChainClient, BlockStatus, BlockId};
+use ethcore::client::{BlockChainClient, BlockStatus, BlockId, BlockChainInfo};
 use range_collection::{RangeCollection, ToUsize, FromUsize};
 use ethcore::error::*;
 use ethcore::block::Block;
+use ethcore::transaction::SignedTransaction;
 use io::SyncIo;
+use transaction_queue::TransactionQueue;
 use time;
 use super::SyncConfig;
@@ -209,6 +212,8 @@
 	max_download_ahead_blocks: usize,
 	/// Network ID
 	network_id: U256,
+	/// Transactions Queue
+	transaction_queue: Mutex<TransactionQueue>,
 }
 
 type RlpResponseResult = Result<Option<(PacketId, RlpStream)>, PacketDecodeError>;
@@ -234,6 +239,7 @@
 			last_send_block_number: 0,
 			max_download_ahead_blocks: max(MAX_HEADERS_TO_REQUEST, config.max_download_ahead_blocks),
 			network_id: config.network_id,
+			transaction_queue: Mutex::new(TransactionQueue::new()),
 		}
 	}
@@ -292,6 +298,7 @@
 		self.starting_block = 0;
 		self.highest_block = None;
 		self.have_common_block = false;
+		self.transaction_queue.lock().unwrap().clear();
 		self.starting_block = io.chain().chain_info().best_block_number;
 		self.state = SyncState::NotSynced;
 	}
@@ -475,7 +482,7 @@
 			peer.latest_number = Some(header.number());
 		}
 		// TODO: Decompose block and add to self.headers and self.bodies instead
-		if header.number == From::from(self.current_base_block() + 1) {
+		if header.number <= From::from(self.current_base_block() + 1) {
 			match io.chain().import_block(block_rlp.as_raw().to_vec()) {
 				Err(Error::Import(ImportError::AlreadyInChain)) => {
 					trace!(target: "sync", "New block already in chain {:?}", h);
@@ -484,7 +491,10 @@
 					trace!(target: "sync", "New block already queued {:?}", h);
 				},
 				Ok(_) => {
-					self.last_imported_block = Some(header.number);
+					if self.current_base_block() < header.number {
+						self.last_imported_block = Some(header.number);
+						self.remove_downloaded_blocks(header.number);
+					}
 					trace!(target: "sync", "New block queued {:?}", h);
 				},
 				Err(Error::Block(BlockError::UnknownParent(p))) => {
@@ -918,8 +928,16 @@
 		}
 	}
 
 	/// Called when peer sends us new transactions
-	fn on_peer_transactions(&mut self, _io: &mut SyncIo, _peer_id: PeerId, _r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
-		Ok(())
+	fn on_peer_transactions(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
+		let chain = io.chain();
+		let item_count = r.item_count();
+		trace!(target: "sync", "{} -> Transactions ({} entries)", peer_id, item_count);
+		let fetch_latest_nonce = |a : &Address| chain.nonce(a);
+		for i in 0..item_count {
+			let tx: SignedTransaction = try!(r.val_at(i));
+			self.transaction_queue.lock().unwrap().add(tx, &fetch_latest_nonce);
+		}
+		Ok(())
 	}
 
 	/// Send Status message
@@ -1156,9 +1174,7 @@
 	}
 
 	/// returns peer ids that have less blocks than our chain
-	fn get_lagging_peers(&mut self, io: &SyncIo) -> Vec<(PeerId, BlockNumber)> {
-		let chain = io.chain();
-		let chain_info = chain.chain_info();
+	fn get_lagging_peers(&mut self, chain_info: &BlockChainInfo, io: &SyncIo) -> Vec<(PeerId, BlockNumber)> {
 		let latest_hash = chain_info.best_block_hash;
 		let latest_number = chain_info.best_block_number;
 		self.peers.iter_mut().filter_map(|(&id, ref mut peer_info)|
@@ -1177,9 +1193,9 @@
 	}
 
 	/// propagates latest block to lagging peers
-	fn propagate_blocks(&mut self, local_best: &H256, best_number: BlockNumber, io: &mut SyncIo) -> usize {
+	fn propagate_blocks(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo) -> usize {
 		let updated_peers = {
-			let lagging_peers = self.get_lagging_peers(io);
+			let lagging_peers = self.get_lagging_peers(chain_info, io);
 
 			// sqrt(x)/x scaled to max u32
 			let fraction = (self.peers.len() as f64).powf(-0.5).mul(u32::max_value() as f64).round() as u32;
@@ -1196,30 +1212,30 @@
 		for peer_id in updated_peers {
 			let rlp = ChainSync::create_latest_block_rlp(io.chain());
 			self.send_packet(io, peer_id, NEW_BLOCK_PACKET, rlp);
-			self.peers.get_mut(&peer_id).unwrap().latest_hash = local_best.clone();
-			self.peers.get_mut(&peer_id).unwrap().latest_number = Some(best_number);
+			self.peers.get_mut(&peer_id).unwrap().latest_hash = chain_info.best_block_hash.clone();
+			self.peers.get_mut(&peer_id).unwrap().latest_number = Some(chain_info.best_block_number);
 			sent = sent + 1;
 		}
 		sent
 	}
 
 	/// propagates new known hashes to all peers
-	fn propagate_new_hashes(&mut self, local_best: &H256, best_number: BlockNumber, io: &mut SyncIo) -> usize {
-		let updated_peers = self.get_lagging_peers(io);
+	fn propagate_new_hashes(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo) -> usize {
+		let updated_peers = self.get_lagging_peers(chain_info, io);
 		let mut sent = 0;
-		let last_parent = HeaderView::new(&io.chain().block_header(BlockId::Hash(local_best.clone())).unwrap()).parent_hash();
+		let last_parent = HeaderView::new(&io.chain().block_header(BlockId::Hash(chain_info.best_block_hash.clone())).unwrap()).parent_hash();
 		for (peer_id, peer_number) in updated_peers {
 			let mut peer_best = self.peers.get(&peer_id).unwrap().latest_hash.clone();
-			if best_number - peer_number > MAX_PEERS_PROPAGATION as BlockNumber {
+			if chain_info.best_block_number - peer_number > MAX_PEERS_PROPAGATION as BlockNumber {
 				// If we think peer is too far behind just send one latest hash
 				peer_best = last_parent.clone();
 			}
-			sent = sent + match ChainSync::create_new_hashes_rlp(io.chain(), &peer_best, &local_best) {
+			sent = sent + match ChainSync::create_new_hashes_rlp(io.chain(), &peer_best, &chain_info.best_block_hash) {
 				Some(rlp) => {
 					{
 						let peer = self.peers.get_mut(&peer_id).unwrap();
-						peer.latest_hash = local_best.clone();
-						peer.latest_number = Some(best_number);
+						peer.latest_hash = chain_info.best_block_hash.clone();
+						peer.latest_number = Some(chain_info.best_block_number);
 					}
 					self.send_packet(io, peer_id, NEW_BLOCK_HASHES_PACKET, rlp);
 					1
@@ -1239,14 +1255,45 @@
 	pub fn chain_blocks_verified(&mut self, io: &mut SyncIo) {
 		let chain = io.chain().chain_info();
 		if (((chain.best_block_number as i64) - (self.last_send_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION {
-			let blocks = self.propagate_blocks(&chain.best_block_hash, chain.best_block_number, io);
-			let hashes = self.propagate_new_hashes(&chain.best_block_hash, chain.best_block_number, io);
+			let blocks = self.propagate_blocks(&chain, io);
+			let hashes = self.propagate_new_hashes(&chain, io);
 			if blocks != 0 || hashes != 0 {
 				trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes);
 			}
 		}
 		self.last_send_block_number = chain.best_block_number;
 	}
+
+	/// called when block is imported to chain, updates transactions queue
+	pub fn chain_new_blocks(&mut self, io: &SyncIo, good: &[H256], retracted: &[H256]) {
+		fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec<SignedTransaction> {
+			let block = chain
+				.block(BlockId::Hash(hash.clone()))
+				// Client should send message after commit to db and inserting to chain.
+				.expect("Expected in-chain blocks.");
+			let block = BlockView::new(&block);
+			block.transactions()
+		}
+
+		let chain = io.chain();
+		let good = good.par_iter().map(|h| fetch_transactions(chain, h));
+		let retracted = retracted.par_iter().map(|h| fetch_transactions(chain, h));
+
+		good.for_each(|txs| {
+			let mut transaction_queue = self.transaction_queue.lock().unwrap();
+			let hashes = txs.iter().map(|tx| tx.hash()).collect::<Vec<H256>>();
+			transaction_queue.remove_all(&hashes, |a| chain.nonce(a));
+		});
+		retracted.for_each(|txs| {
+			// populate sender
+			for tx in &txs {
+				let _sender = tx.sender();
+			}
+			let mut transaction_queue = self.transaction_queue.lock().unwrap();
+			transaction_queue.add_all(txs, |a| chain.nonce(a));
+		});
+	}
 }
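`chain_new_blocks` is the standard reorg bookkeeping for a pending-transaction pool: transactions included by newly canonical (`good`) blocks leave the queue, and transactions from `retracted` blocks are put back so they can be mined again (the `chain.nonce(a)` callbacks keep the queue's per-sender nonce view current). A toy, self-contained sketch of just that bookkeeping, with `u64` transaction hashes standing in for the real types:

use std::collections::HashSet;

#[derive(Default)]
struct ToyQueue { pending: HashSet<u64> }

impl ToyQueue {
    fn remove_all(&mut self, hashes: &[u64]) {
        for h in hashes { self.pending.remove(h); } // mined: drop from the queue
    }
    fn add_all(&mut self, hashes: Vec<u64>) {
        self.pending.extend(hashes);                // reorged out: queue again
    }
}

fn chain_new_blocks(queue: &mut ToyQueue, good: &[Vec<u64>], retracted: &[Vec<u64>]) {
    for txs in good { queue.remove_all(txs); }
    for txs in retracted { queue.add_all(txs.clone()); }
}

fn main() {
    let mut q = ToyQueue::default();
    q.add_all(vec![1, 2, 3]);
    chain_new_blocks(&mut q, &[vec![1, 2]], &[vec![4]]);
    assert!(!q.pending.contains(&1));
    assert!(q.pending.contains(&3) && q.pending.contains(&4));
}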
 #[cfg(test)]
@@ -1387,12 +1434,13 @@
 	#[test]
 	fn finds_lagging_peers() {
 		let mut client = TestBlockChainClient::new();
-		client.add_blocks(100, false);
+		client.add_blocks(100, EachBlockWith::Uncle);
 		let mut queue = VecDeque::new();
 		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(10));
+		let chain_info = client.chain_info();
 		let io = TestIo::new(&mut client, &mut queue, None);
 
-		let lagging_peers = sync.get_lagging_peers(&io);
+		let lagging_peers = sync.get_lagging_peers(&chain_info, &io);
 
 		assert_eq!(1, lagging_peers.len())
 	}
@@ -1400,7 +1448,7 @@
 	#[test]
 	fn calculates_tree_for_lagging_peer() {
 		let mut client = TestBlockChainClient::new();
-		client.add_blocks(15, false);
+		client.add_blocks(15, EachBlockWith::Uncle);
 
 		let start = client.block_hash_delta_minus(4);
 		let end = client.block_hash_delta_minus(2);
@@ -1417,14 +1465,13 @@
 	#[test]
 	fn sends_new_hashes_to_lagging_peer() {
 		let mut client = TestBlockChainClient::new();
-		client.add_blocks(100, false);
+		client.add_blocks(100, EachBlockWith::Uncle);
 		let mut queue = VecDeque::new();
 		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
-		let best_hash = client.chain_info().best_block_hash.clone();
-		let best_number = client.chain_info().best_block_number;
+		let chain_info = client.chain_info();
 		let mut io = TestIo::new(&mut client, &mut queue, None);
 
-		let peer_count = sync.propagate_new_hashes(&best_hash, best_number, &mut io);
+		let peer_count = sync.propagate_new_hashes(&chain_info, &mut io);
 
 		// 1 message should be sent
 		assert_eq!(1, io.queue.len());
@@ -1437,14 +1484,12 @@
 	#[test]
 	fn sends_latest_block_to_lagging_peer() {
 		let mut client = TestBlockChainClient::new();
-		client.add_blocks(100, false);
+		client.add_blocks(100, EachBlockWith::Uncle);
 		let mut queue = VecDeque::new();
 		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
-		let best_hash = client.chain_info().best_block_hash.clone();
-		let best_number = client.chain_info().best_block_number;
+		let chain_info = client.chain_info();
 		let mut io = TestIo::new(&mut client, &mut queue, None);
 
-		let peer_count = sync.propagate_blocks(&best_hash, best_number, &mut io);
+		let peer_count = sync.propagate_blocks(&chain_info, &mut io);
 
 		// 1 message should be sent
 		assert_eq!(1, io.queue.len());
@@ -1457,7 +1502,7 @@
 	#[test]
 	fn handles_peer_new_block_mallformed() {
 		let mut client = TestBlockChainClient::new();
-		client.add_blocks(10, false);
+		client.add_blocks(10, EachBlockWith::Uncle);
 
 		let block_data = get_dummy_block(11, client.chain_info().best_block_hash);
@@ -1475,7 +1520,7 @@
 	#[test]
 	fn handles_peer_new_block() {
 		let mut client = TestBlockChainClient::new();
-		client.add_blocks(10, false);
+		client.add_blocks(10, EachBlockWith::Uncle);
 
 		let block_data = get_dummy_blocks(11, client.chain_info().best_block_hash);
@@ -1493,7 +1538,7 @@
 	#[test]
 	fn handles_peer_new_block_empty() {
 		let mut client = TestBlockChainClient::new();
-		client.add_blocks(10, false);
+		client.add_blocks(10, EachBlockWith::Uncle);
 		let mut queue = VecDeque::new();
 		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
 		let mut io = TestIo::new(&mut client, &mut queue, None);
@@ -1509,7 +1554,7 @@
 	#[test]
 	fn handles_peer_new_hashes() {
 		let mut client = TestBlockChainClient::new();
-		client.add_blocks(10, false);
+		client.add_blocks(10, EachBlockWith::Uncle);
 		let mut queue = VecDeque::new();
 		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
 		let mut io = TestIo::new(&mut client, &mut queue, None);
@@ -1525,7 +1570,7 @@
 	#[test]
 	fn handles_peer_new_hashes_empty() {
 		let mut client = TestBlockChainClient::new();
-		client.add_blocks(10, false);
+		client.add_blocks(10, EachBlockWith::Uncle);
 		let mut queue = VecDeque::new();
 		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
 		let mut io = TestIo::new(&mut client, &mut queue, None);
@@ -1543,14 +1588,13 @@
 	#[test]
 	fn hashes_rlp_mutually_acceptable() {
 		let mut client = TestBlockChainClient::new();
-		client.add_blocks(100, false);
+		client.add_blocks(100, EachBlockWith::Uncle);
 		let mut queue = VecDeque::new();
 		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
-		let best_hash = client.chain_info().best_block_hash.clone();
-		let best_number = client.chain_info().best_block_number;
+		let chain_info = client.chain_info();
 		let mut io = TestIo::new(&mut client, &mut queue, None);
 
-		sync.propagate_new_hashes(&best_hash, best_number, &mut io);
+		sync.propagate_new_hashes(&chain_info, &mut io);
 
 		let data = &io.queue[0].data.clone();
 		let result = sync.on_peer_new_hashes(&mut io, 0, &UntrustedRlp::new(&data));
@@ -1562,24 +1606,50 @@
 	#[test]
 	fn block_rlp_mutually_acceptable() {
 		let mut client = TestBlockChainClient::new();
-		client.add_blocks(100, false);
+		client.add_blocks(100, EachBlockWith::Uncle);
 		let mut queue = VecDeque::new();
 		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
-		let best_hash = client.chain_info().best_block_hash.clone();
-		let best_number = client.chain_info().best_block_number;
+		let chain_info = client.chain_info();
 		let mut io = TestIo::new(&mut client, &mut queue, None);
 
-		sync.propagate_blocks(&best_hash, best_number, &mut io);
+		sync.propagate_blocks(&chain_info, &mut io);
 
 		let data = &io.queue[0].data.clone();
 		let result = sync.on_peer_new_block(&mut io, 0, &UntrustedRlp::new(&data));
 
 		assert!(result.is_ok());
 	}
+
+	#[test]
+	fn should_add_transactions_to_queue() {
+		// given
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(98, EachBlockWith::Uncle);
+		client.add_blocks(1, EachBlockWith::UncleAndTransaction);
+		client.add_blocks(1, EachBlockWith::Transaction);
+		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5));
+
+		let good_blocks = vec![client.block_hash_delta_minus(2)];
+		let retracted_blocks = vec![client.block_hash_delta_minus(1)];
+
+		let mut queue = VecDeque::new();
+		let io = TestIo::new(&mut client, &mut queue, None);
+
+		// when
+		sync.chain_new_blocks(&io, &[], &good_blocks);
+		assert_eq!(sync.transaction_queue.lock().unwrap().status().future, 0);
+		assert_eq!(sync.transaction_queue.lock().unwrap().status().pending, 1);
+		sync.chain_new_blocks(&io, &good_blocks, &retracted_blocks);
+
+		// then
+		let status = sync.transaction_queue.lock().unwrap().status();
+		assert_eq!(status.pending, 1);
+		assert_eq!(status.future, 0);
+	}
 	#[test]
 	fn returns_requested_block_headers() {
 		let mut client = TestBlockChainClient::new();
-		client.add_blocks(100, false);
+		client.add_blocks(100, EachBlockWith::Uncle);
 		let mut queue = VecDeque::new();
 		let io = TestIo::new(&mut client, &mut queue, None);
@@ -1603,7 +1673,7 @@
 	#[test]
 	fn returns_requested_block_headers_reverse() {
 		let mut client = TestBlockChainClient::new();
-		client.add_blocks(100, false);
+		client.add_blocks(100, EachBlockWith::Uncle);
 		let mut queue = VecDeque::new();
 		let io = TestIo::new(&mut client, &mut queue, None);

View File

@@ -54,6 +54,7 @@ extern crate ethcore;
 extern crate env_logger;
 extern crate time;
 extern crate rand;
+extern crate rayon;
 #[macro_use]
 extern crate heapsize;
@@ -70,8 +71,7 @@ use io::NetSyncIo;
 mod chain;
 mod io;
 mod range_collection;
-// TODO [todr] Made public to suppress dead code warnings
-pub mod transaction_queue;
+mod transaction_queue;
 
 #[cfg(test)]
 mod tests;
@@ -153,8 +153,14 @@
 	}
 
 	fn message(&self, io: &NetworkContext<SyncMessage>, message: &SyncMessage) {
-		if let SyncMessage::BlockVerified = *message {
-			self.sync.write().unwrap().chain_blocks_verified(&mut NetSyncIo::new(io, self.chain.deref()));
+		match *message {
+			SyncMessage::BlockVerified => {
+				self.sync.write().unwrap().chain_blocks_verified(&mut NetSyncIo::new(io, self.chain.deref()));
+			},
+			SyncMessage::NewChainBlocks { ref good, ref retracted } => {
+				let sync_io = NetSyncIo::new(io, self.chain.deref());
+				self.sync.write().unwrap().chain_new_blocks(&sync_io, good, retracted);
+			}
 		}
 	}
 }

View File

@ -24,8 +24,8 @@ use super::helpers::*;
fn two_peers() { fn two_peers() {
::env_logger::init().ok(); ::env_logger::init().ok();
let mut net = TestNet::new(3); let mut net = TestNet::new(3);
net.peer_mut(1).chain.add_blocks(1000, false); net.peer_mut(1).chain.add_blocks(1000, EachBlockWith::Uncle);
net.peer_mut(2).chain.add_blocks(1000, false); net.peer_mut(2).chain.add_blocks(1000, EachBlockWith::Uncle);
net.sync(); net.sync();
assert!(net.peer(0).chain.block(BlockId::Number(1000)).is_some()); assert!(net.peer(0).chain.block(BlockId::Number(1000)).is_some());
assert_eq!(net.peer(0).chain.blocks.read().unwrap().deref(), net.peer(1).chain.blocks.read().unwrap().deref()); assert_eq!(net.peer(0).chain.blocks.read().unwrap().deref(), net.peer(1).chain.blocks.read().unwrap().deref());
@ -35,8 +35,8 @@ fn two_peers() {
fn status_after_sync() { fn status_after_sync() {
::env_logger::init().ok(); ::env_logger::init().ok();
let mut net = TestNet::new(3); let mut net = TestNet::new(3);
net.peer_mut(1).chain.add_blocks(1000, false); net.peer_mut(1).chain.add_blocks(1000, EachBlockWith::Uncle);
net.peer_mut(2).chain.add_blocks(1000, false); net.peer_mut(2).chain.add_blocks(1000, EachBlockWith::Uncle);
net.sync(); net.sync();
let status = net.peer(0).sync.status(); let status = net.peer(0).sync.status();
assert_eq!(status.state, SyncState::Idle); assert_eq!(status.state, SyncState::Idle);
@ -45,8 +45,8 @@ fn status_after_sync() {
#[test] #[test]
fn takes_few_steps() { fn takes_few_steps() {
let mut net = TestNet::new(3); let mut net = TestNet::new(3);
net.peer_mut(1).chain.add_blocks(100, false); net.peer_mut(1).chain.add_blocks(100, EachBlockWith::Uncle);
net.peer_mut(2).chain.add_blocks(100, false); net.peer_mut(2).chain.add_blocks(100, EachBlockWith::Uncle);
let total_steps = net.sync(); let total_steps = net.sync();
assert!(total_steps < 7); assert!(total_steps < 7);
} }
@ -56,8 +56,9 @@ fn empty_blocks() {
::env_logger::init().ok(); ::env_logger::init().ok();
let mut net = TestNet::new(3); let mut net = TestNet::new(3);
for n in 0..200 { for n in 0..200 {
net.peer_mut(1).chain.add_blocks(5, n % 2 == 0); let with = if n % 2 == 0 { EachBlockWith::Nothing } else { EachBlockWith::Uncle };
net.peer_mut(2).chain.add_blocks(5, n % 2 == 0); net.peer_mut(1).chain.add_blocks(5, with.clone());
net.peer_mut(2).chain.add_blocks(5, with);
} }
net.sync(); net.sync();
assert!(net.peer(0).chain.block(BlockId::Number(1000)).is_some()); assert!(net.peer(0).chain.block(BlockId::Number(1000)).is_some());
@ -68,14 +69,14 @@ fn empty_blocks() {
fn forked() { fn forked() {
::env_logger::init().ok(); ::env_logger::init().ok();
let mut net = TestNet::new(3); let mut net = TestNet::new(3);
net.peer_mut(0).chain.add_blocks(300, false); net.peer_mut(0).chain.add_blocks(300, EachBlockWith::Uncle);
net.peer_mut(1).chain.add_blocks(300, false); net.peer_mut(1).chain.add_blocks(300, EachBlockWith::Uncle);
net.peer_mut(2).chain.add_blocks(300, false); net.peer_mut(2).chain.add_blocks(300, EachBlockWith::Uncle);
net.peer_mut(0).chain.add_blocks(100, true); //fork net.peer_mut(0).chain.add_blocks(100, EachBlockWith::Nothing); //fork
net.peer_mut(1).chain.add_blocks(200, false); net.peer_mut(1).chain.add_blocks(200, EachBlockWith::Uncle);
net.peer_mut(2).chain.add_blocks(200, false); net.peer_mut(2).chain.add_blocks(200, EachBlockWith::Uncle);
net.peer_mut(1).chain.add_blocks(100, false); //fork between 1 and 2 net.peer_mut(1).chain.add_blocks(100, EachBlockWith::Uncle); //fork between 1 and 2
net.peer_mut(2).chain.add_blocks(10, true); net.peer_mut(2).chain.add_blocks(10, EachBlockWith::Nothing);
// peer 1 has the best chain of 601 blocks // peer 1 has the best chain of 601 blocks
let peer1_chain = net.peer(1).chain.numbers.read().unwrap().clone(); let peer1_chain = net.peer(1).chain.numbers.read().unwrap().clone();
net.sync(); net.sync();
@ -87,8 +88,8 @@ fn forked() {
#[test] #[test]
fn restart() { fn restart() {
let mut net = TestNet::new(3); let mut net = TestNet::new(3);
net.peer_mut(1).chain.add_blocks(1000, false); net.peer_mut(1).chain.add_blocks(1000, EachBlockWith::Uncle);
net.peer_mut(2).chain.add_blocks(1000, false); net.peer_mut(2).chain.add_blocks(1000, EachBlockWith::Uncle);
net.sync_steps(8); net.sync_steps(8);
@ -109,8 +110,8 @@ fn status_empty() {
#[test] #[test]
fn status_packet() { fn status_packet() {
let mut net = TestNet::new(2); let mut net = TestNet::new(2);
net.peer_mut(0).chain.add_blocks(100, false); net.peer_mut(0).chain.add_blocks(100, EachBlockWith::Uncle);
net.peer_mut(1).chain.add_blocks(1, false); net.peer_mut(1).chain.add_blocks(1, EachBlockWith::Uncle);
net.start(); net.start();
@ -123,10 +124,10 @@ fn status_packet() {
#[test] #[test]
fn propagate_hashes() { fn propagate_hashes() {
let mut net = TestNet::new(6); let mut net = TestNet::new(6);
net.peer_mut(1).chain.add_blocks(10, false); net.peer_mut(1).chain.add_blocks(10, EachBlockWith::Uncle);
net.sync(); net.sync();
net.peer_mut(0).chain.add_blocks(10, false); net.peer_mut(0).chain.add_blocks(10, EachBlockWith::Uncle);
net.sync(); net.sync();
net.trigger_block_verified(0); //first event just sets the marker net.trigger_block_verified(0); //first event just sets the marker
net.trigger_block_verified(0); net.trigger_block_verified(0);
@@ -149,10 +150,10 @@ fn propagate_hashes() {
 #[test]
 fn propagate_blocks() {
 	let mut net = TestNet::new(2);
-	net.peer_mut(1).chain.add_blocks(10, false);
+	net.peer_mut(1).chain.add_blocks(10, EachBlockWith::Uncle);
 	net.sync();
-	net.peer_mut(0).chain.add_blocks(10, false);
+	net.peer_mut(0).chain.add_blocks(10, EachBlockWith::Uncle);
 	net.trigger_block_verified(0); //first event just sets the marker
 	net.trigger_block_verified(0);
@@ -164,7 +165,7 @@ fn propagate_blocks() {
 #[test]
 fn restart_on_malformed_block() {
 	let mut net = TestNet::new(2);
-	net.peer_mut(1).chain.add_blocks(10, false);
+	net.peer_mut(1).chain.add_blocks(10, EachBlockWith::Uncle);
 	net.peer_mut(1).chain.corrupt_block(6);
 	net.sync_steps(10);

View File

@@ -22,7 +22,7 @@ use io::SyncIo;
 use chain::ChainSync;
 use ::SyncConfig;
 use ethcore::receipt::Receipt;
-use ethcore::transaction::LocalizedTransaction;
+use ethcore::transaction::{LocalizedTransaction, Transaction, Action};
 use ethcore::filter::Filter;
 use ethcore::log_entry::LocalizedLogEntry;
@@ -34,6 +34,14 @@ pub struct TestBlockChainClient {
 	pub difficulty: RwLock<U256>,
 }

+#[derive(Clone)]
+pub enum EachBlockWith {
+	Nothing,
+	Uncle,
+	Transaction,
+	UncleAndTransaction
+}
+
 impl TestBlockChainClient {
 	pub fn new() -> TestBlockChainClient {
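A quick orientation on the new helper API before the larger hunk below: add_blocks now takes this enum instead of a bool. A minimal sketch of a call site, assuming a TestBlockChainClient in scope as in the tests above:

let mut client = TestBlockChainClient::new();
client.add_blocks(10, EachBlockWith::Uncle);              // each block carries one uncle
client.add_blocks(5, EachBlockWith::UncleAndTransaction); // one uncle plus one signed transaction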
@@ -44,30 +52,53 @@ impl TestBlockChainClient {
 			last_hash: RwLock::new(H256::new()),
 			difficulty: RwLock::new(From::from(0)),
 		};
-		client.add_blocks(1, true); // add genesis block
+		client.add_blocks(1, EachBlockWith::Nothing); // add genesis block
 		client.genesis_hash = client.last_hash.read().unwrap().clone();
 		client
 	}

-	pub fn add_blocks(&mut self, count: usize, empty: bool) {
+	pub fn add_blocks(&mut self, count: usize, with: EachBlockWith) {
 		let len = self.numbers.read().unwrap().len();
 		for n in len..(len + count) {
 			let mut header = BlockHeader::new();
 			header.difficulty = From::from(n);
 			header.parent_hash = self.last_hash.read().unwrap().clone();
 			header.number = n as BlockNumber;
-			let mut uncles = RlpStream::new_list(if empty {0} else {1});
-			if !empty {
-				let mut uncle_header = BlockHeader::new();
-				uncle_header.difficulty = From::from(n);
-				uncle_header.parent_hash = self.last_hash.read().unwrap().clone();
-				uncle_header.number = n as BlockNumber;
-				uncles.append(&uncle_header);
-				header.uncles_hash = uncles.as_raw().sha3();
-			}
+			let uncles = match with {
+				EachBlockWith::Uncle | EachBlockWith::UncleAndTransaction => {
+					let mut uncles = RlpStream::new_list(1);
+					let mut uncle_header = BlockHeader::new();
+					uncle_header.difficulty = From::from(n);
+					uncle_header.parent_hash = self.last_hash.read().unwrap().clone();
+					uncle_header.number = n as BlockNumber;
+					uncles.append(&uncle_header);
+					header.uncles_hash = uncles.as_raw().sha3();
+					uncles
+				},
+				_ => RlpStream::new_list(0)
+			};
+			let txs = match with {
+				EachBlockWith::Transaction | EachBlockWith::UncleAndTransaction => {
+					let mut txs = RlpStream::new_list(1);
+					let keypair = KeyPair::create().unwrap();
+					let tx = Transaction {
+						action: Action::Create,
+						value: U256::from(100),
+						data: "3331600055".from_hex().unwrap(),
+						gas: U256::from(100_000),
+						gas_price: U256::one(),
+						nonce: U256::zero()
+					};
+					let signed_tx = tx.sign(&keypair.secret());
+					txs.append(&signed_tx);
+					txs.out()
+				},
+				_ => rlp::NULL_RLP.to_vec()
+			};
+
 			let mut rlp = RlpStream::new_list(3);
 			rlp.append(&header);
-			rlp.append_raw(&rlp::NULL_RLP, 1);
+			rlp.append_raw(&txs, 1);
 			rlp.append_raw(uncles.as_raw(), 1);
 			self.import_block(rlp.as_raw().to_vec()).unwrap();
 		}
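The hard-coded data field "3331600055" above is EVM init code. Decoded by hand against the standard opcode table (the decoding is mine, not stated in the diff), it stores the creating account's balance at storage slot 0:

fn main() {
    // 0x33 CALLER  -- push the sender's address
    // 0x31 BALANCE -- replace it with that account's balance
    // 0x60 0x00    -- PUSH1 0x00 (the storage key)
    // 0x55 SSTORE  -- pops key, then value: slot 0 <- balance
    let init_code = [0x33u8, 0x31, 0x60, 0x00, 0x55];
    assert_eq!(init_code.len(), 5);
}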
@@ -105,10 +136,14 @@ impl BlockChainClient for TestBlockChainClient {
 		Some(U256::zero())
 	}

-	fn block_hash(&self, id: BlockId) -> Option<H256> {
+	fn block_hash(&self, _id: BlockId) -> Option<H256> {
 		unimplemented!();
 	}

+	fn nonce(&self, _address: &Address) -> U256 {
+		U256::zero()
+	}
+
 	fn code(&self, _address: &Address) -> Option<Bytes> {
 		unimplemented!();
 	}

View File

@@ -219,19 +219,19 @@ impl TransactionQueue {
 	/// Removes all transactions identified by hashes given in slice
 	///
 	/// If gap is introduced marks subsequent transactions as future
-	pub fn remove_all<T>(&mut self, txs: &[H256], fetch_nonce: T)
+	pub fn remove_all<T>(&mut self, transaction_hashes: &[H256], fetch_nonce: T)
 		where T: Fn(&Address) -> U256 {
-		for tx in txs {
-			self.remove(&tx, &fetch_nonce);
+		for hash in transaction_hashes {
+			self.remove(&hash, &fetch_nonce);
 		}
 	}

 	/// Removes transaction identified by hashes from queue.
 	///
 	/// If gap is introduced marks subsequent transactions as future
-	pub fn remove<T>(&mut self, hash: &H256, fetch_nonce: &T)
+	pub fn remove<T>(&mut self, transaction_hash: &H256, fetch_nonce: &T)
 		where T: Fn(&Address) -> U256 {
-		let transaction = self.by_hash.remove(hash);
+		let transaction = self.by_hash.remove(transaction_hash);
 		if transaction.is_none() {
 			// We don't know this transaction
 			return;
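For context, `fetch_nonce` is a closure mapping a sender address to its latest committed nonce. A sketch of the intended call shape — `queue`, `chain`, and `mined_hashes` are hypothetical stand-ins, though `chain.nonce` mirrors the `nonce` method added to the test client earlier in this diff:

let fetch_nonce = |a: &Address| chain.nonce(a); // latest on-chain nonce per sender
queue.remove_all(&mined_hashes, fetch_nonce);   // drop mined txs; gapped ones move to future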
@@ -240,7 +240,6 @@ impl TransactionQueue {
 		let sender = transaction.sender();
 		let nonce = transaction.nonce();

-		println!("Removing tx: {:?}", transaction.transaction);
 		// Remove from future
 		self.future.drop(&sender, &nonce);
@@ -266,7 +265,6 @@ impl TransactionQueue {
 				// Goes to future or is removed
 				let order = self.current.drop(&sender, &k).unwrap();
 				if k >= current_nonce {
-					println!("Moving to future: {:?}", order);
 					self.future.insert(sender.clone(), k, order.update_height(k, current_nonce));
 				} else {
 					self.by_hash.remove(&order.hash);
@@ -276,7 +274,7 @@ impl TransactionQueue {
 			// And now lets check if there is some chain of transactions in future
 			// that should be placed in current
-			if let Some(new_current_top) = self.move_future_txs(sender.clone(), current_nonce - U256::one(), current_nonce) {
+			if let Some(new_current_top) = self.move_future_txs(sender.clone(), current_nonce, current_nonce) {
 				self.last_nonces.insert(sender, new_current_top);
 			}
 		}
@@ -299,9 +297,7 @@ impl TransactionQueue {
 		self.last_nonces.clear();
 	}

-	fn move_future_txs(&mut self, address: Address, current_nonce: U256, first_nonce: U256) -> Option<U256> {
-		println!("Moving from future for: {:?} base: {:?}", current_nonce, first_nonce);
-		let mut current_nonce = current_nonce + U256::one();
+	fn move_future_txs(&mut self, address: Address, mut current_nonce: U256, first_nonce: U256) -> Option<U256> {
 		{
 			let by_nonce = self.future.by_address.row_mut(&address);
 			if let None = by_nonce {
@@ -312,7 +308,6 @@ impl TransactionQueue {
 				// remove also from priority and hash
 				self.future.by_priority.remove(&order);
 				// Put to current
-				println!("Moved: {:?}", order);
 				let order = order.update_height(current_nonce.clone(), first_nonce);
 				self.current.insert(address.clone(), current_nonce, order);
 				current_nonce = current_nonce + U256::one();
@@ -333,7 +328,6 @@ impl TransactionQueue {
 			.cloned()
 			.map_or_else(|| fetch_nonce(&address), |n| n + U256::one());

-		println!("Expected next: {:?}, got: {:?}", next_nonce, nonce);
 		// Check height
 		if nonce > next_nonce {
 			let order = TransactionOrder::for_transaction(&tx, next_nonce);
@@ -345,6 +339,7 @@ impl TransactionQueue {
 			return;
 		} else if next_nonce > nonce {
 			// Droping transaction
+			trace!(target: "sync", "Dropping transaction with nonce: {} - expecting: {}", nonce, next_nonce);
 			return;
 		}
@@ -356,7 +351,7 @@ impl TransactionQueue {
 		// Insert to current
 		self.current.insert(address.clone(), nonce, order);
 		// But maybe there are some more items waiting in future?
-		let new_last_nonce = self.move_future_txs(address.clone(), nonce, base_nonce);
+		let new_last_nonce = self.move_future_txs(address.clone(), nonce + U256::one(), base_nonce);
 		self.last_nonces.insert(address.clone(), new_last_nonce.unwrap_or(nonce));
 		// Enforce limit
 		self.current.enforce_limit(&self.by_hash);
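This call-site change and the one at the `remove` site above are two halves of the same fix: `move_future_txs` no longer adds one to its `current_nonce` argument internally (it now takes it `mut` and uses it as-is), so each caller passes the first nonce expected from `future` explicitly. In miniature, with plain u64 standing in for U256:

fn main() {
    let inserted_nonce: u64 = 4;           // nonce just placed into `current`
    let first_future = inserted_nonce + 1; // previously the callee added the 1 itself
    assert_eq!(first_future, 5);           // now the caller passes 5 explicitly
}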

View File

@@ -28,7 +28,7 @@ extern crate ethcore_util;
 extern crate rand;

 use test::{Bencher, black_box};
-use ethcore_util::uint::*;
+use ethcore_util::numbers::*;

 #[bench]
 fn u256_add(b: &mut Bencher) {

View File

@@ -28,7 +28,7 @@ extern crate ethcore_util;
 use test::Bencher;
 use std::str::FromStr;
 use ethcore_util::rlp::*;
-use ethcore_util::uint::U256;
+use ethcore_util::numbers::U256;

 #[bench]
 fn bench_stream_u64_value(b: &mut Bencher) {

View File

@@ -25,7 +25,10 @@ use kvdb::{Database, DBTransaction, DatabaseConfig};
 use std::env;

 /// Implementation of the HashDB trait for a disk-backed database with a memory overlay
-/// and latent-removal semantics.
+/// and, possibly, latent-removal semantics.
+///
+/// If `counters` is `None`, then it behaves exactly like OverlayDB. If not it behaves
+/// differently:
 ///
 /// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
 /// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
@@ -34,7 +37,7 @@ use std::env;
 pub struct JournalDB {
 	overlay: MemoryDB,
 	backing: Arc<Database>,
-	counters: Arc<RwLock<HashMap<H256, i32>>>,
+	counters: Option<Arc<RwLock<HashMap<H256, i32>>>>,
 }

 impl Clone for JournalDB {
@@ -48,10 +51,11 @@ impl Clone for JournalDB {
 }

 // all keys must be at least 12 bytes
 const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ];
 const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ];
-const DB_VERSION: u32 = 3;
+const DB_VERSION : u32 = 3;
+const DB_VERSION_NO_JOURNAL : u32 = 3 + 256;

 const PADDING : [u8; 10] = [ 0u8; 10 ];
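A note on the constants: the journal-free layout is stamped 3 + 256 = 259, so the stored version, not the caller's preference, determines the mode when an existing database is reopened (any other stamp panics in the match below). The arithmetic, trivially checked:

fn main() {
    const DB_VERSION: u32 = 3;
    const DB_VERSION_NO_JOURNAL: u32 = 3 + 256;
    assert_eq!(DB_VERSION_NO_JOURNAL, 259);
    assert!(DB_VERSION != DB_VERSION_NO_JOURNAL); // the two layouts cannot be confused
}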
@@ -59,25 +63,38 @@ impl JournalDB {
 	/// Create a new instance from file
 	pub fn new(path: &str) -> JournalDB {
+		Self::from_prefs(path, true)
+	}
+
+	/// Create a new instance from file
+	pub fn from_prefs(path: &str, prefer_journal: bool) -> JournalDB {
 		let opts = DatabaseConfig {
 			prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix
 		};
 		let backing = Database::open(&opts, path).unwrap_or_else(|e| {
 			panic!("Error opening state db: {}", e);
 		});
+		let with_journal;
 		if !backing.is_empty() {
 			match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::<u32>(&v))) {
-				Ok(Some(DB_VERSION)) => {},
+				Ok(Some(DB_VERSION)) => { with_journal = true; },
+				Ok(Some(DB_VERSION_NO_JOURNAL)) => { with_journal = false; },
 				v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v)
 			}
 		} else {
-			backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database");
+			backing.put(&VERSION_KEY, &encode(&(if prefer_journal { DB_VERSION } else { DB_VERSION_NO_JOURNAL }))).expect("Error writing version to database");
+			with_journal = prefer_journal;
 		}
-		let counters = JournalDB::read_counters(&backing);
+
+		let counters = if with_journal {
+			Some(Arc::new(RwLock::new(JournalDB::read_counters(&backing))))
+		} else {
+			None
+		};
 		JournalDB {
 			overlay: MemoryDB::new(),
 			backing: Arc::new(backing),
-			counters: Arc::new(RwLock::new(counters)),
+			counters: counters,
 		}
 	}
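A minimal sketch of the resulting constructor pair (paths hypothetical, import lines omitted):

let journaled = JournalDB::new("/tmp/state");                   // same as from_prefs(path, true)
let archive = JournalDB::from_prefs("/tmp/state-plain", false); // no journal, counters == None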
@@ -94,9 +111,47 @@ impl JournalDB {
 		self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none()
 	}

+	/// Commit all recent insert operations.
+	pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
+		let have_counters = self.counters.is_some();
+		if have_counters {
+			self.commit_with_counters(now, id, end)
+		} else {
+			self.commit_without_counters()
+		}
+	}
+
+	/// Drain the overlay and place it into a batch for the DB.
+	fn batch_overlay_insertions(overlay: &mut MemoryDB, batch: &DBTransaction) -> usize {
+		let mut inserts = 0usize;
+		let mut deletes = 0usize;
+		for i in overlay.drain().into_iter() {
+			let (key, (value, rc)) = i;
+			if rc > 0 {
+				assert!(rc == 1);
+				batch.put(&key.bytes(), &value).expect("Low-level database error. Some issue with your hard disk?");
+				inserts += 1;
+			}
+			if rc < 0 {
+				assert!(rc == -1);
+				deletes += 1;
+			}
+		}
+		trace!("commit: Inserted {}, Deleted {} nodes", inserts, deletes);
+		inserts + deletes
+	}
+
+	/// Just commit the overlay into the backing DB.
+	fn commit_without_counters(&mut self) -> Result<u32, UtilError> {
+		let batch = DBTransaction::new();
+		let ret = Self::batch_overlay_insertions(&mut self.overlay, &batch);
+		try!(self.backing.write(batch));
+		Ok(ret as u32)
+	}
+
 	/// Commit all recent insert operations and historical removals from the old era
 	/// to the backing database.
-	pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
+	fn commit_with_counters(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
 		// journal format:
 		// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ]
 		// [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ]
@@ -121,21 +176,30 @@ impl JournalDB {
 		// and the key is safe to delete.

 		// record new commit's details.
-		debug!("commit: #{} ({}), end era: {:?}", now, id, end);
+		trace!("commit: #{} ({}), end era: {:?}", now, id, end);
+		let mut counters = self.counters.as_ref().unwrap().write().unwrap();
 		let batch = DBTransaction::new();
-		let mut counters = self.counters.write().unwrap();
 		{
 			let mut index = 0usize;
 			let mut last;

-			while try!(self.backing.get({
-				let mut r = RlpStream::new_list(3);
-				r.append(&now);
-				r.append(&index);
-				r.append(&&PADDING[..]);
-				last = r.drain();
-				&last
-			})).is_some() {
+			while {
+				let record = try!(self.backing.get({
+					let mut r = RlpStream::new_list(3);
+					r.append(&now);
+					r.append(&index);
+					r.append(&&PADDING[..]);
+					last = r.drain();
+					&last
+				}));
+				match record {
+					Some(r) => {
+						assert!(&Rlp::new(&r).val_at::<H256>(0) != id);
+						true
+					},
+					None => false,
+				}
+			} {
 				index += 1;
 			}
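The loop is rewritten from `while try!(...).is_some()` to a block-valued condition so the fetched record can be bound and sanity-checked (the same commit id must not already be journalled for this era) before deciding to continue. The control shape, reduced to std-only Rust:

fn main() {
    let records = [Some(1), Some(2), None];
    let mut index = 0usize;
    while {
        // the condition is a block: bind, inspect, then decide whether to loop
        match records[index] {
            Some(r) => { assert!(r != 99); true },
            None => false,
        }
    } {
        index += 1;
    }
    assert_eq!(index, 2);
}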
@@ -181,6 +245,7 @@ impl JournalDB {
 					trace!("Purging nodes inserted in non-canon: {:?}", inserts);
 					to_remove.append(&mut inserts);
 				}
+				trace!("commit: Delete journal for time #{}.{}: {}, (canon was {}): {} entries", end_era, index, rlp.val_at::<H256>(0), canon_id, to_remove.len());
 				try!(batch.delete(&last));
 				index += 1;
 			}
@@ -188,33 +253,18 @@ impl JournalDB {
 			let canon_inserts = canon_inserts.drain(..).collect::<HashSet<_>>();
 			// Purge removed keys if they are not referenced and not re-inserted in the canon commit
 			let mut deletes = 0;
+			trace!("Purging filtered nodes: {:?}", to_remove.iter().filter(|h| !counters.contains_key(h) && !canon_inserts.contains(h)).collect::<Vec<_>>());
 			for h in to_remove.iter().filter(|h| !counters.contains_key(h) && !canon_inserts.contains(h)) {
 				try!(batch.delete(&h));
 				deletes += 1;
 			}
-			debug!("commit: Delete journal for time #{}.{}, (canon was {}): {} entries", end_era, index, canon_id, deletes);
+			trace!("Total nodes purged: {}", deletes);
 		}

 		// Commit overlay insertions
-		let mut ret = 0u32;
-		let mut deletes = 0usize;
-		for i in self.overlay.drain().into_iter() {
-			let (key, (value, rc)) = i;
-			if rc > 0 {
-				assert!(rc == 1);
-				batch.put(&key.bytes(), &value).expect("Low-level database error. Some issue with your hard disk?");
-				ret += 1;
-			}
-			if rc < 0 {
-				assert!(rc == -1);
-				ret += 1;
-				deletes += 1;
-			}
-		}
+		let ret = Self::batch_overlay_insertions(&mut self.overlay, &batch);
 		try!(self.backing.write(batch));
-		debug!("commit: Deleted {} nodes", deletes);
-		Ok(ret)
+		Ok(ret as u32)
 	}
@@ -262,7 +312,7 @@ impl JournalDB {
 				era -= 1;
 			}
 		}
-		debug!("Recovered {} counters", res.len());
+		trace!("Recovered {} counters", res.len());
 		res
 	}
 }

View File

@@ -105,7 +105,7 @@ impl SecretStore {
 			import_path.push(".ethereum");
 			import_path.push("keystore");
 			if let Err(e) = geth_import::import_geth_keys(self, &import_path) {
-				warn!(target: "sstore", "Error retrieving geth keys: {:?}", e)
+				trace!(target: "sstore", "Geth key not imported: {:?}", e);
 			}
 		}

View File

@@ -400,7 +400,8 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
 		// public_endpoint in host info contains local adderss at this point
 		let listen_address = self.info.read().unwrap().public_endpoint.address.clone();
 		let udp_port = self.info.read().unwrap().config.udp_port.unwrap_or(listen_address.port());
-		let public_endpoint = match self.info.read().unwrap().config.public_address {
+		let public_address = self.info.read().unwrap().config.public_address.clone();
+		let public_endpoint = match public_address {
 			None => {
 				let public_address = select_public_address(listen_address.port());
 				let local_endpoint = NodeEndpoint { address: public_address, udp_port: udp_port };
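The likely motivation for the extra binding (my reading; the diff does not say): matching directly on `self.info.read().unwrap().config.public_address` keeps the read guard alive for the entire match, so any later lock on `self.info` inside the arms could deadlock. Cloning into a local first drops the guard at the end of the statement. The shape of the fix with std types only, names illustrative:

use std::sync::RwLock;

fn main() {
    let info = RwLock::new(Some(30303u16));
    let public_address = info.read().unwrap().clone(); // read guard dropped here
    match public_address {
        Some(port) => { *info.write().unwrap() = Some(port); } // re-locking is now safe
        None => {}
    }
}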

View File

@@ -146,7 +146,7 @@ impl OverlayDB {
 		})
 	}

-	/// Get the refs and value of the given key.
+	/// Put the refs and value of the given key, possibly deleting it from the db.
 	fn put_payload(&self, key: &H256, payload: (Bytes, u32)) -> bool {
 		if payload.1 > 0 {
 			let mut s = RlpStream::new_list(2);