Merge branch 'master' of github.com:ethcore/parity into rpc
commit e05fe2e0eb

.gitignore (vendored): 3 changed lines
@@ -23,4 +23,5 @@ Cargo.lock
/json-tests/target/

# jetbrains ide stuff
.idea

@@ -21,6 +21,7 @@ evmjit = { path = "rust-evmjit", optional = true }
ethash = { path = "ethash" }
num_cpus = "0.2"
clippy = "0.0.37"
crossbeam = "0.1.5"

[features]
jit = ["evmjit"]

@@ -19,6 +19,16 @@ pub struct BlockQueueInfo {
pub unverified_queue_size: usize,
/// Number of verified queued blocks pending import
pub verified_queue_size: usize,
/// Number of blocks being verified
pub verifying_queue_size: usize,
}

impl BlockQueueInfo {
/// The total size of the queues.
pub fn total_queue_size(&self) -> usize { self.unverified_queue_size + self.verified_queue_size + self.verifying_queue_size }

/// The size of the unverified and verifying queues.
pub fn incomplete_queue_size(&self) -> usize { self.unverified_queue_size + self.verifying_queue_size }
}

/// A queue of blocks. Sits between network or other I/O and the BlockChain.
@@ -30,6 +40,7 @@ pub struct BlockQueue {
verifiers: Vec<JoinHandle<()>>,
deleting: Arc<AtomicBool>,
ready_signal: Arc<QueueSignal>,
empty: Arc<Condvar>,
processing: HashSet<H256>
}

@@ -74,6 +85,7 @@ impl BlockQueue {
let more_to_verify = Arc::new(Condvar::new());
let ready_signal = Arc::new(QueueSignal { signalled: AtomicBool::new(false), message_channel: message_channel });
let deleting = Arc::new(AtomicBool::new(false));
let empty = Arc::new(Condvar::new());

let mut verifiers: Vec<JoinHandle<()>> = Vec::new();
let thread_count = max(::num_cpus::get(), 3) - 2;
@@ -82,8 +94,9 @@ impl BlockQueue {
let engine = engine.clone();
let more_to_verify = more_to_verify.clone();
let ready_signal = ready_signal.clone();
let empty = empty.clone();
let deleting = deleting.clone();
verifiers.push(thread::Builder::new().name(format!("Verifier #{}", i)).spawn(move || BlockQueue::verify(verification, engine, more_to_verify, ready_signal, deleting))
verifiers.push(thread::Builder::new().name(format!("Verifier #{}", i)).spawn(move || BlockQueue::verify(verification, engine, more_to_verify, ready_signal, deleting, empty))
.expect("Error starting block verification thread"));
}
BlockQueue {
@@ -94,13 +107,19 @@ impl BlockQueue {
verifiers: verifiers,
deleting: deleting.clone(),
processing: HashSet::new(),
empty: empty.clone(),
}
}

fn verify(verification: Arc<Mutex<Verification>>, engine: Arc<Box<Engine>>, wait: Arc<Condvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>) {
fn verify(verification: Arc<Mutex<Verification>>, engine: Arc<Box<Engine>>, wait: Arc<Condvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>, empty: Arc<Condvar>) {
while !deleting.load(AtomicOrdering::Relaxed) {
{
let mut lock = verification.lock().unwrap();

if lock.unverified.is_empty() && lock.verifying.is_empty() {
empty.notify_all();
}

while lock.unverified.is_empty() && !deleting.load(AtomicOrdering::Relaxed) {
lock = wait.wait(lock).unwrap();
}
@@ -169,6 +188,14 @@ impl BlockQueue {
verification.verifying.clear();
}

/// Wait for queue to be empty
pub fn flush(&mut self) {
let mut verification = self.verification.lock().unwrap();
while !verification.unverified.is_empty() && !verification.verifying.is_empty() {
verification = self.empty.wait(verification).unwrap();
}
}

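The new `empty` condvar ties `flush()` to the verifier threads: a verifier signals it once both the unverified and verifying queues have drained, and `flush()` blocks on it until then. Below is a minimal self-contained sketch of the same pattern, with plain `Vec`s standing in for the real `Verification` sets; note that waiting until *both* queues are empty needs `||` in the wait condition (keep waiting while either one still has work):

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

// Shared state: two work queues guarded by one mutex, loosely mirroring the
// unverified/verifying sets in BlockQueue.
struct Queues {
    unverified: Vec<u32>,
    verifying: Vec<u32>,
}

fn main() {
    let state = Arc::new((
        Mutex::new(Queues { unverified: (0..8).collect(), verifying: Vec::new() }),
        Condvar::new(),
    ));

    // Worker: drains the queues and notifies `empty` once both are drained.
    let worker_state = state.clone();
    let worker = thread::spawn(move || loop {
        let (lock, empty) = &*worker_state;
        let mut queues = lock.lock().unwrap();
        if queues.unverified.is_empty() && queues.verifying.is_empty() {
            // Both queues drained: wake anyone blocked in flush().
            empty.notify_all();
            break;
        }
        // Simulate verifying one queued block.
        let _item = queues.unverified.pop();
        drop(queues);
        thread::sleep(Duration::from_millis(1));
    });

    // flush(): block until both queues are empty. Waiting while *either*
    // queue is non-empty requires `||`, not `&&`.
    {
        let (lock, empty) = &*state;
        let mut queues = lock.lock().unwrap();
        while !queues.unverified.is_empty() || !queues.verifying.is_empty() {
            queues = empty.wait(queues).unwrap();
        }
    }
    println!("queues flushed");
    worker.join().unwrap();
}
```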
/// Add a block to the queue.
pub fn import_block(&mut self, bytes: Bytes) -> ImportResult {
let header = BlockView::new(&bytes).header();
@@ -242,6 +269,7 @@ impl BlockQueue {
full: false,
verified_queue_size: verification.verified.len(),
unverified_queue_size: verification.unverified.len(),
verifying_queue_size: verification.verifying.len(),
}
}
}

@@ -283,13 +283,6 @@ impl BlockChain {
bc
}

/// Ensure that the best block does indeed have a state_root in the state DB.
/// If it doesn't, then rewind down until we find one that does and delete data to ensure that
/// later blocks will be reimported.
pub fn ensure_good(&mut self, _state: &JournalDB) {
unimplemented!();
}

/// Returns a tree route between `from` and `to`, which is a tuple of:
///
/// - a vector of hashes of all blocks, ordered from `from` to `to`.
@@ -392,7 +385,6 @@ impl BlockChain {
}
}

/// Inserts the block into backing cache database.
/// Expects the block to be valid and already verified.
/// If the block is already known, does nothing.

@@ -121,6 +121,7 @@ impl ClientReport {
}

/// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue.
/// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue.
pub struct Client {
chain: Arc<RwLock<BlockChain>>,
engine: Arc<Box<Engine>>,
@@ -140,7 +141,8 @@ impl Client {
let mut opts = Options::new();
opts.set_max_open_files(256);
opts.create_if_missing(true);
/*opts.set_use_fsync(false);
opts.set_use_fsync(false);
/*
opts.set_bytes_per_sync(8388608);
opts.set_disable_data_sync(false);
opts.set_block_cache_size_mb(1024);
@@ -177,15 +179,16 @@ impl Client {
}))
}

/// Flush the block import queue.
pub fn flush_queue(&self) {
self.block_queue.write().unwrap().flush();
}

/// This is triggered by a message coming from a block queue when the block is ready for insertion
pub fn import_verified_blocks(&self, _io: &IoChannel<NetSyncMessage>) {
let mut bad = HashSet::new();
let _import_lock = self.import_lock.lock();
let blocks = self.block_queue.write().unwrap().drain(128);
if blocks.is_empty() {
return;
}

let blocks = self.block_queue.write().unwrap().drain(128);
for block in blocks {
if bad.contains(&block.header.parent_hash) {
self.block_queue.write().unwrap().mark_as_bad(&block.header.hash());
@@ -233,6 +236,7 @@ impl Client {
}
};
if let Err(e) = verify_block_final(&header, result.block().header()) {
flushln!("Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
self.block_queue.write().unwrap().mark_as_bad(&header.hash());
return;

@@ -64,7 +64,7 @@ impl IntoJit<evmjit::I256> for H256 {
for i in 0..self.bytes().len() {
let rev = self.bytes().len() - 1 - i;
let pos = rev / 8;
ret[pos] += (self.bytes()[i] as u64) << (rev % 8) * 8;
ret[pos] += (self.bytes()[i] as u64) << ((rev % 8) * 8);
}
evmjit::I256 { words: ret }
}
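The added parentheses do not change the result: in Rust the multiplicative operators bind tighter than the shifts, so `x << (rev % 8) * 8` already groups as `x << ((rev % 8) * 8)`. The explicit form just spells the grouping out, and presumably keeps clippy's precedence lint quiet given the clippy dependency added above. A quick check:

```rust
fn main() {
    let (x, rev): (u64, usize) = (1, 10);
    // `%` and `*` bind tighter than `<<`, so both sides shift by 16 bits.
    assert_eq!(x << (rev % 8) * 8, x << ((rev % 8) * 8));
    assert_eq!(x << (rev % 8) * 8, 1u64 << 16);
    println!("both forms shift by {} bits", (rev % 8) * 8);
}
```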
@@ -218,9 +218,11 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> {
}
}

match self.ext.call(&call_gas,
match self.ext.call(
&call_gas,
&self.address,
&receive_address,
&value,
Some(value),
unsafe { slice::from_raw_parts(in_beg, in_size as usize) },
&code_address,
unsafe { slice::from_raw_parts_mut(out_beg, out_size as usize) }) {
@@ -262,7 +264,7 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> {
}

let bytes_ref: &[u8] = slice::from_raw_parts(beg, size as usize);
self.ext.log(topics, bytes_ref.to_vec());
self.ext.log(topics, bytes_ref);
}
}

@@ -287,8 +289,8 @@ impl evm::Evm for JitEvm {
assert!(params.gas <= U256::from(i64::max_value() as u64), "evmjit max gas is 2 ^ 63");
assert!(params.gas_price <= U256::from(i64::max_value() as u64), "evmjit max gas is 2 ^ 63");

let call_data = params.data.unwrap_or(vec![]);
let code = params.code.unwrap_or(vec![]);
let call_data = params.data.unwrap_or_else(Vec::new);
let code = params.code.unwrap_or_else(Vec::new);
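`unwrap_or(vec![])` evaluates its default expression up front even when the `Option` is `Some`, whereas `unwrap_or_else(Vec::new)` only runs the closure in the `None` case. That is what clippy's `or_fun_call` lint pushes toward, and it matters when the default is expensive to build. A small illustration with placeholder data:

```rust
fn main() {
    let data: Option<Vec<u8>> = Some(vec![1, 2, 3]);

    // Eager: the default `vec![]` expression is evaluated before unwrap_or
    // runs, whether or not `data` is Some.
    let eager = data.clone().unwrap_or(vec![]);

    // Lazy: `Vec::new` is only invoked if `data` is None.
    let lazy = data.unwrap_or_else(Vec::new);

    assert_eq!(eager, lazy);
    println!("{:?}", lazy);
}
```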
let mut data = evmjit::RuntimeDataHandle::new();
data.gas = params.gas.low_u64() as i64;
@@ -303,7 +305,10 @@ impl evm::Evm for JitEvm {
data.address = params.address.into_jit();
data.caller = params.sender.into_jit();
data.origin = params.origin.into_jit();
data.call_value = params.value.into_jit();
data.call_value = match params.value {
ActionValue::Transfer(val) => val.into_jit(),
ActionValue::Apparent(val) => val.into_jit()
};

data.author = ext.env_info().author.clone().into_jit();
data.difficulty = ext.env_info().difficulty.into_jit();

@@ -2,67 +2,67 @@

/// Definition of the cost schedule and other parameterisations for the EVM.
pub struct Schedule {
/// TODO [Gav Wood] Please document me
/// Does it support exceptional failed code deposit
pub exceptional_failed_code_deposit: bool,
/// TODO [Gav Wood] Please document me
/// Does it have a delegate call
pub have_delegate_call: bool,
/// TODO [Tomusdrw] Please document me
/// VM stack limit
pub stack_limit: usize,
/// TODO [Gav Wood] Please document me
/// Max number of nested calls/creates
pub max_depth: usize,
/// TODO [Gav Wood] Please document me
/// Gas prices for instructions in all tiers
pub tier_step_gas: [usize; 8],
/// TODO [Gav Wood] Please document me
/// Gas price for `EXP` opcode
pub exp_gas: usize,
/// TODO [Gav Wood] Please document me
/// Additional gas for `EXP` opcode for each byte of exponent
pub exp_byte_gas: usize,
/// TODO [Gav Wood] Please document me
/// Gas price for `SHA3` opcode
pub sha3_gas: usize,
/// TODO [Gav Wood] Please document me
/// Additional gas for `SHA3` opcode for each word of hashed memory
pub sha3_word_gas: usize,
/// TODO [Gav Wood] Please document me
/// Gas price for loading from storage
pub sload_gas: usize,
/// TODO [Gav Wood] Please document me
/// Gas price for setting new value to storage (`storage==0`, `new!=0`)
pub sstore_set_gas: usize,
/// TODO [Gav Wood] Please document me
/// Gas price for altering value in storage
pub sstore_reset_gas: usize,
/// TODO [Gav Wood] Please document me
/// Gas refund for `SSTORE` clearing (when `storage!=0`, `new==0`)
pub sstore_refund_gas: usize,
/// TODO [Gav Wood] Please document me
/// Gas price for `JUMPDEST` opcode
pub jumpdest_gas: usize,
/// TODO [Gav Wood] Please document me
/// Gas price for `LOG*`
pub log_gas: usize,
/// TODO [Gav Wood] Please document me
/// Additional gas for data in `LOG*`
pub log_data_gas: usize,
/// TODO [Gav Wood] Please document me
/// Additional gas for each topic in `LOG*`
pub log_topic_gas: usize,
/// TODO [Gav Wood] Please document me
/// Gas price for `CREATE` opcode
pub create_gas: usize,
/// TODO [Gav Wood] Please document me
/// Gas price for `*CALL*` opcodes
pub call_gas: usize,
/// TODO [Gav Wood] Please document me
/// Stipend for transfer for `CALL|CALLCODE` opcode when `value>0`
pub call_stipend: usize,
/// TODO [Gav Wood] Please document me
/// Additional gas required for value transfer (`CALL|CALLCODE`)
pub call_value_transfer_gas: usize,
/// TODO [Gav Wood] Please document me
/// Additional gas for creating new account (`CALL|CALLCODE`)
pub call_new_account_gas: usize,
/// TODO [Gav Wood] Please document me
/// Refund for SUICIDE
pub suicide_refund_gas: usize,
/// TODO [Gav Wood] Please document me
/// Gas for used memory
pub memory_gas: usize,
/// TODO [Gav Wood] Please document me
/// Coefficient used to convert memory size to gas price for memory
pub quad_coeff_div: usize,
/// TODO [Gav Wood] Please document me
/// Cost for contract length when executing `CREATE`
pub create_data_gas: usize,
/// TODO [Gav Wood] Please document me
/// Transaction cost
pub tx_gas: usize,
/// TODO [Gav Wood] Please document me
/// `CREATE` transaction cost
pub tx_create_gas: usize,
/// TODO [Gav Wood] Please document me
/// Additional cost for empty data transaction
pub tx_data_zero_gas: usize,
/// TODO [Gav Wood] Please document me
/// Additional cost for non-empty data transaction
pub tx_data_non_zero_gas: usize,
/// TODO [Gav Wood] Please document me
/// Gas price for copying memory
pub copy_gas: usize,
}
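The fields above combine linearly in the gas formulas; a `LOG*` instruction, for example, is priced from `log_gas`, `log_topic_gas` and `log_data_gas`. Below is a sketch with a trimmed-down stand-in struct (not the real `Schedule`), using the Frontier-era constants purely for illustration:

```rust
// Trimmed-down stand-in for the Schedule fields used by LOG* pricing.
struct LogSchedule {
    log_gas: usize,       // base price for any LOG* opcode
    log_data_gas: usize,  // per byte of logged data
    log_topic_gas: usize, // per topic
}

// Gas charged for a LOG operation: base + per-topic + per-byte components.
fn log_cost(s: &LogSchedule, topics: usize, data_len: usize) -> usize {
    s.log_gas + s.log_topic_gas * topics + s.log_data_gas * data_len
}

fn main() {
    // Classic constants: 375 base, 375 per topic, 8 per data byte.
    let s = LogSchedule { log_gas: 375, log_data_gas: 8, log_topic_gas: 375 };
    assert_eq!(log_cost(&s, 2, 32), 375 + 2 * 375 + 32 * 8);
    println!("LOG2 over 32 bytes costs {} gas", log_cost(&s, 2, 32));
}
```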
@@ -215,6 +215,7 @@ fn test_origin(factory: super::Factory) {
assert_eq!(ext.store.get(&H256::new()).unwrap(), &H256::from_str("000000000000000000000000cd1722f2947def4cf144679da39c4c32bdc35681").unwrap());
}

// TODO [todr] Fails with Signal 11 on JIT
evm_test!{test_sender: test_sender_jit, test_sender_int}
fn test_sender(factory: super::Factory) {
let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap();

@@ -5,6 +5,12 @@ use engine::*;
use evm::{self, Ext};
use externalities::*;
use substate::*;
use crossbeam;

/// Max depth to avoid stack overflow (when it's reached we start a new thread with VM)
/// TODO [todr] We probably need some more sophisticated calculations here (limit on my machine 132)
/// Maybe something like here: https://github.com/ethereum/libethereum/blob/4db169b8504f2b87f7d5a481819cfb959fc65f6c/libethereum/ExtVM.cpp
const MAX_VM_DEPTH_FOR_THREAD: usize = 128;

/// Returns new address created from address and given nonce.
pub fn contract_address(address: &Address, nonce: &U256) -> Address {
@@ -161,6 +167,27 @@ impl<'a> Executive<'a> {
Ok(try!(self.finalize(t, substate, res)))
}

fn exec_vm(&mut self, params: ActionParams, unconfirmed_substate: &mut Substate, output_policy: OutputPolicy) -> evm::Result {
// Ordinary execution - keep VM in same thread
if (self.depth + 1) % MAX_VM_DEPTH_FOR_THREAD != 0 {
let mut ext = self.as_externalities(OriginInfo::from(&params), unconfirmed_substate, output_policy);
let vm_factory = self.engine.vm_factory();
return vm_factory.create().exec(params, &mut ext);
}

// Start in new thread to reset stack
// TODO [todr] No thread builder yet, so we need to reset once for a while
// https://github.com/aturon/crossbeam/issues/16
crossbeam::scope(|scope| {
let mut ext = self.as_externalities(OriginInfo::from(&params), unconfirmed_substate, output_policy);
let vm_factory = self.engine.vm_factory();

scope.spawn(move || {
vm_factory.create().exec(params, &mut ext)
})
}).join()
}
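`exec_vm` hops to a new thread every `MAX_VM_DEPTH_FOR_THREAD` levels so that deeply nested calls/creates continue on a fresh stack instead of overflowing the current one; a *scoped* thread is needed because `ext` borrows from the caller's stack, which a plain `thread::spawn` would not allow, and `join()` hands the VM result back. The diff uses crossbeam 0.1's `crossbeam::scope`; the sketch below shows the same idea with today's `std::thread::scope` so it is self-contained:

```rust
use std::thread;

const MAX_DEPTH_PER_THREAD: usize = 128;

// A deliberately deep recursion. Every MAX_DEPTH_PER_THREAD frames we hop to
// a fresh thread, whose brand-new stack means the total recursion depth is no
// longer bounded by any single thread's stack.
fn recurse(depth: usize, remaining: usize) -> usize {
    if remaining == 0 {
        return depth;
    }
    if (depth + 1) % MAX_DEPTH_PER_THREAD != 0 {
        // Ordinary case: stay on the current thread.
        return recurse(depth + 1, remaining - 1);
    }
    // Depth limit reached: continue on a scoped thread with a fresh stack.
    // (The diff does the same with crossbeam::scope.)
    thread::scope(|s| s.spawn(|| recurse(depth + 1, remaining - 1)).join().unwrap())
}

fn main() {
    // 10_000 nested calls could be risky on one stack with large frames;
    // chunking them across threads keeps each individual stack shallow.
    println!("final depth = {}", recurse(0, 10_000));
}
```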
/// Calls contract function with given contract params.
/// NOTE. It does not finalize the transaction (doesn't do refunds, nor suicides).
/// Modifies the substate and the output.
@@ -200,8 +227,7 @@ impl<'a> Executive<'a> {
let mut unconfirmed_substate = Substate::new();

let res = {
let mut ext = self.as_externalities(OriginInfo::from(&params), &mut unconfirmed_substate, OutputPolicy::Return(output));
self.engine.vm_factory().create().exec(params, &mut ext)
self.exec_vm(params, &mut unconfirmed_substate, OutputPolicy::Return(output))
};

trace!("exec: sstore-clears={}\n", unconfirmed_substate.sstore_clears_count);
@@ -234,8 +260,7 @@ impl<'a> Executive<'a> {
}

let res = {
let mut ext = self.as_externalities(OriginInfo::from(&params), &mut unconfirmed_substate, OutputPolicy::InitContract);
self.engine.vm_factory().create().exec(params, &mut ext)
self.exec_vm(params, &mut unconfirmed_substate, OutputPolicy::InitContract)
};
self.enact_result(&res, substate, unconfirmed_substate, backup);
res
@@ -276,7 +301,6 @@ impl<'a> Executive<'a> {

match result {
Err(evm::Error::Internal) => Err(ExecutionError::Internal),
// TODO [ToDr] BadJumpDestination @debris - how to handle that?
Err(_) => {
Ok(Executed {
gas: t.gas,
@@ -301,7 +325,6 @@ impl<'a> Executive<'a> {
}

fn enact_result(&mut self, result: &evm::Result, substate: &mut Substate, un_substate: Substate, backup: State) {
// TODO: handle other evm::Errors same as OutOfGas once they are implemented
match *result {
Err(evm::Error::OutOfGas)
| Err(evm::Error::BadJumpDestination {..})

@@ -90,6 +90,7 @@ extern crate num_cpus;
extern crate evmjit;
#[macro_use]
extern crate ethcore_util as util;
extern crate crossbeam;

// NOTE: Add doc parser exception for these pub declarations.

@@ -1,17 +1,25 @@
use util::*;
use pod_account::*;

#[derive(Debug,Clone,PartialEq,Eq)]
#[derive(Debug,Clone,PartialEq,Eq,Default)]
/// TODO [Gav Wood] Please document me
pub struct PodState (BTreeMap<Address, PodAccount>);

impl PodState {
/// Construct a new object from the `m`.
pub fn new(m: BTreeMap<Address, PodAccount>) -> PodState { PodState(m) }
pub fn new() -> PodState { Default::default() }

/// Construct a new object from the `m`.
pub fn from(m: BTreeMap<Address, PodAccount>) -> PodState { PodState(m) }

/// Get the underlying map.
pub fn get(&self) -> &BTreeMap<Address, PodAccount> { &self.0 }

/// Get the root hash of the trie of the RLP of this.
pub fn root(&self) -> H256 {
sec_trie_root(self.0.iter().map(|(k, v)| (k.to_vec(), v.rlp())).collect())
}

/// Drain object to get the underlying map.
pub fn drain(self) -> BTreeMap<Address, PodAccount> { self.0 }
}

src/spec.rs: 84 changed lines
@@ -1,6 +1,7 @@
use common::*;
use flate2::read::GzDecoder;
use engine::*;
use pod_state::*;
use null_engine::*;

/// Converts file from base64 gzipped bytes to json
@@ -40,28 +41,6 @@ fn json_to_rlp_map(json: &Json) -> HashMap<String, Bytes> {
})
}

//TODO: add code and data
#[derive(Debug)]
/// Genesis account data. Does not have a DB overlay cache
pub struct GenesisAccount {
// Balance of the account.
balance: U256,
// Nonce of the account.
nonce: U256,
}

impl GenesisAccount {
/// TODO [arkpar] Please document me
pub fn rlp(&self) -> Bytes {
let mut stream = RlpStream::new_list(4);
stream.append(&self.nonce);
stream.append(&self.balance);
stream.append(&SHA3_NULL_RLP);
stream.append(&SHA3_EMPTY);
stream.out()
}
}

/// Parameters for a block chain; includes both those intrinsic to the design of the
/// chain and those to be interpreted by the active chain engine.
#[derive(Debug)]
@@ -83,7 +62,7 @@ pub struct Spec {

// Builtin-contracts are here for now but would like to abstract into Engine API eventually.
/// TODO [Gav Wood] Please document me
pub builtins: HashMap<Address, Builtin>,
pub builtins: BTreeMap<Address, Builtin>,

// Genesis params.
/// TODO [Gav Wood] Please document me
@@ -101,7 +80,7 @@ pub struct Spec {
/// TODO [arkpar] Please document me
pub extra_data: Bytes,
/// TODO [Gav Wood] Please document me
pub genesis_state: HashMap<Address, GenesisAccount>,
genesis_state: PodState,
/// TODO [Gav Wood] Please document me
pub seal_fields: usize,
/// TODO [Gav Wood] Please document me
@@ -126,7 +105,7 @@ impl Spec {
/// Return the state root for the genesis state, memoising accordingly.
pub fn state_root(&self) -> H256 {
if self.state_root_memo.read().unwrap().is_none() {
*self.state_root_memo.write().unwrap() = Some(sec_trie_root(self.genesis_state.iter().map(|(k, v)| (k.to_vec(), v.rlp())).collect()));
*self.state_root_memo.write().unwrap() = Some(self.genesis_state.root());
}
self.state_root_memo.read().unwrap().as_ref().unwrap().clone()
}
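`state_root()` memoises the genesis root in an `RwLock<Option<H256>>`: check under a read lock, fill under a write lock, and `set_genesis_state` clears it so the next call recomputes. Here is a stripped-down sketch of the same pattern (a `u64` stands in for `H256`; like the original it tolerates the harmless race where two callers both recompute between the read check and the write):

```rust
use std::sync::RwLock;

// Lazily computed, cached value guarded by an RwLock<Option<_>>, mirroring
// the `state_root_memo` field above.
struct Memo {
    root_memo: RwLock<Option<u64>>,
}

impl Memo {
    // Placeholder for an expensive computation (the genesis trie root).
    fn compute_root(&self) -> u64 {
        42
    }

    fn root(&self) -> u64 {
        if self.root_memo.read().unwrap().is_none() {
            *self.root_memo.write().unwrap() = Some(self.compute_root());
        }
        let cached = *self.root_memo.read().unwrap();
        cached.unwrap()
    }

    // Invalidate the cache when the underlying data changes, as
    // set_genesis_state does.
    fn invalidate(&self) {
        *self.root_memo.write().unwrap() = None;
    }
}

fn main() {
    let m = Memo { root_memo: RwLock::new(None) };
    assert_eq!(m.root(), 42); // computed on first access
    m.invalidate();
    assert_eq!(m.root(), 42); // recomputed after invalidation
}
```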
@@ -174,6 +153,46 @@ impl Spec {
ret.append_raw(&empty_list, 1);
ret.out()
}

/// Overwrite the genesis components with the given JSON, assuming standard Ethereum test format.
pub fn overwrite_genesis(&mut self, genesis: &Json) {
let (seal_fields, seal_rlp) = {
if genesis.find("mixHash").is_some() && genesis.find("nonce").is_some() {
let mut s = RlpStream::new();
s.append(&H256::from_json(&genesis["mixHash"]));
s.append(&H64::from_json(&genesis["nonce"]));
(2, s.out())
} else {
// backup algo that will work with sealFields/sealRlp (and without).
(
u64::from_json(&genesis["sealFields"]) as usize,
Bytes::from_json(&genesis["sealRlp"])
)
}
};

self.parent_hash = H256::from_json(&genesis["parentHash"]);
self.author = Address::from_json(&genesis["coinbase"]);
self.difficulty = U256::from_json(&genesis["difficulty"]);
self.gas_limit = U256::from_json(&genesis["gasLimit"]);
self.gas_used = U256::from_json(&genesis["gasUsed"]);
self.timestamp = u64::from_json(&genesis["timestamp"]);
self.extra_data = Bytes::from_json(&genesis["extraData"]);
self.seal_fields = seal_fields;
self.seal_rlp = seal_rlp;
self.state_root_memo = RwLock::new(genesis.find("stateRoot").and_then(|_| Some(H256::from_json(&genesis["stateRoot"]))));
}

/// Alter the value of the genesis state.
pub fn set_genesis_state(&mut self, s: PodState) {
self.genesis_state = s;
*self.state_root_memo.write().unwrap() = None;
}

/// Returns `false` if the memoized state root is invalid. `true` otherwise.
pub fn is_state_root_valid(&self) -> bool {
self.state_root_memo.read().unwrap().clone().map_or(true, |sr| sr == self.genesis_state.root())
}
}

impl FromJson for Spec {
@@ -181,8 +200,8 @@ impl FromJson for Spec {
fn from_json(json: &Json) -> Spec {
// once we commit ourselves to some json parsing library (serde?)
// move it to proper data structure
let mut state = HashMap::new();
let mut builtins = HashMap::new();
let mut builtins = BTreeMap::new();
let mut state = PodState::new();

if let Some(&Json::Object(ref accounts)) = json.find("accounts") {
for (address, acc) in accounts.iter() {
@@ -192,15 +211,8 @@ impl FromJson for Spec {
builtins.insert(addr.clone(), builtin);
}
}
let balance = acc.find("balance").and_then(|x| match *x { Json::String(ref b) => U256::from_dec_str(b).ok(), _ => None });
let nonce = acc.find("nonce").and_then(|x| match *x { Json::String(ref b) => U256::from_dec_str(b).ok(), _ => None });
// let balance = if let Some(&Json::String(ref b)) = acc.find("balance") {U256::from_dec_str(b).unwrap_or(U256::from(0))} else {U256::from(0)};
// let nonce = if let Some(&Json::String(ref n)) = acc.find("nonce") {U256::from_dec_str(n).unwrap_or(U256::from(0))} else {U256::from(0)};
// TODO: handle code & data if they exist.
if balance.is_some() || nonce.is_some() {
state.insert(addr, GenesisAccount { balance: balance.unwrap_or_else(U256::zero), nonce: nonce.unwrap_or_else(U256::zero) });
}
}
state = xjson!(&json["accounts"]);
}

let nodes = if let Some(&Json::Array(ref ns)) = json.find("nodes") {
@@ -253,7 +265,7 @@ impl Spec {
let mut root = H256::new();
{
let mut t = SecTrieDBMut::new(db, &mut root);
for (address, account) in &self.genesis_state {
for (address, account) in self.genesis_state.get().iter() {
t.insert(address.as_slice(), &account.rlp());
}
}

src/state.rs: 12 changed lines
@@ -3,7 +3,7 @@ use engine::Engine;
use executive::Executive;
use pod_account::*;
use pod_state::*;
use state_diff::*;
//use state_diff::*; // TODO: uncomment once to_pod() works correctly.

/// TODO [Gav Wood] Please document me
pub type ApplyResult = Result<Receipt, Error>;
@@ -145,16 +145,16 @@ impl State {
/// Execute a given transaction.
/// This will change the state accordingly.
pub fn apply(&mut self, env_info: &EnvInfo, engine: &Engine, t: &Transaction) -> ApplyResult {

let old = self.to_pod();
// let old = self.to_pod();

let e = try!(Executive::new(self, env_info, engine).transact(t));
//println!("Executed: {:?}", e);

trace!("Applied transaction. Diff:\n{}\n", StateDiff::diff_pod(&old, &self.to_pod()));
// TODO uncomment once to_pod() works correctly.
// trace!("Applied transaction. Diff:\n{}\n", StateDiff::diff_pod(&old, &self.to_pod()));
self.commit();
let receipt = Receipt::new(self.root().clone(), e.cumulative_gas_used, e.logs);
trace!("Transaction receipt: {:?}", receipt);
// trace!("Transaction receipt: {:?}", receipt);
Ok(receipt)
}

@@ -221,7 +221,7 @@ impl State {
/// Populate a PodAccount map from this state.
pub fn to_pod(&self) -> PodState {
// TODO: handle database rather than just the cache.
PodState::new(self.cache.borrow().iter().fold(BTreeMap::new(), |mut m, (add, opt)| {
PodState::from(self.cache.borrow().iter().fold(BTreeMap::new(), |mut m, (add, opt)| {
if let Some(ref acc) = *opt {
m.insert(add.clone(), PodAccount::from_account(acc));
}
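`to_pod()` folds the cached `Option` entries into a fresh `BTreeMap`, skipping accounts that are recorded as absent. For comparison, the same transformation written with `filter_map` plus `collect`, on stand-in types rather than the real cache:

```rust
use std::collections::BTreeMap;

fn main() {
    // Stand-in for the state cache: address -> Option<account nonce>, where
    // None marks an entry known to be absent.
    let cache: BTreeMap<&str, Option<u64>> = BTreeMap::from([
        ("0x01", Some(1)),
        ("0x02", None),
        ("0x03", Some(7)),
    ]);

    // fold, as in to_pod(): start from an empty map and insert Some entries.
    let folded = cache.iter().fold(BTreeMap::new(), |mut m, (addr, opt)| {
        if let Some(nonce) = opt {
            m.insert(*addr, *nonce);
        }
        m
    });

    // Equivalent filter_map + collect formulation.
    let collected: BTreeMap<_, _> = cache
        .iter()
        .filter_map(|(addr, opt)| opt.as_ref().map(|nonce| (*addr, *nonce)))
        .collect();

    assert_eq!(folded, collected);
}
```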
@@ -32,8 +32,8 @@ mod test {

#[test]
fn create_delete() {
let a = PodState::new(map![ x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]) ]);
assert_eq!(StateDiff::diff_pod(&a, &PodState::new(map![])), StateDiff(map![
let a = PodState::from(map![ x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]) ]);
assert_eq!(StateDiff::diff_pod(&a, &PodState::new()), StateDiff(map![
x!(1) => AccountDiff{
balance: Diff::Died(x!(69)),
nonce: Diff::Died(x!(0)),
@@ -41,7 +41,7 @@ mod test {
storage: map![],
}
]));
assert_eq!(StateDiff::diff_pod(&PodState::new(map![]), &a), StateDiff(map![
assert_eq!(StateDiff::diff_pod(&PodState::new(), &a), StateDiff(map![
x!(1) => AccountDiff{
balance: Diff::Born(x!(69)),
nonce: Diff::Born(x!(0)),
@@ -53,8 +53,8 @@ mod test {

#[test]
fn create_delete_with_unchanged() {
let a = PodState::new(map![ x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]) ]);
let b = PodState::new(map![
let a = PodState::from(map![ x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]) ]);
let b = PodState::from(map![
x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]),
x!(2) => PodAccount::new(x!(69), x!(0), vec![], map![])
]);
@@ -78,11 +78,11 @@ mod test {

#[test]
fn change_with_unchanged() {
let a = PodState::new(map![
let a = PodState::from(map![
x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]),
x!(2) => PodAccount::new(x!(69), x!(0), vec![], map![])
]);
let b = PodState::new(map![
let b = PodState::from(map![
x!(1) => PodAccount::new(x!(69), x!(1), vec![], map![]),
x!(2) => PodAccount::new(x!(69), x!(0), vec![], map![])
]);

@@ -9,13 +9,13 @@
/// extern crate ethcore;
/// use std::env;
/// use std::sync::Arc;
/// use util::network::NetworkService;
/// use util::network::{NetworkService, NetworkConfiguration};
/// use ethcore::client::Client;
/// use ethcore::sync::EthSync;
/// use ethcore::ethereum;
///
/// fn main() {
/// let mut service = NetworkService::start().unwrap();
/// let mut service = NetworkService::start(NetworkConfiguration::new()).unwrap();
/// let dir = env::temp_dir();
/// let client = Client::new(ethereum::new_frontier(), &dir, service.io().channel()).unwrap();
/// EthSync::register(&mut service, client);

@@ -158,6 +158,7 @@ impl BlockChainClient for TestBlockChainClient {
full: false,
verified_queue_size: 0,
unverified_queue_size: 0,
verifying_queue_size: 0,
}
}

src/tests/chain.rs (new file): 64 lines
@@ -0,0 +1,64 @@
use std::env;
use super::test_common::*;
use client::{BlockChainClient,Client};
use pod_state::*;
use ethereum;

fn do_json_test(json_data: &[u8]) -> Vec<String> {
let json = Json::from_str(::std::str::from_utf8(json_data).unwrap()).expect("Json is invalid");
let mut failed = Vec::new();

for (name, test) in json.as_object().unwrap() {
let mut fail = false;
{
let mut fail_unless = |cond: bool| if !cond && !fail {
failed.push(name.clone());
flush(format!("FAIL\n"));
fail = true;
true
} else {false};

flush(format!(" - {}...", name));

let blocks: Vec<Bytes> = test["blocks"].as_array().unwrap().iter().map(|e| xjson!(&e["rlp"])).collect();
let mut spec = ethereum::new_frontier_like_test();
spec.set_genesis_state(PodState::from_json(test.find("pre").unwrap()));
spec.overwrite_genesis(test.find("genesisBlockHeader").unwrap());
assert!(spec.is_state_root_valid());

let mut dir = env::temp_dir();
dir.push(H32::random().hex());
{
let client = Client::new(spec, &dir, IoChannel::disconnected()).unwrap();
blocks.into_iter().foreach(|b| {
client.import_block(b).unwrap();
});
client.flush_queue();
client.import_verified_blocks(&IoChannel::disconnected());
fail_unless(client.chain_info().best_block_hash == H256::from_json(&test["lastblockhash"]));
}
fs::remove_dir_all(&dir).unwrap();
}
if !fail {
flush(format!("ok\n"));
}
}
println!("!!! {:?} tests from failed.", failed.len());
failed
}

declare_test!{ignore => BlockchainTests_bcBlockGasLimitTest, "BlockchainTests/bcBlockGasLimitTest"} // FAILS
declare_test!{BlockchainTests_bcForkBlockTest, "BlockchainTests/bcForkBlockTest"}
declare_test!{ignore => BlockchainTests_bcForkStressTest, "BlockchainTests/bcForkStressTest"} // FAILS
declare_test!{ignore => BlockchainTests_bcForkUncle, "BlockchainTests/bcForkUncle"} // FAILS
declare_test!{BlockchainTests_bcGasPricerTest, "BlockchainTests/bcGasPricerTest"}
declare_test!{BlockchainTests_bcInvalidHeaderTest, "BlockchainTests/bcInvalidHeaderTest"}
declare_test!{ignore => BlockchainTests_bcInvalidRLPTest, "BlockchainTests/bcInvalidRLPTest"} // FAILS
declare_test!{ignore => BlockchainTests_bcMultiChainTest, "BlockchainTests/bcMultiChainTest"} // FAILS
declare_test!{BlockchainTests_bcRPC_API_Test, "BlockchainTests/bcRPC_API_Test"}
declare_test!{ignore => BlockchainTests_bcStateTest, "BlockchainTests/bcStateTest"} // FAILS (Suicides, GasUsed)
declare_test!{BlockchainTests_bcTotalDifficultyTest, "BlockchainTests/bcTotalDifficultyTest"}
declare_test!{ignore => BlockchainTests_bcUncleHeaderValiditiy, "BlockchainTests/bcUncleHeaderValiditiy"} // FAILS
declare_test!{ignore => BlockchainTests_bcUncleTest, "BlockchainTests/bcUncleTest"} // FAILS
declare_test!{ignore => BlockchainTests_bcValidBlockTest, "BlockchainTests/bcValidBlockTest"} // FAILS
declare_test!{ignore => BlockchainTests_bcWalletTest, "BlockchainTests/bcWalletTest"} // FAILS

@@ -271,8 +271,8 @@ fn do_json_test_for(vm: &VMType, json_data: &[u8]) -> Vec<String> {

declare_test!{ExecutiveTests_vmArithmeticTest, "VMTests/vmArithmeticTest"}
declare_test!{ExecutiveTests_vmBitwiseLogicOperationTest, "VMTests/vmBitwiseLogicOperationTest"}
// this one crashes with some vm internal error. Separately they pass.
declare_test!{ignore => ExecutiveTests_vmBlockInfoTest, "VMTests/vmBlockInfoTest"}
declare_test!{ExecutiveTests_vmBlockInfoTest, "VMTests/vmBlockInfoTest"}
// TODO [todr] Fails with Signal 11 when using JIT
declare_test!{ExecutiveTests_vmEnvironmentalInfoTest, "VMTests/vmEnvironmentalInfoTest"}
declare_test!{ExecutiveTests_vmIOandFlowOperationsTest, "VMTests/vmIOandFlowOperationsTest"}
declare_test!{heavy => ExecutiveTests_vmInputLimits, "VMTests/vmInputLimits"}

@@ -4,3 +4,4 @@ mod test_common;
mod transaction;
mod executive;
mod state;
mod chain;

@@ -73,7 +73,7 @@ fn do_json_test(json_data: &[u8]) -> Vec<String> {

declare_test!{StateTests_stBlockHashTest, "StateTests/stBlockHashTest"}
declare_test!{StateTests_stCallCodes, "StateTests/stCallCodes"}
declare_test!{ignore => StateTests_stCallCreateCallCodeTest, "StateTests/stCallCreateCallCodeTest"} //<< Out of stack
declare_test!{StateTests_stCallCreateCallCodeTest, "StateTests/stCallCreateCallCodeTest"}
declare_test!{StateTests_stDelegatecallTest, "StateTests/stDelegatecallTest"}
declare_test!{StateTests_stExample, "StateTests/stExample"}
declare_test!{StateTests_stInitCodeTest, "StateTests/stInitCodeTest"}
@@ -81,12 +81,12 @@ declare_test!{StateTests_stLogTests, "StateTests/stLogTests"}
declare_test!{heavy => StateTests_stMemoryStressTest, "StateTests/stMemoryStressTest"}
declare_test!{heavy => StateTests_stMemoryTest, "StateTests/stMemoryTest"}
declare_test!{StateTests_stPreCompiledContracts, "StateTests/stPreCompiledContracts"}
declare_test!{heavy => StateTests_stQuadraticComplexityTest, "StateTests/stQuadraticComplexityTest"} //<< Too long
declare_test!{ignore => StateTests_stRecursiveCreate, "StateTests/stRecursiveCreate"} //<< Out of stack
declare_test!{heavy => StateTests_stQuadraticComplexityTest, "StateTests/stQuadraticComplexityTest"}
declare_test!{StateTests_stRecursiveCreate, "StateTests/stRecursiveCreate"}
declare_test!{StateTests_stRefundTest, "StateTests/stRefundTest"}
declare_test!{StateTests_stSolidityTest, "StateTests/stSolidityTest"}
declare_test!{ignore => StateTests_stSpecialTest, "StateTests/stSpecialTest"} //<< Out of Stack
declare_test!{ignore => StateTests_stSystemOperationsTest, "StateTests/stSystemOperationsTest"} //<< Out of stack
declare_test!{StateTests_stSpecialTest, "StateTests/stSpecialTest"}
declare_test!{StateTests_stSystemOperationsTest, "StateTests/stSystemOperationsTest"}
declare_test!{StateTests_stTransactionTest, "StateTests/stTransactionTest"}
declare_test!{StateTests_stTransitionTest, "StateTests/stTransitionTest"}
declare_test!{StateTests_stWalletTest, "StateTests/stWalletTest"}

@@ -469,6 +469,18 @@ impl<'_> From<&'_ U256> for H256 {
}
}

impl From<H256> for U256 {
fn from(value: H256) -> U256 {
U256::from(value.bytes())
}
}

impl<'_> From<&'_ H256> for U256 {
fn from(value: &'_ H256) -> U256 {
U256::from(value.bytes())
}
}

impl From<H256> for Address {
fn from(value: H256) -> Address {
unsafe {
@@ -562,6 +574,7 @@ pub static ZERO_H256: H256 = H256([0x00; 32]);
#[cfg(test)]
mod tests {
use hash::*;
use uint::*;
use std::str::FromStr;

#[test]
@@ -635,5 +648,18 @@ mod tests {
// too short.
assert_eq!(H64::from(0), H64::from("0x34567890abcdef"));
}

#[test]
fn from_and_to_u256() {
let u: U256 = x!(0x123456789abcdef0u64);
let h = H256::from(u);
assert_eq!(H256::from(u), H256::from("000000000000000000000000000000000000000000000000123456789abcdef0"));
let h_ref = H256::from(&u);
assert_eq!(h, h_ref);
let r_ref: U256 = From::from(&h);
assert_eq!(r_ref, u);
let r: U256 = From::from(h);
assert_eq!(r, u);
}
}

@@ -21,6 +21,8 @@ pub enum DecoderError {
RlpListLenWithZeroPrefix,
/// TODO [debris] Please document me
RlpInvalidIndirection,
/// Returned when declared length is inconsistent with data specified after
RlpInconsistentLengthAndData
}

impl StdError for DecoderError {

@@ -4,7 +4,7 @@ use self::json_tests::rlp as rlptest;
use std::{fmt, cmp};
use std::str::FromStr;
use rlp;
use rlp::{UntrustedRlp, RlpStream, View, Stream};
use rlp::{UntrustedRlp, RlpStream, View, Stream, DecoderError};
use uint::U256;

#[test]
@@ -351,3 +351,56 @@ fn test_decoding_array() {
assert_eq!(arr[0], 5);
assert_eq!(arr[1], 2);
}

#[test]
fn test_rlp_data_length_check()
{
let data = vec![0x84, b'c', b'a', b't'];
let rlp = UntrustedRlp::new(&data);

let as_val: Result<String, DecoderError> = rlp.as_val();
assert_eq!(Err(DecoderError::RlpInconsistentLengthAndData), as_val);
}

#[test]
fn test_rlp_long_data_length_check()
{
let mut data: Vec<u8> = vec![0xb8, 255];
for _ in 0..253 {
data.push(b'c');
}

let rlp = UntrustedRlp::new(&data);

let as_val: Result<String, DecoderError> = rlp.as_val();
assert_eq!(Err(DecoderError::RlpInconsistentLengthAndData), as_val);
}

#[test]
fn test_the_exact_long_string()
{
let mut data: Vec<u8> = vec![0xb8, 255];
for _ in 0..255 {
data.push(b'c');
}

let rlp = UntrustedRlp::new(&data);

let as_val: Result<String, DecoderError> = rlp.as_val();
assert!(as_val.is_ok());
}

#[test]
fn test_rlp_2bytes_data_length_check()
{
let mut data: Vec<u8> = vec![0xb9, 2, 255]; // 512+255
for _ in 0..700 {
data.push(b'c');
}

let rlp = UntrustedRlp::new(&data);

let as_val: Result<String, DecoderError> = rlp.as_val();
assert_eq!(Err(DecoderError::RlpInconsistentLengthAndData), as_val);
}

@@ -331,18 +331,31 @@ impl<'a> Decoder for BasicDecoder<'a> {
Some(l @ 0...0x7f) => Ok(try!(f(&[l]))),
// 0-55 bytes
Some(l @ 0x80...0xb7) => {
let d = &bytes[1..(1 + l as usize - 0x80)];
let last_index_of = 1 + l as usize - 0x80;
if bytes.len() < last_index_of {
return Err(DecoderError::RlpInconsistentLengthAndData);
}
let d = &bytes[1..last_index_of];
if l == 0x81 && d[0] < 0x80 {
return Err(DecoderError::RlpInvalidIndirection);
}

Ok(try!(f(d)))
},
// longer than 55 bytes
Some(l @ 0xb8...0xbf) => {
let len_of_len = l as usize - 0xb7;
let begin_of_value = 1 as usize + len_of_len;
if bytes.len() < begin_of_value {
return Err(DecoderError::RlpInconsistentLengthAndData);
}
let len = try!(usize::from_bytes(&bytes[1..begin_of_value]));
Ok(try!(f(&bytes[begin_of_value..begin_of_value + len])))

let last_index_of_value = begin_of_value + len;
if bytes.len() < last_index_of_value {
return Err(DecoderError::RlpInconsistentLengthAndData);
}
Ok(try!(f(&bytes[begin_of_value..last_index_of_value])))
}
// we are reading value, not a list!
_ => Err(DecoderError::RlpExpectedToBeData)
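The decoder change adds the bounds checks the new tests exercise: for a short string (prefix 0x80..=0xb7) the declared payload must fit in the buffer, and for a long string (prefix 0xb8..=0xbf) both the length bytes and the payload they announce must fit, otherwise `RlpInconsistentLengthAndData` is returned. Below is a standalone sketch of just those length rules (not the real `UntrustedRlp`/`BasicDecoder` types), checked against the byte patterns used in the tests above:

```rust
#[derive(Debug, PartialEq)]
enum DecodeError {
    InconsistentLengthAndData,
    ExpectedData,
}

// Minimal sketch of the length checks for RLP *string* items (prefixes
// 0x00..=0xbf). Returns the payload slice only if the declared length
// actually fits in `bytes`.
fn string_payload(bytes: &[u8]) -> Result<&[u8], DecodeError> {
    match bytes.first() {
        // A single byte 0x00..=0x7f is its own payload.
        Some(&l) if l <= 0x7f => Ok(&bytes[0..1]),
        // Short string: prefix 0x80..=0xb7, payload of (l - 0x80) bytes.
        Some(&l) if l <= 0xb7 => {
            let end = 1 + (l as usize - 0x80);
            if bytes.len() < end {
                return Err(DecodeError::InconsistentLengthAndData);
            }
            Ok(&bytes[1..end])
        }
        // Long string: prefix 0xb8..=0xbf, then (l - 0xb7) length bytes.
        Some(&l) if l <= 0xbf => {
            let len_of_len = l as usize - 0xb7;
            let begin = 1 + len_of_len;
            if bytes.len() < begin {
                return Err(DecodeError::InconsistentLengthAndData);
            }
            // Big-endian length read from the length bytes.
            let len = bytes[1..begin].iter().fold(0usize, |acc, &b| (acc << 8) | b as usize);
            let end = begin + len;
            if bytes.len() < end {
                return Err(DecodeError::InconsistentLengthAndData);
            }
            Ok(&bytes[begin..end])
        }
        _ => Err(DecodeError::ExpectedData),
    }
}

fn main() {
    // "cat" declared as 4 bytes (0x84) but only 3 present: inconsistent.
    assert_eq!(
        string_payload(&[0x84, b'c', b'a', b't']),
        Err(DecodeError::InconsistentLengthAndData)
    );
    // 0xb8, 255 declares a 255-byte string; providing all 255 bytes is fine.
    let mut ok = vec![0xb8, 255];
    ok.extend(std::iter::repeat(b'c').take(255));
    assert_eq!(string_payload(&ok).map(|p| p.len()), Ok(255));
    // ...but only 253 payload bytes is inconsistent, as in the tests above.
    let mut short = vec![0xb8, 255];
    short.extend(std::iter::repeat(b'c').take(253));
    assert_eq!(
        string_payload(&short),
        Err(DecodeError::InconsistentLengthAndData)
    );
}
```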
@@ -1,13 +1,14 @@
pub use std::io;
pub use std::fs;
pub use std::str;
pub use std::fmt;
pub use std::slice;
pub use std::cmp;
pub use std::ptr;
pub use std::result;
pub use std::option;
pub use std::mem;
pub use std::ops;
pub use std::slice;
pub use std::result;
pub use std::option;

pub use std::path::Path;
pub use std::str::{FromStr};
@@ -15,9 +16,9 @@ pub use std::io::{Read,Write};
pub use std::hash::{Hash, Hasher};
pub use std::error::Error as StdError;

pub use std::sync::*;
pub use std::ops::*;
pub use std::cmp::*;
pub use std::sync::*;
pub use std::cell::*;
pub use std::collections::*;