Revert "revert to #7677 #7679" (#7715)

This reverts commit 568dc33a02.
Tomasz Drwięga 2018-01-29 11:43:30 +01:00 committed by Marek Kotewicz
parent 53ec1141cf
commit 33b39f0725
20 changed files with 470 additions and 1482 deletions

Cargo.lock (generated)

@@ -2671,7 +2671,7 @@ dependencies = [
 [[package]]
 name = "rocksdb"
 version = "0.4.5"
-source = "git+https://github.com/paritytech/rust-rocksdb#7adec2311d31387a832b0ef051472cdef906b480"
+source = "git+https://github.com/paritytech/rust-rocksdb#ecf06adf3148ab10f6f7686b724498382ff4f36e"
 dependencies = [
  "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
  "local-encoding 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2681,10 +2681,11 @@ dependencies = [
 [[package]]
 name = "rocksdb-sys"
 version = "0.3.0"
-source = "git+https://github.com/paritytech/rust-rocksdb#7adec2311d31387a832b0ef051472cdef906b480"
+source = "git+https://github.com/paritytech/rust-rocksdb#ecf06adf3148ab10f6f7686b724498382ff4f36e"
 dependencies = [
  "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)",
+ "local-encoding 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "snappy-sys 0.1.0 (git+https://github.com/paritytech/rust-snappy)",
 ]

View File

@@ -47,6 +47,8 @@ pub fn add_security_headers(headers: &mut header::Headers, embeddable_on: Embedd
 	// Content Security Policy headers
 	headers.set_raw("Content-Security-Policy", String::new()
+		// Restrict everything to the same origin by default.
+		+ "default-src 'self';"
 		// Allow connecting to WS servers and HTTP(S) servers.
 		// We could be more restrictive and allow only RPC server URL.
 		+ "connect-src http: https: ws: wss:;"
@@ -64,7 +66,9 @@ pub fn add_security_headers(headers: &mut header::Headers, embeddable_on: Embedd
 		+ "style-src 'self' 'unsafe-inline' data: blob: https:;"
 		// Allow fonts from data: and HTTPS.
 		+ "font-src 'self' data: https:;"
-		// Allow inline scripts and scripts eval (webpack/jsconsole)
+		// Disallow objects
+		+ "object-src 'none';"
+		// Allow scripts
 		+ {
 			let script_src = embeddable_on.as_ref()
 				.map(|e| e.extra_script_src.iter()
@@ -72,18 +76,16 @@ pub fn add_security_headers(headers: &mut header::Headers, embeddable_on: Embedd
 				.join(" ")
 			).unwrap_or_default();
 			&format!(
-				"script-src 'self' 'unsafe-inline' 'unsafe-eval' {};",
+				"script-src 'self' {};",
 				script_src
 			)
 		}
 		// Same restrictions as script-src with additional
 		// blob: that is required for camera access (worker)
-		+ "worker-src 'self' 'unsafe-inline' 'unsafe-eval' https: blob:;"
-		// Restrict everything else to the same origin.
-		+ "default-src 'self';"
+		+ "worker-src 'self' https: blob:;"
 		// Run in sandbox mode (although it's not fully safe since we allow same-origin and script)
 		+ "sandbox allow-same-origin allow-forms allow-modals allow-popups allow-presentation allow-scripts;"
-		// Disallow subitting forms from any dapps
+		// Disallow submitting forms from any dapps
 		+ "form-action 'none';"
 		// Never allow mixed content
 		+ "block-all-mixed-content;"

View File

@@ -44,8 +44,7 @@ use bigint::hash::{H256, H520};
 use semantic_version::SemanticVersion;
 use parking_lot::{Mutex, RwLock};
 use unexpected::{Mismatch, OutOfBounds};
-use util::*;
-use bytes::Bytes;
+use util::Address;

 mod finality;
@@ -291,9 +290,11 @@ struct EpochVerifier {
 impl super::EpochVerifier<EthereumMachine> for EpochVerifier {
 	fn verify_light(&self, header: &Header) -> Result<(), Error> {
+		// Validate the timestamp
+		verify_timestamp(&*self.step, header_step(header)?)?;
 		// always check the seal since it's fast.
 		// nothing heavier to do.
-		verify_external(header, &self.subchain_validators, &*self.step, |_| {})
+		verify_external(header, &self.subchain_validators)
 	}

 	fn check_finality_proof(&self, proof: &[u8]) -> Option<Vec<H256>> {
@@ -317,7 +318,7 @@ impl super::EpochVerifier<EthereumMachine> for EpochVerifier {
 			//
 			// `verify_external` checks that signature is correct and author == signer.
 			if header.seal().len() != 2 { return None }
-			otry!(verify_external(header, &self.subchain_validators, &*self.step, |_| {}).ok());
+			otry!(verify_external(header, &self.subchain_validators).ok());

 			let newly_finalized = otry!(finality_checker.push_hash(header.hash(), header.author().clone()).ok());
 			finalized.extend(newly_finalized);
@@ -327,16 +328,6 @@ impl super::EpochVerifier<EthereumMachine> for EpochVerifier {
 	}
 }

-// Report misbehavior
-#[derive(Debug)]
-#[allow(dead_code)]
-enum Report {
-	// Malicious behavior
-	Malicious(Address, BlockNumber, Bytes),
-	// benign misbehavior
-	Benign(Address, BlockNumber),
-}
-
 fn header_step(header: &Header) -> Result<usize, ::rlp::DecoderError> {
 	UntrustedRlp::new(&header.seal().get(0).expect("was either checked with verify_block_basic or is genesis; has 2 fields; qed (Make sure the spec file has a correct genesis seal)")).as_val()
 }
@@ -355,22 +346,25 @@ fn is_step_proposer(validators: &ValidatorSet, bh: &H256, step: usize, address:
 	step_proposer(validators, bh, step) == *address
 }

-fn verify_external<F: Fn(Report)>(header: &Header, validators: &ValidatorSet, step: &Step, report: F)
-	-> Result<(), Error>
-{
-	let header_step = header_step(header)?;
-
+fn verify_timestamp(step: &Step, header_step: usize) -> Result<(), BlockError> {
 	match step.check_future(header_step) {
 		Err(None) => {
-			trace!(target: "engine", "verify_block_external: block from the future");
-			report(Report::Benign(*header.author(), header.number()));
-			return Err(BlockError::InvalidSeal.into())
+			trace!(target: "engine", "verify_timestamp: block from the future");
+			Err(BlockError::InvalidSeal.into())
 		},
 		Err(Some(oob)) => {
-			trace!(target: "engine", "verify_block_external: block too early");
-			return Err(BlockError::TemporarilyInvalid(oob).into())
+			// NOTE This error might be returned only in early stage of verification (Stage 1).
+			// Returning it further won't recover the sync process.
+			trace!(target: "engine", "verify_timestamp: block too early");
+			Err(BlockError::TemporarilyInvalid(oob).into())
 		},
-		Ok(_) => {
+		Ok(_) => Ok(()),
+	}
+}
+
+fn verify_external(header: &Header, validators: &ValidatorSet) -> Result<(), Error> {
+	let header_step = header_step(header)?;
+
 	let proposer_signature = header_signature(header)?;
 	let correct_proposer = validators.get(header.parent_hash(), header_step);
 	let is_invalid_proposer = *header.author() != correct_proposer ||
@@ -382,8 +376,6 @@ fn verify_external<F: Fn(Report)>(header: &Header, validators: &ValidatorSet, st
 	} else {
 		Ok(())
 	}
-		}
-	}
 }

 fn combine_proofs(signal_number: BlockNumber, set_proof: &[u8], finality_proof: &[u8]) -> Vec<u8> {
@@ -655,26 +647,38 @@ impl Engine<EthereumMachine> for AuthorityRound {
 	/// Check the number of seal fields.
 	fn verify_block_basic(&self, header: &Header) -> Result<(), Error> {
 		if header.number() >= self.validate_score_transition && *header.difficulty() >= U256::from(U128::max_value()) {
-			Err(From::from(BlockError::DifficultyOutOfBounds(
+			return Err(From::from(BlockError::DifficultyOutOfBounds(
 				OutOfBounds { min: None, max: Some(U256::from(U128::max_value())), found: *header.difficulty() }
-			)))
-		} else {
-			Ok(())
+			)));
+		}
+
+		// TODO [ToDr] Should this go from epoch manager?
+		// If yes then probably benign reporting needs to be moved further in the verification.
+		let set_number = header.number();
+
+		match verify_timestamp(&*self.step, header_step(header)?) {
+			Err(BlockError::InvalidSeal) => {
+				self.validators.report_benign(header.author(), set_number, header.number());
+				Err(BlockError::InvalidSeal.into())
+			}
+			Err(e) => Err(e.into()),
+			Ok(()) => Ok(()),
 		}
 	}

 	/// Do the step and gas limit validation.
 	fn verify_block_family(&self, header: &Header, parent: &Header) -> Result<(), Error> {
 		let step = header_step(header)?;
 		let parent_step = header_step(parent)?;
+		// TODO [ToDr] Should this go from epoch manager?
+		let set_number = header.number();

 		// Ensure header is from the step after parent.
 		if step == parent_step
 			|| (header.number() >= self.validate_step_transition && step <= parent_step) {
 			trace!(target: "engine", "Multiple blocks proposed for step {}.", parent_step);

-			self.validators.report_malicious(header.author(), header.number(), header.number(), Default::default());
+			self.validators.report_malicious(header.author(), set_number, header.number(), Default::default());
 			Err(EngineError::DoubleVote(header.author().clone()))?;
 		}
@@ -687,7 +691,7 @@ impl Engine<EthereumMachine> for AuthorityRound {
 				let skipped_primary = step_proposer(&*self.validators, &parent.hash(), s);
 				// Do not report this signer.
 				if skipped_primary != me {
-					self.validators.report_benign(&skipped_primary, header.number(), header.number());
+					self.validators.report_benign(&skipped_primary, set_number, header.number());
 				}
 				// Stop reporting once validators start repeating.
 				if !reported.insert(skipped_primary) { break; }
@@ -702,9 +706,8 @@ impl Engine<EthereumMachine> for AuthorityRound {
 		// fetch correct validator set for current epoch, taking into account
 		// finality of previous transitions.
 		let active_set;
-
-		let (validators, set_number) = if self.immediate_transitions {
-			(&*self.validators, header.number())
+		let validators = if self.immediate_transitions {
+			&*self.validators
 		} else {
 			// get correct validator set for epoch.
 			let client = match self.client.read().as_ref().and_then(|weak| weak.upgrade()) {
@@ -722,21 +725,12 @@ impl Engine<EthereumMachine> for AuthorityRound {
 			}

 			active_set = epoch_manager.validators().clone();
-			(&active_set as &_, epoch_manager.epoch_transition_number)
-		};
-
-		// always report with "self.validators" so that the report actually gets
-		// to the contract.
-		let report = |report| match report {
-			Report::Benign(address, block_number) =>
-				self.validators.report_benign(&address, set_number, block_number),
-			Report::Malicious(address, block_number, proof) =>
-				self.validators.report_malicious(&address, set_number, block_number, proof),
+			&active_set as &_
 		};

 		// verify signature against fixed list, but reports should go to the
 		// contract itself.
-		verify_external(header, validators, &*self.step, report)
+		verify_external(header, validators)
 	}

 	fn genesis_epoch_data(&self, header: &Header, call: &Call) -> Result<Vec<u8>, String> {
@@ -1059,8 +1053,7 @@ mod tests {
 		assert!(engine.verify_block_family(&header, &parent_header).is_ok());
 		assert!(engine.verify_block_external(&header).is_ok());

 		header.set_seal(vec![encode(&5usize).into_vec(), encode(&(&*signature as &[u8])).into_vec()]);
-		assert!(engine.verify_block_family(&header, &parent_header).is_ok());
-		assert!(engine.verify_block_external(&header).is_err());
+		assert!(engine.verify_block_basic(&header).is_err());
 	}

 	#[test]
@@ -1200,3 +1193,4 @@ mod tests {
 		AuthorityRound::new(params, machine).unwrap();
 	}
 }
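
The refactoring above splits the old combined check in two: `verify_timestamp` only classifies the header's step, and `verify_block_basic` decides whether to report benign misbehaviour. A self-contained sketch of that mapping, using stand-in types and a made-up drift constant instead of ethcore's `Step`, `OutOfBounds` and error enums:

// Stand-in types; the real Step/BlockError live in ethcore.
#[derive(Debug, PartialEq)]
enum BlockError { InvalidSeal, TemporarilyInvalid(u64) }

// Err(None): step too far ahead; Err(Some(step)): slightly ahead; Ok: acceptable.
fn check_future(current_step: u64, header_step: u64, drift: u64) -> Result<(), Option<u64>> {
	if header_step > current_step + drift { Err(None) }
	else if header_step > current_step { Err(Some(header_step)) }
	else { Ok(()) }
}

fn verify_timestamp(current_step: u64, header_step: u64) -> Result<(), BlockError> {
	match check_future(current_step, header_step, 1) {
		Err(None) => Err(BlockError::InvalidSeal),
		Err(Some(oob)) => Err(BlockError::TemporarilyInvalid(oob)),
		Ok(()) => Ok(()),
	}
}

fn main() {
	assert_eq!(verify_timestamp(10, 20), Err(BlockError::InvalidSeal));
	assert_eq!(verify_timestamp(10, 11), Err(BlockError::TemporarilyInvalid(11)));
	assert!(verify_timestamp(10, 10).is_ok());
}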

View File

@@ -192,7 +192,7 @@ mod tests {
 		header.set_number(2);
 		header.set_parent_hash(client.chain_info().best_block_hash);

 		// `reportBenign` when the designated proposer releases block from the future (bad clock).
-		assert!(client.engine().verify_block_external(&header).is_err());
+		assert!(client.engine().verify_block_basic(&header).is_err());
 		// Seal a block.
 		client.engine().step();
 		assert_eq!(client.chain_info().best_block_number, 1);

View File

@@ -1406,7 +1406,7 @@ mod tests {
 	}

 	#[test]
-	fn should_not_trace_delegatecall() {
+	fn should_trace_delegatecall_properly() {
 		init_log();

 		let mut state = get_temp_state();
@@ -1426,7 +1426,7 @@ mod tests {
 		}.sign(&secret(), None);

 		state.init_code(&0xa.into(), FromHex::from_hex("6000600060006000600b618000f4").unwrap()).unwrap();
-		state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()).unwrap();
+		state.init_code(&0xb.into(), FromHex::from_hex("60056000526001601ff3").unwrap()).unwrap();
 		let result = state.apply(&info, &machine, &t, true).unwrap();

 		let expected_trace = vec![FlatTrace {
@@ -1441,23 +1441,23 @@ mod tests {
 				call_type: CallType::Call,
 			}),
 			result: trace::Res::Call(trace::CallResult {
-				gas_used: U256::from(721), // in post-eip150
+				gas_used: U256::from(736), // in post-eip150
 				output: vec![]
 			}),
 		}, FlatTrace {
 			trace_address: vec![0].into_iter().collect(),
 			subtraces: 0,
 			action: trace::Action::Call(trace::Call {
-				from: "9cce34f7ab185c7aba1b7c8140d620b4bda941d6".into(),
-				to: 0xa.into(),
+				from: 0xa.into(),
+				to: 0xb.into(),
 				value: 0.into(),
 				gas: 32768.into(),
 				input: vec![],
 				call_type: CallType::DelegateCall,
 			}),
 			result: trace::Res::Call(trace::CallResult {
-				gas_used: 3.into(),
-				output: vec![],
+				gas_used: 18.into(),
+				output: vec![5],
 			}),
 		}];

View File

@@ -74,13 +74,23 @@ pub struct Call {
 impl From<ActionParams> for Call {
 	fn from(p: ActionParams) -> Self {
-		Call {
+		match p.call_type {
+			CallType::DelegateCall => Call {
+				from: p.address,
+				to: p.code_address,
+				value: p.value.value(),
+				gas: p.gas,
+				input: p.data.unwrap_or_else(Vec::new),
+				call_type: p.call_type,
+			},
+			_ => Call {
 				from: p.sender,
 				to: p.address,
 				value: p.value.value(),
 				gas: p.gas,
 				input: p.data.unwrap_or_else(Vec::new),
 				call_type: p.call_type,
+			},
 		}
 	}
 }
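
The new `match` above only changes how DELEGATECALL frames are traced: the executing contract becomes `from` and the contract supplying the code becomes `to`, which is what the updated state test expects. A minimal self-contained sketch with stand-in types (not ethcore's real `ActionParams`/`Call`):

// Simplified stand-in types; the real ActionParams/Call carry more fields.
enum CallType { Call, DelegateCall }

struct ActionParams { sender: u64, address: u64, code_address: u64, call_type: CallType }
struct TraceCall { from: u64, to: u64 }

impl From<ActionParams> for TraceCall {
	fn from(p: ActionParams) -> Self {
		match p.call_type {
			// DELEGATECALL: trace the executing contract as `from`
			// and the contract providing the code as `to`.
			CallType::DelegateCall => TraceCall { from: p.address, to: p.code_address },
			_ => TraceCall { from: p.sender, to: p.address },
		}
	}
}

fn main() {
	let c = TraceCall::from(ActionParams { sender: 0x1, address: 0xa, code_address: 0xb, call_type: CallType::DelegateCall });
	assert_eq!((c.from, c.to), (0xa, 0xb));
}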

View File

@@ -65,15 +65,15 @@ export default function (api, browserHistory, forEmbed = false) {
     .then(() => console.log('v1: started Status Provider'))

     .then(() => console.log('v1: starting Personal Provider...'))
-    .then(() => PersonalProvider.start())
+    .then(() => withTimeoutForLight('personal', PersonalProvider.start(), store))
     .then(() => console.log('v1: started Personal Provider'))

     .then(() => console.log('v1: starting Balances Provider...'))
-    .then(() => BalancesProvider.start())
+    .then(() => withTimeoutForLight('balances', BalancesProvider.start(), store))
     .then(() => console.log('v1: started Balances Provider'))

     .then(() => console.log('v1: starting Tokens Provider...'))
-    .then(() => TokensProvider.start())
+    .then(() => withTimeoutForLight('tokens', TokensProvider.start(), store))
     .then(() => console.log('v1: started Tokens Provider'));
 };
@@ -97,3 +97,39 @@ export default function (api, browserHistory, forEmbed = false) {

   return store;
 }
+
+function withTimeoutForLight (id, promise, store) {
+  const { nodeKind } = store.getState().nodeStatus;
+  const isLightNode = nodeKind.capability !== 'full';
+
+  if (!isLightNode) {
+    // make sure that no values are passed
+    return promise.then(() => {});
+  }
+
+  return new Promise((resolve, reject) => {
+    let isResolved = false;
+    const doResolve = () => {
+      if (!isResolved) {
+        isResolved = true;
+        resolve();
+      }
+    };
+    const timeout = setTimeout(() => {
+      console.warn(`Resolving ${id} by timeout.`);
+      doResolve();
+    }, 1000);
+
+    promise
+      .then(() => {
+        clearTimeout(timeout);
+        doResolve();
+      })
+      .catch(err => {
+        clearTimeout(timeout);
+        if (!isResolved) {
+          reject(err);
+        }
+      });
+  });
+}

js/package-lock.json (generated): file diff suppressed because it is too large.

View File

@@ -170,7 +170,6 @@
     "redux": "3.7.2",
     "semantic-ui-react": "0.77.0",
     "solc": "ngotchac/solc-js",
-    "store": "1.3.20",
-    "web3": "1.0.0-beta.26"
+    "store": "1.3.20"
   }
 }

View File

@@ -26,11 +26,14 @@
 .list {
   margin: 0 !important;
   padding: 1em 1em !important;
-  background-color: #f5f5f5;
+  background-color: white;
 }

-.isDefault {
-  background-color: white;
+.accountsList {
+  background-color: #f5f5f5;
+  height: 300px;
+  overflow-y: auto;
 }

 .hasOtherAccounts {

View File

@@ -68,14 +68,14 @@ class DefaultAccount extends Component {
       }
       content={
         <div>
-          <List relaxed='very' selection className={ [styles.list, styles.isDefault, allAccounts.length > 1 && styles.hasOtherAccounts].join(' ') }>
+          <List relaxed='very' selection className={ [styles.list, allAccounts.length > 1 && styles.hasOtherAccounts].join(' ') }>
             <AccountItem
               isDefault
               account={ defaultAccount }
             />
           </List>
           {allAccounts.length > 1 &&
-            <List relaxed='very' selection className={ styles.list } divided>
+            <List relaxed='very' selection className={ [styles.list, styles.accountsList].join(' ') } divided>
               {allAccounts
                 .filter(({ address }) => address !== defaultAddress)
                 .map(account => (

View File

@@ -16,7 +16,6 @@

 import Api from '@parity/api';
 import qs from 'query-string';
-import Web3 from 'web3';

 function initProvider () {
   const path = window.location.pathname.split('/');
@@ -48,24 +47,9 @@ function initProvider () {
 }

 function initWeb3 (ethereum) {
-  // FIXME: Use standard provider for web3
-  const provider = new Api.Provider.SendAsync(ethereum);
-  const web3 = new Web3(provider);
-
-  if (!web3.currentProvider) {
-    web3.currentProvider = provider;
-  }
-
-  // set default account
-  web3.eth.getAccounts((error, accounts) => {
-    if (error || !accounts || !accounts[0]) {
-      return;
-    }
-
-    web3.eth.defaultAccount = accounts[0];
-  });
-
-  window.web3 = web3;
+  const currentProvider = new Api.Provider.SendAsync(ethereum);
+
+  window.web3 = { currentProvider };
 }

 function initParity (ethereum) {

View File

@@ -9,7 +9,7 @@
 !define COMPANYNAME "Parity Technologies"
 !define DESCRIPTION "Fast, light, robust Ethereum implementation"
 !define VERSIONMAJOR 1
-!define VERSIONMINOR 10
+!define VERSIONMINOR 9
 !define VERSIONBUILD 0
 !define ARGS ""
 !define FIRST_START_ARGS "--mode=passive ui"

View File

@@ -586,7 +586,12 @@ impl Configuration {
 			let mut extra_embed = dev_ui.clone();
 			match self.ui_hosts() {
 				// In case host validation is disabled allow all frame ancestors
-				None => extra_embed.push(("*".to_owned(), ui_port)),
+				None => {
+					// NOTE Chrome does not seem to support "*:<port>"
+					// we use `http(s)://*:<port>` instead.
+					extra_embed.push(("http://*".to_owned(), ui_port));
+					extra_embed.push(("https://*".to_owned(), ui_port));
+				},
 				Some(hosts) => extra_embed.extend(hosts.into_iter().filter_map(|host| {
 					let mut it = host.split(":");
 					let host = it.next();

View File

@@ -147,19 +147,30 @@ impl LightFetch {
 			Err(e) => return Box::new(future::err(e)),
 		};

-		let maybe_future = self.sync.with_context(move |ctx| {
-			Box::new(self.on_demand.request_raw(ctx, reqs)
-				.expect("all back-references known to be valid; qed")
-				.map(|res| extract_header(&res, header_ref)
-					.expect("these responses correspond to requests that header_ref belongs to. \
-						therefore it will not fail; qed"))
-				.map_err(errors::on_demand_cancel))
-		});
-
-		match maybe_future {
-			Some(recv) => recv,
-			None => Box::new(future::err(errors::network_disabled()))
-		}
+		self.send_requests(reqs, |res|
+			extract_header(&res, header_ref)
+				.expect("these responses correspond to requests that header_ref belongs to \
+					therefore it will not fail; qed")
+		)
+	}
+
+	/// Helper for getting contract code at a given block.
+	pub fn code(&self, address: Address, id: BlockId) -> BoxFuture<Vec<u8>> {
+		let mut reqs = Vec::new();
+		let header_ref = match self.make_header_requests(id, &mut reqs) {
+			Ok(r) => r,
+			Err(e) => return Box::new(future::err(e)),
+		};
+
+		reqs.push(request::Account { header: header_ref.clone(), address: address }.into());
+		let account_idx = reqs.len() - 1;
+		reqs.push(request::Code { header: header_ref, code_hash: Field::back_ref(account_idx, 0) }.into());
+
+		self.send_requests(reqs, |mut res| match res.pop() {
+			Some(OnDemandResponse::Code(code)) => code,
+			_ => panic!("responses correspond directly with requests in amount and type; qed"),
+		})
 	}

 	/// Helper for getting account info at a given block.
@@ -173,20 +184,10 @@ impl LightFetch {
 		reqs.push(request::Account { header: header_ref, address: address }.into());

-		let maybe_future = self.sync.with_context(move |ctx| {
-			Box::new(self.on_demand.request_raw(ctx, reqs)
-				.expect("all back-references known to be valid; qed")
-				.map(|mut res| match res.pop() {
-					Some(OnDemandResponse::Account(acc)) => acc,
-					_ => panic!("responses correspond directly with requests in amount and type; qed"),
-				})
-				.map_err(errors::on_demand_cancel))
-		});
-
-		match maybe_future {
-			Some(recv) => recv,
-			None => Box::new(future::err(errors::network_disabled()))
-		}
+		self.send_requests(reqs, |mut res| match res.pop() {
+			Some(OnDemandResponse::Account(acc)) => acc,
+			_ => panic!("responses correspond directly with requests in amount and type; qed"),
+		})
 	}

 	/// Helper for getting proved execution.
@@ -277,20 +278,10 @@ impl LightFetch {
 		reqs.push(request::Body(header_ref).into());

-		let maybe_future = self.sync.with_context(move |ctx| {
-			Box::new(self.on_demand.request_raw(ctx, reqs)
-				.expect(NO_INVALID_BACK_REFS)
-				.map(|mut res| match res.pop() {
-					Some(OnDemandResponse::Body(b)) => b,
-					_ => panic!("responses correspond directly with requests in amount and type; qed"),
-				})
-				.map_err(errors::on_demand_cancel))
-		});
-
-		match maybe_future {
-			Some(recv) => recv,
-			None => Box::new(future::err(errors::network_disabled()))
-		}
+		self.send_requests(reqs, |mut res| match res.pop() {
+			Some(OnDemandResponse::Body(b)) => b,
+			_ => panic!("responses correspond directly with requests in amount and type; qed"),
+		})
 	}

 	/// Get the block receipts. Fails on unknown block ID.
@@ -303,20 +294,10 @@ impl LightFetch {
 		reqs.push(request::BlockReceipts(header_ref).into());

-		let maybe_future = self.sync.with_context(move |ctx| {
-			Box::new(self.on_demand.request_raw(ctx, reqs)
-				.expect(NO_INVALID_BACK_REFS)
-				.map(|mut res| match res.pop() {
-					Some(OnDemandResponse::Receipts(b)) => b,
-					_ => panic!("responses correspond directly with requests in amount and type; qed"),
-				})
-				.map_err(errors::on_demand_cancel))
-		});
-
-		match maybe_future {
-			Some(recv) => recv,
-			None => Box::new(future::err(errors::network_disabled()))
-		}
+		self.send_requests(reqs, |mut res| match res.pop() {
+			Some(OnDemandResponse::Receipts(b)) => b,
+			_ => panic!("responses correspond directly with requests in amount and type; qed"),
+		})
 	}

 	/// Get transaction logs
@@ -433,6 +414,23 @@ impl LightFetch {
 			Either::B(extract_transaction)
 		}))
 	}
+
+	fn send_requests<T, F>(&self, reqs: Vec<OnDemandRequest>, parse_response: F) -> BoxFuture<T> where
+		F: FnOnce(Vec<OnDemandResponse>) -> T + Send + 'static,
+		T: Send + 'static,
+	{
+		let maybe_future = self.sync.with_context(move |ctx| {
+			Box::new(self.on_demand.request_raw(ctx, reqs)
+				.expect(NO_INVALID_BACK_REFS)
+				.map(parse_response)
+				.map_err(errors::on_demand_cancel))
+		});
+
+		match maybe_future {
+			Some(recv) => recv,
+			None => Box::new(future::err(errors::network_disabled()))
+		}
+	}
 }

 #[derive(Clone)]
} }
#[derive(Clone)] #[derive(Clone)]

View File

@@ -349,8 +349,8 @@ impl<T: LightChainClient + 'static> Eth for EthClient<T> {
 		}))
 	}

-	fn code_at(&self, _address: RpcH160, _num: Trailing<BlockNumber>) -> BoxFuture<Bytes> {
-		Box::new(future::err(errors::unimplemented(None)))
+	fn code_at(&self, address: RpcH160, num: Trailing<BlockNumber>) -> BoxFuture<Bytes> {
+		Box::new(self.fetcher().code(address.into(), num.unwrap_or_default().into()).map(Into::into))
 	}

 	fn send_raw_transaction(&self, raw: Bytes) -> Result<RpcH256> {

View File

@@ -208,7 +208,12 @@ impl Parity for ParityClient {
 	}

 	fn registry_address(&self) -> Result<Option<H160>> {
-		Err(errors::light_unimplemented(None))
+		let reg = self.light_dispatch.client.engine().params().registrar;
+		if reg == Default::default() {
+			Ok(None)
+		} else {
+			Ok(Some(reg.into()))
+		}
 	}

 	fn rpc_settings(&self) -> Result<RpcSettings> {

View File

@@ -170,7 +170,18 @@ pub struct AttachedProtocol {
 }

 impl AttachedProtocol {
-	fn register(&self, _network: &NetworkService) {}
+	fn register(&self, network: &NetworkService) {
+		let res = network.register_protocol(
+			self.handler.clone(),
+			self.protocol_id,
+			self.packet_count,
+			self.versions
+		);
+
+		if let Err(e) = res {
+			warn!(target: "sync", "Error attaching protocol {:?}: {:?}", self.protocol_id, e);
+		}
+	}
 }

 /// EthSync initialization parameters.

View File

@@ -522,6 +522,10 @@ impl BlockDownloader {
 					trace!(target: "sync", "Unknown new block parent, restarting sync");
 					break;
 				},
+				Err(BlockImportError::Block(BlockError::TemporarilyInvalid(_))) => {
+					debug!(target: "sync", "Block temporarily invalid, restarting sync");
+					break;
+				},
 				Err(e) => {
 					debug!(target: "sync", "Bad block {:?} : {:?}", h, e);
 					bad = true;

View File

@@ -32,7 +32,7 @@ use std::cmp;
 use std::collections::HashMap;
 use std::marker::PhantomData;
 use std::path::{PathBuf, Path};
-use std::{mem, fs, io};
+use std::{fs, io, mem, result};

 use parking_lot::{Mutex, MutexGuard, RwLock};
 use rocksdb::{
@@ -257,7 +257,25 @@ pub struct Database {
 	flushing_lock: Mutex<bool>,
 }

+#[inline]
+fn check_for_corruption<T, P: AsRef<Path>>(path: P, res: result::Result<T, String>) -> result::Result<T, String> {
+	if let Err(ref s) = res {
+		if s.starts_with("Corruption:") {
+			warn!("DB corrupted: {}. Repair will be triggered on next restart", s);
+			let _ = fs::File::create(path.as_ref().join(Database::CORRUPTION_FILE_NAME));
+		}
+	}
+
+	res
+}
+
+fn is_corrupted(s: &str) -> bool {
+	s.starts_with("Corruption:") || s.starts_with("Invalid argument: You have to open all column families")
+}
+
 impl Database {
+	const CORRUPTION_FILE_NAME: &'static str = "CORRUPTED";
+
 	/// Open database with default settings.
 	pub fn open_default(path: &str) -> Result<Database> {
 		Database::open(&DatabaseConfig::default(), path)
@@ -287,6 +305,14 @@ impl Database {
 			block_opts.set_cache(cache);
 		}

+		// attempt database repair if it has been previously marked as corrupted
+		let db_corrupted = Path::new(path).join(Database::CORRUPTION_FILE_NAME);
+		if db_corrupted.exists() {
+			warn!("DB has been previously marked as corrupted, attempting repair");
+			DB::repair(&opts, path)?;
+			fs::remove_file(db_corrupted)?;
+		}
+
 		let columns = config.columns.unwrap_or(0) as usize;

 		let mut cf_options = Vec::with_capacity(columns);
@@ -306,12 +332,11 @@ impl Database {
 		let mut cfs: Vec<Column> = Vec::new();
 		let db = match config.columns {
-			Some(columns) => {
+			Some(_) => {
 				match DB::open_cf(&opts, path, &cfnames, &cf_options) {
 					Ok(db) => {
 						cfs = cfnames.iter().map(|n| db.cf_handle(n)
 							.expect("rocksdb opens a cf_handle for each cfname; qed")).collect();
-						assert!(cfs.len() == columns as usize);
 						Ok(db)
 					}
 					Err(_) => {
@@ -321,7 +346,7 @@ impl Database {
 					cfs = cfnames.iter().enumerate().map(|(i, n)| db.create_cf(n, &cf_options[i])).collect::<::std::result::Result<_, _>>()?;
 					Ok(db)
 				},
-				err @ Err(_) => err,
+				err => err,
 			}
 		}
 	}
@@ -331,14 +356,18 @@ impl Database {
 		let db = match db {
 			Ok(db) => db,
-			Err(ref s) if s.starts_with("Corruption:") => {
-				info!("{}", s);
-				info!("Attempting DB repair for {}", path);
+			Err(ref s) if is_corrupted(s) => {
+				warn!("DB corrupted: {}, attempting repair", s);
 				DB::repair(&opts, path)?;

 				match cfnames.is_empty() {
 					true => DB::open(&opts, path)?,
-					false => DB::open_cf(&opts, path, &cfnames, &cf_options)?
+					false => {
+						let db = DB::open_cf(&opts, path, &cfnames, &cf_options)?;
+						cfs = cfnames.iter().map(|n| db.cf_handle(n)
+							.expect("rocksdb opens a cf_handle for each cfname; qed")).collect();
+						db
+					},
 				}
 			},
 			Err(s) => { return Err(s.into()); }
@@ -425,7 +454,11 @@ impl Database {
 					}
 				}
 			}
-			db.write_opt(batch, &self.write_opts)?;
+
+			check_for_corruption(
+				&self.path,
+				db.write_opt(batch, &self.write_opts))?;
+
 			for column in self.flushing.write().iter_mut() {
 				column.clear();
 				column.shrink_to_fit();
@@ -471,7 +504,10 @@ impl Database {
 					},
 				}
 			}
-			db.write_opt(batch, &self.write_opts).map_err(Into::into)
+
+			check_for_corruption(
+				&self.path,
+				db.write_opt(batch, &self.write_opts)).map_err(Into::into)
 		},
 		None => Err("Database is closed".into())
 	}
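
The corruption handling added above follows a marker-file pattern: tag the database directory when RocksDB reports corruption, then repair on the next open. A generic, self-contained sketch of the same idea using plain functions instead of the real rocksdb bindings (`repair_if_marked` is a hypothetical stand-in for the `DB::repair` call in the diff):

use std::{fs, path::Path};

const CORRUPTION_FILE_NAME: &str = "CORRUPTED";

// On a failed write, look for RocksDB's "Corruption:" prefix and drop a marker
// file so the next open knows to run a repair first.
fn check_for_corruption<T>(db_dir: &Path, res: Result<T, String>) -> Result<T, String> {
	if let Err(ref s) = res {
		if s.starts_with("Corruption:") {
			let _ = fs::File::create(db_dir.join(CORRUPTION_FILE_NAME));
		}
	}
	res
}

// On open, repair if the marker is present, then clear it.
fn repair_if_marked(db_dir: &Path) -> std::io::Result<bool> {
	let marker = db_dir.join(CORRUPTION_FILE_NAME);
	if marker.exists() {
		// a real implementation would run the database repair routine here
		fs::remove_file(marker)?;
		return Ok(true);
	}
	Ok(false)
}

fn main() {
	let dir = std::env::temp_dir();
	let _ = check_for_corruption::<()>(&dir, Err("Corruption: block checksum mismatch".into()));
	assert!(repair_if_marked(&dir).unwrap());
}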