Compare commits
21 commits: main...v1.10.5-ci

Commits (Author | SHA1 | Date):
c7202a771d
02ffb9be42
39b9f1e252
226a1d31b6
063b0761f5
b9ceda38a6
d9f6aba308
f4ae813fda
7202f7ae82
45c29a2a57
fc652db729
68320e8e89
fdd03cc40d
c3d446420c
9e7f887606
5acdd091a2
0a9d41e294
610f6f1425
2403fc52c1
0ab1930c04
3d6ede0c58
.gitlab-ci.yml:

@@ -8,14 +8,12 @@ variables:
   CARGOFLAGS: ""
   CI_SERVER_NAME: "GitLab CI"
   LIBSSL: "libssl1.0.0 (>=1.0.0)"
   CARGO_HOME: $CI_PROJECT_DIR/cargo
 cache:
   key: "$CI_BUILD_STAGE-$CI_BUILD_REF_NAME"
   paths:
     - target/
     - cargo/
   untracked: true
-linux-stable:
+linux-ubuntu:
   stage: build
   image: parity/rust:gitlab-ci
   only:
@@ -33,7 +31,7 @@ linux-stable:
     paths:
       - parity.zip
     name: "stable-x86_64-unknown-linux-gnu_parity"
-linux-stable-debian:
+linux-debian:
   stage: build
   image: parity/rust-debian:gitlab-ci
   only:
@@ -82,6 +80,7 @@ linux-i686:
     paths:
       - parity.zip
     name: "i686-unknown-linux-gnu"
+  allow_failure: true
 linux-armv7:
   stage: build
   image: parity/rust-armv7:gitlab-ci
@@ -98,6 +97,7 @@ linux-armv7:
     paths:
       - parity.zip
     name: "armv7_unknown_linux_gnueabihf_parity"
+  allow_failure: true
 linux-arm:
   stage: build
   image: parity/rust-arm:gitlab-ci
@@ -114,6 +114,7 @@ linux-arm:
     paths:
       - parity.zip
     name: "arm-unknown-linux-gnueabihf_parity"
+  allow_failure: true
 linux-aarch64:
   stage: build
   image: parity/rust-arm64:gitlab-ci
@@ -132,7 +133,7 @@ linux-aarch64:
     name: "aarch64-unknown-linux-gnu_parity"
 linux-snap:
   stage: build
-  image: snapcore/snapcraft:stable
+  image: parity/snapcraft:gitlab-ci
   only:
     - stable
     - beta
@@ -187,7 +188,7 @@ docker-build:
   before_script:
     - docker info
   script:
-    - if [ "$CI_BUILD_REF_NAME" == "beta-release" ]; then DOCKER_TAG="latest"; else DOCKER_TAG=$CI_BUILD_REF_NAME; fi
+    - if [ "$CI_BUILD_REF_NAME" == "master" ]; then DOCKER_TAG="latest"; else DOCKER_TAG=$CI_BUILD_REF_NAME; fi
     - echo "Tag:" $DOCKER_TAG
     - docker login -u $Docker_Hub_User_Parity -p $Docker_Hub_Pass_Parity
     - scripts/docker-build.sh $DOCKER_TAG
Cargo.lock (generated, 789 lines changed): file diff suppressed because it is too large.
Cargo.toml:

@@ -2,7 +2,7 @@
 description = "Parity Ethereum client"
 name = "parity"
 # NOTE Make sure to update util/version/Cargo.toml as well
-version = "1.10.0"
+version = "1.10.5"
 license = "GPL-3.0"
 authors = ["Parity Technologies <admin@parity.io>"]

@@ -27,7 +27,6 @@ toml = "0.4"
 serde = "1.0"
 serde_json = "1.0"
 serde_derive = "1.0"
 app_dirs = "1.1.1"
 futures = "0.1"
 futures-cpupool = "0.1"
 fdlimit = "0.1"
dapps/Cargo.toml:

@@ -35,6 +35,7 @@ node-health = { path = "./node-health" }
 parity-hash-fetch = { path = "../hash-fetch" }
 parity-reactor = { path = "../util/reactor" }
 parity-ui = { path = "./ui" }
+parity-ui-deprecation = { path = "./ui-deprecation" }
 keccak-hash = { path = "../util/hash" }
 parity-version = { path = "../util/version" }
@@ -19,9 +19,10 @@ use std::{fs, fmt};
 use std::io::{self, Read, Write};
 use std::path::PathBuf;
 use ethereum_types::H256;
-use fetch::{self, Mime};
+use fetch;
 use futures_cpupool::CpuPool;
 use hash::keccak_buffer;
+use mime_guess::Mime;

 use apps::manifest::{MANIFEST_FILENAME, deserialize_manifest, serialize_manifest, Manifest};
 use handlers::{ContentValidator, ValidatorResponse};
@@ -53,7 +54,7 @@ fn write_response_and_check_hash(

     // Now write the response
     let mut file = io::BufWriter::new(fs::File::create(&content_path)?);
-    let mut reader = io::BufReader::new(response);
+    let mut reader = io::BufReader::new(fetch::BodyReader::new(response));
     io::copy(&mut reader, &mut file)?;
     file.flush()?;
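Note: the switch to `fetch::BodyReader::new(response)` above implies the new `fetch::Response` no longer implements `io::Read` itself; an adapter now bridges the streamed body into blocking I/O for `io::copy`. A minimal, self-contained sketch of that adapter idea, with hypothetical names (the real `fetch::BodyReader` wraps a hyper body stream):

// Sketch of the idea behind `fetch::BodyReader` (illustrative, not the real
// parity-fetch API): wrap a sequence of body chunks in a type implementing
// `io::Read`, so the response can feed `io::copy`.
use std::collections::VecDeque;
use std::io::{self, Read};

struct BodyReader {
    chunks: VecDeque<Vec<u8>>, // pending body chunks (a real impl polls a hyper stream)
    current: Vec<u8>,          // chunk currently being drained
    pos: usize,                // read offset into `current`
}

impl BodyReader {
    fn new(chunks: Vec<Vec<u8>>) -> Self {
        BodyReader { chunks: chunks.into(), current: Vec::new(), pos: 0 }
    }
}

impl Read for BodyReader {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        // Refill from the next chunk once the current one is exhausted.
        while self.pos == self.current.len() {
            match self.chunks.pop_front() {
                Some(chunk) => { self.current = chunk; self.pos = 0; }
                None => return Ok(0), // end of body
            }
        }
        let n = buf.len().min(self.current.len() - self.pos);
        buf[..n].copy_from_slice(&self.current[self.pos..self.pos + n]);
        self.pos += n;
        Ok(n)
    }
}

fn main() -> io::Result<()> {
    let body = vec![b"hello ".to_vec(), b"world".to_vec()];
    let mut out = Vec::new();
    io::copy(&mut BodyReader::new(body), &mut out)?;
    assert_eq!(out, b"hello world");
    Ok(())
}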
@@ -216,6 +216,7 @@ impl<R: URLHint + 'static, F: Fetch> Endpoint for ContentFetcher<F, R> {
         ),
         self.embeddable_on.clone(),
         self.fetch.clone(),
+        self.pool.clone(),
     )
 },
 URLHintResult::GithubDapp(content) => {
@@ -232,6 +233,7 @@ impl<R: URLHint + 'static, F: Fetch> Endpoint for ContentFetcher<F, R> {
         ),
         self.embeddable_on.clone(),
         self.fetch.clone(),
+        self.pool.clone(),
     )
 },
 URLHintResult::Content(content) => {
@@ -248,6 +250,7 @@ impl<R: URLHint + 'static, F: Fetch> Endpoint for ContentFetcher<F, R> {
         ),
         self.embeddable_on.clone(),
         self.fetch.clone(),
+        self.pool.clone(),
     )
 },
 };
@@ -280,7 +283,7 @@ impl<R: URLHint + 'static, F: Fetch> Endpoint for ContentFetcher<F, R> {
 mod tests {
     use std::env;
     use std::sync::Arc;
-    use fetch::{Fetch, Client};
+    use fetch::Client;
     use futures::{future, Future};
     use hash_fetch::urlhint::{URLHint, URLHintResult};
     use ethereum_types::H256;
@@ -23,8 +23,6 @@ use page;
 use proxypac::ProxyPac;
 use web::Web;
 use fetch::Fetch;
 use parity_dapps::WebApp;
 use parity_ui;
 use {WebProxyTokens, ParentFrameSettings};

 mod app;
@@ -44,11 +42,15 @@ pub const WEB_PATH: &'static str = "web";
 pub const URL_REFERER: &'static str = "__referer=";

 pub fn utils(pool: CpuPool) -> Box<Endpoint> {
-    Box::new(page::builtin::Dapp::new(pool, parity_ui::App::default()))
+    Box::new(page::builtin::Dapp::new(pool, ::parity_ui::App::default()))
 }

 pub fn ui(pool: CpuPool) -> Box<Endpoint> {
-    Box::new(page::builtin::Dapp::with_fallback_to_index(pool, parity_ui::App::default()))
+    Box::new(page::builtin::Dapp::with_fallback_to_index(pool, ::parity_ui::App::default()))
 }

+pub fn ui_deprecation(pool: CpuPool) -> Box<Endpoint> {
+    Box::new(page::builtin::Dapp::with_fallback_to_index(pool, ::parity_ui_deprecation::App::default()))
+}
+
 pub fn ui_redirection(embeddable: Option<ParentFrameSettings>) -> Box<Endpoint> {
@@ -76,25 +78,28 @@ pub fn all_endpoints<F: Fetch>(
     }

     // NOTE [ToDr] Dapps will be currently embeded on 8180
-    insert::<parity_ui::App>(&mut pages, "ui", Embeddable::Yes(embeddable.clone()), pool.clone());
+    pages.insert(
+        "ui".into(),
+        Box::new(page::builtin::Dapp::new_safe_to_embed(pool.clone(), ::parity_ui::App::default(), embeddable.clone()))
+    );
     // old version
-    insert::<parity_ui::old::App>(&mut pages, "v1", Embeddable::Yes(embeddable.clone()), pool.clone());
-    pages.insert("proxy".into(), ProxyPac::boxed(embeddable.clone(), dapps_domain.to_owned()));
-    pages.insert(WEB_PATH.into(), Web::boxed(embeddable.clone(), web_proxy_tokens.clone(), fetch.clone()));
+    pages.insert(
+        "v1".into(),
+        Box::new({
+            let mut page = page::builtin::Dapp::new_safe_to_embed(pool.clone(), ::parity_ui::old::App::default(), embeddable.clone());
+            // allow JS eval on old Wallet
+            page.allow_js_eval();
+            page
+        })
+    );
+    pages.insert(
+        "proxy".into(),
+        ProxyPac::boxed(embeddable.clone(), dapps_domain.to_owned())
+    );
+    pages.insert(
+        WEB_PATH.into(),
+        Web::boxed(embeddable.clone(), web_proxy_tokens.clone(), fetch.clone(), pool.clone())
+    );

     (local_endpoints, pages)
 }

 fn insert<T : WebApp + Default + 'static>(pages: &mut Endpoints, id: &str, embed_at: Embeddable, pool: CpuPool) {
     pages.insert(id.to_owned(), Box::new(match embed_at {
         Embeddable::Yes(address) => page::builtin::Dapp::new_safe_to_embed(pool, T::default(), address),
         Embeddable::No => page::builtin::Dapp::new(pool, T::default()),
     }));
 }

 enum Embeddable {
     Yes(Option<ParentFrameSettings>),
     #[allow(dead_code)]
     No,
 }
@@ -23,6 +23,7 @@ use std::time::{Instant, Duration};
 use fetch::{self, Fetch};
 use futures::sync::oneshot;
 use futures::{self, Future};
+use futures_cpupool::CpuPool;
 use hyper::{self, Method, StatusCode};
 use parking_lot::Mutex;

@@ -35,7 +36,7 @@ const FETCH_TIMEOUT: u64 = 300;

 pub enum ValidatorResponse {
     Local(local::Dapp),
-    Streaming(StreamingHandler<fetch::Response>),
+    Streaming(StreamingHandler<fetch::BodyReader>),
 }

 pub trait ContentValidator: Sized + Send + 'static {
@@ -252,6 +253,7 @@ impl ContentFetcherHandler {
     installer: H,
     embeddable_on: Embeddable,
     fetch: F,
+    pool: CpuPool,
 ) -> Self {
     let fetch_control = FetchControl::default();
     let errors = Errors { embeddable_on };
@@ -262,6 +264,7 @@ impl ContentFetcherHandler {
     Method::Get => {
         trace!(target: "dapps", "Fetching content from: {:?}", url);
         FetchState::InProgress(Self::fetch_content(
+            pool,
             fetch,
             url,
             fetch_control.abort.clone(),
@@ -282,6 +285,7 @@ impl ContentFetcherHandler {
 }

 fn fetch_content<H: ContentValidator, F: Fetch>(
+    pool: CpuPool,
     fetch: F,
     url: &str,
     abort: Arc<AtomicBool>,
@@ -290,8 +294,8 @@ impl ContentFetcherHandler {
     installer: H,
 ) -> Box<Future<Item=FetchState, Error=()> + Send> {
     // Start fetching the content
-    let fetch2 = fetch.clone();
-    let future = fetch.fetch_with_abort(url, abort.into()).then(move |result| {
+    let pool2 = pool.clone();
+    let future = fetch.fetch(url, abort.into()).then(move |result| {
         trace!(target: "dapps", "Fetching content finished. Starting validation: {:?}", result);
         Ok(match result {
             Ok(response) => match installer.validate_and_install(response) {
@@ -303,7 +307,7 @@ impl ContentFetcherHandler {
                 Ok(ValidatorResponse::Streaming(stream)) => {
                     trace!(target: "dapps", "Validation OK. Streaming response.");
                     let (reading, response) = stream.into_response();
-                    fetch2.process_and_forget(reading);
+                    pool.spawn(reading).forget();
                     FetchState::Streaming(response)
                 },
                 Err(e) => {
@@ -319,7 +323,7 @@ impl ContentFetcherHandler {
     });

     // make sure to run within fetch thread pool.
-    fetch.process(future)
+    Box::new(pool2.spawn(future))
 }
 }
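Note: the `fetch_content` rewrite above moves background work off the fetch client's internal pool (`fetch.process(...)`, `process_and_forget`) and onto an explicitly passed `futures_cpupool::CpuPool`. A small sketch of the two `CpuPool` idioms used, against the futures 0.1-era API this codebase targets:

// Spawning on futures-cpupool 0.1: `spawn` returns a CpuFuture handle;
// `forget` detaches it so the pool drives it to completion on its own.
extern crate futures;
extern crate futures_cpupool;

use futures::Future;
use futures_cpupool::CpuPool;

fn main() {
    let pool = CpuPool::new(4);

    // Drive a future on the pool and keep a handle to its result,
    // as in `Box::new(pool2.spawn(future))` above.
    let handle = pool.spawn(futures::future::ok::<u32, ()>(21).map(|n| n * 2));
    assert_eq!(handle.wait(), Ok(42));

    // Fire-and-forget, as in `pool.spawn(reading).forget()` above.
    pool.spawn(futures::future::ok::<(), ()>(())).forget();
}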
@@ -39,6 +39,7 @@ extern crate node_health;
 extern crate parity_dapps_glue as parity_dapps;
 extern crate parity_hash_fetch as hash_fetch;
 extern crate parity_ui;
+extern crate parity_ui_deprecation;
 extern crate keccak_hash as hash;
 extern crate parity_version;

@@ -158,6 +159,7 @@ impl Middleware {
     registrar: Arc<ContractClient>,
     sync_status: Arc<SyncStatus>,
     fetch: F,
+    info_page_only: bool,
 ) -> Self {
     let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new(
         hash_fetch::urlhint::URLHintContract::new(registrar),
@@ -165,6 +167,23 @@ impl Middleware {
         fetch.clone(),
         pool.clone(),
     ).embeddable_on(None).allow_dapps(false));

+    if info_page_only {
+        let mut special = HashMap::default();
+        special.insert(router::SpecialEndpoint::Home, Some(apps::ui_deprecation(pool.clone())));
+
+        return Middleware {
+            endpoints: Default::default(),
+            router: router::Router::new(
+                content_fetcher,
+                None,
+                special,
+                None,
+                dapps_domain.to_owned(),
+            ),
+        }
+    }
+
     let special = {
         let mut special = special_endpoints(
             pool.clone(),
@@ -75,6 +75,11 @@ impl<T: WebApp + 'static> Dapp<T> {
         fallback_to_index_html: false,
     }
 }
+
+/// Allow the dapp to use `unsafe-eval` to run JS.
+pub fn allow_js_eval(&mut self) {
+    self.info.allow_js_eval = Some(true);
+}
 }

 impl<T: WebApp> Endpoint for Dapp<T> {
@@ -150,10 +150,20 @@ impl Router {
         }
     },
     // RPC by default
-    _ => {
+    _ if self.special.contains_key(&SpecialEndpoint::Rpc) => {
         trace!(target: "dapps", "Resolving to RPC call.");
         Response::None(req)
     },
+    // 404 otherwise
+    _ => {
+        Response::Some(Box::new(future::ok(handlers::ContentHandler::error(
+            hyper::StatusCode::NotFound,
+            "404 Not Found",
+            "Requested content was not found.",
+            None,
+            self.embeddable_on.clone(),
+        ).into())))
+    },
 })
 }
 }
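Note: the router hunk above narrows the old catch-all arm with a match guard, so unmatched requests only fall through to RPC when an RPC special endpoint is actually registered, and 404 otherwise. A simplified sketch of that guard pattern (hypothetical, stripped-down types; the real router dispatches hyper requests):

use std::collections::HashMap;

#[derive(PartialEq, Eq, Hash)]
enum SpecialEndpoint { Rpc }

enum Response { Rpc, NotFound }

struct Router { special: HashMap<SpecialEndpoint, ()> }

impl Router {
    fn route(&self, path: &str) -> Response {
        match path {
            // specific endpoints (dapps, special URLs) would be matched here
            _ if self.special.contains_key(&SpecialEndpoint::Rpc) => Response::Rpc,
            // 404 otherwise
            _ => Response::NotFound,
        }
    }
}

fn main() {
    let with_rpc = Router { special: vec![(SpecialEndpoint::Rpc, ())].into_iter().collect() };
    let without_rpc = Router { special: HashMap::new() };
    assert!(matches!(with_rpc.route("/anything"), Response::Rpc));
    assert!(matches!(without_rpc.route("/anything"), Response::NotFound));
}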
@@ -14,12 +14,13 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

-use std::{io, thread, time};
+use std::{thread, time};
 use std::sync::{atomic, mpsc, Arc};
 use parking_lot::Mutex;
+use hyper;

 use futures::{self, Future};
-use fetch::{self, Fetch};
+use fetch::{self, Fetch, Url};

 pub struct FetchControl {
     sender: mpsc::Sender<()>,
@@ -96,11 +97,8 @@ impl FakeFetch {
 impl Fetch for FakeFetch {
     type Result = Box<Future<Item = fetch::Response, Error = fetch::Error> + Send>;

-    fn new() -> Result<Self, fetch::Error> where Self: Sized {
-        Ok(FakeFetch::default())
-    }
-
-    fn fetch_with_abort(&self, url: &str, _abort: fetch::Abort) -> Self::Result {
+    fn fetch(&self, url: &str, abort: fetch::Abort) -> Self::Result {
+        let u = Url::parse(url).unwrap();
         self.requested.lock().push(url.into());
         let manual = self.manual.clone();
         let response = self.response.clone();
@@ -111,23 +109,10 @@ impl Fetch for FakeFetch {
             // wait for manual resume
             let _ = rx.recv();
         }

         let data = response.lock().take().unwrap_or(b"Some content");
-        let cursor = io::Cursor::new(data);
-        tx.send(fetch::Response::from_reader(cursor)).unwrap();
+        tx.send(fetch::Response::new(u, hyper::Response::new().with_body(data), abort)).unwrap();
         });

         Box::new(rx.map_err(|_| fetch::Error::Aborted))
     }

-    fn process_and_forget<F, I, E>(&self, f: F) where
-        F: Future<Item=I, Error=E> + Send + 'static,
-        I: Send + 'static,
-        E: Send + 'static,
-    {
-        // Spawn the task in a separate thread.
-        thread::spawn(|| {
-            let _ = f.wait();
-        });
-    }
 }
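Note: the edits above imply a slimmed-down `Fetch` trait: construction moved out of the trait (callers now pass a concrete client), the pool helpers are gone, and `fetch_with_abort` became `fetch(url, abort)`. A hedged reconstruction of that shape, with simplified stand-in types (not the real parity-fetch definitions):

extern crate futures;

use futures::{future, Future};

pub struct Abort;                // stand-in for fetch::Abort
pub struct Response(pub String); // stand-in for fetch::Response

pub trait Fetch: Clone + Send + Sync + 'static {
    type Result: Future<Item = Response, Error = ()> + Send;

    /// Fetch `url`, honouring the abort handle.
    fn fetch(&self, url: &str, abort: Abort) -> Self::Result;
}

// A trivial test double in the spirit of FakeFetch above.
#[derive(Clone)]
struct EchoFetch;

impl Fetch for EchoFetch {
    type Result = future::FutureResult<Response, ()>;

    fn fetch(&self, url: &str, _abort: Abort) -> Self::Result {
        future::ok(Response(format!("fetched: {}", url)))
    }
}

fn main() {
    let body = EchoFetch.fetch("http://example.com", Abort).wait().unwrap();
    assert_eq!(body.0, "fetched: http://example.com");
}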
@@ -63,7 +63,7 @@ pub fn init_server<F, B>(process: F, io: IoHandler) -> (Server, Arc<FakeRegistrar>)
     let mut dapps_path = env::temp_dir();
     dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading");

-    let mut builder = ServerBuilder::new(&dapps_path, registrar.clone());
+    let mut builder = ServerBuilder::new(FetchClient::new().unwrap(), &dapps_path, registrar.clone());
     builder.signer_address = Some(("127.0.0.1".into(), SIGNER_PORT));
     let server = process(builder).start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), io).unwrap();
     (
@@ -149,13 +149,13 @@ pub struct ServerBuilder<T: Fetch = FetchClient> {
     web_proxy_tokens: Arc<WebProxyTokens>,
     signer_address: Option<(String, u16)>,
     allowed_hosts: DomainsValidation<Host>,
-    fetch: Option<T>,
+    fetch: T,
     serve_ui: bool,
 }

 impl ServerBuilder {
     /// Construct new dapps server
-    pub fn new<P: AsRef<Path>>(dapps_path: P, registrar: Arc<ContractClient>) -> Self {
+    pub fn new<P: AsRef<Path>>(fetch: FetchClient, dapps_path: P, registrar: Arc<ContractClient>) -> Self {
         ServerBuilder {
             dapps_path: dapps_path.as_ref().to_owned(),
             registrar: registrar,
@@ -163,7 +163,7 @@ impl ServerBuilder {
             web_proxy_tokens: Arc::new(|_| None),
             signer_address: None,
             allowed_hosts: DomainsValidation::Disabled,
-            fetch: None,
+            fetch: fetch,
             serve_ui: false,
         }
     }
@@ -179,7 +179,7 @@ impl<T: Fetch> ServerBuilder<T> {
             web_proxy_tokens: self.web_proxy_tokens,
             signer_address: self.signer_address,
             allowed_hosts: self.allowed_hosts,
-            fetch: Some(fetch),
+            fetch: fetch,
             serve_ui: self.serve_ui,
         }
     }
@@ -187,7 +187,6 @@ impl<T: Fetch> ServerBuilder<T> {
     /// Asynchronously start server with no authentication,
     /// returns result with `Server` handle on success or an error.
     pub fn start_unsecured_http(self, addr: &SocketAddr, io: IoHandler) -> io::Result<Server> {
-        let fetch = self.fetch_client();
         Server::start_http(
             addr,
             io,
@@ -199,17 +198,10 @@ impl<T: Fetch> ServerBuilder<T> {
             self.sync_status,
             self.web_proxy_tokens,
             Remote::new_sync(),
-            fetch,
+            self.fetch,
             self.serve_ui,
         )
     }

-    fn fetch_client(&self) -> T {
-        match self.fetch.clone() {
-            Some(fetch) => fetch,
-            None => T::new().unwrap(),
-        }
-    }
 }

 const DAPPS_DOMAIN: &'static str = "web3.site";
@@ -248,6 +240,7 @@ impl Server {
         registrar,
         sync_status,
         fetch,
+        false,
     )
 } else {
     Middleware::dapps(
@@ -25,6 +25,7 @@ use hyper::{mime, StatusCode};
 use apps;
 use endpoint::{Endpoint, EndpointPath, Request, Response};
 use futures::future;
+use futures_cpupool::CpuPool;
 use handlers::{
     ContentFetcherHandler, ContentHandler, ContentValidator, ValidatorResponse,
     StreamingHandler,
@@ -35,6 +36,7 @@ pub struct Web<F> {
     embeddable_on: Embeddable,
     web_proxy_tokens: Arc<WebProxyTokens>,
     fetch: F,
+    pool: CpuPool,
 }

 impl<F: Fetch> Web<F> {
@@ -42,11 +44,13 @@ impl<F: Fetch> Web<F> {
     embeddable_on: Embeddable,
     web_proxy_tokens: Arc<WebProxyTokens>,
     fetch: F,
+    pool: CpuPool,
 ) -> Box<Endpoint> {
     Box::new(Web {
         embeddable_on,
         web_proxy_tokens,
         fetch,
+        pool,
     })
 }

@@ -129,6 +133,7 @@ impl<F: Fetch> Endpoint for Web<F> {
     },
     self.embeddable_on.clone(),
     self.fetch.clone(),
+    self.pool.clone(),
 ))
 }
 }
@@ -146,7 +151,7 @@ impl ContentValidator for WebInstaller {
     let is_html = response.is_html();
     let mime = response.content_type().unwrap_or(mime::TEXT_HTML);
     let mut handler = StreamingHandler::new(
-        response,
+        fetch::BodyReader::new(response),
         status,
         mime,
         self.embeddable_on,
dapps/ui-deprecation/Cargo.toml (new file, 18 lines):

[package]
description = "Parity UI deprecation notice."
name = "parity-ui-deprecation"
version = "1.10.0"
license = "GPL-3.0"
authors = ["Parity Technologies <admin@parity.io>"]
build = "build.rs"

[features]
default = ["with-syntex", "use-precompiled-js"]
use-precompiled-js = ["parity-dapps-glue/use-precompiled-js"]
with-syntex = ["parity-dapps-glue/with-syntex"]

[build-dependencies]
parity-dapps-glue = "1.9"

[dependencies]
parity-dapps-glue = "1.9"
dapps/ui-deprecation/build.rs (new file, 21 lines):

// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

extern crate parity_dapps_glue;

fn main() {
    parity_dapps_glue::generate();
}
dapps/ui-deprecation/build/index.html (new file, 119 lines):

<!DOCTYPE html>
<html>
<head>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width">
    <title>Parity</title>
    <style>
        /* Copyright 2015-2017 Parity Technologies (UK) Ltd.
        /* This file is part of Parity.
        /*
        /* Parity is free software: you can redistribute it and/or modify
        /* it under the terms of the GNU General Public License as published by
        /* the Free Software Foundation, either version 3 of the License, or
        /* (at your option) any later version.
        /*
        /* Parity is distributed in the hope that it will be useful,
        /* but WITHOUT ANY WARRANTY; without even the implied warranty of
        /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
        /* GNU General Public License for more details.
        /*
        /* You should have received a copy of the GNU General Public License
        /* along with Parity. If not, see <http://www.gnu.org/licenses/>.
        */
        :root, :root body {
            margin: 0;
            padding: 0;
            border: 0;
            font-size: 100%;
            font: inherit;
            vertical-align: baseline;
            background: rgb(95, 95, 95);
            color: rgba(255, 255, 255, 0.75);
            font-size: 16px;
            font-family: 'Roboto', sans-serif;
            font-weight: 300;
        }

        :root a, :root a:visited {
            text-decoration: none;
            cursor: pointer;
            color: rgb(0, 151, 167); /* #f80 */
        }

        :root a:hover {
            color: rgb(0, 174, 193);
        }

        h1,h2,h3,h4,h5,h6 {
            font-weight: 300;
            text-transform: uppercase;
            text-decoration: none;
        }

        h1 {
            font-size: 24px;
            line-height: 36px;
            color: rgb(0, 151, 167);
        }

        h2 {
            font-size: 20px;
            line-height: 34px;
        }

        code,kbd,pre,samp {
            font-family: 'Roboto Mono', monospace;
        }

        .parity-navbar {
            background: rgb(65, 65, 65);
            height: 72px;
            padding: 0 1rem;
            display: flex;
            justify-content: space-between;
        }

        .parity-status {
            clear: both;
            padding: 1rem;
            margin: 1rem 0;
            text-align: right;
            opacity: 0.75;
        }

        .parity-box {
            margin: 1rem;
            padding: 1rem;
            background-color: rgb(48, 48, 48);
            box-sizing: border-box;
            box-shadow: rgba(0, 0, 0, 0.117647) 0px 1px 6px, rgba(0, 0, 0, 0.117647) 0px 1px 4px;
            border-radius: 2px;
            z-index: 1;
            color: #aaa;
        }

        .parity-box h1,
        .parity-box h2,
        .parity-box h3,
        .parity-box h4,
        .parity-box h5,
        .parity-box h6 {
            margin: 0;
        }
    </style>
</head>
<body>
    <div class="parity-navbar">
    </div>
    <div class="parity-box">
        <h1>Parity browser UI is deprecated.</h1>
        <h3>Get a standalone Parity UI from <a href="https://github.com/Parity-JS/shell/releases">here</a></h3>
        <p>
        </p>
    </div>
    <div class="parity-status">
    </div>
</body>
</html>
dapps/ui-deprecation/src/lib.rs (new file, 21 lines):

// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

#[cfg(feature = "with-syntex")]
include!(concat!(env!("OUT_DIR"), "/lib.rs"));

#[cfg(not(feature = "with-syntex"))]
include!("lib.rs.in");
dapps/ui-deprecation/src/lib.rs.in (new file, 55 lines):

// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

extern crate parity_dapps_glue;

use std::collections::HashMap;
use parity_dapps_glue::{WebApp, File, Info};

#[derive(WebAppFiles)]
#[webapp(path = "../build")]
pub struct App {
    pub files: HashMap<&'static str, File>,
}

impl Default for App {
    fn default() -> App {
        App {
            files: Self::files(),
        }
    }
}

impl WebApp for App {
    fn file(&self, path: &str) -> Option<&File> {
        self.files.get(path)
    }

    fn info(&self) -> Info {
        Info {
            name: "Parity Wallet info page",
            version: env!("CARGO_PKG_VERSION"),
            author: "Parity <admin@parity.io>",
            description: "Deprecation notice for Parity Wallet",
            icon_url: "icon.png",
        }
    }
}

#[test]
fn test_js() {
    parity_dapps_glue::js::build(env!("CARGO_MANIFEST_DIR"), "build");
}
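Note: the new crate uses the standard Cargo codegen pipeline: `build.rs` calls `parity_dapps_glue::generate()`, which writes Rust source into `$OUT_DIR`, and `src/lib.rs` compiles it via `include!(concat!(env!("OUT_DIR"), "/lib.rs"))`. A generic, self-contained sketch of that mechanism (hypothetical generator, not the parity-dapps-glue output):

// build.rs -- runs before compilation; anything written to $OUT_DIR can be
// include!-ed from the crate's sources.
use std::{env, fs, path::Path};

fn main() {
    let out_dir = env::var("OUT_DIR").expect("set by cargo for build scripts");
    let dest = Path::new(&out_dir).join("generated.rs");
    // Stand-in for parity_dapps_glue::generate(): emit some Rust source.
    fs::write(&dest, "pub fn answer() -> u32 { 42 }\n").expect("OUT_DIR is writable");
}

The crate then pulls the generated code in with `include!(concat!(env!("OUT_DIR"), "/generated.rs"));` from `src/lib.rs`, the same shape `parity-ui-deprecation` uses for its `with-syntex` build.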
dapps/ui/Cargo.toml:

@@ -10,10 +10,10 @@ authors = ["Parity Technologies <admin@parity.io>"]
 rustc_version = "0.2"

 [dependencies]
-parity-ui-dev = { git = "https://github.com/parity-js/shell.git", rev = "2f07ecec53a2319f41e5b83c01073b5386243173", optional = true }
-parity-ui-old-dev = { git = "https://github.com/parity-js/dapp-wallet.git", rev = "1a58bf4836c84e1632e27ef607b5a388abd2bf2d", optional = true }
-parity-ui-precompiled = { git = "https://github.com/js-dist-paritytech/parity-master-1-10-shell.git", rev="3fb77b61a7f30d6658f81d687e7e415657bd20bd", optional = true }
-parity-ui-old-precompiled = { git = "https://github.com/js-dist-paritytech/parity-master-1-10-wallet.git", rev="4c067dfa1a17fe71ab2ca26b18c52dcbd0f4fc04", optional = true }
+parity-ui-dev = { git = "https://github.com/parity-js/shell.git", rev = "eecaadcb9e421bce31e91680d14a20bbd38f92a2", optional = true }
+parity-ui-old-dev = { git = "https://github.com/parity-js/dapp-wallet.git", rev = "65deb02e7c007a0fd8aab0c089c93e3fd1de6f87", optional = true }
+parity-ui-precompiled = { git = "https://github.com/js-dist-paritytech/parity-master-1-10-shell.git", rev="bd25b41cd642c6b822d820dded3aa601a29aa079", optional = true }
+parity-ui-old-precompiled = { git = "https://github.com/js-dist-paritytech/parity-master-1-10-wallet.git", rev="4b6f112412716cd05123d32eeb7fda448288a6c6", optional = true }

 [features]
 no-precompiled-js = ["parity-ui-dev", "parity-ui-old-dev"]
@@ -30,7 +30,7 @@ RUN apt-get update && \
    # evmjit dependencies
    zlib1g-dev \
    libedit-dev \
-   libudev-dev &&\
+   libudev-dev &&\
    # cmake and llvm ppa's. then update ppa's
    add-apt-repository -y "ppa:george-edison55/cmake-3.x" && \
    add-apt-repository "deb http://llvm.org/apt/trusty/ llvm-toolchain-trusty-3.7 main" && \
ethcore/Cargo.toml:

@@ -8,6 +8,7 @@ authors = ["Parity Technologies <admin@parity.io>"]

 [dependencies]
 ansi_term = "0.10"
+bloomchain = { path = "../util/bloomchain" }
 bn = { git = "https://github.com/paritytech/bn" }
 byteorder = "1.0"
 common-types = { path = "types" }
@@ -32,6 +33,7 @@ ethjson = { path = "../json" }
 ethkey = { path = "../ethkey" }
 ethstore = { path = "../ethstore" }
 evm = { path = "evm" }
+futures-cpupool = "0.1"
 hardware-wallet = { path = "../hw" }
 heapsize = "0.4"
 itertools = "0.5"
@@ -18,7 +18,7 @@

 extern crate migration;

-use migration::{ChangeColumns, SimpleMigration};
+use migration::ChangeColumns;

 /// The migration from v10 to v11.
 /// Adds a column for node info.
@@ -35,30 +35,3 @@ pub const TO_V12: ChangeColumns = ChangeColumns {
     post_columns: Some(8),
     version: 12,
 };
-
-#[derive(Default)]
-pub struct ToV13;
-
-impl SimpleMigration for ToV13 {
-    fn columns(&self) -> Option<u32> {
-        Some(8)
-    }
-
-    fn version(&self) -> u32 {
-        13
-    }
-
-    fn migrated_column_index(&self) -> Option<u32> {
-        // extras!
-        Some(3)
-    }
-
-    fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
-        // remove all bloom groups
-        if key[0] == 3 {
-            None
-        } else {
-            Some((key, value))
-        }
-    }
-}
@@ -26,6 +26,7 @@
    "minGasLimit": "0x1388",
    "networkID" : "0x1",
    "maxCodeSize": 24576,
+   "maxCodeSizeTransition": "0x0",
    "eip98Transition": "0xffffffffffffffff",
    "eip140Transition": "0x0",
    "eip211Transition": "0x0",
@@ -26,6 +26,7 @@
    "minGasLimit": "0x1388",
    "networkID" : "0x1",
    "maxCodeSize": 24576,
+   "maxCodeSizeTransition": "0x0",
    "eip98Transition": "0xffffffffffffffff",
    "eip140Transition": "0x0",
    "eip210Transition": "0x0",
@@ -25,7 +25,8 @@
    "eip98Transition": "0x7fffffffffffffff",
    "eip86Transition": "0x7fffffffffffffff",
    "eip155Transition": "0x7fffffffffffffff",
-   "maxCodeSize": 24576
+   "maxCodeSize": 24576,
+   "maxCodeSizeTransition": "0x7fffffffffffffff"
 },
 "genesis": {
    "seal": {
@@ -25,7 +25,8 @@
    "eip98Transition": "0x7fffffffffffffff",
    "eip86Transition": "0x7fffffffffffffff",
    "eip155Transition": "0x7fffffffffffffff",
-   "maxCodeSize": 24576
+   "maxCodeSize": 24576,
+   "maxCodeSizeTransition": "0x0"
 },
 "genesis": {
    "seal": {
@@ -13,9 +13,9 @@
    "eip150Transition": "0x0",
    "eip160Transition": "0x0",
    "ecip1017EraRounds": 10000000,

    "eip161abcTransition": "0x7fffffffffffffff",
-   "eip161dTransition": "0x7fffffffffffffff"
+   "eip161dTransition": "0x7fffffffffffffff",
+   "eip100bTransition": 2000000
    }
    }
 },
@@ -29,7 +29,12 @@
    "chainID": "0x40",
    "eip155Transition": "0x0",
    "eip98Transition": "0x7fffffffffffff",
-   "eip86Transition": "0x7fffffffffffff"
+   "eip86Transition": "0x7fffffffffffff",
+   "wasmActivationTransition": 2000000,
+   "eip140Transition": 2000000,
+   "eip211Transition": 2000000,
+   "eip214Transition": 2000000,
+   "eip658Transition": 2000000
 },
 "genesis": {
    "seal": {
@@ -67,6 +72,10 @@
    "0000000000000000000000000000000000000001": { "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },
    "0000000000000000000000000000000000000002": { "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } },
    "0000000000000000000000000000000000000003": { "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } },
-   "0000000000000000000000000000000000000004": { "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }
+   "0000000000000000000000000000000000000004": { "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } },
+   "0000000000000000000000000000000000000005": { "builtin": { "name": "modexp", "activate_at": 2000000, "pricing": { "modexp": { "divisor": 20 } } } },
+   "0000000000000000000000000000000000000006": { "builtin": { "name": "alt_bn128_add", "activate_at": 2000000, "pricing": { "linear": { "base": 500, "word": 0 } } } },
+   "0000000000000000000000000000000000000007": { "builtin": { "name": "alt_bn128_mul", "activate_at": 2000000, "pricing": { "linear": { "base": 40000, "word": 0 } } } },
+   "0000000000000000000000000000000000000008": { "builtin": { "name": "alt_bn128_pairing", "activate_at": 2000000, "pricing": { "alt_bn128_pairing": { "base": 100000, "pair": 80000 } } } }
    }
 }
@@ -152,6 +152,7 @@
    "eip98Transition": "0x7fffffffffffff",
    "eip86Transition": "0x7fffffffffffff",
    "maxCodeSize": 24576,
+   "maxCodeSizeTransition": 2675000,
    "eip140Transition": 4370000,
    "eip211Transition": 4370000,
    "eip214Transition": 4370000,
@@ -34,13 +34,16 @@
    "networkID" : "0x2A",
    "forkBlock": 4297256,
    "forkCanonHash": "0x0a66d93c2f727dca618fabaf70c39b37018c73d78b939d8b11efbbd09034778f",
-   "validateReceiptsTransition" : 1000000,
    "eip155Transition": 1000000,
+   "maxCodeSize": 24576,
+   "maxCodeSizeTransition": 6600000,
+   "validateChainIdTransition": 1000000,
+   "validateReceiptsTransition" : 1000000,
    "eip140Transition": 5067000,
    "eip211Transition": 5067000,
    "eip214Transition": 5067000,
-   "eip658Transition": 5067000
+   "eip658Transition": 5067000,
+   "wasmActivationTransition": 6600000
 },
 "genesis": {
    "seal": {
@@ -40,7 +40,8 @@
    "eip211Transition":"0x7fffffffffffff",
    "eip214Transition":"0x7fffffffffffff",
    "eip658Transition":"0x7fffffffffffff",
-   "maxCodeSize":"0x6000"
+   "maxCodeSize":"0x6000",
+   "maxCodeSizeTransition": "0x7fffffffffffff"
 },
 "genesis":{
    "seal":{
ethcore/res/ethereum/mcip6_byz.json (new file, 161 lines):

{
    "name":"Musicoin Byzantium Test",
    "dataDir":"mcip6test",
    "engine":{
        "Ethash":{
            "params":{
                "minimumDifficulty":"0x020000",
                "difficultyBoundDivisor":"0x0800",
                "durationLimit":"0x0d",
                "homesteadTransition":"0x17",
                "eip100bTransition":"0x2a",
                "eip150Transition":"0x2a",
                "eip160Transition":"0x7fffffffffffff",
                "eip161abcTransition":"0x7fffffffffffff",
                "eip161dTransition":"0x7fffffffffffff",
                "eip649Transition":"0x2a",
                "blockReward":"0x1105a0185b50a80000",
                "mcip3Transition":"0x17",
                "mcip3MinerReward":"0xd8d726b7177a80000",
                "mcip3UbiReward":"0x2b5e3af16b1880000",
                "mcip3UbiContract":"0x00efdd5883ec628983e9063c7d969fe268bbf310",
                "mcip3DevReward":"0xc249fdd327780000",
                "mcip3DevContract":"0x00756cf8159095948496617f5fb17ed95059f536"
            }
        }
    },
    "params":{
        "gasLimitBoundDivisor":"0x0400",
        "registrar":"0x5C271c4C9A67E7D73b7b3669d47504741354f21D",
        "accountStartNonce":"0x00",
        "maximumExtraDataSize":"0x20",
        "minGasLimit":"0x1388",
        "networkID":"0x76740c",
        "forkBlock":"0x2b",
        "forkCanonHash":"0x23c3171e864a5d513a3ef85e4cf86dac4cc36b89e5b8e63bf0ebcca68b9e43c9",
        "eip86Transition":"0x7fffffffffffff",
        "eip98Transition":"0x7fffffffffffff",
        "eip140Transition":"0x2a",
        "eip155Transition":"0x2a",
        "eip211Transition":"0x2a",
        "eip214Transition":"0x2a",
        "eip658Transition":"0x2a",
        "maxCodeSize":"0x6000",
        "maxCodeSizeTransition": "0x7fffffffffffff"
    },
    "genesis":{
        "seal":{
            "ethereum":{
                "nonce":"0x000000000000002a",
                "mixHash":"0x00000000000000000000000000000000000000647572616c65787365646c6578"
            }
        },
        "difficulty":"0x3d0900",
        "author":"0x0000000000000000000000000000000000000000",
        "timestamp":"0x00",
        "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
        "extraData":"",
        "gasLimit":"0x7a1200"
    },
    "nodes":[
        "enode://5ddc110733f6d34101973cdef3f9b43484159acf6f816d3b1ee92bc3c98ea453e857bb1207edf0ec0242008ab3a0f9f05eeaee99d47bd414c08a5bdf4847de13@176.9.3.148:30303",
        "enode://38f074f4db8e64dfbaf87984bf290eef67772a901a7113d1b62f36216be152b8450c393d6fc562a5e38f04f99bc8f439a99010a230b1d92dc1df43bf0bd00615@176.9.3.148:30403"
    ],
    "accounts":{
        "0000000000000000000000000000000000000001":{
            "balance":"1",
            "builtin":{
                "name":"ecrecover",
                "pricing":{
                    "linear":{
                        "base":3000,
                        "word":0
                    }
                }
            }
        },
        "0000000000000000000000000000000000000002":{
            "balance":"1",
            "builtin":{
                "name":"sha256",
                "pricing":{
                    "linear":{
                        "base":60,
                        "word":12
                    }
                }
            }
        },
        "0000000000000000000000000000000000000003":{
            "balance":"1",
            "builtin":{
                "name":"ripemd160",
                "pricing":{
                    "linear":{
                        "base":600,
                        "word":120
                    }
                }
            }
        },
        "0000000000000000000000000000000000000004":{
            "balance":"1",
            "builtin":{
                "name":"identity",
                "pricing":{
                    "linear":{
                        "base":15,
                        "word":3
                    }
                }
            }
        },
        "0000000000000000000000000000000000000005":{
            "builtin":{
                "name":"modexp",
                "activate_at":"0x2a",
                "pricing":{
                    "modexp":{
                        "divisor":20
                    }
                }
            }
        },
        "0000000000000000000000000000000000000006":{
            "builtin":{
                "name":"alt_bn128_add",
                "activate_at":"0x2a",
                "pricing":{
                    "linear":{
                        "base":500,
                        "word":0
                    }
                }
            }
        },
        "0000000000000000000000000000000000000007":{
            "builtin":{
                "name":"alt_bn128_mul",
                "activate_at":"0x2a",
                "pricing":{
                    "linear":{
                        "base":40000,
                        "word":0
                    }
                }
            }
        },
        "0000000000000000000000000000000000000008":{
            "builtin":{
                "name":"alt_bn128_pairing",
                "activate_at":"0x2a",
                "pricing":{
                    "alt_bn128_pairing":{
                        "base":100000,
                        "pair":80000
                    }
                }
            }
        }
    }
}
ethcore/res/ethereum/musicoin.json:

@@ -8,12 +8,12 @@
    "difficultyBoundDivisor":"0x0800",
    "durationLimit":"0x0d",
    "homesteadTransition":"0x118c30",
-   "eip100bTransition":"0x7fffffffffffff",
-   "eip150Transition":"0x7fffffffffffff",
-   "eip160Transition":"0x7fffffffffffff",
-   "eip161abcTransition":"0x7fffffffffffff",
-   "eip161dTransition":"0x7fffffffffffff",
-   "eip649Transition":"0x7fffffffffffff",
+   "eip100bTransition":"0x21e88e",
+   "eip150Transition":"0x21e88e",
+   "eip160Transition":"0x21e88e",
+   "eip161abcTransition":"0x21e88e",
+   "eip161dTransition":"0x21e88e",
+   "eip649Transition":"0x21e88e",
    "blockReward":"0x1105a0185b50a80000",
    "mcip3Transition":"0x124f81",
    "mcip3MinerReward":"0xd8d726b7177a80000",
@@ -31,15 +31,15 @@
    "maximumExtraDataSize":"0x20",
    "minGasLimit":"0x1388",
    "networkID":"0x76740f",
-   "forkBlock":"0x5b6",
-   "forkCanonHash":"0xa5e88ad9e34d113e264e307bc27e8471452c8fc13780324bb3abb96fd0558343",
+   "forkBlock":"0x1d8015",
+   "forkCanonHash":"0x380602acf82b629a0be6b5adb2b4a801e960a07dc8261bf196d21befdbb8f2f9",
    "eip86Transition":"0x7fffffffffffff",
    "eip98Transition":"0x7fffffffffffff",
-   "eip140Transition":"0x7fffffffffffff",
-   "eip155Transition":"0x7fffffffffffff",
-   "eip211Transition":"0x7fffffffffffff",
-   "eip214Transition":"0x7fffffffffffff",
-   "eip658Transition":"0x7fffffffffffff",
+   "eip140Transition":"0x21e88e",
+   "eip155Transition":"0x21e88e",
+   "eip211Transition":"0x21e88e",
+   "eip214Transition":"0x21e88e",
+   "eip658Transition":"0x21e88e",
    "maxCodeSize":"0x6000"
 },
 "genesis":{
@@ -57,12 +57,9 @@
    "gasLimit":"0x7a1200"
 },
 "nodes":[
    "enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303",
    "enode://3f1d12044546b76342d59d4a05532c14b85aa669704bfe1f864fe079415aa2c02d743e03218e57a33fb94523adb54032871a6c51b2cc5514cb7c7e35b3ed0a99@13.93.211.84:30303",
    "enode://78de8a0916848093c73790ead81d1928bec737d565119932b98c6b100d944b7a95e94f847f689fc723399d2e31129d182f7ef3863f2b4c820abbf3ab2722344d@191.235.84.50:30303",
    "enode://158f8aab45f6d19c6cbf4a089c2670541a8da11978a2f90dbf6a502a4a3bab80d288afdbeb7ec0ef6d92de563767f3b1ea9e8e334ca711e9f8e2df5a0385e8e6@13.75.154.138:30303",
    "enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303",
    "enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303",
    "enode://09fcd36d553044c8b499b9b9e13a228ffd99572c513f77073d41f009717c464cd4399c0e665d6aff1590324254ee4e698b2b2533b1998dd04d896b9d6aff7895@35.185.67.35:30303",
    "enode://89e51a34770a0badf8ea18c4c4d2c361cde707abd60031d99b1ab3010363e1898230a516ddb37d974af8d8db1b322779d7fe0caae0617bed4924d1b4968cf92b@35.231.48.142:30303",
    "enode://b58c0c71f08864c0cf7fa9dea2c4cbefae5ae7a36cc30d286603b24982d25f3ccc056b589119324c51768fc2054b8c529ecf682e06e1e9980170b93ff194ed7a@132.148.132.9:30303",
    "enode://d302f52c8789ad87ee528f1431a67f1aa646c9bec17babb4665dfb3d61de5b9119a70aa77b2147a5f28854092ba09769323c1c552a6ac6f6a34cbcf767e2d2fe@158.69.248.48:30303",
    "enode://c72564bce8331ae298fb8ece113a456e3927d7e5989c2be3e445678b3600579f722410ef9bbfe339335d676af77343cb21b5b1703b7bebc32be85fce937a2220@191.252.185.71:30303",
    "enode://e3ae4d25ee64791ff98bf17c37acf90933359f2505c00f65c84f6863231a32a94153cadb0a462e428f18f35ded6bd91cd91033d26576a28558c22678be9cfaee@5.63.158.137:35555"
 ],
@@ -119,7 +116,7 @@
 "0000000000000000000000000000000000000005":{
    "builtin":{
        "name":"modexp",
-       "activate_at":"0x7fffffffffffff",
+       "activate_at":"0x21e88e",
        "pricing":{
            "modexp":{
                "divisor":20
@@ -130,7 +127,7 @@
 "0000000000000000000000000000000000000006":{
    "builtin":{
        "name":"alt_bn128_add",
-       "activate_at":"0x7fffffffffffff",
+       "activate_at":"0x21e88e",
        "pricing":{
            "linear":{
                "base":500,
@@ -142,7 +139,7 @@
 "0000000000000000000000000000000000000007":{
    "builtin":{
        "name":"alt_bn128_mul",
-       "activate_at":"0x7fffffffffffff",
+       "activate_at":"0x21e88e",
        "pricing":{
            "linear":{
                "base":40000,
@@ -154,7 +151,7 @@
 "0000000000000000000000000000000000000008":{
    "builtin":{
        "name":"alt_bn128_pairing",
-       "activate_at":"0x7fffffffffffff",
+       "activate_at":"0x21e88e",
        "pricing":{
            "alt_bn128_pairing":{
                "base":100000,
@@ -29,6 +29,7 @@
    "forkBlock": 641350,
    "forkCanonHash": "0x8033403e9fe5811a7b6d6b469905915de1c59207ce2172cbcf5d6ff14fa6a2eb",
    "maxCodeSize": 24576,
+   "maxCodeSizeTransition": 10,
    "eip155Transition": 10,
    "eip98Transition": "0x7fffffffffffff",
    "eip86Transition": "0x7fffffffffffff",
@@ -26,6 +26,7 @@
    "minGasLimit": "0x1388",
    "networkID" : "0x1",
    "maxCodeSize": 24576,
+   "maxCodeSizeTransition": "0",
    "eip98Transition": "5",
    "eip140Transition": "5",
    "eip211Transition": "5",
@@ -31,7 +31,7 @@ pub struct BlockInfo {
 }

 /// Describes location of newly inserted block.
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, PartialEq)]
 pub enum BlockLocation {
     /// It's part of the canon chain.
     CanonChain,
@@ -43,7 +43,7 @@ pub enum BlockLocation {
     BranchBecomingCanonChain(BranchBecomingCanonChainData),
 }

-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, PartialEq)]
 pub struct BranchBecomingCanonChainData {
     /// Hash of the newest common ancestor with old canon chain.
     pub ancestor: H256,
@ -16,10 +16,11 @@
|
||||
|
||||
//! Blockchain database.
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::collections::{HashMap, HashSet, hash_map};
|
||||
use std::sync::Arc;
|
||||
use std::mem;
|
||||
use itertools::Itertools;
|
||||
use bloomchain as bc;
|
||||
use heapsize::HeapSizeOf;
|
||||
use ethereum_types::{H256, Bloom, U256};
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
@ -31,6 +32,7 @@ use transaction::*;
|
||||
use views::*;
|
||||
use log_entry::{LogEntry, LocalizedLogEntry};
|
||||
use receipt::Receipt;
|
||||
use blooms::{BloomGroup, GroupPosition};
|
||||
use blockchain::best_block::{BestBlock, BestAncientBlock};
|
||||
use blockchain::block_info::{BlockInfo, BlockLocation, BranchBecomingCanonChainData};
|
||||
use blockchain::extras::{BlockReceipts, BlockDetails, TransactionAddress, EPOCH_KEY_PREFIX, EpochTransitions};
|
||||
@ -46,12 +48,21 @@ use rayon::prelude::*;
|
||||
use ansi_term::Colour;
|
||||
use kvdb::{DBTransaction, KeyValueDB};
|
||||
|
||||
const LOG_BLOOMS_LEVELS: usize = 3;
|
||||
const LOG_BLOOMS_ELEMENTS_PER_INDEX: usize = 16;
|
||||
|
||||
/// Interface for querying blocks by hash and by number.
|
||||
pub trait BlockProvider {
|
||||
/// Returns true if the given block is known
|
||||
/// (though not necessarily a part of the canon chain).
|
||||
fn is_known(&self, hash: &H256) -> bool;
|
||||
|
||||
/// Returns true if the given block is known and in the canon chain.
|
||||
fn is_canon(&self, hash: &H256) -> bool {
|
||||
let is_canon = || Some(hash == &self.block_hash(self.block_number(hash)?)?);
|
||||
is_canon().unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Get the first block of the best part of the chain.
|
||||
/// Return `None` if there is no gap and the first block is the genesis.
|
||||
/// Any queries of blocks which precede this one are not guaranteed to
|
||||
@ -145,10 +156,10 @@ pub trait BlockProvider {
|
||||
}
|
||||
|
||||
/// Returns numbers of blocks containing given bloom.
|
||||
fn blocks_with_blooms(&self, bloom: &[Bloom], from_block: BlockNumber, to_block: BlockNumber) -> Vec<BlockNumber>;
|
||||
fn blocks_with_bloom(&self, bloom: &Bloom, from_block: BlockNumber, to_block: BlockNumber) -> Vec<BlockNumber>;
|
||||
|
||||
/// Returns logs matching given filter.
|
||||
fn logs<F>(&self, blocks: Vec<BlockNumber>, matches: F, limit: Option<usize>) -> Vec<LocalizedLogEntry>
|
||||
fn logs<F>(&self, blocks: Vec<H256>, matches: F, limit: Option<usize>) -> Vec<LocalizedLogEntry>
|
||||
where F: Fn(&LogEntry) -> bool + Send + Sync, Self: Sized;
|
||||
}
|
||||
|
||||
@ -159,14 +170,26 @@ enum CacheId {
|
||||
BlockDetails(H256),
|
||||
BlockHashes(BlockNumber),
|
||||
TransactionAddresses(H256),
|
||||
BlocksBlooms(GroupPosition),
|
||||
BlockReceipts(H256),
|
||||
}
|
||||
|
||||
impl bc::group::BloomGroupDatabase for BlockChain {
|
||||
fn blooms_at(&self, position: &bc::group::GroupPosition) -> Option<bc::group::BloomGroup> {
|
||||
let position = GroupPosition::from(position.clone());
|
||||
let result = self.db.read_with_cache(db::COL_EXTRA, &self.blocks_blooms, &position).map(Into::into);
|
||||
self.cache_man.lock().note_used(CacheId::BlocksBlooms(position));
|
||||
result
|
||||
}
|
||||
}
|
||||
|
||||
/// Structure providing fast access to blockchain data.
|
||||
///
|
||||
/// **Does not do input data verification.**
|
||||
pub struct BlockChain {
|
||||
// All locks must be captured in the order declared here.
|
||||
blooms_config: bc::Config,
|
||||
|
||||
best_block: RwLock<BestBlock>,
|
||||
// Stores best block of the first uninterrupted sequence of blocks. `None` if there are no gaps.
|
||||
// Only updated with `insert_unordered_block`.
|
||||
@ -183,6 +206,7 @@ pub struct BlockChain {
|
||||
block_details: RwLock<HashMap<H256, BlockDetails>>,
|
||||
block_hashes: RwLock<HashMap<BlockNumber, H256>>,
|
||||
transaction_addresses: RwLock<HashMap<H256, TransactionAddress>>,
|
||||
blocks_blooms: RwLock<HashMap<GroupPosition, BloomGroup>>,
|
||||
block_receipts: RwLock<HashMap<H256, BlockReceipts>>,
|
||||
|
||||
db: Arc<KeyValueDB>,
|
||||
@ -332,38 +356,43 @@ impl BlockProvider for BlockChain {
|
||||
result
|
||||
}
|
||||
|
||||
fn blocks_with_blooms(&self, blooms: &[Bloom], from_block: BlockNumber, to_block: BlockNumber) -> Vec<BlockNumber> {
|
||||
// +1, cause it's inclusive range
|
||||
(from_block..to_block + 1)
|
||||
.into_par_iter()
|
||||
.filter_map(|number| self.block_hash(number).map(|hash| (number, hash)))
|
||||
.map(|(number, hash)| (number, self.block_header_data(&hash).expect("hash exists; qed")))
|
||||
.filter(|&(_, ref header)| blooms.iter().any(|bloom| header.view().log_bloom().contains_bloom(bloom)))
|
||||
.map(|(number, _)| number)
|
||||
/// Returns numbers of blocks containing given bloom.
|
||||
fn blocks_with_bloom(&self, bloom: &Bloom, from_block: BlockNumber, to_block: BlockNumber) -> Vec<BlockNumber> {
|
||||
let range = from_block as bc::Number..to_block as bc::Number;
|
||||
let chain = bc::group::BloomGroupChain::new(self.blooms_config, self);
|
||||
chain.with_bloom(&range, bloom)
|
||||
.into_iter()
|
||||
.map(|b| b as BlockNumber)
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn logs<F>(&self, mut blocks: Vec<BlockNumber>, matches: F, limit: Option<usize>) -> Vec<LocalizedLogEntry>
|
||||
/// Returns logs matching given filter. The order of logs returned will be the same as the order of the blocks
|
||||
/// provided. And it's the callers responsibility to sort blocks provided in advance.
|
||||
fn logs<F>(&self, mut blocks: Vec<H256>, matches: F, limit: Option<usize>) -> Vec<LocalizedLogEntry>
|
||||
where F: Fn(&LogEntry) -> bool + Send + Sync, Self: Sized {
|
||||
// sort in reverse order
|
||||
blocks.sort_by(|a, b| b.cmp(a));
|
||||
blocks.reverse();
|
||||
|
||||
let mut logs = blocks
|
||||
.chunks(128)
|
||||
.flat_map(move |blocks_chunk| {
|
||||
blocks_chunk.into_par_iter()
|
||||
.filter_map(|number| self.block_hash(*number).map(|hash| (*number, hash)))
|
||||
.filter_map(|hash| self.block_number(&hash).map(|r| (r, hash)))
|
||||
.filter_map(|(number, hash)| self.block_receipts(&hash).map(|r| (number, hash, r.receipts)))
|
||||
.filter_map(|(number, hash, receipts)| self.block_body(&hash).map(|ref b| (number, hash, receipts, b.transaction_hashes())))
|
||||
.flat_map(|(number, hash, receipts, hashes)| {
|
||||
assert_eq!(receipts.len(), hashes.len(), "Block {} ({}) has different number of receipts ({}) to transactions ({})", number, hash, receipts.len(), hashes.len());
|
||||
let mut log_index: usize = receipts.iter().map(|r| r.logs.len()).sum();
|
||||
.flat_map(|(number, hash, mut receipts, mut hashes)| {
|
||||
if receipts.len() != hashes.len() {
|
||||
warn!("Block {} ({}) has different number of receipts ({}) to transactions ({}). Database corrupt?", number, hash, receipts.len(), hashes.len());
|
||||
assert!(false);
|
||||
}
|
||||
let mut log_index = receipts.iter().fold(0, |sum, receipt| sum + receipt.logs.len());
|
||||
|
||||
let receipts_len = receipts.len();
|
||||
hashes.reverse();
|
||||
receipts.reverse();
|
receipts.into_iter()
.map(|receipt| receipt.logs)
.zip(hashes)
.rev()
.enumerate()
.flat_map(move |(index, (mut logs, tx_hash))| {
let current_log_index = log_index;
@@ -375,7 +404,7 @@ impl BlockProvider for BlockChain {
.enumerate()
.map(move |(i, log)| LocalizedLogEntry {
entry: log,
block_hash: hash,
block_hash: *hash,
block_number: number,
transaction_hash: tx_hash,
// iterating in reverse order
@@ -467,6 +496,10 @@ impl BlockChain {
let cache_man = CacheManager::new(config.pref_cache_size, config.max_cache_size, 400);

let mut bc = BlockChain {
blooms_config: bc::Config {
levels: LOG_BLOOMS_LEVELS,
elements_per_index: LOG_BLOOMS_ELEMENTS_PER_INDEX,
},
first_block: None,
best_block: RwLock::new(BestBlock::default()),
best_ancient_block: RwLock::new(None),
@@ -475,6 +508,7 @@ impl BlockChain {
block_details: RwLock::new(HashMap::new()),
block_hashes: RwLock::new(HashMap::new()),
transaction_addresses: RwLock::new(HashMap::new()),
blocks_blooms: RwLock::new(HashMap::new()),
block_receipts: RwLock::new(HashMap::new()),
db: db.clone(),
cache_man: Mutex::new(cache_man),
@@ -725,6 +759,7 @@ impl BlockChain {
block_hashes: self.prepare_block_hashes_update(bytes, &info),
block_details: self.prepare_block_details_update(bytes, &info),
block_receipts: self.prepare_block_receipts_update(receipts, &info),
blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
info: info,
timestamp: header.timestamp(),
@@ -773,6 +808,7 @@ impl BlockChain {
block_hashes: self.prepare_block_hashes_update(bytes, &info),
block_details: update,
block_receipts: self.prepare_block_receipts_update(receipts, &info),
blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
info: info,
timestamp: header.timestamp(),
@@ -923,6 +959,7 @@ impl BlockChain {
block_hashes: self.prepare_block_hashes_update(bytes, &info),
block_details: self.prepare_block_details_update(bytes, &info),
block_receipts: self.prepare_block_receipts_update(receipts, &info),
blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
info: info.clone(),
timestamp: header.timestamp(),
@@ -980,22 +1017,50 @@ impl BlockChain {
batch.extend_with_cache(db::COL_EXTRA, &mut *write_receipts, update.block_receipts, CacheUpdatePolicy::Remove);
}

{
let mut write_blocks_blooms = self.blocks_blooms.write();
// update best block
match update.info.location {
BlockLocation::Branch => (),
BlockLocation::BranchBecomingCanonChain(_) => {
// clear all existing blooms, because they may have been created for a block
// number higher than the current best block
*write_blocks_blooms = update.blocks_blooms;
for (key, value) in write_blocks_blooms.iter() {
batch.write(db::COL_EXTRA, key, value);
}
},
BlockLocation::CanonChain => {
// update all existing bloom groups
for (key, value) in update.blocks_blooms {
match write_blocks_blooms.entry(key) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().accrue_bloom_group(&value);
batch.write(db::COL_EXTRA, entry.key(), entry.get());
},
hash_map::Entry::Vacant(entry) => {
batch.write(db::COL_EXTRA, entry.key(), &value);
entry.insert(value);
},
}
}
},
}
}

// These cached values must be updated last with all four locks taken to avoid
// cache decoherence
{
let mut best_block = self.pending_best_block.write();
match update.info.location {
BlockLocation::Branch => (),
_ => if is_best {
batch.put(db::COL_EXTRA, b"best", &update.info.hash);
*best_block = Some(BestBlock {
hash: update.info.hash,
number: update.info.number,
total_difficulty: update.info.total_difficulty,
timestamp: update.timestamp,
block: update.block.to_vec(),
});
}
if is_best && update.info.location != BlockLocation::Branch {
batch.put(db::COL_EXTRA, b"best", &update.info.hash);
*best_block = Some(BestBlock {
hash: update.info.hash,
number: update.info.number,
total_difficulty: update.info.total_difficulty,
timestamp: update.timestamp,
block: update.block.to_vec(),
});
}

let mut write_hashes = self.pending_block_hashes.write();
@@ -1214,6 +1279,59 @@ impl BlockChain {
}
}

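The update path above mutates several caches under separate locks before committing a single DB batch. A minimal sketch of the discipline it relies on, with hypothetical types rather than the actual Parity ones, showing why a fixed acquisition order plus one atomic apply step keeps readers from observing a half-applied best block:

```rust
use std::collections::HashMap;
use std::sync::RwLock;

struct Caches {
    // Locks are always taken in this declaration order, so no two threads
    // can each hold one lock while waiting on the other (no deadlock cycle).
    best_block: RwLock<Option<u64>>,
    block_hashes: RwLock<HashMap<u64, [u8; 32]>>,
    block_details: RwLock<HashMap<[u8; 32], u64>>,
}

impl Caches {
    fn commit(&self, number: u64, hash: [u8; 32]) {
        // Acquire every lock first...
        let mut best = self.best_block.write().unwrap();
        let mut hashes = self.block_hashes.write().unwrap();
        let mut details = self.block_details.write().unwrap();
        // ...then apply the whole update while all locks are held, so a
        // reader can never see the new best block with stale hash maps.
        *best = Some(number);
        hashes.insert(number, hash);
        details.insert(hash, number);
    }
}
```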
/// This function returns modified block blooms.
///
/// To accelerate bloom lookups, blooms are stored in multiple
/// layers (BLOOM_LEVELS, currently 3).
/// ChainFilter is responsible for building and rebuilding these layers.
/// It returns them in a HashMap, where values are Blooms and
/// keys are BloomIndexes. A BloomIndex represents a bloom's location on one
/// of these layers.
///
/// To reduce the number of queries to the database, block blooms are stored
/// in the BlocksBlooms structure, which contains info about several
/// (BLOOM_INDEX_SIZE, currently 16) consecutive block blooms.
///
/// Later, BloomIndexer is used to map a bloom's location on a filter layer (BloomIndex)
/// to its location in the database (BlocksBloomLocation).
///
fn prepare_block_blooms_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<GroupPosition, BloomGroup> {
let block = BlockView::new(block_bytes);
let header = block.header_view();

let log_blooms = match info.location {
BlockLocation::Branch => HashMap::new(),
BlockLocation::CanonChain => {
let log_bloom = header.log_bloom();
if log_bloom.is_zero() {
HashMap::new()
} else {
let chain = bc::group::BloomGroupChain::new(self.blooms_config, self);
chain.insert(info.number as bc::Number, log_bloom)
}
},
BlockLocation::BranchBecomingCanonChain(ref data) => {
let ancestor_number = self.block_number(&data.ancestor).unwrap();
let start_number = ancestor_number + 1;
let range = start_number as bc::Number..self.best_block_number() as bc::Number;

let mut blooms: Vec<Bloom> = data.enacted.iter()
.map(|hash| self.block_header_data(hash).unwrap())
.map(|h| h.log_bloom())
.collect();

blooms.push(header.log_bloom());

let chain = bc::group::BloomGroupChain::new(self.blooms_config, self);
chain.replace(&range, blooms)
}
};

log_blooms.into_iter()
.map(|p| (From::from(p.0), From::from(p.1)))
.collect()
}

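To make the layering described above concrete, here is a hedged sketch of how a three-level bloom index narrows a log search. The constants mirror `LOG_BLOOMS_LEVELS = 3` and `elements_per_index = 16` from the constructor earlier in this file, but the function names are illustrative, not the `bloomchain` crate API:

```rust
/// Blooms folded together per step up the hierarchy (elements_per_index).
const ELEMENTS_PER_INDEX: u64 = 16;

/// The level-`l` bloom covering a block is the union of the blooms of blocks
/// `i * 16^l .. (i + 1) * 16^l`: one level-2 bloom summarises 256 blocks.
fn index_at_level(block_number: u64, level: u32) -> u64 {
    block_number / ELEMENTS_PER_INDEX.pow(level)
}

/// A block can only contain a matching log if the covering bloom at *every*
/// level may match. The real implementation skips whole 16- and 256-block
/// spans whose aggregated bloom misses, instead of probing per block,
/// which is where the speed-up comes from.
fn candidate_blocks(may_match: &dyn Fn(u32, u64) -> bool, from: u64, to: u64) -> Vec<u64> {
    (from..=to)
        .filter(|&n| (0..3u32).rev().all(|level| may_match(level, index_at_level(n, level))))
        .collect()
}
```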
/// Get best block hash.
pub fn best_block_hash(&self) -> H256 {
self.best_block.read().hash
@@ -1247,6 +1365,7 @@ impl BlockChain {
blocks: self.block_headers.read().heap_size_of_children() + self.block_bodies.read().heap_size_of_children(),
block_details: self.block_details.read().heap_size_of_children(),
transaction_addresses: self.transaction_addresses.read().heap_size_of_children(),
blocks_blooms: self.blocks_blooms.read().heap_size_of_children(),
block_receipts: self.block_receipts.read().heap_size_of_children(),
}
}
@@ -1260,6 +1379,7 @@ impl BlockChain {
let mut block_details = self.block_details.write();
let mut block_hashes = self.block_hashes.write();
let mut transaction_addresses = self.transaction_addresses.write();
let mut blocks_blooms = self.blocks_blooms.write();
let mut block_receipts = self.block_receipts.write();

let mut cache_man = self.cache_man.lock();
@@ -1271,6 +1391,7 @@ impl BlockChain {
CacheId::BlockDetails(ref h) => { block_details.remove(h); }
CacheId::BlockHashes(ref h) => { block_hashes.remove(h); }
CacheId::TransactionAddresses(ref h) => { transaction_addresses.remove(h); }
CacheId::BlocksBlooms(ref h) => { blocks_blooms.remove(h); }
CacheId::BlockReceipts(ref h) => { block_receipts.remove(h); }
}
}
@@ -1280,6 +1401,7 @@ impl BlockChain {
block_details.shrink_to_fit();
block_hashes.shrink_to_fit();
transaction_addresses.shrink_to_fit();
blocks_blooms.shrink_to_fit();
block_receipts.shrink_to_fit();

block_headers.heap_size_of_children() +
@@ -1287,6 +1409,7 @@ impl BlockChain {
block_details.heap_size_of_children() +
block_hashes.heap_size_of_children() +
transaction_addresses.heap_size_of_children() +
blocks_blooms.heap_size_of_children() +
block_receipts.heap_size_of_children()
});
}
@@ -1842,17 +1965,33 @@ mod tests {
value: 103.into(),
data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
}.sign(&secret(), None);
let t4 = Transaction {
nonce: 0.into(),
gas_price: 0.into(),
gas: 100_000.into(),
action: Action::Create,
value: 104.into(),
data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
}.sign(&secret(), None);
let tx_hash1 = t1.hash();
let tx_hash2 = t2.hash();
let tx_hash3 = t3.hash();
let tx_hash4 = t4.hash();

let genesis = BlockBuilder::genesis();
let b1 = genesis.add_block_with_transactions(vec![t1, t2]);
let b2 = b1.add_block_with_transactions(iter::once(t3));
let b3 = genesis.add_block_with(|| BlockOptions {
transactions: vec![t4.clone()],
difficulty: U256::from(9),
..Default::default()
}); // Branch block
let b1_hash = b1.last().hash();
let b1_number = b1.last().number();
let b2_hash = b2.last().hash();
let b2_number = b2.last().number();
let b3_hash = b3.last().hash();
let b3_number = b3.last().number();

let db = new_db();
let bc = new_chain(&genesis.last().encoded(), db.clone());
@@ -1883,10 +2022,21 @@ mod tests {
],
}
]);
insert_block(&db, &bc, &b3.last().encoded(), vec![
Receipt {
outcome: TransactionOutcome::StateRoot(H256::default()),
gas_used: 10_000.into(),
log_bloom: Default::default(),
logs: vec![
LogEntry { address: Default::default(), topics: vec![], data: vec![5], },
],
}
]);

// when
let logs1 = bc.logs(vec![1, 2], |_| true, None);
let logs2 = bc.logs(vec![1, 2], |_| true, Some(1));
let logs1 = bc.logs(vec![b1_hash, b2_hash], |_| true, None);
let logs2 = bc.logs(vec![b1_hash, b2_hash], |_| true, Some(1));
let logs3 = bc.logs(vec![b3_hash], |_| true, None);

// then
assert_eq!(logs1, vec![
@@ -1938,6 +2088,17 @@ mod tests {
log_index: 0,
}
]);
assert_eq!(logs3, vec![
LocalizedLogEntry {
entry: LogEntry { address: Default::default(), topics: vec![], data: vec![5] },
block_hash: b3_hash,
block_number: b3_number,
transaction_hash: tx_hash4,
transaction_index: 0,
transaction_log_index: 0,
log_index: 0,
}
]);
}

#[test]
@@ -1963,46 +2124,46 @@ mod tests {
let db = new_db();
let bc = new_chain(&genesis.last().encoded(), db.clone());

let blocks_b1 = bc.blocks_with_blooms(&[bloom_b1], 0, 5);
let blocks_b2 = bc.blocks_with_blooms(&[bloom_b2], 0, 5);
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
assert!(blocks_b1.is_empty());
assert!(blocks_b2.is_empty());

insert_block(&db, &bc, &b1.last().encoded(), vec![]);
let blocks_b1 = bc.blocks_with_blooms(&[bloom_b1], 0, 5);
let blocks_b2 = bc.blocks_with_blooms(&[bloom_b2], 0, 5);
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
assert_eq!(blocks_b1, vec![1]);
assert!(blocks_b2.is_empty());

insert_block(&db, &bc, &b2.last().encoded(), vec![]);
let blocks_b1 = bc.blocks_with_blooms(&[bloom_b1], 0, 5);
let blocks_b2 = bc.blocks_with_blooms(&[bloom_b2], 0, 5);
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
assert_eq!(blocks_b1, vec![1]);
assert_eq!(blocks_b2, vec![2]);

// hasn't been forked yet
insert_block(&db, &bc, &b1a.last().encoded(), vec![]);
let blocks_b1 = bc.blocks_with_blooms(&[bloom_b1], 0, 5);
let blocks_b2 = bc.blocks_with_blooms(&[bloom_b2], 0, 5);
let blocks_ba = bc.blocks_with_blooms(&[bloom_ba], 0, 5);
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5);
assert_eq!(blocks_b1, vec![1]);
assert_eq!(blocks_b2, vec![2]);
assert!(blocks_ba.is_empty());

// fork has happened
insert_block(&db, &bc, &b2a.last().encoded(), vec![]);
let blocks_b1 = bc.blocks_with_blooms(&[bloom_b1], 0, 5);
let blocks_b2 = bc.blocks_with_blooms(&[bloom_b2], 0, 5);
let blocks_ba = bc.blocks_with_blooms(&[bloom_ba], 0, 5);
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5);
assert!(blocks_b1.is_empty());
assert!(blocks_b2.is_empty());
assert_eq!(blocks_ba, vec![1, 2]);

// fork back
insert_block(&db, &bc, &b3.last().encoded(), vec![]);
let blocks_b1 = bc.blocks_with_blooms(&[bloom_b1], 0, 5);
let blocks_b2 = bc.blocks_with_blooms(&[bloom_b2], 0, 5);
let blocks_ba = bc.blocks_with_blooms(&[bloom_ba], 0, 5);
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 5);
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 5);
let blocks_ba = bc.blocks_with_bloom(&bloom_ba, 0, 5);
assert_eq!(blocks_b1, vec![1]);
assert_eq!(blocks_b2, vec![2]);
assert_eq!(blocks_ba, vec![3]);
@@ -2038,9 +2199,9 @@ mod tests {
assert_eq!(bc.block_hash(2).unwrap(), b2.last().hash());
assert_eq!(bc.block_hash(3).unwrap(), b3.last().hash());

let blocks_b1 = bc.blocks_with_blooms(&[bloom_b1], 0, 3);
let blocks_b2 = bc.blocks_with_blooms(&[bloom_b2], 0, 3);
let blocks_b3 = bc.blocks_with_blooms(&[bloom_b3], 0, 3);
let blocks_b1 = bc.blocks_with_bloom(&bloom_b1, 0, 3);
let blocks_b2 = bc.blocks_with_bloom(&bloom_b2, 0, 3);
let blocks_b3 = bc.blocks_with_bloom(&bloom_b3, 0, 3);

assert_eq!(blocks_b1, vec![1]);
assert_eq!(blocks_b2, vec![2]);

@@ -23,6 +23,8 @@ pub struct CacheSize {
pub block_details: usize,
/// Transaction addresses cache size.
pub transaction_addresses: usize,
/// Blooms cache size.
pub blocks_blooms: usize,
/// Block receipts size.
pub block_receipts: usize,
}
@@ -30,6 +32,6 @@ pub struct CacheSize {
impl CacheSize {
/// Total amount used by the cache.
pub fn total(&self) -> usize {
self.blocks + self.block_details + self.transaction_addresses + self.block_receipts
self.blocks + self.block_details + self.transaction_addresses + self.blocks_blooms + self.block_receipts
}
}

@@ -18,6 +18,7 @@

use std::ops;
use std::io::Write;
use blooms::{GroupPosition, BloomGroup};
use db::Key;
use engines::epoch::{Transition as EpochTransition};
use header::BlockNumber;
@@ -36,6 +37,8 @@ pub enum ExtrasIndex {
BlockHash = 1,
/// Transaction address index
TransactionAddress = 2,
/// Block blooms index
BlocksBlooms = 3,
/// Block receipts index
BlockReceipts = 4,
/// Epoch transition data index.
@@ -83,6 +86,31 @@ impl Key<BlockDetails> for H256 {
}
}

pub struct LogGroupKey([u8; 6]);

impl ops::Deref for LogGroupKey {
type Target = [u8];

fn deref(&self) -> &Self::Target {
&self.0
}
}

impl Key<BloomGroup> for GroupPosition {
type Target = LogGroupKey;

fn key(&self) -> Self::Target {
let mut result = [0u8; 6];
result[0] = ExtrasIndex::BlocksBlooms as u8;
result[1] = self.level;
result[2] = (self.index >> 24) as u8;
result[3] = (self.index >> 16) as u8;
result[4] = (self.index >> 8) as u8;
result[5] = self.index as u8;
LogGroupKey(result)
}
}

impl Key<TransactionAddress> for H256 {
type Target = H264;

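For reference, the 6-byte key built by the `Key<BloomGroup>` impl above lays out as one prefix byte (`ExtrasIndex::BlocksBlooms = 3`), the level, then the group index in big-endian order. A tiny self-contained illustration of the same encoding (the helper function is hypothetical; the byte values follow directly from the code above):

```rust
fn log_group_key(level: u8, index: u32) -> [u8; 6] {
    // Prefix byte 3 (BlocksBlooms), then the level, then the group index
    // with the most significant byte first, exactly as in the impl above.
    [3, level, (index >> 24) as u8, (index >> 16) as u8, (index >> 8) as u8, index as u8]
}

fn main() {
    assert_eq!(log_group_key(1, 0x0102_0304), [3, 1, 1, 2, 3, 4]);
}
```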
@@ -3,6 +3,7 @@ use ethereum_types::H256;
use header::BlockNumber;
use blockchain::block_info::BlockInfo;
use blockchain::extras::{BlockDetails, BlockReceipts, TransactionAddress};
use blooms::{BloomGroup, GroupPosition};

/// Block extras update info.
pub struct ExtrasUpdate<'a> {
@@ -18,6 +19,8 @@ pub struct ExtrasUpdate<'a> {
pub block_details: HashMap<H256, BlockDetails>,
/// Modified block receipts.
pub block_receipts: HashMap<H256, BlockReceipts>,
/// Modified blocks blooms.
pub blocks_blooms: HashMap<GroupPosition, BloomGroup>,
/// Modified transaction addresses (None signifies removed transactions).
pub transactions_addresses: HashMap<H256, Option<TransactionAddress>>,
}
ethcore/src/blooms/bloom_group.rs (new file, 55 lines)
@@ -0,0 +1,55 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use bloomchain::group as bc;
use heapsize::HeapSizeOf;
use ethereum_types::Bloom;

/// Represents group of X consecutive blooms.
#[derive(Debug, Clone, RlpEncodableWrapper, RlpDecodableWrapper)]
pub struct BloomGroup {
blooms: Vec<Bloom>,
}

impl BloomGroup {
pub fn accrue_bloom_group(&mut self, group: &BloomGroup) {
for (bloom, other) in self.blooms.iter_mut().zip(group.blooms.iter()) {
bloom.accrue_bloom(other);
}
}
}

impl From<bc::BloomGroup> for BloomGroup {
fn from(group: bc::BloomGroup) -> Self {
BloomGroup {
blooms: group.blooms
}
}
}

impl Into<bc::BloomGroup> for BloomGroup {
fn into(self) -> bc::BloomGroup {
bc::BloomGroup {
blooms: self.blooms
}
}
}

impl HeapSizeOf for BloomGroup {
fn heap_size_of_children(&self) -> usize {
self.blooms.heap_size_of_children()
}
}
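`Bloom::accrue_bloom` is a bitwise OR of the 2048-bit filters, so `accrue_bloom_group` above unions two groups element-wise: the merged bloom matches everything either input matched (false positives are possible, false negatives are not). A stand-alone illustration of the same operation on plain byte arrays rather than the `Bloom` type itself:

```rust
/// Union one 2048-bit (256-byte) bloom into another.
fn accrue(target: &mut [u8; 256], other: &[u8; 256]) {
    for (t, o) in target.iter_mut().zip(other.iter()) {
        *t |= o;
    }
}

fn main() {
    let mut a = [0u8; 256];
    let mut b = [0u8; 256];
    a[0] = 0b0001;
    b[0] = 0b1000;
    accrue(&mut a, &b);
    assert_eq!(a[0], 0b1001); // bits from both inputs survive
}
```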
ethcore/src/blooms/group_position.rs (new file, 42 lines)
@@ -0,0 +1,42 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use bloomchain::group as bc;
use heapsize::HeapSizeOf;

/// Represents `BloomGroup` position in database.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
pub struct GroupPosition {
/// Bloom level.
pub level: u8,
/// Group index.
pub index: u32,
}

impl From<bc::GroupPosition> for GroupPosition {
fn from(p: bc::GroupPosition) -> Self {
GroupPosition {
level: p.level as u8,
index: p.index as u32,
}
}
}

impl HeapSizeOf for GroupPosition {
fn heap_size_of_children(&self) -> usize {
0
}
}
ethcore/src/blooms/mod.rs (new file, 23 lines)
@@ -0,0 +1,23 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Bridge between bloomchain crate types and ethcore.

mod bloom_group;
mod group_position;

pub use self::bloom_group::BloomGroup;
pub use self::group_position::GroupPosition;
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::collections::{HashSet, HashMap, BTreeMap, VecDeque};
use std::collections::{HashSet, HashMap, BTreeMap, BTreeSet, VecDeque};
use std::str::FromStr;
use std::sync::{Arc, Weak};
use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
@@ -1666,14 +1666,83 @@ impl BlockChainClient for Client {
}

fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry> {
let (from, to) = match (self.block_number_ref(&filter.from_block), self.block_number_ref(&filter.to_block)) {
(Some(from), Some(to)) => (from, to),
_ => return Vec::new(),
// Wrap the logic inside a closure so that we can take advantage of question mark syntax.
let fetch_logs = || {
let chain = self.chain.read();

// First, check whether `filter.from_block` and `filter.to_block` are on the canon chain. If so, we can use the
// optimized version.
let is_canon = |id| {
match id {
&BlockId::Pending => true,
// If it is referred to by number, then it is always on the canon chain.
&BlockId::Earliest | &BlockId::Latest | &BlockId::Number(_) => true,
// If it is referred to by hash, we see whether a hash -> number -> hash conversion gives us the same
// result.
&BlockId::Hash(ref hash) => chain.is_canon(hash),
}
};

let blocks = if is_canon(&filter.from_block) && is_canon(&filter.to_block) {
// If we are on the canon chain, use bloom filter to fetch required hashes.
let from = self.block_number_ref(&filter.from_block)?;
let to = self.block_number_ref(&filter.to_block)?;

filter.bloom_possibilities().iter()
.map(|bloom| {
chain.blocks_with_bloom(bloom, from, to)
})
.flat_map(|m| m)
// remove duplicate elements
.collect::<BTreeSet<u64>>()
.into_iter()
.filter_map(|n| chain.block_hash(n))
.collect::<Vec<H256>>()

} else {
// Otherwise, we use a slower version that finds a link between from_block and to_block.
let from_hash = Self::block_hash(&chain, &*self.miner, filter.from_block)?;
let from_number = chain.block_number(&from_hash)?;
let to_hash = Self::block_hash(&chain, &*self.miner, filter.to_block)?;

let blooms = filter.bloom_possibilities();
let bloom_match = |header: &encoded::Header| {
blooms.iter().any(|bloom| header.log_bloom().contains_bloom(bloom))
};

let (blocks, last_hash) = {
let mut blocks = Vec::new();
let mut current_hash = to_hash;

loop {
let header = chain.block_header_data(&current_hash)?;
if bloom_match(&header) {
blocks.push(current_hash);
}

// Stop if `from` block is reached.
if header.number() <= from_number {
break;
}
current_hash = header.parent_hash();
}

blocks.reverse();
(blocks, current_hash)
};

// Check if we've actually reached the expected `from` block.
if last_hash != from_hash || blocks.is_empty() {
return None;
}

blocks
};

Some(self.chain.read().logs(blocks, |entry| filter.matches(entry), filter.limit))
};

let chain = self.chain.read();
let blocks = chain.blocks_with_blooms(&filter.bloom_possibilities(), from, to);
chain.logs(blocks, |entry| filter.matches(entry), filter.limit)
fetch_logs().unwrap_or_default()
}

fn filter_traces(&self, filter: TraceFilter) -> Option<Vec<LocalizedTrace>> {

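The `is_canon` closure above boils down to a hash → number → hash round-trip: a block is canonical exactly when the canonical hash recorded at its height is the block itself. A hedged sketch of that predicate against a hypothetical index, not the real `BlockChain` API:

```rust
use std::collections::HashMap;

struct ChainIndex {
    number_of: HashMap<[u8; 32], u64>,   // every imported block, canon or branch
    canon_hash: HashMap<u64, [u8; 32]>,  // one hash per height: the canonical one
}

impl ChainIndex {
    /// A branch block still has a number, but the canonical hash recorded
    /// at that height is a different block, so the round-trip fails.
    fn is_canon(&self, hash: &[u8; 32]) -> bool {
        self.number_of
            .get(hash)
            .and_then(|n| self.canon_hash.get(n))
            .map_or(false, |canon| canon == hash)
    }
}
```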
@@ -27,7 +27,7 @@ mod params;

use std::sync::{Weak, Arc};
use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering};
use std::collections::{HashSet, BTreeMap};
use std::collections::HashSet;
use hash::keccak;
use ethereum_types::{H256, H520, U128, U256, Address};
use parking_lot::RwLock;
@@ -449,17 +449,6 @@ impl Engine<EthereumMachine> for Tendermint {

fn maximum_uncle_age(&self) -> usize { 0 }

/// Additional engine-specific information for the user/developer concerning `header`.
fn extra_info(&self, header: &Header) -> BTreeMap<String, String> {
let message = ConsensusMessage::new_proposal(header).expect("Invalid header.");
map![
"signature".into() => message.signature.to_string(),
"height".into() => message.vote_step.height.to_string(),
"view".into() => message.vote_step.view.to_string(),
"block_hash".into() => message.block_hash.as_ref().map(ToString::to_string).unwrap_or("".into())
]
}

fn populate_from_parent(&self, header: &mut Header, parent: &Header) {
// Chain scoring: total weight is sqrt(U256::max_value())*height - view
let new_difficulty = U256::from(U128::max_value())

@@ -27,7 +27,7 @@ use error::{BlockError, Error};
use header::{Header, BlockNumber};
use engines::{self, Engine};
use ethjson;
use rlp::{self, UntrustedRlp};
use rlp::UntrustedRlp;
use machine::EthereumMachine;

/// Number of blocks in an ethash snapshot.
@@ -38,6 +38,38 @@ const MAX_SNAPSHOT_BLOCKS: u64 = 30000;

const DEFAULT_EIP649_DELAY: u64 = 3_000_000;

/// Ethash specific seal
#[derive(Debug, PartialEq)]
pub struct Seal {
/// Ethash seal mix_hash
pub mix_hash: H256,
/// Ethash seal nonce
pub nonce: H64,
}

impl Seal {
/// Tries to parse rlp as ethash seal.
pub fn parse_seal<T: AsRef<[u8]>>(seal: &[T]) -> Result<Self, Error> {
if seal.len() != 2 {
return Err(BlockError::InvalidSealArity(
Mismatch {
expected: 2,
found: seal.len()
}
).into());
}

let mix_hash = UntrustedRlp::new(seal[0].as_ref()).as_val::<H256>()?;
let nonce = UntrustedRlp::new(seal[1].as_ref()).as_val::<H64>()?;
let seal = Seal {
mix_hash,
nonce,
};

Ok(seal)
}
}

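A usage sketch for `Seal::parse_seal` above: the seal must be exactly two RLP items, `mix_hash` then `nonce`, and wrong arity or undecodable bytes surface as a normal `Err` rather than a panic. The `rlp::encode` calls and conversions below are assumptions about the crates used in this tree, not verbatim API:

```rust
// Hypothetical test-style usage, in the spirit of the tests added further down.
let mix_hash = H256::default();
let nonce = H64::default();

// A well-formed seal: two RLP-encoded items, mix_hash first.
let good: Vec<Vec<u8>> = vec![
    rlp::encode(&mix_hash).to_vec(),
    rlp::encode(&nonce).to_vec(),
];
assert!(Seal::parse_seal(&good).is_ok());

// Wrong arity is rejected with InvalidSealArity instead of panicking.
let bad: Vec<Vec<u8>> = vec![Vec::new()];
assert!(Seal::parse_seal(&bad).is_err());
```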
/// Ethash params.
#[derive(Debug, PartialEq)]
pub struct EthashParams {
@@ -173,13 +205,12 @@ impl Engine<EthereumMachine> for Arc<Ethash> {

/// Additional engine-specific information for the user/developer concerning `header`.
fn extra_info(&self, header: &Header) -> BTreeMap<String, String> {
if header.seal().len() == self.seal_fields(header) {
map![
"nonce".to_owned() => format!("0x{:x}", header.nonce()),
"mixHash".to_owned() => format!("0x{:x}", header.mix_hash())
]
} else {
BTreeMap::default()
match Seal::parse_seal(header.seal()) {
Ok(seal) => map![
"nonce".to_owned() => format!("0x{:x}", seal.nonce),
"mixHash".to_owned() => format!("0x{:x}", seal.mix_hash)
],
_ => BTreeMap::default()
}
}

@@ -265,14 +296,7 @@ impl Engine<EthereumMachine> for Arc<Ethash> {

fn verify_block_basic(&self, header: &Header) -> Result<(), Error> {
// check the seal fields.
let expected_seal_fields = self.seal_fields(header);
if header.seal().len() != expected_seal_fields {
return Err(From::from(BlockError::InvalidSealArity(
Mismatch { expected: expected_seal_fields, found: header.seal().len() }
)));
}
UntrustedRlp::new(&header.seal()[0]).as_val::<H256>()?;
UntrustedRlp::new(&header.seal()[1]).as_val::<H64>()?;
let seal = Seal::parse_seal(header.seal())?;

// TODO: consider removing these lines.
let min_difficulty = self.ethash_params.minimum_difficulty;
@@ -282,9 +306,10 @@ impl Engine<EthereumMachine> for Arc<Ethash> {

let difficulty = Ethash::boundary_to_difficulty(&H256(quick_get_difficulty(
&header.bare_hash().0,
header.nonce().low_u64(),
&header.mix_hash().0
seal.nonce.low_u64(),
&seal.mix_hash.0
)));

if &difficulty < header.difficulty() {
return Err(From::from(BlockError::InvalidProofOfWork(OutOfBounds { min: Some(header.difficulty().clone()), max: None, found: difficulty })));
}
@@ -297,18 +322,20 @@ impl Engine<EthereumMachine> for Arc<Ethash> {
}

fn verify_block_unordered(&self, header: &Header) -> Result<(), Error> {
let expected_seal_fields = self.seal_fields(header);
if header.seal().len() != expected_seal_fields {
return Err(From::from(BlockError::InvalidSealArity(
Mismatch { expected: expected_seal_fields, found: header.seal().len() }
)));
}
let result = self.pow.compute_light(header.number() as u64, &header.bare_hash().0, header.nonce().low_u64());
let seal = Seal::parse_seal(header.seal())?;

let result = self.pow.compute_light(header.number() as u64, &header.bare_hash().0, seal.nonce.low_u64());
let mix = H256(result.mix_hash);
let difficulty = Ethash::boundary_to_difficulty(&H256(result.value));
trace!(target: "miner", "num: {}, seed: {}, h: {}, non: {}, mix: {}, res: {}" , header.number() as u64, H256(slow_hash_block_number(header.number() as u64)), header.bare_hash(), header.nonce().low_u64(), H256(result.mix_hash), H256(result.value));
if mix != header.mix_hash() {
return Err(From::from(BlockError::MismatchedH256SealElement(Mismatch { expected: mix, found: header.mix_hash() })));
trace!(target: "miner", "num: {num}, seed: {seed}, h: {h}, non: {non}, mix: {mix}, res: {res}",
num = header.number() as u64,
seed = H256(slow_hash_block_number(header.number() as u64)),
h = header.bare_hash(),
non = seal.nonce.low_u64(),
mix = H256(result.mix_hash),
res = H256(result.value));
if mix != seal.mix_hash {
return Err(From::from(BlockError::MismatchedH256SealElement(Mismatch { expected: mix, found: seal.mix_hash })));
}
if &difficulty < header.difficulty() {
return Err(From::from(BlockError::InvalidProofOfWork(OutOfBounds { min: Some(header.difficulty().clone()), max: None, found: difficulty })));
@@ -439,18 +466,6 @@ impl Ethash {
}
}

impl Header {
/// Get the nonce field of the header.
pub fn nonce(&self) -> H64 {
rlp::decode(&self.seal()[1])
}

/// Get the mix hash field of the header.
pub fn mix_hash(&self) -> H256 {
rlp::decode(&self.seal()[0])
}
}

fn ecip1017_eras_block_reward(era_rounds: u64, mut reward: U256, block_number:u64) -> (u64, U256) {
let eras = if block_number != 0 && block_number % era_rounds == 0 {
block_number / era_rounds - 1
@@ -633,7 +648,7 @@ mod tests {
#[test]
fn can_do_seal_unordered_verification_fail() {
let engine = test_spec().engine;
let header: Header = Header::default();
let header = Header::default();

let verify_result = engine.verify_block_unordered(&header);

@@ -644,6 +659,17 @@ mod tests {
}
}

#[test]
fn can_do_seal_unordered_verification_fail2() {
let engine = test_spec().engine;
let mut header = Header::default();
header.set_seal(vec![vec![], vec![]]);

let verify_result = engine.verify_block_unordered(&header);
// rlp error, shouldn't panic
assert!(verify_result.is_err());
}

#[test]
fn can_do_seal256_verification_fail() {
let engine = test_spec().engine;

@@ -34,10 +34,21 @@ use transaction::{Action, SignedTransaction};
use crossbeam;
pub use executed::{Executed, ExecutionResult};

/// Roughly estimate what stack size each level of evm depth will use
/// TODO [todr] We probably need some more sophisticated calculations here (limit on my machine 132)
/// Maybe something like here: `https://github.com/ethereum/libethereum/blob/4db169b8504f2b87f7d5a481819cfb959fc65f6c/libethereum/ExtVM.cpp`
const STACK_SIZE_PER_DEPTH: usize = 24*1024;
#[cfg(debug_assertions)]
/// Roughly estimate what stack size each level of evm depth will use. (Debug build)
const STACK_SIZE_PER_DEPTH: usize = 128 * 1024;

#[cfg(not(debug_assertions))]
/// Roughly estimate what stack size each level of evm depth will use.
const STACK_SIZE_PER_DEPTH: usize = 24 * 1024;

#[cfg(debug_assertions)]
/// Entry stack overhead prior to execution. (Debug build)
const STACK_SIZE_ENTRY_OVERHEAD: usize = 100 * 1024;

#[cfg(not(debug_assertions))]
/// Entry stack overhead prior to execution.
const STACK_SIZE_ENTRY_OVERHEAD: usize = 20 * 1024;

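These two constants feed directly into the spawn decision in `exec` below: the executive keeps the VM on the current thread until the remaining stack can no longer hold another call level. A sketch of the arithmetic using the release-build values, with an 8 MiB thread stack assumed purely for illustration:

```rust
const STACK_SIZE_PER_DEPTH: usize = 24 * 1024;      // per call depth (release)
const STACK_SIZE_ENTRY_OVERHEAD: usize = 20 * 1024; // fixed cost before depth 0

fn depth_threshold(local_stack_size: usize) -> usize {
    // Depths below the threshold fit in the current thread's stack; at the
    // threshold the executive spawns a fresh thread sized for max depth.
    local_stack_size.saturating_sub(STACK_SIZE_ENTRY_OVERHEAD) / STACK_SIZE_PER_DEPTH
}

fn main() {
    // With an 8 MiB stack: (8388608 - 20480) / 24576 = 340 levels.
    assert_eq!(depth_threshold(8 * 1024 * 1024), 340);
}
```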
/// Returns new address created from address, nonce, and code hash
pub fn contract_address(address_scheme: CreateContractAddress, sender: &Address, nonce: &U256, code: &[u8]) -> (Address, Option<H256>) {
@@ -332,12 +343,12 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> {
tracer: &mut T,
vm_tracer: &mut V
) -> vm::Result<FinalizationResult> where T: Tracer, V: VMTracer {

let depth_threshold = ::io::LOCAL_STACK_SIZE.with(|sz| sz.get() / STACK_SIZE_PER_DEPTH);
let local_stack_size = ::io::LOCAL_STACK_SIZE.with(|sz| sz.get());
let depth_threshold = local_stack_size.saturating_sub(STACK_SIZE_ENTRY_OVERHEAD) / STACK_SIZE_PER_DEPTH;
let static_call = params.call_type == CallType::StaticCall;

// Ordinary execution - keep VM in same thread
if (self.depth + 1) % depth_threshold != 0 {
if self.depth != depth_threshold {
let vm_factory = self.state.vm_factory();
let mut ext = self.as_externalities(OriginInfo::from(&params), unconfirmed_substate, output_policy, tracer, vm_tracer, static_call);
trace!(target: "executive", "ext.schedule.have_delegate_call: {}", ext.schedule().have_delegate_call);
@@ -345,17 +356,15 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> {
return vm.exec(params, &mut ext).finalize(ext);
}

// Start in new thread to reset stack
// TODO [todr] No thread builder yet, so we need to reset once for a while
// https://github.com/aturon/crossbeam/issues/16
// Start in new thread with stack size needed up to max depth
crossbeam::scope(|scope| {
let vm_factory = self.state.vm_factory();
let mut ext = self.as_externalities(OriginInfo::from(&params), unconfirmed_substate, output_policy, tracer, vm_tracer, static_call);

scope.spawn(move || {
scope.builder().stack_size(::std::cmp::max(schedule.max_depth.saturating_sub(depth_threshold) * STACK_SIZE_PER_DEPTH, local_stack_size)).spawn(move || {
let mut vm = vm_factory.create(&params, &schedule);
vm.exec(params, &mut ext).finalize(ext)
})
}).expect("Sub-thread creation cannot fail; the host might run out of resources; qed")
}).join()
}

@@ -419,8 +428,14 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> {
self.state.discard_checkpoint();
output.write(0, &builtin_out_buffer);

// trace only top level calls to builtins to avoid DDoS attacks
if self.depth == 0 {
// Trace only top level calls and calls with balance transfer to builtins. The reason why we don't
// trace all internal calls to builtin contracts is that memcpy (IDENTITY) is a heavily used
// function.
let is_transferred = match params.value {
ActionValue::Transfer(value) => value != U256::zero(),
ActionValue::Apparent(_) => false,
};
if self.depth == 0 || is_transferred {
let mut trace_output = tracer.prepare_trace_output();
if let Some(out) = trace_output.as_mut() {
*out = output.to_owned();
@@ -713,6 +728,12 @@ mod tests {
machine
}

fn make_byzantium_machine(max_depth: usize) -> EthereumMachine {
let mut machine = ::ethereum::new_byzantium_test_machine();
machine.set_schedule_creation_rules(Box::new(move |s, _| s.max_depth = max_depth));
machine
}

#[test]
fn test_contract_address() {
let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap();
@@ -805,6 +826,76 @@ mod tests {
assert_eq!(substate.contracts_created.len(), 0);
}

#[test]
fn test_call_to_precompiled_tracing() {
// code:
//
// 60 00 - push 00 out size
// 60 00 - push 00 out offset
// 60 00 - push 00 in size
// 60 00 - push 00 in offset
// 60 01 - push 01 value
// 60 03 - push 03 to
// 61 ffff - push ffff gas
// f1 - CALL

let code = "60006000600060006001600361fffff1".from_hex().unwrap();
let sender = Address::from_str("4444444444444444444444444444444444444444").unwrap();
let address = Address::from_str("5555555555555555555555555555555555555555").unwrap();

let mut params = ActionParams::default();
params.address = address.clone();
params.code_address = address.clone();
params.sender = sender.clone();
params.origin = sender.clone();
params.gas = U256::from(100_000);
params.code = Some(Arc::new(code));
params.value = ActionValue::Transfer(U256::from(100));
params.call_type = CallType::Call;
let mut state = get_temp_state();
state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap();
let info = EnvInfo::default();
let machine = make_byzantium_machine(5);
let mut substate = Substate::new();
let mut tracer = ExecutiveTracer::default();
let mut vm_tracer = ExecutiveVMTracer::toplevel();

let mut ex = Executive::new(&mut state, &info, &machine);
let output = BytesRef::Fixed(&mut[0u8;0]);
ex.call(params, &mut substate, output, &mut tracer, &mut vm_tracer).unwrap();

assert_eq!(tracer.drain(), vec![FlatTrace {
action: trace::Action::Call(trace::Call {
from: "4444444444444444444444444444444444444444".into(),
to: "5555555555555555555555555555555555555555".into(),
value: 100.into(),
gas: 100_000.into(),
input: vec![],
call_type: CallType::Call
}),
result: trace::Res::Call(trace::CallResult {
gas_used: 33021.into(),
output: vec![]
}),
subtraces: 1,
trace_address: Default::default()
}, FlatTrace {
action: trace::Action::Call(trace::Call {
from: "5555555555555555555555555555555555555555".into(),
to: "0000000000000000000000000000000000000003".into(),
value: 1.into(),
gas: 66560.into(),
input: vec![],
call_type: CallType::Call
}), result: trace::Res::Call(trace::CallResult {
gas_used: 600.into(),
output: vec![]
}),
subtraces: 0,
trace_address: vec![0].into_iter().collect(),
}]);
}

#[test]
// Tracing is not supported in JIT
fn test_call_to_create() {

@@ -54,6 +54,7 @@
//! cargo build --release
//! ```

extern crate bloomchain;
extern crate bn;
extern crate byteorder;
extern crate crossbeam;
@@ -70,6 +71,7 @@ extern crate ethcore_transaction as transaction;
extern crate ethereum_types;
extern crate ethjson;
extern crate ethkey;
extern crate futures_cpupool;
extern crate hardware_wallet;
extern crate hashdb;
extern crate itertools;
@@ -157,6 +159,7 @@ pub mod verification;
pub mod views;

mod cache_manager;
mod blooms;
mod pod_account;
mod account_db;
mod builtin;

@@ -263,15 +263,9 @@ impl EthereumMachine {
} else if block_number < ext.eip150_transition {
Schedule::new_homestead()
} else {
// There's no max_code_size transition so we tie it to eip161abc
let max_code_size = if block_number >= ext.eip161abc_transition {
self.params.max_code_size as usize
} else {
usize::max_value()
};

let max_code_size = self.params.max_code_size(block_number);
let mut schedule = Schedule::new_post_eip150(
max_code_size,
max_code_size as _,
block_number >= ext.eip160_transition,
block_number >= ext.eip161abc_transition,
block_number >= ext.eip161d_transition

@@ -35,6 +35,7 @@ use ethcore_miner::transaction_queue::{
AccountDetails,
TransactionOrigin,
};
use futures_cpupool::CpuPool;
use ethcore_miner::work_notify::{WorkPoster, NotifyWork};
use ethcore_miner::service_transaction_checker::ServiceTransactionChecker;
use miner::{MinerService, MinerStatus};
@@ -216,11 +217,11 @@ pub enum GasPricer {

impl GasPricer {
/// Create a new Calibrated `GasPricer`.
pub fn new_calibrated(options: GasPriceCalibratorOptions, fetch: FetchClient) -> GasPricer {
pub fn new_calibrated(options: GasPriceCalibratorOptions, fetch: FetchClient, p: CpuPool) -> GasPricer {
GasPricer::Calibrated(GasPriceCalibrator {
options: options,
next_calibration: Instant::now(),
price_info: PriceInfoClient::new(fetch),
price_info: PriceInfoClient::new(fetch, p),
})
}

@@ -248,9 +248,7 @@ impl Rebuilder for PowRebuilder {
let abridged_rlp = pair.at(0)?.as_raw().to_owned();
let abridged_block = AbridgedBlock::from_raw(abridged_rlp);
let receipts: Vec<::receipt::Receipt> = pair.list_at(1)?;
let receipts_root = ordered_trie_root(
pair.at(1)?.iter().map(|r| r.as_raw())
);
let receipts_root = ordered_trie_root(pair.at(1)?.iter().map(|r| r.as_raw()));

let block = abridged_block.to_block(parent_hash, cur_number, receipts_root)?;
let block_bytes = block.rlp_bytes(Seal::With);

@@ -118,6 +118,8 @@ pub struct CommonParams {
pub node_permission_contract: Option<Address>,
/// Maximum contract code size that can be deployed.
pub max_code_size: u64,
/// Number of first block where max code size limit is active.
pub max_code_size_transition: BlockNumber,
/// Transaction permission managing contract address.
pub transaction_permission_contract: Option<Address>,
}
@@ -125,11 +127,20 @@ impl CommonParams {
impl CommonParams {
/// Schedule for an EVM in the post-EIP-150-era of the Ethereum main net.
pub fn schedule(&self, block_number: u64) -> ::vm::Schedule {
let mut schedule = ::vm::Schedule::new_post_eip150(self.max_code_size as _, true, true, true);
let mut schedule = ::vm::Schedule::new_post_eip150(self.max_code_size(block_number) as _, true, true, true);
self.update_schedule(block_number, &mut schedule);
schedule
}

/// Returns max code size at given block.
pub fn max_code_size(&self, block_number: u64) -> u64 {
if block_number >= self.max_code_size_transition {
self.max_code_size
} else {
u64::max_value()
}
}

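The transition semantics in `max_code_size` above: below `max_code_size_transition` the deployed code size is unbounded; from the transition block onwards, `max_code_size` applies. A small illustration with assumed values (the EIP-170 limit activating at a hypothetical block height):

```rust
fn max_code_size(max: u64, transition: u64, block: u64) -> u64 {
    // Mirrors the method above: unlimited before the transition block.
    if block >= transition { max } else { u64::max_value() }
}

fn main() {
    let (max, at) = (24_576, 2_675_000); // assumed example values
    assert_eq!(max_code_size(max, at, at - 1), u64::max_value());
    assert_eq!(max_code_size(max, at, at), 24_576);
}
```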
/// Apply common spec config parameters to the schedule.
pub fn update_schedule(&self, block_number: u64, schedule: &mut ::vm::Schedule) {
schedule.have_create2 = block_number >= self.eip86_transition;
@@ -226,6 +237,7 @@ impl From<ethjson::spec::Params> for CommonParams {
registrar: p.registrar.map_or_else(Address::new, Into::into),
node_permission_contract: p.node_permission_contract.map(Into::into),
max_code_size: p.max_code_size.map_or(u64::max_value(), Into::into),
max_code_size_transition: p.max_code_size_transition.map_or(0, Into::into),
transaction_permission_contract: p.transaction_permission_contract.map(Into::into),
wasm_activation_transition: p.wasm_activation_transition.map_or(
BlockNumber::max_value(),
@@ -697,7 +709,7 @@ impl Spec {
author: *genesis.author(),
timestamp: genesis.timestamp(),
difficulty: *genesis.difficulty(),
gas_limit: *genesis.gas_limit(),
gas_limit: U256::max_value(),
last_hashes: Arc::new(Vec::new()),
gas_used: 0.into(),
};
@@ -706,7 +718,7 @@ impl Spec {
let tx = Transaction {
nonce: self.engine.account_start_nonce(0),
action: Action::Call(a),
gas: U256::from(50_000_000), // TODO: share with client.
gas: U256::max_value(),
gas_price: U256::default(),
value: U256::default(),
data: d,

@@ -21,7 +21,7 @@

use std::cell::{RefCell, RefMut};
use std::collections::hash_map::Entry;
use std::collections::{HashMap, BTreeMap, HashSet};
use std::collections::{HashMap, BTreeMap, BTreeSet, HashSet};
use std::fmt;
use std::sync::Arc;
use hash::{KECCAK_NULL_RLP, KECCAK_EMPTY};
@@ -833,29 +833,65 @@ impl<B: Backend> State<B> {
}))
}

fn query_pod(&mut self, query: &PodState) -> trie::Result<()> {
for (address, pod_account) in query.get() {
if !self.ensure_cached(address, RequireCache::Code, true, |a| a.is_some())? {
continue
/// Populate a PodAccount map from this state, with another state as the account and storage query.
pub fn to_pod_diff<X: Backend>(&mut self, query: &State<X>) -> trie::Result<PodState> {
assert!(self.checkpoints.borrow().is_empty());

// Merge PodAccount::to_pod for cache of self and `query`.
let all_addresses = self.cache.borrow().keys().cloned()
.chain(query.cache.borrow().keys().cloned())
.collect::<BTreeSet<_>>();

Ok(PodState::from(all_addresses.into_iter().fold(Ok(BTreeMap::new()), |m: trie::Result<_>, address| {
let mut m = m?;

let account = self.ensure_cached(&address, RequireCache::Code, true, |acc| {
acc.map(|acc| {
// Merge all modified storage keys.
let all_keys = {
let self_keys = acc.storage_changes().keys().cloned()
.collect::<BTreeSet<_>>();

if let Some(ref query_storage) = query.cache.borrow().get(&address)
.and_then(|opt| {
Some(opt.account.as_ref()?.storage_changes().keys().cloned()
.collect::<BTreeSet<_>>())
})
{
self_keys.union(&query_storage).cloned().collect::<Vec<_>>()
} else {
self_keys.into_iter().collect::<Vec<_>>()
}
};

// Storage must be fetched after ensure_cached to avoid borrow problem.
(*acc.balance(), *acc.nonce(), all_keys, acc.code().map(|x| x.to_vec()))
})
})?;

if let Some((balance, nonce, storage_keys, code)) = account {
let storage = storage_keys.into_iter().fold(Ok(BTreeMap::new()), |s: trie::Result<_>, key| {
let mut s = s?;

s.insert(key, self.storage_at(&address, &key)?);
Ok(s)
})?;

m.insert(address, PodAccount {
balance, nonce, storage, code
});
}

// needs to be split into two parts for the refcell code here
// to work.
for key in pod_account.storage.keys() {
self.storage_at(address, key)?;
}
}

Ok(())
Ok(m)
})?))
}

/// Returns a `StateDiff` describing the difference from `orig` to `self`.
/// Consumes self.
pub fn diff_from<X: Backend>(&self, orig: State<X>) -> trie::Result<StateDiff> {
pub fn diff_from<X: Backend>(&self, mut orig: State<X>) -> trie::Result<StateDiff> {
let pod_state_post = self.to_pod();
let mut state_pre = orig;
state_pre.query_pod(&pod_state_post)?;
Ok(pod_state::diff_pod(&state_pre.to_pod(), &pod_state_post))
let pod_state_pre = orig.to_pod_diff(self)?;
Ok(pod_state::diff_pod(&pod_state_pre, &pod_state_post))
}

// load required account data from the databases.
@@ -2180,4 +2216,72 @@ mod tests {
assert!(state.exists(&d).unwrap());
assert!(!state.exists(&e).unwrap());
}

#[test]
fn should_trace_diff_suicided_accounts() {
use pod_account;

let a = 10.into();
let db = get_temp_state_db();
let (root, db) = {
let mut state = State::new(db, U256::from(0), Default::default());
state.add_balance(&a, &100.into(), CleanupMode::ForceCreate).unwrap();
state.commit().unwrap();
state.drop()
};

let mut state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap();
let original = state.clone();
state.kill_account(&a);

let diff = state.diff_from(original).unwrap();
let diff_map = diff.get();
assert_eq!(diff_map.len(), 1);
assert!(diff_map.get(&a).is_some());
assert_eq!(diff_map.get(&a),
pod_account::diff_pod(Some(&PodAccount {
balance: U256::from(100),
nonce: U256::zero(),
code: Some(Default::default()),
storage: Default::default()
}), None).as_ref());
}

#[test]
fn should_trace_diff_unmodified_storage() {
use pod_account;

let a = 10.into();
let db = get_temp_state_db();

let (root, db) = {
let mut state = State::new(db, U256::from(0), Default::default());
state.set_storage(&a, H256::from(&U256::from(1u64)), H256::from(&U256::from(20u64))).unwrap();
state.commit().unwrap();
state.drop()
};

let mut state = State::from_existing(db, root, U256::from(0u8), Default::default()).unwrap();
let original = state.clone();
state.set_storage(&a, H256::from(&U256::from(1u64)), H256::from(&U256::from(100u64))).unwrap();

let diff = state.diff_from(original).unwrap();
let diff_map = diff.get();
assert_eq!(diff_map.len(), 1);
assert!(diff_map.get(&a).is_some());
assert_eq!(diff_map.get(&a),
pod_account::diff_pod(Some(&PodAccount {
balance: U256::zero(),
nonce: U256::zero(),
code: Some(Default::default()),
storage: vec![(H256::from(&U256::from(1u64)), H256::from(&U256::from(20u64)))]
.into_iter().collect(),
}), Some(&PodAccount {
balance: U256::zero(),
nonce: U256::zero(),
code: Some(Default::default()),
storage: vec![(H256::from(&U256::from(1u64)), H256::from(&U256::from(100u64)))]
.into_iter().collect(),
})).as_ref());
}
}

@@ -15,6 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Traces config.
use bloomchain::Config as BloomConfig;

/// Traces config.
#[derive(Debug, PartialEq, Clone)]
@@ -22,6 +23,8 @@ pub struct Config {
/// Indicates if tracing should be enabled or not.
/// If it's None, it will be automatically configured.
pub enabled: bool,
/// Traces blooms configuration.
pub blooms: BloomConfig,
/// Preferred cache-size.
pub pref_cache_size: usize,
/// Max cache-size.
@@ -32,6 +35,10 @@ impl Default for Config {
fn default() -> Self {
Config {
enabled: false,
blooms: BloomConfig {
levels: 3,
elements_per_index: 16,
},
pref_cache_size: 15 * 1024 * 1024,
max_cache_size: 20 * 1024 * 1024,
}

@@ -15,15 +15,19 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Trace database.
use std::ops::Deref;
use std::collections::{HashMap, VecDeque};
use std::sync::Arc;
use bloomchain::{Number, Config as BloomConfig};
use bloomchain::group::{BloomGroupDatabase, BloomGroupChain, GroupPosition, BloomGroup};
use heapsize::HeapSizeOf;
use ethereum_types::{H256, H264, Bloom};
use ethereum_types::{H256, H264};
use kvdb::{KeyValueDB, DBTransaction};
use parking_lot::RwLock;
use header::BlockNumber;
use trace::{LocalizedTrace, Config, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras};
use db::{self, Key, Writable, Readable, CacheUpdatePolicy};
use blooms;
use super::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces};
use cache_manager::CacheManager;

@@ -33,8 +37,8 @@ const TRACE_DB_VER: &'static [u8] = b"1.0";
enum TraceDBIndex {
/// Block traces index.
BlockTraces = 0,
/// Blooms index.
Blooms = 2,
/// Trace bloom group index.
BloomGroups = 1,
}

impl Key<FlatBlockTraces> for H256 {
@@ -48,37 +52,80 @@ impl Key<FlatBlockTraces> for H256 {
}
}

impl Key<Bloom> for H256 {
type Target = H264;
/// Wrapper around `blooms::GroupPosition` so it can be
/// uniquely identified in the database.
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
struct TraceGroupPosition(blooms::GroupPosition);

fn key(&self) -> H264 {
let mut result = H264::default();
result[0] = TraceDBIndex::Blooms as u8;
result[1..33].copy_from_slice(self);
result
impl From<GroupPosition> for TraceGroupPosition {
fn from(position: GroupPosition) -> Self {
TraceGroupPosition(From::from(position))
}
}

impl HeapSizeOf for TraceGroupPosition {
fn heap_size_of_children(&self) -> usize {
0
}
}

/// Helper data structure created because [u8; 6] does not implement Deref to &[u8].
pub struct TraceGroupKey([u8; 6]);

impl Deref for TraceGroupKey {
type Target = [u8];

fn deref(&self) -> &Self::Target {
&self.0
}
}

impl Key<blooms::BloomGroup> for TraceGroupPosition {
type Target = TraceGroupKey;

fn key(&self) -> Self::Target {
let mut result = [0u8; 6];
result[0] = TraceDBIndex::BloomGroups as u8;
result[1] = self.0.level;
result[2] = self.0.index as u8;
result[3] = (self.0.index >> 8) as u8;
result[4] = (self.0.index >> 16) as u8;
result[5] = (self.0.index >> 24) as u8;
TraceGroupKey(result)
}
}

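Note the byte order: `TraceGroupKey` stores the group index little-endian (low byte first), while the blockchain-side `LogGroupKey` in extras.rs above stores it big-endian. Both are fine as opaque database keys; they just must never be mixed. An illustration of the layout produced by the `impl` above (the helper function is hypothetical):

```rust
fn trace_group_key(level: u8, index: u32) -> [u8; 6] {
    // Prefix byte 1 (BloomGroups), then the level, then the index with the
    // least significant byte first, mirroring the Key impl above.
    [1, level, index as u8, (index >> 8) as u8, (index >> 16) as u8, (index >> 24) as u8]
}

fn main() {
    assert_eq!(trace_group_key(2, 0x0102_0304), [1, 2, 4, 3, 2, 1]);
}
```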
#[derive(Debug, Hash, Eq, PartialEq)]
enum CacheId {
Trace(H256),
Bloom(H256),
Bloom(TraceGroupPosition),
}

/// Trace database.
pub struct TraceDB<T> where T: DatabaseExtras {
// cache
traces: RwLock<HashMap<H256, FlatBlockTraces>>,
blooms: RwLock<HashMap<H256, Bloom>>,
blooms: RwLock<HashMap<TraceGroupPosition, blooms::BloomGroup>>,
cache_manager: RwLock<CacheManager<CacheId>>,
// db
tracesdb: Arc<KeyValueDB>,
// config
bloom_config: BloomConfig,
// tracing enabled
enabled: bool,
// extras
extras: Arc<T>,
}

impl<T> BloomGroupDatabase for TraceDB<T> where T: DatabaseExtras {
fn blooms_at(&self, position: &GroupPosition) -> Option<BloomGroup> {
let position = TraceGroupPosition::from(position.clone());
let result = self.tracesdb.read_with_cache(db::COL_TRACE, &self.blooms, &position).map(Into::into);
self.note_used(CacheId::Bloom(position));
result
}
}

impl<T> TraceDB<T> where T: DatabaseExtras {
/// Creates new instance of `TraceDB`.
pub fn new(config: Config, tracesdb: Arc<KeyValueDB>, extras: Arc<T>) -> Self {
@@ -90,12 +137,13 @@ impl<T> TraceDB<T> where T: DatabaseExtras {
tracesdb.write(batch).expect("failed to update version");

TraceDB {
traces: RwLock::new(HashMap::new()),
blooms: RwLock::new(HashMap::new()),
cache_manager: RwLock::new(CacheManager::new(config.pref_cache_size, config.max_cache_size, 10 * 1024)),
tracesdb,
tracesdb: tracesdb,
bloom_config: config.blooms,
enabled: config.enabled,
extras,
traces: RwLock::default(),
blooms: RwLock::default(),
extras: extras,
}
}

@@ -140,12 +188,6 @@ impl<T> TraceDB<T> where T: DatabaseExtras {
result
}

fn bloom(&self, block_hash: &H256) -> Option<Bloom> {
let result = self.tracesdb.read_with_cache(db::COL_TRACE, &self.blooms, block_hash);
self.note_used(CacheId::Bloom(block_hash.clone()));
result
}

/// Returns vector of transaction traces for given block.
fn transactions_traces(&self, block_hash: &H256) -> Option<Vec<FlatTransactionTraces>> {
self.traces(block_hash).map(Into::into)
@@ -222,16 +264,47 @@ impl<T> TraceDatabase for TraceDB<T> where T: DatabaseExtras {
return;
}

// now let's rebuild the blooms
if !request.enacted.is_empty() {
let range_start = request.block_number as Number + 1 - request.enacted.len();
let range_end = range_start + request.retracted;
let replaced_range = range_start..range_end;
let enacted_blooms = request.enacted
.iter()
// all traces are expected to be found here. That's why `expect` has been used
// instead of `filter_map`. If some traces haven't been found, it means that
// the traces database is corrupted or incomplete.
.map(|block_hash| if block_hash == &request.block_hash {
request.traces.bloom()
} else {
self.traces(block_hash).expect("Traces database is incomplete.").bloom()
})
.collect();

let chain = BloomGroupChain::new(self.bloom_config, self);
let trace_blooms = chain.replace(&replaced_range, enacted_blooms);
let blooms_to_insert = trace_blooms.into_iter()
.map(|p| (From::from(p.0), From::from(p.1)))
.collect::<HashMap<TraceGroupPosition, blooms::BloomGroup>>();

let blooms_keys: Vec<_> = blooms_to_insert.keys().cloned().collect();
let mut blooms = self.blooms.write();
batch.extend_with_cache(db::COL_TRACE, &mut *blooms, blooms_to_insert, CacheUpdatePolicy::Remove);
// note_used must be called after locking blooms to avoid cache/traces deadlock on garbage collection
for key in blooms_keys {
self.note_used(CacheId::Bloom(key));
}
}

// insert new block traces into the cache and the database
let mut traces = self.traces.write();
let mut blooms = self.blooms.write();
// it's important to use overwrite here,
// because this value might be queried by hash later
batch.write_with_cache(db::COL_TRACE, &mut *blooms, request.block_hash, request.traces.bloom(), CacheUpdatePolicy::Overwrite);
batch.write_with_cache(db::COL_TRACE, &mut *traces, request.block_hash, request.traces, CacheUpdatePolicy::Overwrite);
// note_used must be called after locking traces to avoid cache/traces deadlock on garbage collection
self.note_used(CacheId::Trace(request.block_hash));
self.note_used(CacheId::Bloom(request.block_hash));
{
let mut traces = self.traces.write();
// it's important to use overwrite here,
// because this value might be queried by hash later
batch.write_with_cache(db::COL_TRACE, &mut *traces, request.block_hash, request.traces, CacheUpdatePolicy::Overwrite);
// note_used must be called after locking traces to avoid cache/traces deadlock on garbage collection
self.note_used(CacheId::Trace(request.block_hash.clone()));
}
}

fn trace(&self, block_number: BlockNumber, tx_position: usize, trace_position: Vec<usize>) -> Option<LocalizedTrace> {
@@ -318,17 +391,15 @@ impl<T> TraceDatabase for TraceDB<T> where T: DatabaseExtras {
}

fn filter(&self, filter: &Filter) -> Vec<LocalizedTrace> {
let possibilities = filter.bloom_possibilities();
||||
// + 1, cause filters are inclusive
|
||||
(filter.range.start..filter.range.end + 1).into_iter()
|
||||
.map(|n| n as BlockNumber)
|
||||
.filter_map(|n| self.extras.block_hash(n).map(|hash| (n, hash)))
|
||||
.filter(|&(_,ref hash)| {
|
||||
let bloom = self.bloom(hash).expect("hash exists; qed");
|
||||
possibilities.iter().any(|p| bloom.contains_bloom(p))
|
||||
})
|
||||
.flat_map(|(number, hash)| {
|
||||
let traces = self.traces(&hash).expect("hash exists; qed");
|
||||
let chain = BloomGroupChain::new(self.bloom_config, self);
|
||||
let numbers = chain.filter(filter);
|
||||
numbers.into_iter()
|
||||
.flat_map(|n| {
|
||||
let number = n as BlockNumber;
|
||||
let hash = self.extras.block_hash(number)
|
||||
.expect("Expected to find block hash. Extras db is probably corrupted");
|
||||
let traces = self.traces(&hash)
|
||||
.expect("Expected to find a trace. Db is probably corrupted.");
|
||||
self.matching_block_traces(filter, traces, hash, number)
|
||||
})
|
||||
.collect()
|
||||
|
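The 6-byte bloom-group key built above packs a one-byte column tag, the group level, and the group index in little-endian byte order. A minimal standalone sketch of the same layout (the `group_key` helper and the tag value are illustrative, not the crate's API):

// Standalone sketch of the 6-byte bloom-group key layout shown above:
// [column tag][level][index as little-endian u32].
fn group_key(level: u8, index: u32) -> [u8; 6] {
	let mut result = [0u8; 6];
	result[0] = 1; // illustrative column tag, e.g. TraceDBIndex::BloomGroups
	result[1] = level;
	result[2] = index as u8;
	result[3] = (index >> 8) as u8;
	result[4] = (index >> 16) as u8;
	result[5] = (index >> 24) as u8;
	result
}

fn main() {
	// 0x01020304 serialises little-endian as 04 03 02 01.
	assert_eq!(group_key(3, 0x0102_0304), [1, 3, 4, 3, 2, 1]);
	println!("key layout ok");
}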
@ -17,6 +17,7 @@
//! Trace filters type definitions

use std::ops::Range;
use bloomchain::{Filter as BloomFilter, Number};
use ethereum_types::{Address, Bloom, BloomInput};
use trace::flat::FlatTrace;
use super::trace::{Action, Res};
@ -87,9 +88,19 @@ pub struct Filter {
	pub to_address: AddressesFilter,
}

impl BloomFilter for Filter {
	fn bloom_possibilities(&self) -> Vec<Bloom> {
		self.bloom_possibilities()
	}

	fn range(&self) -> Range<Number> {
		self.range.clone()
	}
}

impl Filter {
	/// Returns combinations of each address.
	pub fn bloom_possibilities(&self) -> Vec<Bloom> {
	fn bloom_possibilities(&self) -> Vec<Bloom> {
		self.to_address.with_blooms(self.from_address.blooms())
	}

@ -117,7 +128,7 @@ impl Filter {
				from_matches && to_matches
			},
			Action::Reward(ref reward) => {
				self.to_address.matches(&reward.author)
				self.from_address.matches_all() && self.to_address.matches(&reward.author)
			},
		}
	}
@ -341,12 +352,48 @@ mod tests {
			subtraces: 0
		};

		assert!(f0.matches(&trace));
		assert!(f1.matches(&trace));
		assert!(!f0.matches(&trace));
		assert!(!f1.matches(&trace));
		assert!(f2.matches(&trace));
		assert!(f3.matches(&trace));
		assert!(f4.matches(&trace));
		assert!(f5.matches(&trace));
		assert!(!f5.matches(&trace));
		assert!(!f6.matches(&trace));
	}

	#[test]
	fn filter_match_block_reward_fix_8070() {
		let f0 = Filter {
			range: (0..0),
			from_address: vec![1.into()].into(),
			to_address: vec![].into(),
		};

		let f1 = Filter {
			range: (0..0),
			from_address: vec![].into(),
			to_address: vec![].into(),
		};

		let f2 = Filter {
			range: (0..0),
			from_address: vec![].into(),
			to_address: vec![2.into()].into(),
		};

		let trace = FlatTrace {
			action: Action::Reward(Reward {
				author: 2.into(),
				value: 10.into(),
				reward_type: RewardType::Block,
			}),
			result: Res::None,
			trace_address: vec![0].into_iter().collect(),
			subtraces: 0,
		};

		assert!(!f0.matches(&trace));
		assert!(f1.matches(&trace));
		assert!(f2.matches(&trace));
	}
}
@ -69,7 +69,7 @@ pub mod blocks {
	use super::{Kind, BlockLike};

	use engines::EthEngine;
	use error::Error;
	use error::{Error, BlockError};
	use header::Header;
	use verification::{PreverifiedBlock, verify_block_basic, verify_block_unordered};

@ -88,6 +88,10 @@ pub mod blocks {
		fn create(input: Self::Input, engine: &EthEngine) -> Result<Self::Unverified, Error> {
			match verify_block_basic(&input.header, &input.bytes, engine) {
				Ok(()) => Ok(input),
				Err(Error::Block(BlockError::TemporarilyInvalid(oob))) => {
					debug!(target: "client", "Block received too early {}: {:?}", input.hash(), oob);
					Err(BlockError::TemporarilyInvalid(oob).into())
				},
				Err(e) => {
					warn!(target: "client", "Stage 1 block verification failed for {}: {:?}", input.hash(), e);
					Err(e)
@ -37,7 +37,7 @@ use client::BlockChainClient;
use engines::EthEngine;
use error::{BlockError, Error};
use header::{BlockNumber, Header};
use transaction::SignedTransaction;
use transaction::{SignedTransaction, UnverifiedTransaction};
use views::BlockView;

/// Preprocessed block data gathered in `verify_block_unordered` call
@ -68,11 +68,9 @@ pub fn verify_block_basic(header: &Header, bytes: &[u8], engine: &EthEngine) ->
		verify_header_params(&u, engine, false)?;
		engine.verify_block_basic(&u)?;
	}
	// Verify transactions.
	// TODO: either use transaction views or cache the decoded transactions.
	let v = BlockView::new(bytes);
	for t in v.transactions() {
		engine.verify_transaction_basic(&t, &header)?;

	for t in UntrustedRlp::new(bytes).at(1)?.iter().map(|rlp| rlp.as_val::<UnverifiedTransaction>()) {
		engine.verify_transaction_basic(&t?, &header)?;
	}
	Ok(())
}
@ -348,6 +346,8 @@ mod tests {
	use time::get_time;
	use transaction::{SignedTransaction, Transaction, UnverifiedTransaction, Action};
	use types::log_entry::{LogEntry, LocalizedLogEntry};
	use rlp;
	use triehash::ordered_trie_root;

	fn check_ok(result: Result<(), Error>) {
		result.unwrap_or_else(|e| panic!("Block verification failed: {:?}", e));
@ -454,11 +454,11 @@ mod tests {
			unimplemented!()
		}

		fn blocks_with_blooms(&self, _blooms: &[Bloom], _from_block: BlockNumber, _to_block: BlockNumber) -> Vec<BlockNumber> {
		fn blocks_with_bloom(&self, _bloom: &Bloom, _from_block: BlockNumber, _to_block: BlockNumber) -> Vec<BlockNumber> {
			unimplemented!()
		}

		fn logs<F>(&self, _blocks: Vec<BlockNumber>, _matches: F, _limit: Option<usize>) -> Vec<LocalizedLogEntry>
		fn logs<F>(&self, _blocks: Vec<H256>, _matches: F, _limit: Option<usize>) -> Vec<LocalizedLogEntry>
			where F: Fn(&LogEntry) -> bool, Self: Sized {
			unimplemented!()
		}
@ -501,6 +501,27 @@ mod tests {
		Ok(())
	}

	#[test]
	fn test_verify_block_basic_with_invalid_transactions() {
		let spec = Spec::new_test();
		let engine = &*spec.engine;

		let block = {
			let mut rlp = rlp::RlpStream::new_list(3);
			let mut header = Header::default();
			// that's an invalid transaction list rlp
			let invalid_transactions = vec![vec![0u8]];
			header.set_transactions_root(ordered_trie_root(&invalid_transactions));
			header.set_gas_limit(engine.params().min_gas_limit);
			rlp.append(&header);
			rlp.append_list::<Vec<u8>, _>(&invalid_transactions);
			rlp.append_raw(&rlp::EMPTY_LIST_RLP, 1);
			rlp.out()
		};

		assert!(basic_test(&block, engine).is_err());
	}

	#[test]
	fn test_verify_block() {
		use rlp::RlpStream;
@ -119,8 +119,8 @@ pub struct Schedule {

/// Wasm cost table
pub struct WasmCosts {
	/// Arena allocator cost, per byte
	pub alloc: u32,
	/// Default opcode cost
	pub regular: u32,
	/// Div operations multiplier.
	pub div: u32,
	/// Div operations multiplier.
@ -135,17 +135,20 @@ pub struct WasmCosts {
	pub initial_mem: u32,
	/// Grow memory cost, per page (64kb)
	pub grow_mem: u32,
	/// Memory copy cost, per byte
	pub memcpy: u32,
	/// Max stack height (native WebAssembly stack limiter)
	pub max_stack_height: u32,
	/// Cost of wasm opcode is calculated as TABLE_ENTRY_COST * `opcodes_mul` / `opcodes_div`
	pub opcodes_mul: u32,
	/// Cost of wasm opcode is calculated as TABLE_ENTRY_COST * `opcodes_mul` / `opcodes_div`
	pub opcodes_div: u32,

}

impl Default for WasmCosts {
	fn default() -> Self {
		WasmCosts {
			alloc: 2,
			regular: 1,
			div: 16,
			mul: 4,
			mem: 2,
@ -153,6 +156,8 @@ impl Default for WasmCosts {
			static_address: 40,
			initial_mem: 4096,
			grow_mem: 8192,
			memcpy: 1,
			max_stack_height: 64*1024,
			opcodes_mul: 3,
			opcodes_div: 8,
		}
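With the defaults above, each metered wasm instruction costs `opcodes_mul / opcodes_div` = 3/8 of a gas unit, so the interpreter (in the lib.rs hunks further down) scales the incoming gas budget by the inverse ratio instead of charging fractional gas per opcode. A small arithmetic sketch of that two-sided scaling, assuming the default values above:

// Sketch of the scaling implied by `opcodes_mul` / `opcodes_div`: inflate the
// gas budget by div/mul before execution, deflate the remainder on the way out.
fn main() {
	let (opcodes_mul, opcodes_div) = (3u64, 8u64);
	let gas_limit = 100_000u64;

	// Budget visible to the wasm gas counter (scaled up by 8/3).
	let adjusted = gas_limit * opcodes_div / opcodes_mul;
	assert_eq!(adjusted, 266_666);

	// Leftover wasm-gas converted back to EVM gas (scaled down by 3/8).
	let wasm_gas_left = 266_000u64;
	let gas_left = wasm_gas_left * opcodes_mul / opcodes_div;
	assert_eq!(gas_left, 99_750);
}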
@ -7,9 +7,9 @@ authors = ["Parity Technologies <admin@parity.io>"]
byteorder = "1.0"
ethereum-types = "0.2"
log = "0.3"
parity-wasm = "0.23"
parity-wasm = "0.27"
libc = "0.2"
wasm-utils = { git = "https://github.com/paritytech/wasm-utils" }
pwasm-utils = "0.1"
vm = { path = "../vm" }
ethcore-logger = { path = "../../logger" }
wasmi = { git = "https://github.com/pepyakin/wasmi" }
wasmi = { version = "0.2" }
@ -130,7 +130,7 @@ pub fn run_fixture(fixture: &Fixture) -> Vec<Fail> {
		Err(e) => { return Fail::load(e); },
	};

	let mut ext = FakeExt::new();
	let mut ext = FakeExt::new().with_wasm();
	params.code = Some(Arc::new(
		if let Source::Constructor { ref arguments, ref sender, ref at, .. } = fixture.source {
			match construct(&mut ext, source, arguments.clone().into(), sender.clone().into(), at.clone().into()) {
@ -19,7 +19,7 @@
use std::cell::RefCell;
use wasmi::{
	self, Signature, Error, FuncRef, FuncInstance, MemoryDescriptor,
	MemoryRef, MemoryInstance,
	MemoryRef, MemoryInstance, memory_units,
};

/// Internal ids all functions runtime supports. This is just a glue for wasmi interpreter
@ -219,7 +219,10 @@ impl ImportResolver {
		let mut mem_ref = self.memory.borrow_mut();
		if mem_ref.is_none() {
			*mem_ref = Some(
				MemoryInstance::alloc(0, Some(0)).expect("Memory allocation (0, 0) should not fail; qed")
				MemoryInstance::alloc(
					memory_units::Pages(0),
					Some(memory_units::Pages(0)),
				).expect("Memory allocation (0, 0) should not fail; qed")
			);
		}
	}
@ -229,7 +232,7 @@ impl ImportResolver {

	/// Returns memory size module initially requested
	pub fn memory_size(&self) -> Result<u32, Error> {
		Ok(self.memory_ref().size())
		Ok(self.memory_ref().current_size().0 as u32)
	}
}

@ -281,7 +284,10 @@ impl wasmi::ModuleImportResolver for ImportResolver {
	{
		Err(Error::Instantiation("Module requested too much memory".to_owned()))
	} else {
		let mem = MemoryInstance::alloc(descriptor.initial(), descriptor.maximum())?;
		let mem = MemoryInstance::alloc(
			memory_units::Pages(descriptor.initial() as usize),
			descriptor.maximum().map(|x| memory_units::Pages(x as usize)),
		)?;
		*self.memory.borrow_mut() = Some(mem.clone());
		Ok(mem)
	}
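The `memory_units::Pages` wrappers above reflect that wasm linear memory is sized in 64 KiB pages, so the resolver's `with_limit(16)` caps a module at 1 MiB of memory. A trivial sketch of the byte/page arithmetic this relies on (plain Rust, not the wasmi API):

// Sketch: wasm linear memory grows in 64 KiB pages, which is what the
// `memory_units::Pages` type above makes explicit.
const PAGE_SIZE: usize = 64 * 1024;

fn pages_to_bytes(pages: usize) -> usize { pages * PAGE_SIZE }

fn main() {
	assert_eq!(pages_to_bytes(0), 0);        // the empty placeholder memory
	assert_eq!(pages_to_bytes(16), 1 << 20); // the 16-page resolver limit = 1 MiB
}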
@ -23,7 +23,7 @@ extern crate ethereum_types;
extern crate libc;
extern crate parity_wasm;
extern crate vm;
extern crate wasm_utils;
extern crate pwasm_utils as wasm_utils;
extern crate wasmi;

mod runtime;
@ -34,7 +34,7 @@ mod panic_payload;
mod parser;

use vm::{GasLeft, ReturnData, ActionParams};
use wasmi::Error as InterpreterError;
use wasmi::{Error as InterpreterError, Trap};

use runtime::{Runtime, RuntimeContext};

@ -42,17 +42,29 @@ use ethereum_types::U256;

/// Wrapped interpreter error
#[derive(Debug)]
pub struct Error(InterpreterError);
pub enum Error {
	Interpreter(InterpreterError),
	Trap(Trap),
}

impl From<InterpreterError> for Error {
	fn from(e: InterpreterError) -> Self {
		Error(e)
		Error::Interpreter(e)
	}
}

impl From<Trap> for Error {
	fn from(e: Trap) -> Self {
		Error::Trap(e)
	}
}

impl From<Error> for vm::Error {
	fn from(e: Error) -> Self {
		vm::Error::Wasm(format!("Wasm runtime error: {:?}", e.0))
		match e {
			Error::Interpreter(e) => vm::Error::Wasm(format!("Wasm runtime error: {:?}", e)),
			Error::Trap(e) => vm::Error::Wasm(format!("Wasm contract trap: {:?}", e)),
		}
	}
}

@ -65,19 +77,25 @@ impl From<runtime::Error> for vm::Error {
	}
}

enum ExecutionOutcome {
	Suicide,
	Return,
	NotSpecial,
}

impl vm::Vm for WasmInterpreter {

	fn exec(&mut self, params: ActionParams, ext: &mut vm::Ext) -> vm::Result<GasLeft> {
		let (module, data) = parser::payload(&params, ext.schedule().wasm())?;

		let loaded_module = wasmi::Module::from_parity_wasm_module(module).map_err(Error)?;
		let loaded_module = wasmi::Module::from_parity_wasm_module(module).map_err(Error::Interpreter)?;

		let instantiation_resolover = env::ImportResolver::with_limit(16);
		let instantiation_resolver = env::ImportResolver::with_limit(16);

		let module_instance = wasmi::ModuleInstance::new(
			&loaded_module,
			&wasmi::ImportsBuilder::new().with_resolver("env", &instantiation_resolover)
		).map_err(Error)?;
			&wasmi::ImportsBuilder::new().with_resolver("env", &instantiation_resolver)
		).map_err(Error::Interpreter)?;

		let adjusted_gas = params.gas * U256::from(ext.schedule().wasm().opcodes_div) /
			U256::from(ext.schedule().wasm().opcodes_mul);
@ -87,13 +105,13 @@ impl vm::Vm for WasmInterpreter {
			return Err(vm::Error::Wasm("Wasm interpreter cannot run contracts with gas (wasm adjusted) >= 2^64".to_owned()));
		}

		let initial_memory = instantiation_resolover.memory_size().map_err(Error)?;
		let initial_memory = instantiation_resolver.memory_size().map_err(Error::Interpreter)?;
		trace!(target: "wasm", "Contract requested {:?} pages of initial memory", initial_memory);

		let (gas_left, result) = {
			let mut runtime = Runtime::with_params(
				ext,
				instantiation_resolover.memory_ref(),
				instantiation_resolver.memory_ref(),
				// cannot overflow, checked above
				adjusted_gas.low_u64(),
				data.to_vec(),
@ -114,33 +132,29 @@ impl vm::Vm for WasmInterpreter {
			assert!(runtime.schedule().wasm().initial_mem < 1 << 16);
			runtime.charge(|s| initial_memory as u64 * s.wasm().initial_mem as u64)?;

			let module_instance = module_instance.run_start(&mut runtime).map_err(Error)?;
			let module_instance = module_instance.run_start(&mut runtime).map_err(Error::Trap)?;

			match module_instance.invoke_export("call", &[], &mut runtime) {
				Ok(_) => { },
				Err(InterpreterError::Host(boxed)) => {
					match boxed.downcast_ref::<runtime::Error>() {
						None => {
							return Err(vm::Error::Wasm("Invalid user error used in interpreter".to_owned()));
						}
						Some(runtime_err) => {
							match *runtime_err {
								runtime::Error::Suicide => {
									// Suicide uses trap to break execution
								}
								ref any_err => {
									trace!(target: "wasm", "Error executing contract: {:?}", boxed);
									return Err(vm::Error::from(Error::from(InterpreterError::Host(Box::new(any_err.clone())))));
								}
							}
						}
					}
				}
			let invoke_result = module_instance.invoke_export("call", &[], &mut runtime);

			let mut execution_outcome = ExecutionOutcome::NotSpecial;
			if let Err(InterpreterError::Trap(ref trap)) = invoke_result {
				if let wasmi::TrapKind::Host(ref boxed) = *trap.kind() {
					let ref runtime_err = boxed.downcast_ref::<runtime::Error>()
						.expect("Host errors other than runtime::Error never produced; qed");

					match **runtime_err {
						runtime::Error::Suicide => { execution_outcome = ExecutionOutcome::Suicide; },
						runtime::Error::Return => { execution_outcome = ExecutionOutcome::Return; },
						_ => {}
					}
				},
				Err(err) => {
					trace!(target: "wasm", "Error executing contract: {:?}", err);
					return Err(vm::Error::from(Error::from(err)))
				}
			}

			if let (ExecutionOutcome::NotSpecial, Err(e)) = (execution_outcome, invoke_result) {
				trace!(target: "wasm", "Error executing contract: {:?}", e);
				return Err(vm::Error::from(Error::from(e)));
			}

			(
				runtime.gas_left().expect("Cannot fail since it was not updated since last charge"),
				runtime.into_result(),
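The rewritten `exec` above treats two host-raised conditions, `Suicide` and `Return`, as normal control flow, recovered by downcasting the host error carried inside the trap. A minimal illustration of that downcast-and-classify pattern using `std::any` (wasmi's `Trap` type is not reproduced; all names here are illustrative):

use std::any::Any;

// Sketch of control flow via traps: a host function aborts execution with an
// error value, and the caller downcasts it to decide whether the "error" was
// really a normal outcome (Return / Suicide) rather than a failure.
#[derive(Debug)]
enum RuntimeError { Suicide, Return, Other }

#[derive(Debug)]
enum ExecutionOutcome { Suicide, Return, NotSpecial }

fn classify(trap: &Box<dyn Any>) -> ExecutionOutcome {
	match trap.downcast_ref::<RuntimeError>() {
		Some(&RuntimeError::Suicide) => ExecutionOutcome::Suicide,
		Some(&RuntimeError::Return) => ExecutionOutcome::Return,
		_ => ExecutionOutcome::NotSpecial,
	}
}

fn main() {
	let trap: Box<dyn Any> = Box::new(RuntimeError::Return);
	match classify(&trap) {
		ExecutionOutcome::Return => println!("normal return, not a failure"),
		other => panic!("expected Return, got {:?}", other),
	}
}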
@ -22,14 +22,18 @@ use parity_wasm::elements::{self, Deserialize};
use parity_wasm::peek_size;

fn gas_rules(wasm_costs: &vm::WasmCosts) -> rules::Set {
	rules::Set::new({
		let mut vals = ::std::collections::HashMap::with_capacity(4);
		vals.insert(rules::InstructionType::Load, wasm_costs.mem as u32);
		vals.insert(rules::InstructionType::Store, wasm_costs.mem as u32);
		vals.insert(rules::InstructionType::Div, wasm_costs.div as u32);
		vals.insert(rules::InstructionType::Mul, wasm_costs.mul as u32);
		vals
	}).with_grow_cost(wasm_costs.grow_mem)
	rules::Set::new(
		wasm_costs.regular,
		{
			let mut vals = ::std::collections::HashMap::with_capacity(8);
			vals.insert(rules::InstructionType::Load, rules::Metering::Fixed(wasm_costs.mem as u32));
			vals.insert(rules::InstructionType::Store, rules::Metering::Fixed(wasm_costs.mem as u32));
			vals.insert(rules::InstructionType::Div, rules::Metering::Fixed(wasm_costs.div as u32));
			vals.insert(rules::InstructionType::Mul, rules::Metering::Fixed(wasm_costs.mul as u32));
			vals
		})
		.with_grow_cost(wasm_costs.grow_mem)
		.with_forbidden_floats()
}

/// Splits payload to code and data according to params.params_type, also
@ -71,7 +75,12 @@ pub fn payload<'a>(params: &'a vm::ActionParams, wasm_costs: &vm::WasmCosts)
	let contract_module = wasm_utils::inject_gas_counter(
		deserialized_module,
		&gas_rules(wasm_costs),
	);
	).map_err(|_| vm::Error::Wasm(format!("Wasm contract error: bytecode invalid")))?;

	let contract_module = wasm_utils::stack_height::inject_limiter(
		contract_module,
		wasm_costs.max_stack_height,
	).map_err(|_| vm::Error::Wasm(format!("Wasm contract error: stack limiter failure")))?;

	let data = match params.params_type {
		vm::ParamsType::Embedded => {
@ -1,6 +1,6 @@
use ethereum_types::{U256, H256, Address};
use vm::{self, CallType};
use wasmi::{self, MemoryRef, RuntimeArgs, RuntimeValue, Error as InterpreterError};
use wasmi::{self, MemoryRef, RuntimeArgs, RuntimeValue, Error as InterpreterError, Trap, TrapKind};
use super::panic_payload;

pub struct RuntimeContext {
@ -32,6 +32,8 @@ pub enum Error {
	MemoryAccessViolation,
	/// Native code resulted in suicide
	Suicide,
	/// Native code requested execution to finish
	Return,
	/// Suicide was requested but coudn't complete
	SuicideAbort,
	/// Invalid gas state inside interpreter
@ -52,15 +54,40 @@ pub enum Error {
	Other,
	/// Syscall signature mismatch
	InvalidSyscall,
	/// Unreachable instruction encountered
	Unreachable,
	/// Invalid virtual call
	InvalidVirtualCall,
	/// Division by zero
	DivisionByZero,
	/// Invalid conversion to integer
	InvalidConversionToInt,
	/// Stack overflow
	StackOverflow,
	/// Panic with message
	Panic(String),
}

impl wasmi::HostError for Error { }

impl From<Trap> for Error {
	fn from(trap: Trap) -> Self {
		match *trap.kind() {
			TrapKind::Unreachable => Error::Unreachable,
			TrapKind::MemoryAccessOutOfBounds => Error::MemoryAccessViolation,
			TrapKind::TableAccessOutOfBounds | TrapKind::ElemUninitialized => Error::InvalidVirtualCall,
			TrapKind::DivisionByZero => Error::DivisionByZero,
			TrapKind::InvalidConversionToInt => Error::InvalidConversionToInt,
			TrapKind::UnexpectedSignature => Error::InvalidVirtualCall,
			TrapKind::StackOverflow => Error::StackOverflow,
			TrapKind::Host(_) => Error::Other,
		}
	}
}

impl From<InterpreterError> for Error {
	fn from(interpreter_err: InterpreterError) -> Self {
		match interpreter_err {
	fn from(err: InterpreterError) -> Self {
		match err {
			InterpreterError::Value(_) => Error::InvalidSyscall,
			InterpreterError::Memory(_) => Error::MemoryAccessViolation,
			_ => Error::Other,
@ -78,6 +105,7 @@ impl ::std::fmt::Display for Error {
			Error::InvalidGasState => write!(f, "Invalid gas state"),
			Error::BalanceQueryError => write!(f, "Balance query resulted in an error"),
			Error::Suicide => write!(f, "Suicide result"),
			Error::Return => write!(f, "Return result"),
			Error::Unknown => write!(f, "Unknown runtime function invoked"),
			Error::AllocationFailed => write!(f, "Memory allocation failed (OOM)"),
			Error::BadUtf8 => write!(f, "String encoding is bad utf-8 sequence"),
@ -85,6 +113,11 @@ impl ::std::fmt::Display for Error {
			Error::Log => write!(f, "Error occured while logging an event"),
			Error::InvalidSyscall => write!(f, "Invalid syscall signature encountered at runtime"),
			Error::Other => write!(f, "Other unspecified error"),
			Error::Unreachable => write!(f, "Unreachable instruction encountered"),
			Error::InvalidVirtualCall => write!(f, "Invalid virtual call"),
			Error::DivisionByZero => write!(f, "Division by zero"),
			Error::StackOverflow => write!(f, "Stack overflow"),
			Error::InvalidConversionToInt => write!(f, "Invalid conversion to integer"),
			Error::Panic(ref msg) => write!(f, "Panic: {}", msg),
		}
	}
@ -143,12 +176,14 @@ impl<'a> Runtime<'a> {
	/// Intuition about the return value sense is to aswer the question 'are we allowed to continue?'
	fn charge_gas(&mut self, amount: u64) -> bool {
		let prev = self.gas_counter;
		if prev + amount > self.gas_limit {
			// exceeds gas
			false
		} else {
			self.gas_counter = prev + amount;
			true
		match prev.checked_add(amount) {
			// gas charge overflow protection
			None => false,
			Some(val) if val > self.gas_limit => false,
			Some(_) => {
				self.gas_counter = prev + amount;
				true
			}
		}
	}

@ -203,8 +238,8 @@ impl<'a> Runtime<'a> {
	/// Read from the storage to wasm memory
	pub fn storage_read(&mut self, args: RuntimeArgs) -> Result<()>
	{
		let key = self.h256_at(args.nth(0)?)?;
		let val_ptr: u32 = args.nth(1)?;
		let key = self.h256_at(args.nth_checked(0)?)?;
		let val_ptr: u32 = args.nth_checked(1)?;

		let val = self.ext.storage_at(&key).map_err(|_| Error::StorageReadError)?;

@ -218,8 +253,8 @@ impl<'a> Runtime<'a> {
	/// Write to storage from wasm memory
	pub fn storage_write(&mut self, args: RuntimeArgs) -> Result<()>
	{
		let key = self.h256_at(args.nth(0)?)?;
		let val_ptr: u32 = args.nth(1)?;
		let key = self.h256_at(args.nth_checked(0)?)?;
		let val_ptr: u32 = args.nth_checked(1)?;

		let val = self.h256_at(val_ptr)?;
		let former_val = self.ext.storage_at(&key).map_err(|_| Error::StorageUpdateError)?;
@ -250,14 +285,14 @@ impl<'a> Runtime<'a> {
	/// * pointer in sandboxed memory where result is
	/// * the length of the result
	pub fn ret(&mut self, args: RuntimeArgs) -> Result<()> {
		let ptr: u32 = args.nth(0)?;
		let len: u32 = args.nth(1)?;
		let ptr: u32 = args.nth_checked(0)?;
		let len: u32 = args.nth_checked(1)?;

		trace!(target: "wasm", "Contract ret: {} bytes @ {}", len, ptr);

		self.result = self.memory.get(ptr, len as usize)?;

		Ok(())
		Err(Error::Return)
	}

	/// Destroy the runtime, returning currently recorded result of the execution
@ -273,7 +308,7 @@ impl<'a> Runtime<'a> {

	/// Report gas cost with the params passed in wasm stack
	fn gas(&mut self, args: RuntimeArgs) -> Result<()> {
		let amount: u32 = args.nth(0)?;
		let amount: u32 = args.nth_checked(0)?;
		if self.charge_gas(amount as u64) {
			Ok(())
		} else {
@ -288,7 +323,11 @@ impl<'a> Runtime<'a> {

	/// Write input bytes to the memory location using the passed pointer
	fn fetch_input(&mut self, args: RuntimeArgs) -> Result<()> {
		let ptr: u32 = args.nth(0)?;
		let ptr: u32 = args.nth_checked(0)?;

		let args_len = self.args.len() as u64;
		self.charge(|s| args_len * s.wasm().memcpy as u64)?;

		self.memory.set(ptr, &self.args[..])?;
		Ok(())
	}
@ -298,8 +337,8 @@ impl<'a> Runtime<'a> {
	/// Contract can invoke this when he encounters unrecoverable error.
	fn panic(&mut self, args: RuntimeArgs) -> Result<()>
	{
		let payload_ptr: u32 = args.nth(0)?;
		let payload_len: u32 = args.nth(1)?;
		let payload_ptr: u32 = args.nth_checked(0)?;
		let payload_len: u32 = args.nth_checked(1)?;

		let raw_payload = self.memory.get(payload_ptr, payload_len as usize)?;
		let payload = panic_payload::decode(&raw_payload);
@ -333,26 +372,26 @@ impl<'a> Runtime<'a> {
	{
		trace!(target: "wasm", "runtime: CALL({:?})", call_type);

		let gas: u64 = args.nth(0)?;
		let gas: u64 = args.nth_checked(0)?;
		trace!(target: "wasm", "  gas: {:?}", gas);

		let address = self.address_at(args.nth(1)?)?;
		let address = self.address_at(args.nth_checked(1)?)?;
		trace!(target: "wasm", "  address: {:?}", address);

		let vofs = if use_val { 1 } else { 0 };
		let val = if use_val { Some(self.u256_at(args.nth(2)?)?) } else { None };
		let val = if use_val { Some(self.u256_at(args.nth_checked(2)?)?) } else { None };
		trace!(target: "wasm", "  val: {:?}", val);

		let input_ptr: u32 = args.nth(2 + vofs)?;
		let input_ptr: u32 = args.nth_checked(2 + vofs)?;
		trace!(target: "wasm", "  input_ptr: {:?}", input_ptr);

		let input_len: u32 = args.nth(3 + vofs)?;
		let input_len: u32 = args.nth_checked(3 + vofs)?;
		trace!(target: "wasm", "  input_len: {:?}", input_len);

		let result_ptr: u32 = args.nth(4 + vofs)?;
		let result_ptr: u32 = args.nth_checked(4 + vofs)?;
		trace!(target: "wasm", "  result_ptr: {:?}", result_ptr);

		let result_alloc_len: u32 = args.nth(5 + vofs)?;
		let result_alloc_len: u32 = args.nth_checked(5 + vofs)?;
		trace!(target: "wasm", "  result_len: {:?}", result_alloc_len);

		if let Some(ref val) = val {
@ -453,7 +492,7 @@ impl<'a> Runtime<'a> {
	/// Returns value (in Wei) passed to contract
	pub fn value(&mut self, args: RuntimeArgs) -> Result<()> {
		let val = self.context.value;
		self.return_u256_ptr(args.nth(0)?, val)
		self.return_u256_ptr(args.nth_checked(0)?, val)
	}

	/// Creates a new contract
@ -470,13 +509,13 @@ impl<'a> Runtime<'a> {
		// fn create(endowment: *const u8, code_ptr: *const u8, code_len: u32, result_ptr: *mut u8) -> i32;
		//
		trace!(target: "wasm", "runtime: CREATE");
		let endowment = self.u256_at(args.nth(0)?)?;
		let endowment = self.u256_at(args.nth_checked(0)?)?;
		trace!(target: "wasm", "  val: {:?}", endowment);
		let code_ptr: u32 = args.nth(1)?;
		let code_ptr: u32 = args.nth_checked(1)?;
		trace!(target: "wasm", "  code_ptr: {:?}", code_ptr);
		let code_len: u32 = args.nth(2)?;
		let code_len: u32 = args.nth_checked(2)?;
		trace!(target: "wasm", "  code_len: {:?}", code_len);
		let result_ptr: u32 = args.nth(3)?;
		let result_ptr: u32 = args.nth_checked(3)?;
		trace!(target: "wasm", "result_ptr: {:?}", result_ptr);

		let code = self.memory.get(code_ptr, code_len as usize)?;
@ -518,13 +557,13 @@ impl<'a> Runtime<'a> {

	fn debug(&mut self, args: RuntimeArgs) -> Result<()>
	{
		let msg_ptr: u32 = args.nth(0)?;
		let msg_len: u32 = args.nth(1)?;
		trace!(target: "wasm", "Contract debug message: {}", {
		let msg_ptr: u32 = args.nth_checked(0)?;
		let msg_len: u32 = args.nth_checked(1)?;

		let msg = String::from_utf8(self.memory.get(msg_ptr, msg_len as usize)?)
			.map_err(|_| Error::BadUtf8)?;

		trace!(target: "wasm", "Contract debug message: {}", msg);
			String::from_utf8(self.memory.get(msg_ptr, msg_len as usize)?)
				.map_err(|_| Error::BadUtf8)?
		});

		Ok(())
	}
@ -532,7 +571,7 @@ impl<'a> Runtime<'a> {
	/// Pass suicide to state runtime
	pub fn suicide(&mut self, args: RuntimeArgs) -> Result<()>
	{
		let refund_address = self.address_at(args.nth(0)?)?;
		let refund_address = self.address_at(args.nth_checked(0)?)?;

		if self.ext.exists(&refund_address).map_err(|_| Error::SuicideAbort)? {
			trace!(target: "wasm", "Suicide: refund to existing address {}", refund_address);
@ -551,8 +590,8 @@ impl<'a> Runtime<'a> {
	/// Signature: `fn blockhash(number: i64, dest: *mut u8)`
	pub fn blockhash(&mut self, args: RuntimeArgs) -> Result<()> {
		self.adjusted_charge(|schedule| schedule.blockhash_gas as u64)?;
		let hash = self.ext.blockhash(&U256::from(args.nth::<u64>(0)?));
		self.memory.set(args.nth(1)?, &*hash)?;
		let hash = self.ext.blockhash(&U256::from(args.nth_checked::<u64>(0)?));
		self.memory.set(args.nth_checked(1)?, &*hash)?;

		Ok(())
	}
@ -565,37 +604,37 @@ impl<'a> Runtime<'a> {
	/// Signature: `fn coinbase(dest: *mut u8)`
	pub fn coinbase(&mut self, args: RuntimeArgs) -> Result<()> {
		let coinbase = self.ext.env_info().author;
		self.return_address_ptr(args.nth(0)?, coinbase)
		self.return_address_ptr(args.nth_checked(0)?, coinbase)
	}

	/// Signature: `fn difficulty(dest: *mut u8)`
	pub fn difficulty(&mut self, args: RuntimeArgs) -> Result<()> {
		let difficulty = self.ext.env_info().difficulty;
		self.return_u256_ptr(args.nth(0)?, difficulty)
		self.return_u256_ptr(args.nth_checked(0)?, difficulty)
	}

	/// Signature: `fn gaslimit(dest: *mut u8)`
	pub fn gaslimit(&mut self, args: RuntimeArgs) -> Result<()> {
		let gas_limit = self.ext.env_info().gas_limit;
		self.return_u256_ptr(args.nth(0)?, gas_limit)
		self.return_u256_ptr(args.nth_checked(0)?, gas_limit)
	}

	/// Signature: `fn address(dest: *mut u8)`
	pub fn address(&mut self, args: RuntimeArgs) -> Result<()> {
		let address = self.context.address;
		self.return_address_ptr(args.nth(0)?, address)
		self.return_address_ptr(args.nth_checked(0)?, address)
	}

	/// Signature: `sender(dest: *mut u8)`
	pub fn sender(&mut self, args: RuntimeArgs) -> Result<()> {
		let sender = self.context.sender;
		self.return_address_ptr(args.nth(0)?, sender)
		self.return_address_ptr(args.nth_checked(0)?, sender)
	}

	/// Signature: `origin(dest: *mut u8)`
	pub fn origin(&mut self, args: RuntimeArgs) -> Result<()> {
		let origin = self.context.origin;
		self.return_address_ptr(args.nth(0)?, origin)
		self.return_address_ptr(args.nth_checked(0)?, origin)
	}

	/// Signature: `timestamp() -> i64`
@ -607,10 +646,10 @@ impl<'a> Runtime<'a> {
	/// Signature: `fn elog(topic_ptr: *const u8, topic_count: u32, data_ptr: *const u8, data_len: u32)`
	pub fn elog(&mut self, args: RuntimeArgs) -> Result<()>
	{
		let topic_ptr: u32 = args.nth(0)?;
		let topic_count: u32 = args.nth(1)?;
		let data_ptr: u32 = args.nth(2)?;
		let data_len: u32 = args.nth(3)?;
		let topic_ptr: u32 = args.nth_checked(0)?;
		let topic_count: u32 = args.nth_checked(1)?;
		let data_ptr: u32 = args.nth_checked(2)?;
		let data_len: u32 = args.nth_checked(3)?;

		if topic_count > 4 {
			return Err(Error::Log.into());
@ -643,7 +682,7 @@ impl<'a> Runtime<'a> {

mod ext_impl {

	use wasmi::{Externals, RuntimeArgs, RuntimeValue, Error};
	use wasmi::{Externals, RuntimeArgs, RuntimeValue, Trap};
	use env::ids::*;

	macro_rules! void {
@ -663,7 +702,7 @@ mod ext_impl {
			&mut self,
			index: usize,
			args: RuntimeArgs,
		) -> Result<Option<RuntimeValue>, Error> {
		) -> Result<Option<RuntimeValue>, Trap> {
			match index {
				STORAGE_WRITE_FUNC => void!(self.storage_write(args)),
				STORAGE_READ_FUNC => void!(self.storage_read(args)),
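The `charge_gas` rewrite above guards against `u64` overflow: with the old `prev + amount > limit` comparison, a huge `amount` wraps around and slips past the check. A self-contained sketch of the fixed logic:

// Sketch of the overflow-safe gas check introduced above. `checked_add`
// turns the wrap-around into an outright rejection.
fn charge_gas(counter: &mut u64, limit: u64, amount: u64) -> bool {
	match counter.checked_add(amount) {
		None => false,                      // addition overflowed
		Some(v) if v > limit => false,      // exceeds the budget
		Some(v) => { *counter = v; true }   // accepted
	}
}

fn main() {
	let mut counter = 10;
	assert!(charge_gas(&mut counter, 100, 50));        // 10 + 50 <= 100
	assert!(!charge_gas(&mut counter, 100, 50));       // 60 + 50 > 100
	assert!(!charge_gas(&mut counter, 100, u64::MAX)); // would overflow
	assert_eq!(counter, 60);
}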
@ -207,7 +207,7 @@ fn dispersion() {
		result,
		vec![0u8, 0, 125, 11, 197, 7, 255, 8, 19, 0]
	);
	assert_eq!(gas_left, U256::from(93_972));
	assert_eq!(gas_left, U256::from(94_013));
}

#[test]
@ -235,7 +235,7 @@ fn suicide_not() {
		result,
		vec![0u8]
	);
	assert_eq!(gas_left, U256::from(94_970));
	assert_eq!(gas_left, U256::from(94_984));
}

#[test]
@ -267,7 +267,7 @@ fn suicide() {
	};

	assert!(ext.suicides.contains(&refund));
	assert_eq!(gas_left, U256::from(94_933));
	assert_eq!(gas_left, U256::from(94_925));
}

#[test]
@ -297,7 +297,7 @@ fn create() {
	assert!(ext.calls.contains(
		&FakeCall {
			call_type: FakeCallType::Create,
			gas: U256::from(60_917),
			gas: U256::from(60_914),
			sender_address: None,
			receive_address: None,
			value: Some(1_000_000_000.into()),
@ -305,7 +305,7 @@ fn create() {
			code_address: None,
		}
	));
	assert_eq!(gas_left, U256::from(60_903));
	assert_eq!(gas_left, U256::from(60_900));
}

#[test]
@ -465,7 +465,7 @@ fn realloc() {
		}
	};
	assert_eq!(result, vec![0u8; 2]);
	assert_eq!(gas_left, U256::from(94_352));
	assert_eq!(gas_left, U256::from(94_372));
}

#[test]
@ -541,7 +541,7 @@ fn keccak() {
	};

	assert_eq!(H256::from_slice(&result), H256::from("68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87"));
	assert_eq!(gas_left, U256::from(84_223));
	assert_eq!(gas_left, U256::from(84_240));
}

// math_* tests check the ability of wasm contract to perform big integer operations
@ -570,7 +570,7 @@ fn math_add() {
		U256::from_dec_str("1888888888888888888888888888887").unwrap(),
		(&result[..]).into()
	);
	assert_eq!(gas_left, U256::from(93_818));
	assert_eq!(gas_left, U256::from(93_814));
}

// multiplication
@ -592,7 +592,7 @@ fn math_mul() {
		U256::from_dec_str("888888888888888888888888888887111111111111111111111111111112").unwrap(),
		(&result[..]).into()
	);
	assert_eq!(gas_left, U256::from(93_304));
	assert_eq!(gas_left, U256::from(93_300));
}

// subtraction
@ -614,7 +614,7 @@ fn math_sub() {
		U256::from_dec_str("111111111111111111111111111111").unwrap(),
		(&result[..]).into()
	);
	assert_eq!(gas_left, U256::from(93_831));
	assert_eq!(gas_left, U256::from(93_826));
}

// subtraction with overflow
@ -656,7 +656,7 @@ fn math_div() {
		U256::from_dec_str("1125000").unwrap(),
		(&result[..]).into()
	);
	assert_eq!(gas_left, U256::from(90_607));
	assert_eq!(gas_left, U256::from(90_603));
}

#[test]
@ -684,7 +684,7 @@ fn storage_metering() {
	};

	// 0 -> not 0
	assert_eq!(gas_left, U256::from(74_410));
	assert_eq!(gas_left, U256::from(74_338));

	// #2

@ -703,7 +703,7 @@ fn storage_metering() {
	};

	// not 0 -> not 0
	assert_eq!(gas_left, U256::from(89_410));
	assert_eq!(gas_left, U256::from(89_338));
}

// This test checks the ability of wasm contract to invoke
@ -791,7 +791,7 @@ fn externs() {
		"Gas limit requested and returned does not match"
	);

	assert_eq!(gas_left, U256::from(92_089));
	assert_eq!(gas_left, U256::from(92_110));
}

#[test]
@ -817,7 +817,7 @@ fn embedded_keccak() {
	};

	assert_eq!(H256::from_slice(&result), H256::from("68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87"));
	assert_eq!(gas_left, U256::from(84_223));
	assert_eq!(gas_left, U256::from(84_240));
}

/// This test checks the correctness of log extern
@ -852,5 +852,5 @@ fn events() {
	assert_eq!(&log_entry.data, b"gnihtemos");

	assert_eq!(&result, b"gnihtemos");
	assert_eq!(gas_left, U256::from(81_235));
	assert_eq!(gas_left, U256::from(81_292));
}
@ -9,4 +9,4 @@ tiny-keccak = "1.3"
eth-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1" }
ethkey = { path = "../ethkey" }
ethereum-types = "0.2"
subtle = "0.1"
subtle = "0.5"
@ -308,7 +308,7 @@ pub mod ecies {
	hmac.raw_result(&mut mac);

	// constant time compare to avoid timing attack.
	if ::subtle::arrays_equal(&mac[..], msg_mac) != 1 {
	if ::subtle::slices_equal(&mac[..], msg_mac) != 1 {
		return Err(Error::InvalidMessage);
	}

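Both this hunk and the keystore change further down swap a plain comparison for `subtle::slices_equal` (the subtle 0.5 API), keeping the MAC check constant-time so that an early-exit `==` cannot leak how many prefix bytes matched. A sketch of what a fixed-time comparison looks like (illustrative only, not the subtle crate's implementation):

// Constant-time equality in the spirit of `subtle::slices_equal`: accumulate
// differences with XOR/OR instead of returning at the first mismatching byte,
// so timing does not depend on where the inputs diverge.
fn ct_eq(a: &[u8], b: &[u8]) -> bool {
	if a.len() != b.len() {
		return false;
	}
	let mut diff = 0u8;
	for (x, y) in a.iter().zip(b.iter()) {
		diff |= x ^ y;
	}
	diff == 0
}

fn main() {
	assert!(ct_eq(b"mac-bytes", b"mac-bytes"));
	assert!(!ct_eq(b"mac-bytes", b"mac-bytez"));
}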
@ -22,6 +22,10 @@ ethereum-types = "0.2"
dir = { path = "../util/dir" }
smallvec = "0.4"
parity-wordlist = "1.0"
subtle = "0.5"
tempdir = "0.3"

[dev-dependencies]
matches = "0.1"

[lib]
|
||||
let password = load_password(&args.arg_password)?;
|
||||
let account_ref = open_args_vault_account(&store, address, &args)?;
|
||||
let signature = store.sign(&account_ref, &password, &message)?;
|
||||
Ok(format!("0x{:?}", signature))
|
||||
Ok(format!("0x{}", signature))
|
||||
} else if args.cmd_public {
|
||||
let address = args.arg_address.parse().map_err(|_| ethstore::Error::InvalidAccount)?;
|
||||
let password = load_password(&args.arg_password)?;
|
||||
|
@ -21,6 +21,7 @@ use crypto::Keccak256;
use random::Random;
use smallvec::SmallVec;
use account::{Cipher, Kdf, Aes128Ctr, Pbkdf2, Prf};
use subtle;

/// Encrypted data
#[derive(Debug, PartialEq, Clone)]
@ -136,7 +137,7 @@ impl Crypto {

		let mac = crypto::derive_mac(&derived_right_bits, &self.ciphertext).keccak256();

		if mac != self.mac {
		if subtle::slices_equal(&mac, &self.mac) == 0 {
			return Err(Error::InvalidPassword);
		}

@ -158,7 +159,7 @@ impl Crypto {
#[cfg(test)]
mod tests {
	use ethkey::{Generator, Random};
	use super::Crypto;
	use super::{Crypto, Error};

	#[test]
	fn crypto_with_secret_create() {
@ -169,11 +170,10 @@ mod tests {
	}

	#[test]
	#[should_panic]
	fn crypto_with_secret_invalid_password() {
		let keypair = Random.generate().unwrap();
		let crypto = Crypto::with_secret(keypair.secret(), "this is sparta", 10240);
		let _ = crypto.secret("this is sparta!").unwrap();
		assert_matches!(crypto.secret("this is sparta!"), Err(Error::InvalidPassword))
	}

	#[test]
@ -28,6 +28,7 @@ extern crate rustc_hex;
extern crate serde;
extern crate serde_json;
extern crate smallvec;
extern crate subtle;
extern crate time;
extern crate tiny_keccak;
extern crate tempdir;
@ -42,6 +43,10 @@ extern crate log;
#[macro_use]
extern crate serde_derive;

#[cfg(test)]
#[macro_use]
extern crate matches;

pub mod accounts_dir;
pub mod ethkey;

@ -8,6 +8,7 @@ authors = ["Parity Technologies <admin@parity.io>"]

[dependencies]
futures = "0.1"
futures-cpupool = "0.1"
log = "0.3"
mime = "0.3"
mime_guess = "2.0.0-alpha.2"
@ -24,4 +25,5 @@ ethabi-derive = "5.0"
ethabi-contract = "5.0"

[dev-dependencies]
hyper = "0.11"
parking_lot = "0.5"
@ -22,7 +22,8 @@ use std::sync::Arc;
use std::path::PathBuf;

use hash::keccak_buffer;
use fetch::{Fetch, Response, Error as FetchError, Client as FetchClient};
use fetch::{self, Fetch};
use futures_cpupool::CpuPool;
use futures::{Future, IntoFuture};
use parity_reactor::Remote;
use urlhint::{ContractClient, URLHintContract, URLHint, URLHintResult};
@ -36,7 +37,7 @@ pub trait HashFetch: Send + Sync + 'static {
	/// 2. `on_done` - callback function invoked when the content is ready (or there was error during fetch)
	///
	/// This function may fail immediately when fetch cannot be initialized or content cannot be resolved.
	fn fetch(&self, hash: H256, on_done: Box<Fn(Result<PathBuf, Error>) + Send>);
	fn fetch(&self, hash: H256, abort: fetch::Abort, on_done: Box<Fn(Result<PathBuf, Error>) + Send>);
}

/// Hash-fetching error.
@ -56,7 +57,7 @@ pub enum Error {
	/// IO Error while validating hash.
	IO(io::Error),
	/// Error during fetch.
	Fetch(FetchError),
	Fetch(fetch::Error),
}

#[cfg(test)]
@ -76,8 +77,8 @@ impl PartialEq for Error {
	}
}

impl From<FetchError> for Error {
	fn from(error: FetchError) -> Self {
impl From<fetch::Error> for Error {
	fn from(error: fetch::Error) -> Self {
		Error::Fetch(error)
	}
}
@ -88,14 +89,9 @@ impl From<io::Error> for Error {
	}
}

fn validate_hash(path: PathBuf, hash: H256, result: Result<Response, FetchError>) -> Result<PathBuf, Error> {
	let response = result?;
	if !response.is_success() {
		return Err(Error::InvalidStatus);
	}

fn validate_hash(path: PathBuf, hash: H256, body: fetch::BodyReader) -> Result<PathBuf, Error> {
	// Read the response
	let mut reader = io::BufReader::new(response);
	let mut reader = io::BufReader::new(body);
	let mut writer = io::BufWriter::new(fs::File::create(&path)?);
	io::copy(&mut reader, &mut writer)?;
	writer.flush()?;
@ -111,24 +107,19 @@ fn validate_hash(path: PathBuf, hash: H256, result: Result<Response, FetchError>
}

/// Default Hash-fetching client using on-chain contract to resolve hashes to URLs.
pub struct Client<F: Fetch + 'static = FetchClient> {
pub struct Client<F: Fetch + 'static = fetch::Client> {
	pool: CpuPool,
	contract: URLHintContract,
	fetch: F,
	remote: Remote,
	random_path: Arc<Fn() -> PathBuf + Sync + Send>,
}

impl Client {
	/// Creates new instance of the `Client` given on-chain contract client and task runner.
	pub fn new(contract: Arc<ContractClient>, remote: Remote) -> Self {
		Client::with_fetch(contract, FetchClient::new().unwrap(), remote)
	}
}

impl<F: Fetch + 'static> Client<F> {
	/// Creates new instance of the `Client` given on-chain contract client, fetch service and task runner.
	pub fn with_fetch(contract: Arc<ContractClient>, fetch: F, remote: Remote) -> Self {
	pub fn with_fetch(contract: Arc<ContractClient>, pool: CpuPool, fetch: F, remote: Remote) -> Self {
		Client {
			pool,
			contract: URLHintContract::new(contract),
			fetch: fetch,
			remote: remote,
@ -138,11 +129,12 @@ impl<F: Fetch + 'static> Client<F> {
}

impl<F: Fetch + 'static> HashFetch for Client<F> {
	fn fetch(&self, hash: H256, on_done: Box<Fn(Result<PathBuf, Error>) + Send>) {
	fn fetch(&self, hash: H256, abort: fetch::Abort, on_done: Box<Fn(Result<PathBuf, Error>) + Send>) {
		debug!(target: "fetch", "Fetching: {:?}", hash);

		let random_path = self.random_path.clone();
		let remote_fetch = self.fetch.clone();
		let pool = self.pool.clone();
		let future = self.contract.resolve(hash)
			.map_err(|e| { warn!("Error resolving URL: {}", e); Error::NoResolution })
			.and_then(|maybe_url| maybe_url.ok_or(Error::NoResolution))
@ -160,19 +152,26 @@ impl<F: Fetch + 'static> HashFetch for Client<F> {
			.into_future()
			.and_then(move |url| {
				debug!(target: "fetch", "Resolved {:?} to {:?}. Fetching...", hash, url);
				let future = remote_fetch.fetch(&url).then(move |result| {
					debug!(target: "fetch", "Content fetched, validating hash ({:?})", hash);
					let path = random_path();
					let res = validate_hash(path.clone(), hash, result);
					if let Err(ref err) = res {
						trace!(target: "fetch", "Error: {:?}", err);
						// Remove temporary file in case of error
						let _ = fs::remove_file(&path);
					}
					res
				});
				remote_fetch.process(future)
				remote_fetch.fetch(&url, abort).from_err()
			})
			.and_then(move |response| {
				if !response.is_success() {
					Err(Error::InvalidStatus)
				} else {
					Ok(response)
				}
			})
			.and_then(move |response| pool.spawn_fn(move || {
				debug!(target: "fetch", "Content fetched, validating hash ({:?})", hash);
				let path = random_path();
				let res = validate_hash(path.clone(), hash, fetch::BodyReader::new(response));
				if let Err(ref err) = res {
					trace!(target: "fetch", "Error: {:?}", err);
					// Remove temporary file in case of error
					let _ = fs::remove_file(&path);
				}
				res
			}))
			.then(move |res| { on_done(res); Ok(()) as Result<(), ()> });

		self.remote.spawn(future);
@ -193,14 +192,17 @@ fn random_temp_path() -> PathBuf {

#[cfg(test)]
mod tests {
	extern crate hyper;
	use rustc_hex::FromHex;
	use std::sync::{Arc, mpsc};
	use parking_lot::Mutex;
	use futures::future;
	use fetch::{self, Fetch};
	use futures_cpupool::CpuPool;
	use fetch::{self, Fetch, Url};
	use parity_reactor::Remote;
	use urlhint::tests::{FakeRegistrar, URLHINT};
	use super::{Error, Client, HashFetch, random_temp_path};
	use self::hyper::StatusCode;


	#[derive(Clone)]
@ -211,17 +213,13 @@ mod tests {
	impl Fetch for FakeFetch {
		type Result = future::Ok<fetch::Response, fetch::Error>;

		fn new() -> Result<Self, fetch::Error> where Self: Sized {
			Ok(FakeFetch { return_success: true })
		}

		fn fetch_with_abort(&self, url: &str, _abort: fetch::Abort) -> Self::Result {
		fn fetch(&self, url: &str, abort: fetch::Abort) -> Self::Result {
			assert_eq!(url, "https://parity.io/assets/images/ethcore-black-horizontal.png");
			let u = Url::parse(url).unwrap();
			future::ok(if self.return_success {
				let cursor = ::std::io::Cursor::new(b"result");
				fetch::Response::from_reader(cursor)
				fetch::client::Response::new(u, hyper::Response::new().with_body(&b"result"[..]), abort)
			} else {
				fetch::Response::not_found()
				fetch::client::Response::new(u, hyper::Response::new().with_status(StatusCode::NotFound), abort)
			})
		}
	}
@ -240,11 +238,11 @@ mod tests {
		// given
		let contract = Arc::new(FakeRegistrar::new());
		let fetch = FakeFetch { return_success: false };
		let client = Client::with_fetch(contract.clone(), fetch, Remote::new_sync());
		let client = Client::with_fetch(contract.clone(), CpuPool::new(1), fetch, Remote::new_sync());

		// when
		let (tx, rx) = mpsc::channel();
		client.fetch(2.into(), Box::new(move |result| {
		client.fetch(2.into(), Default::default(), Box::new(move |result| {
			tx.send(result).unwrap();
		}));

@ -258,11 +256,11 @@ mod tests {
		// given
		let registrar = Arc::new(registrar());
		let fetch = FakeFetch { return_success: false };
		let client = Client::with_fetch(registrar.clone(), fetch, Remote::new_sync());
		let client = Client::with_fetch(registrar.clone(), CpuPool::new(1), fetch, Remote::new_sync());

		// when
		let (tx, rx) = mpsc::channel();
		client.fetch(2.into(), Box::new(move |result| {
		client.fetch(2.into(), Default::default(), Box::new(move |result| {
			tx.send(result).unwrap();
		}));

@ -276,14 +274,14 @@ mod tests {
		// given
		let registrar = Arc::new(registrar());
		let fetch = FakeFetch { return_success: true };
		let mut client = Client::with_fetch(registrar.clone(), fetch, Remote::new_sync());
		let mut client = Client::with_fetch(registrar.clone(), CpuPool::new(1), fetch, Remote::new_sync());
		let path = random_temp_path();
		let path2 = path.clone();
		client.random_path = Arc::new(move || path2.clone());

		// when
		let (tx, rx) = mpsc::channel();
		client.fetch(2.into(), Box::new(move |result| {
		client.fetch(2.into(), Default::default(), Box::new(move |result| {
			tx.send(result).unwrap();
		}));

@ -299,16 +297,17 @@ mod tests {
		// given
		let registrar = Arc::new(registrar());
		let fetch = FakeFetch { return_success: true };
		let client = Client::with_fetch(registrar.clone(), fetch, Remote::new_sync());
		let client = Client::with_fetch(registrar.clone(), CpuPool::new(1), fetch, Remote::new_sync());

		// when
		let (tx, rx) = mpsc::channel();
		client.fetch("0x06b0a4f426f6713234b2d4b2468640bc4e0bb72657a920ad24c5087153c593c8".into(), Box::new(move |result| {
			tx.send(result).unwrap();
		}));
		client.fetch("0x06b0a4f426f6713234b2d4b2468640bc4e0bb72657a920ad24c5087153c593c8".into(),
			Default::default(),
			Box::new(move |result| { tx.send(result).unwrap(); }));

		// then
		let result = rx.recv().unwrap();
		assert!(result.is_ok(), "Should return path, got: {:?}", result);
	}
}
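The reworked pipeline above streams the response body to a temporary file, validates its hash on a `CpuPool` worker, and removes the partial file when validation fails. A simplified standalone sketch of that validate-or-clean-up flow, with a stand-in `checksum` in place of keccak (all names illustrative):

use std::fs;
use std::io::{self, Write};
use std::path::PathBuf;

// Stand-in digest; the real code hashes the file with keccak.
fn checksum(bytes: &[u8]) -> u64 {
	bytes.iter().fold(0u64, |acc, &b| acc.wrapping_mul(31).wrapping_add(b as u64))
}

// Write the body to `path`, verify the digest, delete the file on mismatch.
fn fetch_to_path(path: PathBuf, body: &[u8], expected: u64) -> io::Result<PathBuf> {
	let mut file = fs::File::create(&path)?;
	file.write_all(body)?;
	if checksum(body) != expected {
		let _ = fs::remove_file(&path); // drop the partial/invalid download
		return Err(io::Error::new(io::ErrorKind::InvalidData, "hash mismatch"));
	}
	Ok(path)
}

fn main() -> io::Result<()> {
	let path = std::env::temp_dir().join("fetch-demo");
	let body = b"result";
	let ok = fetch_to_path(path.clone(), body, checksum(body))?;
	println!("validated download at {:?}", ok);
	fs::remove_file(ok)
}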
@ -25,6 +25,7 @@ extern crate ethabi;
extern crate ethcore_bytes as bytes;
extern crate ethereum_types;
extern crate futures;
extern crate futures_cpupool;
extern crate keccak_hash as hash;
extern crate mime;
extern crate mime_guess;
@ -46,3 +47,4 @@ mod client;
pub mod urlhint;

pub use client::{HashFetch, Client, Error};
pub use fetch::Abort;
@ -113,6 +113,9 @@ pub struct Params {
	/// See main EthashParams docs.
	#[serde(rename="maxCodeSize")]
	pub max_code_size: Option<Uint>,
	/// See main EthashParams docs.
	#[serde(rename="maxCodeSizeTransition")]
	pub max_code_size_transition: Option<Uint>,
	/// Transaction permission contract address.
	#[serde(rename="transactionPermissionContract")]
	pub transaction_permission_contract: Option<Address>,
@ -462,7 +462,7 @@
	<key>OVERWRITE_PERMISSIONS</key>
	<false/>
	<key>VERSION</key>
	<string>1.10.0</string>
	<string>1.10.5</string>
</dict>
<key>UUID</key>
<string>2DCD5B81-7BAF-4DA1-9251-6274B089FD36</string>
@ -3,6 +3,4 @@
test -f /usr/local/libexec/uninstall-parity.sh && /usr/local/libexec/uninstall-parity.sh || true
killall -9 parity && sleep 5
su $USER -c "open /Applications/Parity\ Ethereum.app"
sleep 5
su $USER -c "open http://127.0.0.1:8180/"
exit 0
@ -10,7 +10,7 @@
!define DESCRIPTION "Fast, light, robust Ethereum implementation"
!define VERSIONMAJOR 1
!define VERSIONMINOR 10
!define VERSIONBUILD 0
!define VERSIONBUILD 5
!define ARGS ""
!define FIRST_START_ARGS "--mode=passive ui"
@ -94,7 +94,7 @@ fn new(n: NewAccount) -> Result<String, String> {
	let secret_store = Box::new(secret_store(dir, Some(n.iterations))?);
	let acc_provider = AccountProvider::new(secret_store, AccountProviderSettings::default());
	let new_account = acc_provider.new_account(&password).map_err(|e| format!("Could not create new account: {}", e))?;
	Ok(format!("0x{:?}", new_account))
	Ok(format!("0x{:x}", new_account))
}

fn list(list_cmd: ListAccounts) -> Result<String, String> {
@ -103,7 +103,7 @@ fn list(list_cmd: ListAccounts) -> Result<String, String> {
	let acc_provider = AccountProvider::new(secret_store, AccountProviderSettings::default());
	let accounts = acc_provider.accounts().map_err(|e| format!("{}", e))?;
	let result = accounts.into_iter()
		.map(|a| format!("0x{:?}", a))
		.map(|a| format!("0x{:x}", a))
		.collect::<Vec<String>>()
		.join("\n");
@ -384,6 +384,10 @@ usage! {
	"--no-serve-light",
	"Disable serving of light peers.",

	ARG arg_warp_barrier: (Option<u64>) = None, or |c: &Config| c.network.as_ref()?.warp_barrier.clone(),
	"--warp-barrier=[NUM]",
	"When warp enabled never attempt regular sync before warping to block NUM.",

	ARG arg_port: (u16) = 30303u16, or |c: &Config| c.network.as_ref()?.port.clone(),
	"--port=[PORT]",
	"Override the port on which the node should listen.",
@ -478,7 +482,7 @@ usage! {
	"--ws-apis=[APIS]",
	"Specify the APIs available through the WebSockets interface. APIS is a comma-delimited list of API names. Possible names are web3, eth, pubsub, net, personal, parity, parity_set, traces, rpc, parity_accounts, pubsub, parity_pubsub, shh, shh_pubsub, signer, secretstore.",

	ARG arg_ws_origins: (String) = "chrome-extension://*,moz-extension://*", or |c: &Config| c.websockets.as_ref()?.origins.as_ref().map(|vec| vec.join(",")),
	ARG arg_ws_origins: (String) = "parity://*,chrome-extension://*,moz-extension://*", or |c: &Config| c.websockets.as_ref()?.origins.as_ref().map(|vec| vec.join(",")),
	"--ws-origins=[URL]",
	"Specify Origin header values allowed to connect. Special options: \"all\", \"none\".",

@ -1034,6 +1038,7 @@ struct Ui {
#[serde(deny_unknown_fields)]
struct Network {
	warp: Option<bool>,
	warp_barrier: Option<u64>,
	port: Option<u16>,
	min_peers: Option<u16>,
	max_peers: Option<u16>,
@ -1613,6 +1618,7 @@ mod tests {
			flag_geth: false,
			flag_testnet: false,
			flag_import_geth_keys: false,
			arg_warp_barrier: None,
			arg_datadir: None,
			arg_networkid: None,
			arg_peers: None,
@ -1717,6 +1723,7 @@ mod tests {
			}),
			network: Some(Network {
				warp: Some(false),
				warp_barrier: None,
				port: None,
				min_peers: Some(10),
				max_peers: Some(20),
@ -143,7 +143,7 @@ impl Configuration {
		if self.args.cmd_signer_new_token {
			Cmd::SignerToken(ws_conf, ui_conf, logger_config.clone())
		} else if self.args.cmd_signer_sign {
			let pwfile = self.args.arg_password.first().map(|pwfile| {
			let pwfile = self.accounts_config()?.password_files.first().map(|pwfile| {
				PathBuf::from(pwfile)
			});
			Cmd::SignerSign {
@ -180,7 +180,7 @@ impl Configuration {
				iterations: self.args.arg_keys_iterations,
				path: dirs.keys,
				spec: spec,
				password_file: self.args.arg_password.first().map(|x| x.to_owned()),
				password_file: self.accounts_config()?.password_files.first().map(|x| x.to_owned()),
			};
			AccountCmd::New(new_acc)
		} else if self.args.cmd_account_list {
@ -214,8 +214,8 @@ impl Configuration {
				iterations: self.args.arg_keys_iterations,
				path: dirs.keys,
				spec: spec,
				wallet_path: self.args.arg_wallet_import_path.unwrap().clone(),
				password_file: self.args.arg_password.first().map(|x| x.to_owned()),
				wallet_path: self.args.arg_wallet_import_path.clone().unwrap(),
				password_file: self.accounts_config()?.password_files.first().map(|x| x.to_owned()),
			};
			Cmd::ImportPresaleWallet(presale_cmd)
		} else if self.args.cmd_import {
@ -356,6 +356,7 @@ impl Configuration {
				wal: wal,
				vm_type: vm_type,
				warp_sync: warp_sync,
				warp_barrier: self.args.arg_warp_barrier,
				public_node: public_node,
				geth_compatibility: geth_compatibility,
				net_settings: self.network_settings()?,
@ -438,7 +439,7 @@ impl Configuration {
		LogConfig {
			mode: self.args.arg_logging.clone(),
			color: !self.args.flag_no_color && !cfg!(windows),
			file: self.args.arg_log_file.clone(),
			file: self.args.arg_log_file.as_ref().map(|log_file| replace_home(&self.directories().base, log_file)),
		}
	}

@ -487,7 +488,7 @@ impl Configuration {
			iterations: self.args.arg_keys_iterations,
			refresh_time: self.args.arg_accounts_refresh,
			testnet: self.args.flag_testnet,
			password_files: self.args.arg_password.clone(),
			password_files: self.args.arg_password.iter().map(|s| replace_home(&self.directories().base, s)).collect(),
			unlocked_accounts: to_addresses(&self.args.arg_unlock)?,
			enable_hardware_wallets: !self.args.flag_no_hardware_wallets,
			enable_fast_unlock: self.args.flag_fast_unlock,
@ -560,11 +561,13 @@ impl Configuration {
	}

	fn ui_config(&self) -> UiConfiguration {
		let ui = self.ui_enabled();
		UiConfiguration {
			enabled: self.ui_enabled(),
			enabled: ui.enabled,
			interface: self.ui_interface(),
			port: self.ui_port(),
			hosts: self.ui_hosts(),
			info_page_only: ui.info_page_only,
		}
	}

@ -703,8 +706,10 @@ impl Configuration {

		match self.args.arg_reserved_peers {
			Some(ref path) => {
				let path = replace_home(&self.directories().base, path);

				let mut buffer = String::new();
				let mut node_file = File::open(path).map_err(|e| format!("Error opening reserved nodes file: {}", e))?;
				let mut node_file = File::open(&path).map_err(|e| format!("Error opening reserved nodes file: {}", e))?;
				node_file.read_to_string(&mut buffer).map_err(|_| "Error reading reserved node file")?;
				let lines = buffer.lines().map(|s| s.trim().to_owned()).filter(|s| !s.is_empty() && !s.starts_with("#")).collect::<Vec<_>>();

@ -891,6 +896,12 @@ impl Configuration {
		let ui = self.ui_config();
		let http = self.http_config()?;

		let support_token_api =
			// never enabled for public node
			!self.args.flag_public_node
			// enabled when not unlocking unless the ui is forced
			&& (self.args.arg_unlock.is_none() || ui.enabled);

		let conf = WsConfiguration {
			enabled: self.ws_enabled(),
			interface: self.ws_interface(),
@ -899,7 +910,7 @@
			hosts: self.ws_hosts(),
			origins: self.ws_origins(),
			signer_path: self.directories().signer.into(),
			support_token_api: !self.args.flag_public_node,
			support_token_api,
			ui_address: ui.address(),
			dapps_address: http.address(),
		};
@ -939,6 +950,7 @@ impl Configuration {
				_ => return Err("Invalid value for `--releases-track`. See `--help` for more information.".into()),
			},
			path: default_hypervisor_path(),
			max_size: 128 * 1024 * 1024,
		})
	}

@ -1104,16 +1116,22 @@ impl Configuration {
		})
	}

	fn ui_enabled(&self) -> bool {
	fn ui_enabled(&self) -> UiEnabled {
		if self.args.flag_force_ui {
			return true;
			return UiEnabled {
				enabled: true,
				info_page_only: false,
			};
		}

		let ui_disabled = self.args.arg_unlock.is_some() ||
			self.args.flag_geth ||
			self.args.flag_no_ui;

		!ui_disabled && cfg!(feature = "ui-enabled")
		return UiEnabled {
			enabled: (self.args.cmd_ui || !ui_disabled) && cfg!(feature = "ui-enabled"),
			info_page_only: !self.args.cmd_ui,
		}
	}

	fn verifier_settings(&self) -> VerifierSettings {
@ -1134,6 +1152,12 @@ impl Configuration {
	}
}

#[derive(Debug, PartialEq, Eq, Clone, Copy)]
struct UiEnabled {
	pub enabled: bool,
	pub info_page_only: bool,
}

#[cfg(test)]
mod tests {
	use std::io::Write;
@ -1335,7 +1359,7 @@ mod tests {
			interface: "127.0.0.1".into(),
			port: 8546,
			apis: ApiSet::UnsafeContext,
			origins: Some(vec!["chrome-extension://*".into(), "moz-extension://*".into()]),
			origins: Some(vec!["parity://*".into(),"chrome-extension://*".into(), "moz-extension://*".into()]),
			hosts: Some(vec![]),
			signer_path: expected.into(),
			ui_address: Some("127.0.0.1:8180".into()),
@ -1346,6 +1370,7 @@ mod tests {
			interface: "127.0.0.1".into(),
			port: 8180,
			hosts: Some(vec![]),
			info_page_only: true,
		}, LogConfig {
			color: true,
			mode: None,
@ -1381,6 +1406,7 @@ mod tests {
			network_id: None,
			public_node: false,
			warp_sync: true,
			warp_barrier: None,
			acc_conf: Default::default(),
			gas_pricer_conf: Default::default(),
			miner_extras: Default::default(),
@ -1389,7 +1415,8 @@ mod tests {
				require_consensus: true,
				filter: UpdateFilter::Critical,
				track: ReleaseTrack::Unknown,
				path: default_hypervisor_path()
				path: default_hypervisor_path(),
				max_size: 128 * 1024 * 1024,
			},
			mode: Default::default(),
			tracing: Default::default(),
@ -1459,9 +1486,30 @@ mod tests {
		let conf3 = parse(&["parity", "--auto-update=xxx"]);

		// then
		assert_eq!(conf0.update_policy().unwrap(), UpdatePolicy{enable_downloading: true, require_consensus: true, filter: UpdateFilter::Critical, track: ReleaseTrack::Testing, path: default_hypervisor_path()});
		assert_eq!(conf1.update_policy().unwrap(), UpdatePolicy{enable_downloading: true, require_consensus: false, filter: UpdateFilter::All, track: ReleaseTrack::Unknown, path: default_hypervisor_path()});
		assert_eq!(conf2.update_policy().unwrap(), UpdatePolicy{enable_downloading: false, require_consensus: true, filter: UpdateFilter::All, track: ReleaseTrack::Beta, path: default_hypervisor_path()});
		assert_eq!(conf0.update_policy().unwrap(), UpdatePolicy {
			enable_downloading: true,
			require_consensus: true,
			filter: UpdateFilter::Critical,
			track: ReleaseTrack::Testing,
			path: default_hypervisor_path(),
			max_size: 128 * 1024 * 1024,
		});
		assert_eq!(conf1.update_policy().unwrap(), UpdatePolicy {
			enable_downloading: true,
			require_consensus: false,
			filter: UpdateFilter::All,
			track: ReleaseTrack::Unknown,
			path: default_hypervisor_path(),
			max_size: 128 * 1024 * 1024,
		});
		assert_eq!(conf2.update_policy().unwrap(), UpdatePolicy {
			enable_downloading: false,
			require_consensus: true,
			filter: UpdateFilter::All,
			track: ReleaseTrack::Beta,
			path: default_hypervisor_path(),
			max_size: 128 * 1024 * 1024,
		});
		assert!(conf3.update_policy().is_err());
	}

@ -1570,10 +1618,26 @@ mod tests {
		// when
		let conf0 = parse(&["parity", "--geth"]);
		let conf1 = parse(&["parity", "--geth", "--force-ui"]);
		let conf2 = parse(&["parity", "--geth", "ui"]);
		let conf3 = parse(&["parity"]);

		// then
		assert_eq!(conf0.ui_enabled(), false);
		assert_eq!(conf1.ui_enabled(), true);
		assert_eq!(conf0.ui_enabled(), UiEnabled {
			enabled: false,
			info_page_only: true,
		});
		assert_eq!(conf1.ui_enabled(), UiEnabled {
			enabled: true,
			info_page_only: false,
		});
		assert_eq!(conf2.ui_enabled(), UiEnabled {
			enabled: true,
			info_page_only: false,
		});
		assert_eq!(conf3.ui_enabled(), UiEnabled {
			enabled: true,
			info_page_only: true,
		});
	}

	#[test]
@ -1584,7 +1648,10 @@ mod tests {
		let conf0 = parse(&["parity", "--unlock", "0x0"]);

		// then
		assert_eq!(conf0.ui_enabled(), false);
		assert_eq!(conf0.ui_enabled(), UiEnabled {
			enabled: false,
			info_page_only: true,
		});
	}

	#[test]
@ -1596,6 +1663,8 @@ mod tests {
		let conf1 = parse(&["parity", "--ui-path=signer", "--ui-no-validation"]);
		let conf2 = parse(&["parity", "--ui-path=signer", "--ui-port", "3123"]);
		let conf3 = parse(&["parity", "--ui-path=signer", "--ui-interface", "test"]);
		let conf4 = parse(&["parity", "--ui-path=signer", "--force-ui"]);
		let conf5 = parse(&["parity", "--ui-path=signer", "ui"]);

		// then
		assert_eq!(conf0.directories().signer, "signer".to_owned());
@ -1604,33 +1673,60 @@ mod tests {
			interface: "127.0.0.1".into(),
			port: 8180,
			hosts: Some(vec![]),
			info_page_only: true,
		});
		assert!(conf0.ws_config().unwrap().hosts.is_some());

		assert!(conf1.ws_config().unwrap().hosts.is_some());
		assert_eq!(conf1.ws_config().unwrap().origins, None);
		assert_eq!(conf1.directories().signer, "signer".to_owned());
		assert_eq!(conf1.ui_config(), UiConfiguration {
			enabled: true,
			interface: "127.0.0.1".into(),
			port: 8180,
			hosts: Some(vec![]),
			info_page_only: true,
		});
		assert_eq!(conf1.dapps_config().extra_embed_on, vec![("127.0.0.1".to_owned(), 3000)]);
		assert_eq!(conf1.ws_config().unwrap().origins, None);

		assert!(conf2.ws_config().unwrap().hosts.is_some());
		assert_eq!(conf2.directories().signer, "signer".to_owned());
		assert_eq!(conf2.ui_config(), UiConfiguration {
			enabled: true,
			interface: "127.0.0.1".into(),
			port: 3123,
			hosts: Some(vec![]),
			info_page_only: true,
		});
		assert!(conf2.ws_config().unwrap().hosts.is_some());

		assert!(conf3.ws_config().unwrap().hosts.is_some());
		assert_eq!(conf3.directories().signer, "signer".to_owned());
		assert_eq!(conf3.ui_config(), UiConfiguration {
			enabled: true,
			interface: "test".into(),
			port: 8180,
			hosts: Some(vec![]),
			info_page_only: true,
		});

		assert!(conf4.ws_config().unwrap().hosts.is_some());
		assert_eq!(conf4.directories().signer, "signer".to_owned());
		assert_eq!(conf4.ui_config(), UiConfiguration {
			enabled: true,
			interface: "127.0.0.1".into(),
			port: 8180,
			hosts: Some(vec![]),
			info_page_only: false,
		});

		assert!(conf5.ws_config().unwrap().hosts.is_some());
		assert_eq!(conf5.directories().signer, "signer".to_owned());
		assert_eq!(conf5.ui_config(), UiConfiguration {
			enabled: true,
			interface: "127.0.0.1".into(),
			port: 8180,
			hosts: Some(vec![]),
			info_page_only: false,
		});
		assert!(conf3.ws_config().unwrap().hosts.is_some());
	}

	#[test]
@ -23,6 +23,7 @@ use dir::helpers::replace_home;
use ethcore::client::{Client, BlockChainClient, BlockId};
use ethsync::LightSync;
use futures::{Future, future, IntoFuture};
use futures_cpupool::CpuPool;
use hash_fetch::fetch::Client as FetchClient;
use hash_fetch::urlhint::ContractClient;
use light::client::LightChainClient;
@ -156,8 +157,10 @@ pub struct Dependencies {
	pub sync_status: Arc<SyncStatus>,
	pub contract_client: Arc<ContractClient>,
	pub fetch: FetchClient,
	pub pool: CpuPool,
	pub signer: Arc<SignerService>,
	pub ui_address: Option<(String, u16)>,
	pub info_page_only: bool,
}

pub fn new(configuration: Configuration, deps: Dependencies) -> Result<Option<Middleware>, String> {
@ -249,7 +252,7 @@ mod server {
		let web_proxy_tokens = Arc::new(move |token| signer.web_proxy_access_token_domain(&token));

		Ok(parity_dapps::Middleware::dapps(
			deps.fetch.pool(),
			deps.pool,
			deps.node_health,
			deps.ui_address,
			extra_embed_on,
@ -269,12 +272,13 @@ mod server {
		dapps_domain: &str,
	) -> Result<Middleware, String> {
		Ok(parity_dapps::Middleware::ui(
			deps.fetch.pool(),
			deps.pool,
			deps.node_health,
			dapps_domain,
			deps.contract_client,
			deps.sync_status,
			deps.fetch,
			deps.info_page_only,
		))
	}
@ -19,7 +19,6 @@
#![warn(missing_docs)]

extern crate ansi_term;
extern crate app_dirs;
extern crate ctrlc;
extern crate docopt;
#[macro_use]
@ -25,7 +25,7 @@ use migrations;
/// Database is assumed to be at default version, when no version file is found.
const DEFAULT_VERSION: u32 = 5;
/// Current version of database models.
const CURRENT_VERSION: u32 = 13;
const CURRENT_VERSION: u32 = 12;
/// First version of the consolidated database.
const CONSOLIDATION_VERSION: u32 = 9;
/// Defines how many items are migrated to the new version of database at once.
@ -136,7 +136,6 @@ fn consolidated_database_migrations(compaction_profile: &CompactionProfile) -> R
	let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
	manager.add_migration(migrations::TO_V11).map_err(|_| Error::MigrationImpossible)?;
	manager.add_migration(migrations::TO_V12).map_err(|_| Error::MigrationImpossible)?;
	manager.add_migration(migrations::ToV13::default()).map_err(|_| Error::MigrationImpossible)?;
	Ok(manager)
}
@ -17,6 +17,7 @@
use std::{str, fs, fmt};
use std::time::Duration;
use ethereum_types::{U256, Address};
use futures_cpupool::CpuPool;
use parity_version::version_data;
use journaldb::Algorithm;
use ethcore::spec::{Spec, SpecParams};
@ -240,7 +241,7 @@ impl Default for GasPricerConfig {
}

impl GasPricerConfig {
	pub fn to_gas_pricer(&self, fetch: FetchClient) -> GasPricer {
	pub fn to_gas_pricer(&self, fetch: FetchClient, p: CpuPool) -> GasPricer {
		match *self {
			GasPricerConfig::Fixed(u) => GasPricer::Fixed(u),
			GasPricerConfig::Calibrated { usd_per_tx, recalibration_period, .. } => {
@ -249,7 +250,8 @@ impl GasPricerConfig {
					usd_per_tx: usd_per_tx,
					recalibration_period: recalibration_period,
				},
				fetch
				fetch,
				p,
			)
		}
	}
@ -74,6 +74,7 @@ pub struct UiConfiguration {
	pub interface: String,
	pub port: u16,
	pub hosts: Option<Vec<String>>,
	pub info_page_only: bool,
}

impl UiConfiguration {
@ -110,10 +111,11 @@ impl From<UiConfiguration> for HttpConfiguration {
impl Default for UiConfiguration {
	fn default() -> Self {
		UiConfiguration {
			enabled: true && cfg!(feature = "ui-enabled"),
			enabled: cfg!(feature = "ui-enabled"),
			port: 8180,
			interface: "127.0.0.1".into(),
			hosts: Some(vec![]),
			info_page_only: true,
		}
	}
}
@ -162,7 +164,7 @@ impl Default for WsConfiguration {
			interface: "127.0.0.1".into(),
			port: 8546,
			apis: ApiSet::UnsafeContext,
			origins: Some(vec!["chrome-extension://*".into(), "moz-extension://*".into()]),
			origins: Some(vec!["parity://*".into(),"chrome-extension://*".into(), "moz-extension://*".into()]),
			hosts: Some(Vec::new()),
			signer_path: replace_home(&data_dir, "$BASE/signer").into(),
			support_token_api: true,
@ -227,7 +229,7 @@ pub fn new_ws<D: rpc_apis::Dependencies>(
	let allowed_hosts = into_domains(with_domain(conf.hosts, domain, &Some(url.clone().into()), &None));

	let signer_path;
	let path = match conf.support_token_api && conf.ui_address.is_some() {
	let path = match conf.support_token_api {
		true => {
			signer_path = ::signer::codes_path(&conf.signer_path);
			Some(signer_path.as_path())
@ -28,6 +28,7 @@ use ethcore::miner::Miner;
use ethcore::snapshot::SnapshotService;
use ethcore_logger::RotatingLogger;
use ethsync::{ManageNetwork, SyncProvider, LightSync};
use futures_cpupool::CpuPool;
use hash_fetch::fetch::Client as FetchClient;
use jsonrpc_core::{self as core, MetaIoHandler};
use light::client::LightChainClient;
@ -225,6 +226,7 @@ pub struct FullDependencies {
	pub dapps_address: Option<Host>,
	pub ws_address: Option<Host>,
	pub fetch: FetchClient,
	pub pool: CpuPool,
	pub remote: parity_reactor::Remote,
	pub whisper_rpc: Option<::whisper::RpcFactory>,
	pub gas_price_percentile: usize,
@ -253,7 +255,7 @@ impl FullDependencies {
			}
		}

		let nonces = Arc::new(Mutex::new(dispatch::Reservations::with_pool(self.fetch.pool())));
		let nonces = Arc::new(Mutex::new(dispatch::Reservations::with_pool(self.pool.clone())));
		let dispatcher = FullDispatcher::new(
			self.client.clone(),
			self.miner.clone(),
@ -355,6 +357,7 @@ impl FullDependencies {
					&self.net_service,
					self.dapps_service.clone(),
					self.fetch.clone(),
					self.pool.clone(),
				).to_delegate())
			},
			Api::Traces => {
@ -430,6 +433,7 @@ pub struct LightDependencies<T> {
	pub dapps_address: Option<Host>,
	pub ws_address: Option<Host>,
	pub fetch: FetchClient,
	pub pool: CpuPool,
	pub geth_compatibility: bool,
	pub remote: parity_reactor::Remote,
	pub whisper_rpc: Option<::whisper::RpcFactory>,
@ -451,7 +455,7 @@ impl<C: LightChainClient + 'static> LightDependencies<C> {
			self.on_demand.clone(),
			self.cache.clone(),
			self.transaction_queue.clone(),
			Arc::new(Mutex::new(dispatch::Reservations::with_pool(self.fetch.pool()))),
			Arc::new(Mutex::new(dispatch::Reservations::with_pool(self.pool.clone()))),
			self.gas_price_percentile,
		);

@ -564,6 +568,7 @@ impl<C: LightChainClient + 'static> LightDependencies<C> {
					self.sync.clone(),
					self.dapps_service.clone(),
					self.fetch.clone(),
					self.pool.clone(),
				).to_delegate())
			},
			Api::Traces => {
@ -20,7 +20,7 @@ use std::time::{Duration, Instant};
use std::thread;
use std::net::{TcpListener};

use ansi_term::Colour;
use ansi_term::{Colour, Style};
use ctrlc::CtrlC;
use ethcore::account_provider::{AccountProvider, AccountProviderSettings};
use ethcore::client::{Client, Mode, DatabaseCompactionProfile, VMType, BlockChainClient};
@ -34,8 +34,8 @@ use ethcore::verification::queue::VerifierSettings;
use ethcore_logger::{Config as LogConfig, RotatingLogger};
use ethsync::{self, SyncConfig};
use fdlimit::raise_fd_limit;
use hash_fetch::fetch::{Fetch, Client as FetchClient};
use hash_fetch;
use futures_cpupool::CpuPool;
use hash_fetch::{self, fetch};
use informant::{Informant, LightNodeInformantData, FullNodeInformantData};
use journaldb::Algorithm;
use light::Cache as LightDataCache;
@ -99,6 +99,7 @@ pub struct RunCmd {
	pub net_conf: ethsync::NetworkConfiguration,
	pub network_id: Option<u64>,
	pub warp_sync: bool,
	pub warp_barrier: Option<u64>,
	pub public_node: bool,
	pub acc_conf: AccountsConfig,
	pub gas_pricer_conf: GasPricerConfig,
@ -294,8 +295,10 @@ fn execute_light_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger
	// start the network.
	light_sync.start_network();

	let cpu_pool = CpuPool::new(4);

	// fetch service
	let fetch = FetchClient::new().map_err(|e| format!("Error starting fetch client: {:?}", e))?;
	let fetch = fetch::Client::new().map_err(|e| format!("Error starting fetch client: {:?}", e))?;
	let passwords = passwords_from_files(&cmd.acc_conf.password_files)?;

	// prepare account provider
@ -303,7 +306,7 @@ fn execute_light_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger
	let rpc_stats = Arc::new(informant::RpcStats::default());

	// the dapps server
	let signer_service = Arc::new(signer::new_service(&cmd.ws_conf, &cmd.ui_conf, &cmd.logger_config));
	let signer_service = Arc::new(signer::new_service(&cmd.ws_conf, &cmd.logger_config));
	let (node_health, dapps_deps) = {
		let contract_client = ::dapps::LightRegistrar {
			client: client.clone(),
@ -328,7 +331,7 @@ fn execute_light_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger
		let sync_status = Arc::new(LightSyncStatus(light_sync.clone()));
		let node_health = node_health::NodeHealth::new(
			sync_status.clone(),
			node_health::TimeChecker::new(&cmd.ntp_servers, fetch.pool()),
			node_health::TimeChecker::new(&cmd.ntp_servers, cpu_pool.clone()),
			event_loop.remote(),
		);

@ -337,8 +340,10 @@ fn execute_light_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger
			node_health,
			contract_client: Arc::new(contract_client),
			fetch: fetch.clone(),
			pool: cpu_pool.clone(),
			signer: signer_service.clone(),
			ui_address: cmd.ui_conf.redirection_address(),
			info_page_only: cmd.ui_conf.info_page_only,
		})
	};

@ -363,6 +368,7 @@ fn execute_light_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger
		dapps_address: cmd.dapps_conf.address(cmd.http_conf.address()),
		ws_address: cmd.ws_conf.address(),
		fetch: fetch,
		pool: cpu_pool.clone(),
		geth_compatibility: cmd.geth_compatibility,
		remote: event_loop.remote(),
		whisper_rpc: whisper_factory,
@ -493,7 +499,7 @@ pub fn execute_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>)
	}

	sync_config.fork_block = spec.fork_block();
	let mut warp_sync = cmd.warp_sync;
	let mut warp_sync = spec.engine.supports_warp() && cmd.warp_sync;
	if warp_sync {
		// Logging is not initialized yet, so we print directly to stderr
		if fat_db {
@ -507,7 +513,11 @@ pub fn execute_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>)
			warp_sync = false;
		}
	}
	sync_config.warp_sync = spec.engine.supports_warp() && warp_sync;
	sync_config.warp_sync = match (warp_sync, cmd.warp_barrier) {
		(true, Some(block)) => ethsync::WarpSync::OnlyAndAfter(block),
		(true, _) => ethsync::WarpSync::Enabled,
		_ => ethsync::WarpSync::Disabled,
	};
	sync_config.download_old_blocks = cmd.download_old_blocks;
	sync_config.serve_light = cmd.serve_light;

@ -516,12 +526,14 @@ pub fn execute_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>)
	// prepare account provider
	let account_provider = Arc::new(prepare_account_provider(&cmd.spec, &cmd.dirs, &spec.data_dir, cmd.acc_conf, &passwords)?);

	let cpu_pool = CpuPool::new(4);

	// fetch service
	let fetch = FetchClient::new().map_err(|e| format!("Error starting fetch client: {:?}", e))?;
	let fetch = fetch::Client::new().map_err(|e| format!("Error starting fetch client: {:?}", e))?;

	// create miner
	let initial_min_gas_price = cmd.gas_pricer_conf.initial_min();
	let miner = Miner::new(cmd.miner_options, cmd.gas_pricer_conf.to_gas_pricer(fetch.clone()), &spec, Some(account_provider.clone()));
	let miner = Miner::new(cmd.miner_options, cmd.gas_pricer_conf.to_gas_pricer(fetch.clone(), cpu_pool.clone()), &spec, Some(account_provider.clone()));
	miner.set_author(cmd.miner_extras.author);
	miner.set_gas_floor_target(cmd.miner_extras.gas_floor_target);
	miner.set_gas_ceil_target(cmd.miner_extras.gas_ceil_target);
@ -680,15 +692,12 @@ pub fn execute_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>)
	let contract_client = Arc::new(::dapps::FullRegistrar::new(client.clone()));

	// the updater service
	let mut updater_fetch = fetch.clone();
	// parity binaries should be smaller than 128MB
	updater_fetch.set_limit(Some(128 * 1024 * 1024));

	let updater_fetch = fetch.clone();
	let updater = Updater::new(
		Arc::downgrade(&(service.client() as Arc<BlockChainClient>)),
		Arc::downgrade(&sync_provider),
		update_policy,
		hash_fetch::Client::with_fetch(contract_client.clone(), updater_fetch, event_loop.remote())
		hash_fetch::Client::with_fetch(contract_client.clone(), cpu_pool.clone(), updater_fetch, event_loop.remote())
	);
	service.add_notify(updater.clone());

@ -699,7 +708,7 @@ pub fn execute_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>)
		false => Some(account_provider.clone())
	};

	let signer_service = Arc::new(signer::new_service(&cmd.ws_conf, &cmd.ui_conf, &cmd.logger_config));
	let signer_service = Arc::new(signer::new_service(&cmd.ws_conf, &cmd.logger_config));

	// the dapps server
	let (node_health, dapps_deps) = {
@ -724,7 +733,7 @@ pub fn execute_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>)
		let sync_status = Arc::new(SyncStatus(sync, client, net_conf));
		let node_health = node_health::NodeHealth::new(
			sync_status.clone(),
			node_health::TimeChecker::new(&cmd.ntp_servers, fetch.pool()),
			node_health::TimeChecker::new(&cmd.ntp_servers, cpu_pool.clone()),
			event_loop.remote(),
		);
		(node_health.clone(), dapps::Dependencies {
@ -732,8 +741,10 @@ pub fn execute_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>)
			node_health,
			contract_client,
			fetch: fetch.clone(),
			pool: cpu_pool.clone(),
			signer: signer_service.clone(),
			ui_address: cmd.ui_conf.redirection_address(),
			info_page_only: cmd.ui_conf.info_page_only,
		})
	};
	let dapps_middleware = dapps::new(cmd.dapps_conf.clone(), dapps_deps.clone())?;
@ -759,6 +770,7 @@ pub fn execute_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>)
		dapps_address: cmd.dapps_conf.address(cmd.http_conf.address()),
		ws_address: cmd.ws_conf.address(),
		fetch: fetch.clone(),
		pool: cpu_pool.clone(),
		remote: event_loop.remote(),
		whisper_rpc: whisper_factory,
		gas_price_percentile: cmd.gas_price_percentile,
@ -873,6 +885,11 @@ pub fn execute_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>)
}

pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> Result<(bool, Option<String>), String> {
	if cmd.ui_conf.enabled && !cmd.ui_conf.info_page_only {
		warn!("{}", Style::new().bold().paint("Parity browser interface is deprecated. It's going to be removed in the next version, use standalone Parity UI instead."));
		warn!("{}", Style::new().bold().paint("Standalone Parity UI: https://github.com/Parity-JS/shell/releases"));
	}

	if cmd.ui && cmd.dapps_conf.enabled {
		// Check if Parity is already running
		let addr = format!("{}:{}", cmd.ui_conf.interface, cmd.ui_conf.port);
@ -32,10 +32,10 @@ pub struct NewToken {
	pub message: String,
}

pub fn new_service(ws_conf: &rpc::WsConfiguration, ui_conf: &rpc::UiConfiguration, logger_config: &LogConfig) -> rpc_apis::SignerService {
	let signer_path = ws_conf.signer_path.clone();
pub fn new_service(ws_conf: &rpc::WsConfiguration, logger_config: &LogConfig) -> rpc_apis::SignerService {
	let logger_config_color = logger_config.color;
	let signer_enabled = ui_conf.enabled;
	let signer_path = ws_conf.signer_path.clone();
	let signer_enabled = ws_conf.support_token_api;

	rpc_apis::SignerService::new(move || {
		generate_new_token(&signer_path, logger_config_color).map_err(|e| format!("{:?}", e))
@ -56,6 +56,24 @@ pub fn execute(ws_conf: rpc::WsConfiguration, ui_conf: rpc::UiConfiguration, log
pub fn generate_token_and_url(ws_conf: &rpc::WsConfiguration, ui_conf: &rpc::UiConfiguration, logger_config: &LogConfig) -> Result<NewToken, String> {
	let code = generate_new_token(&ws_conf.signer_path, logger_config.color).map_err(|err| format!("Error generating token: {:?}", err))?;
	let auth_url = format!("http://{}:{}/#/auth?token={}", ui_conf.interface, ui_conf.port, code);
	let colored = |s: String| match logger_config.color {
		true => format!("{}", White.bold().paint(s)),
		false => s,
	};

	if !ui_conf.enabled {
		return Ok(NewToken {
			token: code.clone(),
			url: auth_url.clone(),
			message: format!(
				r#"
Generated token:
{}
"#,
				colored(code)
			),
		})
	}

	// And print it to the console
	Ok(NewToken {
@ -67,10 +85,7 @@ Open: {}
to authorize your browser.
Or use the generated token:
{}"#,
			match logger_config.color {
				true => format!("{}", White.bold().paint(auth_url)),
				false => auth_url
			},
			colored(auth_url),
			code
		)
	})
@ -9,8 +9,10 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
fetch = { path = "../util/fetch" }
futures = "0.1"
futures-cpupool = "0.1"
log = "0.3"
serde_json = "1.0"

[dev-dependencies]
hyper = "0.11"
parking_lot = "0.5"
@ -19,6 +19,7 @@
//! A simple client to get the current ETH price using an external API.

extern crate futures;
extern crate futures_cpupool;
extern crate serde_json;

#[macro_use]
@ -29,10 +30,12 @@ pub extern crate fetch;
use std::cmp;
use std::fmt;
use std::io;
use std::io::Read;
use std::str;

use fetch::{Client as FetchClient, Fetch};
use futures::Future;
use futures::{Future, Stream};
use futures::future::{self, Either};
use futures_cpupool::CpuPool;
use serde_json::Value;

/// Current ETH price information.
@ -48,7 +51,7 @@ pub enum Error {
	/// The API returned an unexpected status code.
	StatusCode(&'static str),
	/// The API returned an unexpected status content.
	UnexpectedResponse(String),
	UnexpectedResponse(Option<String>),
	/// There was an error when trying to reach the API.
	Fetch(fetch::Error),
	/// IO error when reading API response.
@ -65,6 +68,7 @@ impl From<fetch::Error> for Error {

/// A client to get the current ETH price using an external API.
pub struct Client<F = FetchClient> {
	pool: CpuPool,
	api_endpoint: String,
	fetch: F,
}
@ -85,23 +89,25 @@ impl<F> cmp::PartialEq for Client<F> {

impl<F: Fetch> Client<F> {
	/// Creates a new instance of the `Client` given a `fetch::Client`.
	pub fn new(fetch: F) -> Client<F> {
	pub fn new(fetch: F, pool: CpuPool) -> Client<F> {
		let api_endpoint = "https://api.etherscan.io/api?module=stats&action=ethprice".to_owned();
		Client { api_endpoint, fetch }
		Client { pool, api_endpoint, fetch }
	}

	/// Gets the current ETH price and calls `set_price` with the result.
	pub fn get<G: Fn(PriceInfo) + Sync + Send + 'static>(&self, set_price: G) {
		self.fetch.process_and_forget(self.fetch.fetch(&self.api_endpoint)
			.map_err(|err| Error::Fetch(err))
			.and_then(move |mut response| {
		let future = self.fetch.fetch(&self.api_endpoint, fetch::Abort::default())
			.from_err()
			.and_then(|response| {
				if !response.is_success() {
					return Err(Error::StatusCode(response.status().canonical_reason().unwrap_or("unknown")));
					let s = Error::StatusCode(response.status().canonical_reason().unwrap_or("unknown"));
					return Either::A(future::err(s));
				}
				let mut result = String::new();
				response.read_to_string(&mut result)?;

				let value: Option<Value> = serde_json::from_str(&result).ok();
				Either::B(response.concat2().from_err())
			})
			.map(move |body| {
				let body_str = str::from_utf8(&body).ok();
				let value: Option<Value> = body_str.and_then(|s| serde_json::from_str(s).ok());

				let ethusd = value
					.as_ref()
@ -114,63 +120,65 @@ impl<F: Fetch> Client<F> {
						set_price(PriceInfo { ethusd });
						Ok(())
					},
					None => Err(Error::UnexpectedResponse(result)),
					None => Err(Error::UnexpectedResponse(body_str.map(From::from))),
				}
			})
			.map_err(|err| {
				warn!("Failed to auto-update latest ETH price: {:?}", err);
				err
			})
		);
			});
		self.pool.spawn(future).forget()
	}
}

#[cfg(test)]
mod test {
	extern crate hyper;
	extern crate parking_lot;

	use self::parking_lot::Mutex;
	use std::sync::Arc;
	use std::sync::atomic::{AtomicBool, Ordering};
	use fetch;
	use fetch::Fetch;
	use futures;
	use futures::future::{Future, FutureResult};
	use fetch::{Fetch, Url};
	use futures_cpupool::CpuPool;
	use futures::future::{self, FutureResult};
	use Client;
	use self::hyper::StatusCode;

	#[derive(Clone)]
	struct FakeFetch(Option<String>, Arc<Mutex<u64>>);

	impl FakeFetch {
		fn new() -> Result<Self, fetch::Error> {
			Ok(FakeFetch(None, Default::default()))
		}
	}

	impl Fetch for FakeFetch {
		type Result = FutureResult<fetch::Response, fetch::Error>;
		fn new() -> Result<Self, fetch::Error> where Self: Sized { Ok(FakeFetch(None, Default::default())) }
		fn fetch_with_abort(&self, url: &str, _abort: fetch::Abort) -> Self::Result {

		fn fetch(&self, url: &str, abort: fetch::Abort) -> Self::Result {
			assert_eq!(url, "https://api.etherscan.io/api?module=stats&action=ethprice");
			let u = Url::parse(url).unwrap();
			let mut val = self.1.lock();
			*val = *val + 1;
			if let Some(ref response) = self.0 {
				let data = ::std::io::Cursor::new(response.clone());
				futures::future::ok(fetch::Response::from_reader(data))
				let r = hyper::Response::new().with_body(response.clone());
				future::ok(fetch::client::Response::new(u, r, abort))
			} else {
				futures::future::ok(fetch::Response::not_found())
				let r = hyper::Response::new().with_status(StatusCode::NotFound);
				future::ok(fetch::client::Response::new(u, r, abort))
			}
		}

		// this guarantees that the calls to price_info::Client::get will block for execution
		fn process_and_forget<F, I, E>(&self, f: F) where
			F: Future<Item=I, Error=E> + Send + 'static,
			I: Send + 'static,
			E: Send + 'static,
		{
			let _ = f.wait();
		}
	}

	fn price_info_ok(response: &str) -> Client<FakeFetch> {
		Client::new(FakeFetch(Some(response.to_owned()), Default::default()))
		Client::new(FakeFetch(Some(response.to_owned()), Default::default()), CpuPool::new(1))
	}

	fn price_info_not_found() -> Client<FakeFetch> {
		Client::new(FakeFetch::new().unwrap())
		Client::new(FakeFetch::new().unwrap(), CpuPool::new(1))
	}

	#[test]
@ -129,9 +129,8 @@ impl LightFetch {
				}
			}
			BlockId::Hash(h) => {
				reqs.push(request::HeaderByHash(h.into()).into());

				let idx = reqs.len();
				reqs.push(request::HeaderByHash(h.into()).into());
				Ok(HeaderRef::Unresolved(idx, h.into()))
			}
			_ => Err(errors::unknown_block()) // latest, earliest, and pending will have all already returned.
@ -21,7 +21,8 @@ use std::io;
use std::sync::Arc;

use ethsync::ManageNetwork;
use fetch::Fetch;
use fetch::{self, Fetch};
use futures_cpupool::CpuPool;
use hash::keccak_buffer;

use jsonrpc_core::{Result, BoxFuture};
@ -36,15 +37,17 @@ pub struct ParitySetClient<F> {
	net: Arc<ManageNetwork>,
	dapps: Option<Arc<DappsService>>,
	fetch: F,
	pool: CpuPool,
}

impl<F: Fetch> ParitySetClient<F> {
	/// Creates new `ParitySetClient` with given `Fetch`.
	pub fn new(net: Arc<ManageNetwork>, dapps: Option<Arc<DappsService>>, fetch: F) -> Self {
	pub fn new(net: Arc<ManageNetwork>, dapps: Option<Arc<DappsService>>, fetch: F, p: CpuPool) -> Self {
		ParitySetClient {
			net: net,
			dapps: dapps,
			fetch: fetch,
			pool: p,
		}
	}
}
@ -125,14 +128,16 @@ impl<F: Fetch> ParitySet for ParitySetClient<F> {
	}

	fn hash_content(&self, url: String) -> BoxFuture<H256> {
		self.fetch.process(self.fetch.fetch(&url).then(move |result| {
		let future = self.fetch.fetch(&url, Default::default()).then(move |result| {
			result
				.map_err(errors::fetch)
				.and_then(|response| {
					keccak_buffer(&mut io::BufReader::new(response)).map_err(errors::fetch)
				.and_then(move |response| {
					let mut reader = io::BufReader::new(fetch::BodyReader::new(response));
					keccak_buffer(&mut reader).map_err(errors::fetch)
				})
				.map(Into::into)
		}))
		});
		Box::new(self.pool.spawn(future))
	}

	fn dapps_refresh(&self) -> Result<bool> {
@ -23,6 +23,7 @@ use ethcore::client::MiningBlockChainClient;
use ethcore::mode::Mode;
use ethsync::ManageNetwork;
use fetch::{self, Fetch};
use futures_cpupool::CpuPool;
use hash::keccak_buffer;
use updater::{Service as UpdateService};

@ -41,6 +42,7 @@ pub struct ParitySetClient<C, M, U, F = fetch::Client> {
	net: Arc<ManageNetwork>,
	dapps: Option<Arc<DappsService>>,
	fetch: F,
	pool: CpuPool,
	eip86_transition: u64,
}

@ -55,6 +57,7 @@ impl<C, M, U, F> ParitySetClient<C, M, U, F>
		net: &Arc<ManageNetwork>,
		dapps: Option<Arc<DappsService>>,
		fetch: F,
		pool: CpuPool,
	) -> Self {
		ParitySetClient {
			client: client.clone(),
@ -63,6 +66,7 @@ impl<C, M, U, F> ParitySetClient<C, M, U, F>
			net: net.clone(),
			dapps: dapps,
			fetch: fetch,
			pool: pool,
			eip86_transition: client.eip86_transition(),
		}
	}
@ -166,14 +170,16 @@ impl<C, M, U, F> ParitySet for ParitySetClient<C, M, U, F> where
	}

	fn hash_content(&self, url: String) -> BoxFuture<H256> {
		self.fetch.process(self.fetch.fetch(&url).then(move |result| {
		let future = self.fetch.fetch(&url, Default::default()).then(move |result| {
			result
				.map_err(errors::fetch)
				.and_then(|response| {
					keccak_buffer(&mut io::BufReader::new(response)).map_err(errors::fetch)
				.and_then(move |response| {
					let mut reader = io::BufReader::new(fetch::BodyReader::new(response));
					keccak_buffer(&mut reader).map_err(errors::fetch)
				})
				.map(Into::into)
		}))
		});
		Box::new(self.pool.spawn(future))
	}

	fn dapps_refresh(&self) -> Result<bool> {
@ -230,6 +230,18 @@ fn eth_get_block() {
	assert_eq!(tester.handler.handle_request_sync(req_block).unwrap(), res_block);
}

#[test]
fn eth_get_block_by_hash() {
	let chain = extract_chain!("BlockchainTests/bcGasPricerTest/RPC_API_Test");
	let tester = EthTester::from_chain(&chain);

	// We're looking for block number 4 from "RPC_API_Test_Frontier"
	let req_block = r#"{"method":"eth_getBlockByHash","params":["0x9c9bdab4cb53fd834e790b13545597f026494d42112e84c0aca9dd6bcc545295",false],"id":1,"jsonrpc":"2.0"}"#;

	let res_block = r#"{"jsonrpc":"2.0","result":{"author":"0x8888f1f195afa192cfee860698584c030f4c9db1","difficulty":"0x200c0","extraData":"0x","gasLimit":"0x1dd8112","gasUsed":"0x5458","hash":"0x9c9bdab4cb53fd834e790b13545597f026494d42112e84c0aca9dd6bcc545295","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x8888f1f195afa192cfee860698584c030f4c9db1","mixHash":"0xaddea8d25bb0f955fa6c1d58d74ab8a3fec99d37943e2a261e3b12f97d6bff7c","nonce":"0x8e18bed16d5a88da","number":"0x4","parentHash":"0x2cbf4fc930c5b4c87598f43fc8eb26dccdab2f58a7d0d3ca92ec60a5444a330e","receiptsRoot":"0x7ed8026cf72ed0e98e6fd53ab406e51ffd34397d9da0052494ff41376fda7b5f","sealFields":["0xa0addea8d25bb0f955fa6c1d58d74ab8a3fec99d37943e2a261e3b12f97d6bff7c","0x888e18bed16d5a88da"],"sha3Uncles":"0x75cc08a7cb2cf8081446659fecb2633fb6b922d26edd59bd2272b1f5cae1c78b","size":"0x661","stateRoot":"0x68805721294e365020aca15ed56c360d9dc2cf03cbeff84c9b84b8aed023bfb5","timestamp":"0x59d662ff","totalDifficulty":"0xa0180","transactions":["0xb094b9dc356dbb8b256402c6d5709288066ad6a372c90c9c516f14277545fd58"],"transactionsRoot":"0x97a593d8d7e15b57f5c6bb25bc6c325463ef99f874bc08a78656c3ab5cb23262","uncles":["0xa1e9c9ecd2af999e0723aae1dc55dd9789ca618e0b34badcc8ac7d9a3dad3af2","0x81d429b6b6635214a2b0f976cc4b2ed49808140d6bede50129bc10d22ac9249e"]},"id":1}"#;
	assert_eq!(tester.handler.handle_request_sync(req_block).unwrap(), res_block);
}

// a frontier-like test with an expanded gas limit and balance on known account.
const TRANSACTION_COUNT_SPEC: &'static [u8] = br#"{
	"name": "Frontier (Test)",
@ -16,9 +16,10 @@

//! Test implementation of fetch client.

use std::{io, thread};
use std::thread;
use jsonrpc_core::futures::{self, Future};
use fetch::{self, Fetch};
use fetch::{self, Fetch, Url};
use hyper;

/// Test implementation of fetcher. Will always return the same file.
#[derive(Default, Clone)]
@ -27,15 +28,12 @@ pub struct TestFetch;
impl Fetch for TestFetch {
	type Result = Box<Future<Item = fetch::Response, Error = fetch::Error> + Send + 'static>;

	fn new() -> Result<Self, fetch::Error> where Self: Sized {
		Ok(TestFetch)
	}

	fn fetch_with_abort(&self, _url: &str, _abort: fetch::Abort) -> Self::Result {
	fn fetch(&self, url: &str, abort: fetch::Abort) -> Self::Result {
		let u = Url::parse(url).unwrap();
		let (tx, rx) = futures::oneshot();
		thread::spawn(move || {
			let cursor = io::Cursor::new(b"Some content");
			tx.send(fetch::Response::from_reader(cursor)).unwrap();
			let r = hyper::Response::new().with_body(&b"Some content"[..]);
			tx.send(fetch::Response::new(u, r, abort)).unwrap();
		});

		Box::new(rx.map_err(|_| fetch::Error::Aborted))
@ -22,6 +22,7 @@ use ethereum_types::{U256, Address};
use ethcore::miner::MinerService;
use ethcore::client::TestBlockChainClient;
use ethsync::ManageNetwork;
use futures_cpupool::CpuPool;

use jsonrpc_core::IoHandler;
use v1::{ParitySet, ParitySetClient};
@ -53,7 +54,8 @@ fn parity_set_client(
	net: &Arc<TestManageNetwork>,
) -> TestParitySetClient {
	let dapps_service = Arc::new(TestDappsService);
	ParitySetClient::new(client, miner, updater, &(net.clone() as Arc<ManageNetwork>), Some(dapps_service), TestFetch::default())
	let pool = CpuPool::new(1);
	ParitySetClient::new(client, miner, updater, &(net.clone() as Arc<ManageNetwork>), Some(dapps_service), TestFetch::default(), pool)
}

#[test]
@ -55,7 +55,13 @@ macro_rules! impl_uint {

		impl fmt::LowerHex for $name {
			fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
				write!(f, "{:#x}", self.0)
				// TODO: remove this once updated to new version of primitives
				// including https://github.com/paritytech/primitives/pull/33
				// replace with `::core::fmt::LowerHex::fmt(self.0, f)`
				if f.alternate() {
					write!(f, "0x")?;
				}
				write!(f, "{:x}", self.0)
			}
		}

@ -102,19 +108,19 @@ impl_uint!(U64, u64, 1);

impl serde::Serialize for U128 {
	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
		serializer.serialize_str(&format!("0x{:x}", self.0))
		serializer.serialize_str(&format!("{:#x}", self))
	}
}

impl serde::Serialize for U256 {
	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
		serializer.serialize_str(&format!("0x{:x}", self.0))
		serializer.serialize_str(&format!("{:#x}", self))
	}
}

impl serde::Serialize for U64 {
	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
		serializer.serialize_str(&format!("0x{:x}", self.0))
		serializer.serialize_str(&format!("{:#x}", self))
	}
}
@ -48,6 +48,12 @@ set_env_win () {
	echo "@ signtool sign /f "\%"1 /p "\%"2 /tr http://timestamp.comodoca.com /du https://parity.io "\%"3" > sign.cmd
}
build () {
	if [[ "windows" = $IDENT ]]
	then
		# This is a nasty hack till we figure out the proper cargo caching strategy
		echo "Remove index"
		rm -rf cargo/registry/index/*.
	fi
	echo "Build parity:"
	cargo build --target $PLATFORM --features final --release
	echo "Build evmbin:"
@ -303,11 +309,12 @@ case $BUILD_PLATFORM in
	x86_64-unknown-snap-gnu)
		ARC="amd64"
		EXT="snap"
		apt update
		apt install -y expect zip rhash
		snapcraft clean
		echo "Prepare snapcraft.yaml for build on Gitlab CI in Docker image"
		sed -i 's/git/'"$VER"'/g' snap/snapcraft.yaml
		if [[ "$CI_BUILD_REF_NAME" = "beta" || "$VER" == *1.9* ]];
		if [[ "$CI_BUILD_REF_NAME" = "stable" || "$VER" == *1.10* ]];
		then
			sed -i -e 's/grade: devel/grade: stable/' snap/snapcraft.yaml;
		fi