diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 9d82928cf..591807e24 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -23,12 +23,12 @@ linux-stable: script: - cargo build --release $CARGOFLAGS - strip target/release/parity - - md5sum target/release/parity >> parity.md5 + - md5sum target/release/parity > parity.md5 - sh scripts/deb-build.sh amd64 - cp target/release/parity deb/usr/bin/parity - export VER=$(grep -m 1 version Cargo.toml | awk '{print $3}' | tr -d '"' | tr -d "\n") - dpkg-deb -b deb "parity_"$VER"_amd64.deb" - - md5sum "parity_"$VER"_amd64.deb" >> "parity_"$VER"_amd64.deb.md5" + - md5sum "parity_"$VER"_amd64.deb" > "parity_"$VER"_amd64.deb.md5" - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu/parity --body target/release/parity @@ -93,7 +93,7 @@ linux-centos: - export CC="gcc" - cargo build --release $CARGOFLAGS - strip target/release/parity - - md5sum target/release/parity >> parity.md5 + - md5sum target/release/parity > parity.md5 - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/parity --body target/release/parity @@ -118,12 +118,12 @@ linux-i686: - export HOST_CXX=g++ - cargo build --target i686-unknown-linux-gnu --release $CARGOFLAGS - strip target/i686-unknown-linux-gnu/release/parity - - md5sum target/i686-unknown-linux-gnu/release/parity >> parity.md5 + - md5sum target/i686-unknown-linux-gnu/release/parity > parity.md5 - sh scripts/deb-build.sh i386 - cp target/i686-unknown-linux-gnu/release/parity deb/usr/bin/parity - export VER=$(grep -m 1 version Cargo.toml | awk '{print $3}' | tr -d '"' | tr -d "\n") - dpkg-deb -b deb "parity_"$VER"_i386.deb" - - md5sum "parity_"$VER"_i386.deb" >> "parity_"$VER"_i386.deb.md5" + - md5sum "parity_"$VER"_i386.deb" > "parity_"$VER"_i386.deb.md5" - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/i686-unknown-linux-gnu/parity --body target/i686-unknown-linux-gnu/release/parity @@ -158,12 +158,12 @@ linux-armv7: - cat .cargo/config - cargo build --target armv7-unknown-linux-gnueabihf --release $CARGOFLAGS - arm-linux-gnueabihf-strip target/armv7-unknown-linux-gnueabihf/release/parity - - md5sum target/armv7-unknown-linux-gnueabihf/release/parity >> parity.md5 + - md5sum target/armv7-unknown-linux-gnueabihf/release/parity > parity.md5 - sh scripts/deb-build.sh armhf - cp target/armv7-unknown-linux-gnueabihf/release/parity deb/usr/bin/parity - export VER=$(grep -m 1 version Cargo.toml | awk '{print $3}' | tr -d '"' | tr -d "\n") - dpkg-deb -b deb "parity_"$VER"_armhf.deb" - - md5sum "parity_"$VER"_armhf.deb" >> "parity_"$VER"_armhf.deb.md5" + - md5sum "parity_"$VER"_armhf.deb" > "parity_"$VER"_armhf.deb.md5" - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/armv7-unknown-linux-gnueabihf/parity --body target/armv7-unknown-linux-gnueabihf/release/parity @@ -198,12 +198,12 @@ linux-arm: - cat .cargo/config - cargo build --target arm-unknown-linux-gnueabihf --release $CARGOFLAGS - arm-linux-gnueabihf-strip target/arm-unknown-linux-gnueabihf/release/parity - - md5sum target/arm-unknown-linux-gnueabihf/release/parity 
>> parity.md5 + - md5sum target/arm-unknown-linux-gnueabihf/release/parity > parity.md5 - sh scripts/deb-build.sh armhf - cp target/arm-unknown-linux-gnueabihf/release/parity deb/usr/bin/parity - export VER=$(grep -m 1 version Cargo.toml | awk '{print $3}' | tr -d '"' | tr -d "\n") - dpkg-deb -b deb "parity_"$VER"_armhf.deb" - - md5sum "parity_"$VER"_armhf.deb" >> "parity_"$VER"_armhf.deb.md5" + - md5sum "parity_"$VER"_armhf.deb" > "parity_"$VER"_armhf.deb.md5" - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/arm-unknown-linux-gnueabihf/parity --body target/arm-unknown-linux-gnueabihf/release/parity @@ -238,7 +238,7 @@ linux-armv6: - cat .cargo/config - cargo build --target arm-unknown-linux-gnueabi --release $CARGOFLAGS - arm-linux-gnueabi-strip target/arm-unknown-linux-gnueabi/release/parity - - md5sum target/arm-unknown-linux-gnueabi/release/parity >> parity.md5 + - md5sum target/arm-unknown-linux-gnueabi/release/parity > parity.md5 - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/arm-unknown-linux-gnueabi/parity --body target/arm-unknown-linux-gnueabi/release/parity @@ -271,12 +271,12 @@ linux-aarch64: - cat .cargo/config - cargo build --target aarch64-unknown-linux-gnu --release $CARGOFLAGS - aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/parity - - md5sum target/aarch64-unknown-linux-gnu/release/parity >> parity.md5 + - md5sum target/aarch64-unknown-linux-gnu/release/parity > parity.md5 - sh scripts/deb-build.sh arm64 - cp target/aarch64-unknown-linux-gnu/release/parity deb/usr/bin/parity - export VER=$(grep -m 1 version Cargo.toml | awk '{print $3}' | tr -d '"' | tr -d "\n") - dpkg-deb -b deb "parity_"$VER"_arm64.deb" - - md5sum "parity_"$VER"_arm64.deb" >> "parity_"$VER"_arm64.deb.md5" + - md5sum "parity_"$VER"_arm64.deb" > "parity_"$VER"_arm64.deb.md5" - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/aarch64-unknown-linux-gnu/parity --body target/aarch64-unknown-linux-gnu/release/parity @@ -301,7 +301,7 @@ darwin: script: - cargo build --release $CARGOFLAGS - rm -rf parity.md5 - - md5sum target/release/parity >> parity.md5 + - md5sum target/release/parity > parity.md5 - aws configure set aws_access_key_id $s3_key - aws configure set aws_secret_access_key $s3_secret - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-apple-darwin/parity --body target/release/parity diff --git a/Cargo.lock b/Cargo.lock index 8b11ea606..95be811e2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1249,7 +1249,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/ethcore/js-precompiled.git#5d1134b5f836aafccb648e300817524081ac91e3" +source = "git+https://github.com/ethcore/js-precompiled.git#bf33dd4aabd2adb2178576db5a4d23b8902d39b8" dependencies = [ "parity-dapps-glue 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/dapps/src/apps/fetcher.rs b/dapps/src/apps/fetcher.rs index 041064121..2c1414201 100644 --- a/dapps/src/apps/fetcher.rs +++ b/dapps/src/apps/fetcher.rs @@ -47,7 +47,7 @@ pub struct ContentFetcher { resolver: R, cache: Arc>, sync: Arc, - embeddable_at: Option, + embeddable_on: Option<(String, u16)>, } impl 
Drop for ContentFetcher { @@ -59,7 +59,7 @@ impl Drop for ContentFetcher { impl ContentFetcher { - pub fn new(resolver: R, sync_status: Arc, embeddable_at: Option) -> Self { + pub fn new(resolver: R, sync_status: Arc, embeddable_on: Option<(String, u16)>) -> Self { let mut dapps_path = env::temp_dir(); dapps_path.push(random_filename()); @@ -68,17 +68,17 @@ impl ContentFetcher { resolver: resolver, sync: sync_status, cache: Arc::new(Mutex::new(ContentCache::default())), - embeddable_at: embeddable_at, + embeddable_on: embeddable_on, } } - fn still_syncing(port: Option) -> Box { + fn still_syncing(address: Option<(String, u16)>) -> Box { Box::new(ContentHandler::error( StatusCode::ServiceUnavailable, "Sync In Progress", "Your node is still syncing. We cannot resolve any content before it's fully synced.", Some("Refresh"), - port, + address, )) } @@ -145,7 +145,7 @@ impl ContentFetcher { match content { // Don't serve dapps if we are still syncing (but serve content) Some(URLHintResult::Dapp(_)) if self.sync.is_major_importing() => { - (None, Self::still_syncing(self.embeddable_at)) + (None, Self::still_syncing(self.embeddable_on.clone())) }, Some(URLHintResult::Dapp(dapp)) => { let (handler, fetch_control) = ContentFetcherHandler::new( @@ -155,9 +155,9 @@ impl ContentFetcher { id: content_id.clone(), dapps_path: self.dapps_path.clone(), on_done: Box::new(on_done), - embeddable_at: self.embeddable_at, + embeddable_on: self.embeddable_on.clone(), }, - self.embeddable_at, + self.embeddable_on.clone(), ); (Some(ContentStatus::Fetching(fetch_control)), Box::new(handler) as Box) @@ -172,13 +172,13 @@ impl ContentFetcher { content_path: self.dapps_path.clone(), on_done: Box::new(on_done), }, - self.embeddable_at, + self.embeddable_on.clone(), ); (Some(ContentStatus::Fetching(fetch_control)), Box::new(handler) as Box) }, None if self.sync.is_major_importing() => { - (None, Self::still_syncing(self.embeddable_at)) + (None, Self::still_syncing(self.embeddable_on.clone())) }, None => { // This may happen when sync status changes in between @@ -188,7 +188,7 @@ impl ContentFetcher { "Resource Not Found", "Requested resource was not found.", None, - self.embeddable_at, + self.embeddable_on.clone(), )) as Box) }, } @@ -293,7 +293,7 @@ struct DappInstaller { id: String, dapps_path: PathBuf, on_done: Box) + Send>, - embeddable_at: Option, + embeddable_on: Option<(String, u16)>, } impl DappInstaller { @@ -386,7 +386,7 @@ impl ContentValidator for DappInstaller { try!(manifest_file.write_all(manifest_str.as_bytes())); // Create endpoint - let app = LocalPageEndpoint::new(target, manifest.clone().into(), PageCache::Enabled, self.embeddable_at); + let app = LocalPageEndpoint::new(target, manifest.clone().into(), PageCache::Enabled, self.embeddable_on.clone()); // Return modified app manifest Ok((manifest.id.clone(), app)) diff --git a/dapps/src/apps/fs.rs b/dapps/src/apps/fs.rs index 104b33035..f0b4ddfa8 100644 --- a/dapps/src/apps/fs.rs +++ b/dapps/src/apps/fs.rs @@ -97,12 +97,12 @@ fn read_manifest(name: &str, mut path: PathBuf) -> EndpointInfo { }) } -pub fn local_endpoints(dapps_path: String, signer_port: Option) -> Endpoints { +pub fn local_endpoints(dapps_path: String, signer_address: Option<(String, u16)>) -> Endpoints { let mut pages = Endpoints::new(); for dapp in local_dapps(dapps_path) { pages.insert( dapp.id, - Box::new(LocalPageEndpoint::new(dapp.path, dapp.info, PageCache::Disabled, signer_port)) + Box::new(LocalPageEndpoint::new(dapp.path, dapp.info, PageCache::Disabled, signer_address.clone())) 
); } pages diff --git a/dapps/src/apps/mod.rs b/dapps/src/apps/mod.rs index 11919d6d2..3cb0d8256 100644 --- a/dapps/src/apps/mod.rs +++ b/dapps/src/apps/mod.rs @@ -37,26 +37,26 @@ pub fn utils() -> Box { Box::new(PageEndpoint::with_prefix(parity_ui::App::default(), UTILS_PATH.to_owned())) } -pub fn all_endpoints(dapps_path: String, signer_port: Option) -> Endpoints { +pub fn all_endpoints(dapps_path: String, signer_address: Option<(String, u16)>) -> Endpoints { // fetch fs dapps at first to avoid overwriting builtins - let mut pages = fs::local_endpoints(dapps_path, signer_port); + let mut pages = fs::local_endpoints(dapps_path, signer_address.clone()); // NOTE [ToDr] Dapps will be currently embeded on 8180 - insert::(&mut pages, "ui", Embeddable::Yes(signer_port)); - pages.insert("proxy".into(), ProxyPac::boxed(signer_port)); + insert::(&mut pages, "ui", Embeddable::Yes(signer_address.clone())); + pages.insert("proxy".into(), ProxyPac::boxed(signer_address)); pages } fn insert(pages: &mut Endpoints, id: &str, embed_at: Embeddable) { pages.insert(id.to_owned(), Box::new(match embed_at { - Embeddable::Yes(port) => PageEndpoint::new_safe_to_embed(T::default(), port), + Embeddable::Yes(address) => PageEndpoint::new_safe_to_embed(T::default(), address), Embeddable::No => PageEndpoint::new(T::default()), })); } enum Embeddable { - Yes(Option), + Yes(Option<(String, u16)>), #[allow(dead_code)] No, } diff --git a/dapps/src/handlers/content.rs b/dapps/src/handlers/content.rs index a67fbcd0b..738a9a890 100644 --- a/dapps/src/handlers/content.rs +++ b/dapps/src/handlers/content.rs @@ -32,7 +32,7 @@ pub struct ContentHandler { content: String, mimetype: Mime, write_pos: usize, - safe_to_embed_at_port: Option, + safe_to_embed_on: Option<(String, u16)>, } impl ContentHandler { @@ -44,31 +44,31 @@ impl ContentHandler { Self::new(StatusCode::NotFound, content, mimetype) } - pub fn html(code: StatusCode, content: String, embeddable_at: Option) -> Self { - Self::new_embeddable(code, content, mime!(Text/Html), embeddable_at) + pub fn html(code: StatusCode, content: String, embeddable_on: Option<(String, u16)>) -> Self { + Self::new_embeddable(code, content, mime!(Text/Html), embeddable_on) } - pub fn error(code: StatusCode, title: &str, message: &str, details: Option<&str>, embeddable_at: Option) -> Self { + pub fn error(code: StatusCode, title: &str, message: &str, details: Option<&str>, embeddable_on: Option<(String, u16)>) -> Self { Self::html(code, format!( include_str!("../error_tpl.html"), title=title, message=message, details=details.unwrap_or_else(|| ""), version=version(), - ), embeddable_at) + ), embeddable_on) } pub fn new(code: StatusCode, content: String, mimetype: Mime) -> Self { Self::new_embeddable(code, content, mimetype, None) } - pub fn new_embeddable(code: StatusCode, content: String, mimetype: Mime, embeddable_at: Option) -> Self { + pub fn new_embeddable(code: StatusCode, content: String, mimetype: Mime, embeddable_on: Option<(String, u16)>) -> Self { ContentHandler { code: code, content: content, mimetype: mimetype, write_pos: 0, - safe_to_embed_at_port: embeddable_at, + safe_to_embed_on: embeddable_on, } } } @@ -85,7 +85,7 @@ impl server::Handler for ContentHandler { fn on_response(&mut self, res: &mut server::Response) -> Next { res.set_status(self.code); res.headers_mut().set(header::ContentType(self.mimetype.clone())); - add_security_headers(&mut res.headers_mut(), self.safe_to_embed_at_port.clone()); + add_security_headers(&mut res.headers_mut(), self.safe_to_embed_on.clone()); 
Next::write() } diff --git a/dapps/src/handlers/fetch.rs b/dapps/src/handlers/fetch.rs index 1a0221bff..a34b58fa7 100644 --- a/dapps/src/handlers/fetch.rs +++ b/dapps/src/handlers/fetch.rs @@ -138,7 +138,7 @@ pub struct ContentFetcherHandler { client: Option, installer: H, request_url: Option, - embeddable_at: Option, + embeddable_on: Option<(String, u16)>, } impl Drop for ContentFetcherHandler { @@ -157,7 +157,7 @@ impl ContentFetcherHandler { url: String, control: Control, handler: H, - embeddable_at: Option, + embeddable_on: Option<(String, u16)>, ) -> (Self, Arc) { let fetch_control = Arc::new(FetchControl::default()); @@ -169,7 +169,7 @@ impl ContentFetcherHandler { status: FetchState::NotStarted(url), installer: handler, request_url: None, - embeddable_at: embeddable_at, + embeddable_on: embeddable_on, }; (handler, fetch_control) @@ -208,7 +208,7 @@ impl server::Handler for ContentFetcherHandler< "Unable To Start Dapp Download", "Could not initialize download of the dapp. It might be a problem with the remote server.", Some(&format!("{}", e)), - self.embeddable_at, + self.embeddable_on.clone(), )), } }, @@ -218,7 +218,7 @@ impl server::Handler for ContentFetcherHandler< "Method Not Allowed", "Only GET requests are allowed.", None, - self.embeddable_at, + self.embeddable_on.clone(), )), }) } else { None }; @@ -241,7 +241,7 @@ impl server::Handler for ContentFetcherHandler< "Download Timeout", &format!("Could not fetch content within {} seconds.", FETCH_TIMEOUT), None, - self.embeddable_at, + self.embeddable_on.clone(), ); Self::close_client(&mut self.client); (Some(FetchState::Error(timeout)), Next::write()) @@ -263,7 +263,7 @@ impl server::Handler for ContentFetcherHandler< "Invalid Dapp", "Downloaded bundle does not contain a valid content.", Some(&format!("{:?}", e)), - self.embeddable_at, + self.embeddable_on.clone(), )) }, Ok((id, result)) => { @@ -284,7 +284,7 @@ impl server::Handler for ContentFetcherHandler< "Download Error", "There was an error when fetching the content.", Some(&format!("{:?}", e)), - self.embeddable_at, + self.embeddable_on.clone(), ); (Some(FetchState::Error(error)), Next::write()) }, diff --git a/dapps/src/handlers/mod.rs b/dapps/src/handlers/mod.rs index 3d96e8a40..b575509a5 100644 --- a/dapps/src/handlers/mod.rs +++ b/dapps/src/handlers/mod.rs @@ -30,18 +30,18 @@ pub use self::fetch::{ContentFetcherHandler, ContentValidator, FetchControl}; use url::Url; use hyper::{server, header, net, uri}; -use signer_address; +use address; /// Adds security-related headers to the Response. -pub fn add_security_headers(headers: &mut header::Headers, embeddable_at: Option) { +pub fn add_security_headers(headers: &mut header::Headers, embeddable_on: Option<(String, u16)>) { headers.set_raw("X-XSS-Protection", vec![b"1; mode=block".to_vec()]); headers.set_raw("X-Content-Type-Options", vec![b"nosniff".to_vec()]); // Embedding header: - if let Some(port) = embeddable_at { + if let Some(embeddable_on) = embeddable_on { headers.set_raw( "X-Frame-Options", - vec![format!("ALLOW-FROM http://{}", signer_address(port)).into_bytes()] + vec![format!("ALLOW-FROM http://{}", address(embeddable_on)).into_bytes()] ); } else { // TODO [ToDr] Should we be more strict here (DENY?)? 
diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index 4394d1183..2c9fa33d1 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -112,7 +112,7 @@ pub struct ServerBuilder { handler: Arc, registrar: Arc, sync_status: Arc, - signer_port: Option, + signer_address: Option<(String, u16)>, } impl Extendable for ServerBuilder { @@ -129,7 +129,7 @@ impl ServerBuilder { handler: Arc::new(IoHandler::new()), registrar: registrar, sync_status: Arc::new(|| false), - signer_port: None, + signer_address: None, } } @@ -139,8 +139,8 @@ impl ServerBuilder { } /// Change default signer port. - pub fn with_signer_port(&mut self, signer_port: Option) { - self.signer_port = signer_port; + pub fn with_signer_address(&mut self, signer_address: Option<(String, u16)>) { + self.signer_address = signer_address; } /// Asynchronously start server with no authentication, @@ -152,7 +152,7 @@ impl ServerBuilder { NoAuth, self.handler.clone(), self.dapps_path.clone(), - self.signer_port.clone(), + self.signer_address.clone(), self.registrar.clone(), self.sync_status.clone(), ) @@ -167,7 +167,7 @@ impl ServerBuilder { HttpBasicAuth::single_user(username, password), self.handler.clone(), self.dapps_path.clone(), - self.signer_port.clone(), + self.signer_address.clone(), self.registrar.clone(), self.sync_status.clone(), ) @@ -197,11 +197,11 @@ impl Server { } /// Returns a list of CORS domains for API endpoint. - fn cors_domains(signer_port: Option) -> Vec { - match signer_port { - Some(port) => vec![ + fn cors_domains(signer_address: Option<(String, u16)>) -> Vec { + match signer_address { + Some(signer_address) => vec![ format!("http://{}{}", HOME_PAGE, DAPPS_DOMAIN), - format!("http://{}", signer_address(port)), + format!("http://{}", address(signer_address)), ], None => vec![], } @@ -213,15 +213,15 @@ impl Server { authorization: A, handler: Arc, dapps_path: String, - signer_port: Option, + signer_address: Option<(String, u16)>, registrar: Arc, sync_status: Arc, ) -> Result { let panic_handler = Arc::new(Mutex::new(None)); let authorization = Arc::new(authorization); - let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new(apps::urlhint::URLHintContract::new(registrar), sync_status, signer_port)); - let endpoints = Arc::new(apps::all_endpoints(dapps_path, signer_port.clone())); - let cors_domains = Self::cors_domains(signer_port); + let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new(apps::urlhint::URLHintContract::new(registrar), sync_status, signer_address.clone())); + let endpoints = Arc::new(apps::all_endpoints(dapps_path, signer_address.clone())); + let cors_domains = Self::cors_domains(signer_address.clone()); let special = Arc::new({ let mut special = HashMap::new(); @@ -238,7 +238,7 @@ impl Server { try!(hyper::Server::http(addr)) .handle(move |ctrl| router::Router::new( ctrl, - signer_port.clone(), + signer_address.clone(), content_fetcher.clone(), endpoints.clone(), special.clone(), @@ -302,8 +302,8 @@ pub fn random_filename() -> String { rng.gen_ascii_chars().take(12).collect() } -fn signer_address(port: u16) -> String { - format!("127.0.0.1:{}", port) +fn address(address: (String, u16)) -> String { + format!("{}:{}", address.0, address.1) } #[cfg(test)] @@ -332,7 +332,7 @@ mod util_tests { // when let none = Server::cors_domains(None); - let some = Server::cors_domains(Some(18180)); + let some = Server::cors_domains(Some(("127.0.0.1".into(), 18180))); // then assert_eq!(none, Vec::::new()); diff --git a/dapps/src/page/builtin.rs b/dapps/src/page/builtin.rs index bf5bf088f..40c0e23a6 
100644 --- a/dapps/src/page/builtin.rs +++ b/dapps/src/page/builtin.rs @@ -25,7 +25,7 @@ pub struct PageEndpoint { /// Prefix to strip from the path (when `None` deducted from `app_id`) pub prefix: Option, /// Safe to be loaded in frame by other origin. (use wisely!) - safe_to_embed_at_port: Option, + safe_to_embed_on: Option<(String, u16)>, info: EndpointInfo, } @@ -36,7 +36,7 @@ impl PageEndpoint { PageEndpoint { app: Arc::new(app), prefix: None, - safe_to_embed_at_port: None, + safe_to_embed_on: None, info: EndpointInfo::from(info), } } @@ -49,7 +49,7 @@ impl PageEndpoint { PageEndpoint { app: Arc::new(app), prefix: Some(prefix), - safe_to_embed_at_port: None, + safe_to_embed_on: None, info: EndpointInfo::from(info), } } @@ -57,12 +57,12 @@ impl PageEndpoint { /// Creates new `PageEndpoint` which can be safely used in iframe /// even from different origin. It might be dangerous (clickjacking). /// Use wisely! - pub fn new_safe_to_embed(app: T, port: Option) -> Self { + pub fn new_safe_to_embed(app: T, address: Option<(String, u16)>) -> Self { let info = app.info(); PageEndpoint { app: Arc::new(app), prefix: None, - safe_to_embed_at_port: port, + safe_to_embed_on: address, info: EndpointInfo::from(info), } } @@ -79,9 +79,9 @@ impl Endpoint for PageEndpoint { app: BuiltinDapp::new(self.app.clone()), prefix: self.prefix.clone(), path: path, - file: handler::ServedFile::new(self.safe_to_embed_at_port.clone()), + file: handler::ServedFile::new(self.safe_to_embed_on.clone()), cache: PageCache::Disabled, - safe_to_embed_at_port: self.safe_to_embed_at_port.clone(), + safe_to_embed_on: self.safe_to_embed_on.clone(), }) } } diff --git a/dapps/src/page/handler.rs b/dapps/src/page/handler.rs index f908a69c5..74eabf917 100644 --- a/dapps/src/page/handler.rs +++ b/dapps/src/page/handler.rs @@ -60,13 +60,13 @@ pub enum ServedFile { } impl ServedFile { - pub fn new(embeddable_at: Option) -> Self { + pub fn new(embeddable_on: Option<(String, u16)>) -> Self { ServedFile::Error(ContentHandler::error( StatusCode::NotFound, "404 Not Found", "Requested dapp resource was not found.", None, - embeddable_at, + embeddable_on, )) } } @@ -97,7 +97,7 @@ pub struct PageHandler { /// Requested path. pub path: EndpointPath, /// Flag indicating if the file can be safely embeded (put in iframe). - pub safe_to_embed_at_port: Option, + pub safe_to_embed_on: Option<(String, u16)>, /// Cache settings for this page. 
pub cache: PageCache, } @@ -133,7 +133,7 @@ impl server::Handler for PageHandler { self.app.file(&self.extract_path(url.path())) }, _ => None, - }.map_or_else(|| ServedFile::new(self.safe_to_embed_at_port.clone()), |f| ServedFile::File(f)); + }.map_or_else(|| ServedFile::new(self.safe_to_embed_on.clone()), |f| ServedFile::File(f)); Next::write() } @@ -162,7 +162,7 @@ impl server::Handler for PageHandler { } // Security headers: - add_security_headers(&mut res.headers_mut(), self.safe_to_embed_at_port); + add_security_headers(&mut res.headers_mut(), self.safe_to_embed_on.clone()); Next::write() }, ServedFile::Error(ref mut handler) => { @@ -246,7 +246,7 @@ fn should_extract_path_with_appid() { }, file: ServedFile::new(None), cache: Default::default(), - safe_to_embed_at_port: None, + safe_to_embed_on: None, }; // when diff --git a/dapps/src/page/local.rs b/dapps/src/page/local.rs index aa98a68cd..ec24cac36 100644 --- a/dapps/src/page/local.rs +++ b/dapps/src/page/local.rs @@ -27,17 +27,17 @@ pub struct LocalPageEndpoint { mime: Option, info: Option, cache: PageCache, - embeddable_at: Option, + embeddable_on: Option<(String, u16)>, } impl LocalPageEndpoint { - pub fn new(path: PathBuf, info: EndpointInfo, cache: PageCache, embeddable_at: Option) -> Self { + pub fn new(path: PathBuf, info: EndpointInfo, cache: PageCache, embeddable_on: Option<(String, u16)>) -> Self { LocalPageEndpoint { path: path, mime: None, info: Some(info), cache: cache, - embeddable_at: embeddable_at, + embeddable_on: embeddable_on, } } @@ -47,7 +47,7 @@ impl LocalPageEndpoint { mime: Some(mime), info: None, cache: cache, - embeddable_at: None, + embeddable_on: None, } } @@ -68,7 +68,7 @@ impl Endpoint for LocalPageEndpoint { prefix: None, path: path, file: handler::ServedFile::new(None), - safe_to_embed_at_port: self.embeddable_at, + safe_to_embed_on: self.embeddable_on.clone(), cache: self.cache, }) } else { @@ -77,7 +77,7 @@ impl Endpoint for LocalPageEndpoint { prefix: None, path: path, file: handler::ServedFile::new(None), - safe_to_embed_at_port: self.embeddable_at, + safe_to_embed_on: self.embeddable_on.clone(), cache: self.cache, }) } diff --git a/dapps/src/proxypac.rs b/dapps/src/proxypac.rs index 2d7c4e3ce..88ecb6ab3 100644 --- a/dapps/src/proxypac.rs +++ b/dapps/src/proxypac.rs @@ -19,24 +19,24 @@ use endpoint::{Endpoint, Handler, EndpointPath}; use handlers::ContentHandler; use apps::{HOME_PAGE, DAPPS_DOMAIN}; -use signer_address; +use address; pub struct ProxyPac { - signer_port: Option, + signer_address: Option<(String, u16)>, } impl ProxyPac { - pub fn boxed(signer_port: Option) -> Box { + pub fn boxed(signer_address: Option<(String, u16)>) -> Box { Box::new(ProxyPac { - signer_port: signer_port + signer_address: signer_address }) } } impl Endpoint for ProxyPac { fn to_handler(&self, path: EndpointPath) -> Box { - let signer = self.signer_port - .map(signer_address) + let signer = self.signer_address.clone() + .map(address) .unwrap_or_else(|| format!("{}:{}", path.host, path.port)); let content = format!( diff --git a/dapps/src/router/mod.rs b/dapps/src/router/mod.rs index fe8061ef0..c0a33f2eb 100644 --- a/dapps/src/router/mod.rs +++ b/dapps/src/router/mod.rs @@ -20,7 +20,7 @@ pub mod auth; mod host_validation; -use signer_address; +use address; use std::sync::Arc; use std::collections::HashMap; use url::{Url, Host}; @@ -43,7 +43,7 @@ pub enum SpecialEndpoint { pub struct Router { control: Option, - signer_port: Option, + signer_address: Option<(String, u16)>, endpoints: Arc, fetch: Arc, special: Arc>>, 
@@ -117,22 +117,22 @@ impl server::Handler for Router { "404 Not Found", "Requested content was not found.", None, - self.signer_port, + self.signer_address.clone(), )) }, // Redirect any other GET request to signer. _ if *req.method() == hyper::Method::Get => { - if let Some(port) = self.signer_port { + if let Some(signer_address) = self.signer_address.clone() { trace!(target: "dapps", "Redirecting to signer interface."); - Redirection::boxed(&format!("http://{}", signer_address(port))) + Redirection::boxed(&format!("http://{}", address(signer_address))) } else { trace!(target: "dapps", "Signer disabled, returning 404."); Box::new(ContentHandler::error( StatusCode::NotFound, "404 Not Found", "Your homepage is not available when Trusted Signer is disabled.", - Some("You can still access dapps by writing a correct address, though. Re-enabled Signer to get your homepage back."), - self.signer_port, + Some("You can still access dapps by writing a correct address, though. Re-enable Signer to get your homepage back."), + self.signer_address.clone(), )) } }, @@ -168,7 +168,7 @@ impl server::Handler for Router { impl Router { pub fn new( control: Control, - signer_port: Option, + signer_address: Option<(String, u16)>, content_fetcher: Arc, endpoints: Arc, special: Arc>>, @@ -181,7 +181,7 @@ impl Router { .to_handler(EndpointPath::default()); Router { control: Some(control), - signer_port: signer_port, + signer_address: signer_address, endpoints: endpoints, fetch: content_fetcher, special: special, diff --git a/dapps/src/tests/helpers.rs b/dapps/src/tests/helpers.rs index 1fa2e777a..f7c9e8ba6 100644 --- a/dapps/src/tests/helpers.rs +++ b/dapps/src/tests/helpers.rs @@ -76,7 +76,7 @@ pub fn init_server(hosts: Option>, is_syncing: bool) -> (Server, Arc dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading"); let mut builder = ServerBuilder::new(dapps_path.to_str().unwrap().into(), registrar.clone()); builder.with_sync_status(Arc::new(move || is_syncing)); - builder.with_signer_port(Some(SIGNER_PORT)); + builder.with_signer_address(Some(("127.0.0.1".into(), SIGNER_PORT))); ( builder.start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), hosts).unwrap(), registrar, @@ -89,7 +89,7 @@ pub fn serve_with_auth(user: &str, pass: &str) -> Server { let mut dapps_path = env::temp_dir(); dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading"); let mut builder = ServerBuilder::new(dapps_path.to_str().unwrap().into(), registrar); - builder.with_signer_port(Some(SIGNER_PORT)); + builder.with_signer_address(Some(("127.0.0.1".into(), SIGNER_PORT))); builder.start_basic_auth_http(&"127.0.0.1:0".parse().unwrap(), None, user, pass).unwrap() } diff --git a/devtools/src/http_client.rs b/devtools/src/http_client.rs index acba2989e..2440a7cda 100644 --- a/devtools/src/http_client.rs +++ b/devtools/src/http_client.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use std::thread; use std::time::Duration; use std::io::{Read, Write}; use std::str::{self, Lines}; @@ -42,8 +43,28 @@ pub fn read_block(lines: &mut Lines, all: bool) -> String { block } +fn connect(address: &SocketAddr) -> TcpStream { + let mut retries = 0; + let mut last_error = None; + while retries < 10 { + retries += 1; + + let res = TcpStream::connect(address); + match res { + Ok(stream) => { + return stream; + }, + Err(e) => { + last_error = Some(e); + thread::sleep(Duration::from_millis(retries * 10)); + } + } + } + panic!("Unable to connect to the server. 
Last error: {:?}", last_error); +} + pub fn request(address: &SocketAddr, request: &str) -> Response { - let mut req = TcpStream::connect(address).unwrap(); + let mut req = connect(address); req.set_read_timeout(Some(Duration::from_secs(1))).unwrap(); req.write_all(request.as_bytes()).unwrap(); diff --git a/ethcore/light/Cargo.toml b/ethcore/light/Cargo.toml new file mode 100644 index 000000000..daf141de7 --- /dev/null +++ b/ethcore/light/Cargo.toml @@ -0,0 +1,16 @@ +[package] +description = "Parity LES primitives" +homepage = "https://ethcore.io" +license = "GPL-3.0" +name = "ethcore-light" +version = "1.5.0" +authors = ["Ethcore "] + +[dependencies] +log = "0.3" +ethcore = { path = ".." } +ethcore-util = { path = "../../util" } +ethcore-network = { path = "../../util/network" } +ethcore-io = { path = "../../util/io" } +rlp = { path = "../../util/rlp" } +time = "0.1" \ No newline at end of file diff --git a/ethcore/light/src/client.rs b/ethcore/light/src/client.rs new file mode 100644 index 000000000..e3b5745b2 --- /dev/null +++ b/ethcore/light/src/client.rs @@ -0,0 +1,115 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Light client implementation. Used for raw data queries as well as the header +//! sync. + +use std::sync::Arc; + +use ethcore::engines::Engine; +use ethcore::ids::BlockID; +use ethcore::service::ClientIoMessage; +use ethcore::block_import_error::BlockImportError; +use ethcore::block_status::BlockStatus; +use ethcore::verification::queue::{HeaderQueue, QueueInfo}; +use ethcore::transaction::SignedTransaction; +use ethcore::blockchain_info::BlockChainInfo; + +use io::IoChannel; +use util::hash::H256; +use util::{Bytes, Mutex}; + +use provider::Provider; +use request; + +/// Light client implementation. +pub struct Client { + engine: Arc, + header_queue: HeaderQueue, + message_channel: Mutex>, +} + +impl Client { + /// Import a header as rlp-encoded bytes. + pub fn import_header(&self, bytes: Bytes) -> Result { + let header = ::rlp::decode(&bytes); + + self.header_queue.import(header).map_err(Into::into) + } + + /// Whether the block is already known (but not necessarily part of the canonical chain) + pub fn is_known(&self, _id: BlockID) -> bool { + false + } + + /// Fetch a vector of all pending transactions. + pub fn pending_transactions(&self) -> Vec { + vec![] + } + + /// Inquire about the status of a given block. + pub fn status(&self, _id: BlockID) -> BlockStatus { + BlockStatus::Unknown + } + + /// Get the header queue info. + pub fn queue_info(&self) -> QueueInfo { + self.header_queue.queue_info() + } +} + +// dummy implementation -- may draw from canonical cache further on. 
+impl Provider for Client { + fn chain_info(&self) -> BlockChainInfo { + unimplemented!() + } + + fn reorg_depth(&self, _a: &H256, _b: &H256) -> Option { + None + } + + fn earliest_state(&self) -> Option { + None + } + + fn block_headers(&self, _req: request::Headers) -> Vec { + Vec::new() + } + + fn block_bodies(&self, _req: request::Bodies) -> Vec { + Vec::new() + } + + fn receipts(&self, _req: request::Receipts) -> Vec { + Vec::new() + } + + fn proofs(&self, _req: request::StateProofs) -> Vec { + Vec::new() + } + + fn code(&self, _req: request::ContractCodes) -> Vec { + Vec::new() + } + + fn header_proofs(&self, _req: request::HeaderProofs) -> Vec { + Vec::new() + } + + fn pending_transactions(&self) -> Vec { + Vec::new() + } +} \ No newline at end of file diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs new file mode 100644 index 000000000..07e6833a7 --- /dev/null +++ b/ethcore/light/src/lib.rs @@ -0,0 +1,47 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Light client logic and implementation. +//! +//! A "light" client stores very little chain-related data locally +//! unlike a full node, which stores all blocks, headers, receipts, and more. +//! +//! This enables the client to have a much lower resource footprint in +//! exchange for the cost of having to ask the network for state data +//! while responding to queries. This makes a light client unsuitable for +//! low-latency applications, but perfectly suitable for simple everyday +//! use-cases like sending transactions from a personal account. +//! +//! It starts by performing a header-only sync, verifying random samples +//! of members of the chain to varying degrees. + +// TODO: remove when integrating with parity. +#![allow(dead_code)] + +pub mod client; +pub mod net; +pub mod provider; +pub mod request; + +extern crate ethcore_util as util; +extern crate ethcore_network as network; +extern crate ethcore_io as io; +extern crate ethcore; +extern crate rlp; +extern crate time; + +#[macro_use] +extern crate log; \ No newline at end of file diff --git a/ethcore/light/src/net/buffer_flow.rs b/ethcore/light/src/net/buffer_flow.rs new file mode 100644 index 000000000..b7bd30f82 --- /dev/null +++ b/ethcore/light/src/net/buffer_flow.rs @@ -0,0 +1,264 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! LES buffer flow management. +//! +//! Every request in the LES protocol leads to a reduction +//! of the requester's buffer value as a rate-limiting mechanism. +//! This buffer value will recharge at a set rate. +//! +//! This module provides an interface for configuration of buffer +//! flow costs and recharge rates. + +use request; +use super::packet; +use super::error::Error; + +use rlp::*; +use util::U256; +use time::{Duration, SteadyTime}; + +/// A request cost specification. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Cost(pub U256, pub U256); + +/// Buffer value. +/// +/// Produced and recharged using `FlowParams`. +/// Definitive updates can be made as well -- these will reset the recharge +/// point to the time of the update. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Buffer { + estimate: U256, + recharge_point: SteadyTime, +} + +impl Buffer { + /// Get the current buffer value. + pub fn current(&self) -> U256 { self.estimate.clone() } + + /// Make a definitive update. + /// This will be the value obtained after receiving + /// a response to a request. + pub fn update_to(&mut self, value: U256) { + self.estimate = value; + self.recharge_point = SteadyTime::now(); + } + + /// Attempt to apply the given cost to the buffer. + /// + /// If successful, the cost will be deducted successfully. + /// + /// If unsuccessful, the structure will be unaltered an an + /// error will be produced. + pub fn deduct_cost(&mut self, cost: U256) -> Result<(), Error> { + match cost > self.estimate { + true => Err(Error::BufferEmpty), + false => { + self.estimate = self.estimate - cost; + Ok(()) + } + } + } +} + +/// A cost table, mapping requests to base and per-request costs. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CostTable { + headers: Cost, + bodies: Cost, + receipts: Cost, + state_proofs: Cost, + contract_codes: Cost, + header_proofs: Cost, +} + +impl Default for CostTable { + fn default() -> Self { + // arbitrarily chosen constants. 
+ CostTable { + headers: Cost(100000.into(), 10000.into()), + bodies: Cost(150000.into(), 15000.into()), + receipts: Cost(50000.into(), 5000.into()), + state_proofs: Cost(250000.into(), 25000.into()), + contract_codes: Cost(200000.into(), 20000.into()), + header_proofs: Cost(150000.into(), 15000.into()), + } + } +} + +impl RlpEncodable for CostTable { + fn rlp_append(&self, s: &mut RlpStream) { + fn append_cost(s: &mut RlpStream, msg_id: u8, cost: &Cost) { + s.begin_list(3) + .append(&msg_id) + .append(&cost.0) + .append(&cost.1); + } + + s.begin_list(6); + + append_cost(s, packet::GET_BLOCK_HEADERS, &self.headers); + append_cost(s, packet::GET_BLOCK_BODIES, &self.bodies); + append_cost(s, packet::GET_RECEIPTS, &self.receipts); + append_cost(s, packet::GET_PROOFS, &self.state_proofs); + append_cost(s, packet::GET_CONTRACT_CODES, &self.contract_codes); + append_cost(s, packet::GET_HEADER_PROOFS, &self.header_proofs); + } +} + +impl RlpDecodable for CostTable { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + + let mut headers = None; + let mut bodies = None; + let mut receipts = None; + let mut state_proofs = None; + let mut contract_codes = None; + let mut header_proofs = None; + + for row in rlp.iter() { + let msg_id: u8 = try!(row.val_at(0)); + let cost = { + let base = try!(row.val_at(1)); + let per = try!(row.val_at(2)); + + Cost(base, per) + }; + + match msg_id { + packet::GET_BLOCK_HEADERS => headers = Some(cost), + packet::GET_BLOCK_BODIES => bodies = Some(cost), + packet::GET_RECEIPTS => receipts = Some(cost), + packet::GET_PROOFS => state_proofs = Some(cost), + packet::GET_CONTRACT_CODES => contract_codes = Some(cost), + packet::GET_HEADER_PROOFS => header_proofs = Some(cost), + _ => return Err(DecoderError::Custom("Unrecognized message in cost table")), + } + } + + Ok(CostTable { + headers: try!(headers.ok_or(DecoderError::Custom("No headers cost specified"))), + bodies: try!(bodies.ok_or(DecoderError::Custom("No bodies cost specified"))), + receipts: try!(receipts.ok_or(DecoderError::Custom("No receipts cost specified"))), + state_proofs: try!(state_proofs.ok_or(DecoderError::Custom("No proofs cost specified"))), + contract_codes: try!(contract_codes.ok_or(DecoderError::Custom("No contract codes specified"))), + header_proofs: try!(header_proofs.ok_or(DecoderError::Custom("No header proofs cost specified"))), + }) + } +} + +/// A buffer-flow manager handles costs, recharge, limits +#[derive(Debug, Clone, PartialEq)] +pub struct FlowParams { + costs: CostTable, + limit: U256, + recharge: U256, +} + +impl FlowParams { + /// Create new flow parameters from a request cost table, + /// buffer limit, and (minimum) rate of recharge. + pub fn new(limit: U256, costs: CostTable, recharge: U256) -> Self { + FlowParams { + costs: costs, + limit: limit, + recharge: recharge, + } + } + + /// Get a reference to the buffer limit. + pub fn limit(&self) -> &U256 { &self.limit } + + /// Get a reference to the cost table. + pub fn cost_table(&self) -> &CostTable { &self.costs } + + /// Get a reference to the recharge rate. + pub fn recharge_rate(&self) -> &U256 { &self.recharge } + + /// Compute the actual cost of a request, given the kind of request + /// and number of requests made. 
+ pub fn compute_cost(&self, kind: request::Kind, amount: usize) -> U256 { + let cost = match kind { + request::Kind::Headers => &self.costs.headers, + request::Kind::Bodies => &self.costs.bodies, + request::Kind::Receipts => &self.costs.receipts, + request::Kind::StateProofs => &self.costs.state_proofs, + request::Kind::Codes => &self.costs.contract_codes, + request::Kind::HeaderProofs => &self.costs.header_proofs, + }; + + let amount: U256 = amount.into(); + cost.0 + (amount * cost.1) + } + + /// Create initial buffer parameter. + pub fn create_buffer(&self) -> Buffer { + Buffer { + estimate: self.limit, + recharge_point: SteadyTime::now(), + } + } + + /// Recharge the buffer based on time passed since last + /// update. + pub fn recharge(&self, buf: &mut Buffer) { + let now = SteadyTime::now(); + + // recompute and update only in terms of full seconds elapsed + // in order to keep the estimate as an underestimate. + let elapsed = (now - buf.recharge_point).num_seconds(); + buf.recharge_point = buf.recharge_point + Duration::seconds(elapsed); + + let elapsed: U256 = elapsed.into(); + + buf.estimate = ::std::cmp::min(self.limit, buf.estimate + (elapsed * self.recharge)); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_serialize_cost_table() { + let costs = CostTable::default(); + let serialized = ::rlp::encode(&costs); + + let new_costs: CostTable = ::rlp::decode(&*serialized); + + assert_eq!(costs, new_costs); + } + + #[test] + fn buffer_mechanism() { + use std::thread; + use std::time::Duration; + + let flow_params = FlowParams::new(100.into(), Default::default(), 20.into()); + let mut buffer = flow_params.create_buffer(); + + assert!(buffer.deduct_cost(101.into()).is_err()); + assert!(buffer.deduct_cost(10.into()).is_ok()); + + thread::sleep(Duration::from_secs(1)); + + flow_params.recharge(&mut buffer); + + assert_eq!(buffer.estimate, 100.into()); + } +} \ No newline at end of file diff --git a/ethcore/light/src/net/error.rs b/ethcore/light/src/net/error.rs new file mode 100644 index 000000000..e15bd50d3 --- /dev/null +++ b/ethcore/light/src/net/error.rs @@ -0,0 +1,94 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Defines error types and levels of punishment to use upon +//! encountering. + +use rlp::DecoderError; +use network::NetworkError; + +use std::fmt; + +/// Levels of punishment. +/// +/// Currently just encompasses two different kinds of disconnect and +/// no punishment, but this is where reputation systems might come into play. +// In ascending order +#[derive(Debug, PartialEq, Eq)] +pub enum Punishment { + /// Perform no punishment. + None, + /// Disconnect the peer, but don't prevent them from reconnecting. + Disconnect, + /// Disconnect the peer and prevent them from reconnecting. + Disable, +} + +/// Kinds of errors which can be encountered in the course of LES. 
+#[derive(Debug)] +pub enum Error { + /// An RLP decoding error. + Rlp(DecoderError), + /// A network error. + Network(NetworkError), + /// Out of buffer. + BufferEmpty, + /// Unrecognized packet code. + UnrecognizedPacket(u8), + /// Unexpected handshake. + UnexpectedHandshake, + /// Peer on wrong network (wrong NetworkId or genesis hash) + WrongNetwork, +} + +impl Error { + /// What level of punishment does this error warrant? + pub fn punishment(&self) -> Punishment { + match *self { + Error::Rlp(_) => Punishment::Disable, + Error::Network(_) => Punishment::None, + Error::BufferEmpty => Punishment::Disable, + Error::UnrecognizedPacket(_) => Punishment::Disconnect, + Error::UnexpectedHandshake => Punishment::Disconnect, + Error::WrongNetwork => Punishment::Disable, + } + } +} + +impl From for Error { + fn from(err: DecoderError) -> Self { + Error::Rlp(err) + } +} + +impl From for Error { + fn from(err: NetworkError) -> Self { + Error::Network(err) + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Error::Rlp(ref err) => err.fmt(f), + Error::Network(ref err) => err.fmt(f), + Error::BufferEmpty => write!(f, "Out of buffer"), + Error::UnrecognizedPacket(code) => write!(f, "Unrecognized packet: 0x{:x}", code), + Error::UnexpectedHandshake => write!(f, "Unexpected handshake"), + Error::WrongNetwork => write!(f, "Wrong network"), + } + } +} \ No newline at end of file diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs new file mode 100644 index 000000000..e72ce4bb2 --- /dev/null +++ b/ethcore/light/src/net/mod.rs @@ -0,0 +1,506 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! LES Protocol Version 1 implementation. +//! +//! This uses a "Provider" to answer requests. +//! See https://github.com/ethcore/parity/wiki/Light-Ethereum-Subprotocol-(LES) + +use io::TimerToken; +use network::{NetworkProtocolHandler, NetworkContext, NetworkError, PeerId}; +use rlp::{RlpStream, Stream, UntrustedRlp, View}; +use util::hash::H256; +use util::RwLock; + +use std::collections::{HashMap, HashSet}; +use std::sync::atomic::AtomicUsize; + +use provider::Provider; +use request::{self, Request}; + +use self::buffer_flow::{Buffer, FlowParams}; +use self::error::{Error, Punishment}; +use self::status::{Status, Capabilities}; + +mod buffer_flow; +mod error; +mod status; + +pub use self::status::Announcement; + +const TIMEOUT: TimerToken = 0; +const TIMEOUT_INTERVAL_MS: u64 = 1000; + +// LPV1 +const PROTOCOL_VERSION: u32 = 1; + +// TODO [rob] make configurable. +const PROTOCOL_ID: [u8; 3] = *b"les"; + +// packet ID definitions. +mod packet { + // the status packet. + pub const STATUS: u8 = 0x00; + + // announcement of new block hashes or capabilities. 
+ pub const ANNOUNCE: u8 = 0x01; + + // request and response for block headers + pub const GET_BLOCK_HEADERS: u8 = 0x02; + pub const BLOCK_HEADERS: u8 = 0x03; + + // request and response for block bodies + pub const GET_BLOCK_BODIES: u8 = 0x04; + pub const BLOCK_BODIES: u8 = 0x05; + + // request and response for transaction receipts. + pub const GET_RECEIPTS: u8 = 0x06; + pub const RECEIPTS: u8 = 0x07; + + // request and response for merkle proofs. + pub const GET_PROOFS: u8 = 0x08; + pub const PROOFS: u8 = 0x09; + + // request and response for contract code. + pub const GET_CONTRACT_CODES: u8 = 0x0a; + pub const CONTRACT_CODES: u8 = 0x0b; + + // relay transactions to peers. + pub const SEND_TRANSACTIONS: u8 = 0x0c; + + // request and response for header proofs in a CHT. + pub const GET_HEADER_PROOFS: u8 = 0x0d; + pub const HEADER_PROOFS: u8 = 0x0e; +} + +// A pending peer: one we've sent our status to but +// may not have received one for. +struct PendingPeer { + sent_head: H256, +} + +// data about each peer. +struct Peer { + local_buffer: Buffer, // their buffer relative to us + remote_buffer: Buffer, // our buffer relative to them + current_asking: HashSet, // pending request ids. + status: Status, + capabilities: Capabilities, + remote_flow: FlowParams, + sent_head: H256, // last head we've given them. +} + +/// This is an implementation of the light ethereum network protocol, abstracted +/// over a `Provider` of data and a p2p network. +/// +/// This is simply designed for request-response purposes. Higher level uses +/// of the protocol, such as synchronization, will function as wrappers around +/// this system. +pub struct LightProtocol { + provider: Box, + genesis_hash: H256, + network_id: status::NetworkId, + pending_peers: RwLock>, + peers: RwLock>, + pending_requests: RwLock>, + capabilities: RwLock, + flow_params: FlowParams, // assumed static and same for every peer. + req_id: AtomicUsize, +} + +impl LightProtocol { + /// Make an announcement of new chain head and capabilities to all peers. + /// The announcement is expected to be valid. + pub fn make_announcement(&self, mut announcement: Announcement, io: &NetworkContext) { + let mut reorgs_map = HashMap::new(); + + // calculate reorg info and send packets + for (peer_id, peer_info) in self.peers.write().iter_mut() { + let reorg_depth = reorgs_map.entry(peer_info.sent_head) + .or_insert_with(|| { + match self.provider.reorg_depth(&announcement.head_hash, &peer_info.sent_head) { + Some(depth) => depth, + None => { + // both values will always originate locally -- this means something + // has gone really wrong + debug!(target: "les", "couldn't compute reorganization depth between {:?} and {:?}", + &announcement.head_hash, &peer_info.sent_head); + 0 + } + } + }); + + peer_info.sent_head = announcement.head_hash; + announcement.reorg_depth = *reorg_depth; + + if let Err(e) = io.send(*peer_id, packet::ANNOUNCE, status::write_announcement(&announcement)) { + debug!(target: "les", "Error sending to peer {}: {}", peer_id, e); + } + } + } +} + +impl LightProtocol { + // called when a peer connects. + fn on_connect(&self, peer: &PeerId, io: &NetworkContext) { + let peer = *peer; + + match self.send_status(peer, io) { + Ok(pending_peer) => { + self.pending_peers.write().insert(peer, pending_peer); + } + Err(e) => { + trace!(target: "les", "Error while sending status: {}", e); + io.disconnect_peer(peer); + } + } + } + + // called when a peer disconnects. 
+ fn on_disconnect(&self, peer: PeerId) { + // TODO: reassign all requests assigned to this peer. + self.pending_peers.write().remove(&peer); + self.peers.write().remove(&peer); + } + + // send status to a peer. + fn send_status(&self, peer: PeerId, io: &NetworkContext) -> Result { + let chain_info = self.provider.chain_info(); + + // TODO: could update capabilities here. + + let status = Status { + head_td: chain_info.total_difficulty, + head_hash: chain_info.best_block_hash, + head_num: chain_info.best_block_number, + genesis_hash: chain_info.genesis_hash, + protocol_version: PROTOCOL_VERSION, + network_id: self.network_id, + last_head: None, + }; + + let capabilities = self.capabilities.read().clone(); + let status_packet = status::write_handshake(&status, &capabilities, &self.flow_params); + + try!(io.send(peer, packet::STATUS, status_packet)); + + Ok(PendingPeer { + sent_head: chain_info.best_block_hash, + }) + } + + // Handle status message from peer. + fn status(&self, peer: &PeerId, data: UntrustedRlp) -> Result<(), Error> { + let pending = match self.pending_peers.write().remove(peer) { + Some(pending) => pending, + None => { + return Err(Error::UnexpectedHandshake); + } + }; + + let (status, capabilities, flow_params) = try!(status::parse_handshake(data)); + + trace!(target: "les", "Connected peer with chain head {:?}", (status.head_hash, status.head_num)); + + if (status.network_id, status.genesis_hash) != (self.network_id, self.genesis_hash) { + return Err(Error::WrongNetwork); + } + + self.peers.write().insert(*peer, Peer { + local_buffer: self.flow_params.create_buffer(), + remote_buffer: flow_params.create_buffer(), + current_asking: HashSet::new(), + status: status, + capabilities: capabilities, + remote_flow: flow_params, + sent_head: pending.sent_head, + }); + + Ok(()) + } + + // Handle an announcement. + fn announcement(&self, peer: &PeerId, data: UntrustedRlp) -> Result<(), Error> { + if !self.peers.read().contains_key(peer) { + debug!(target: "les", "Ignoring announcement from unknown peer"); + return Ok(()) + } + + let announcement = try!(status::parse_announcement(data)); + let mut peers = self.peers.write(); + + let peer_info = match peers.get_mut(peer) { + Some(info) => info, + None => return Ok(()), + }; + + // update status. + { + // TODO: punish peer if they've moved backwards. + let status = &mut peer_info.status; + let last_head = status.head_hash; + status.head_hash = announcement.head_hash; + status.head_td = announcement.head_td; + status.head_num = announcement.head_num; + status.last_head = Some((last_head, announcement.reorg_depth)); + } + + // update capabilities. + { + let caps = &mut peer_info.capabilities; + caps.serve_headers = caps.serve_headers || announcement.serve_headers; + caps.serve_state_since = caps.serve_state_since.or(announcement.serve_state_since); + caps.serve_chain_since = caps.serve_chain_since.or(announcement.serve_chain_since); + caps.tx_relay = caps.tx_relay || announcement.tx_relay; + } + + // TODO: notify listeners if new best block. + + Ok(()) + } + + // Handle a request for block headers. 
+ fn get_block_headers(&self, peer: &PeerId, io: &NetworkContext, data: UntrustedRlp) -> Result<(), Error> { + const MAX_HEADERS: usize = 512; + + let mut present_buffer = match self.peers.read().get(peer) { + Some(peer) => peer.local_buffer.clone(), + None => { + debug!(target: "les", "Ignoring announcement from unknown peer"); + return Ok(()) + } + }; + + self.flow_params.recharge(&mut present_buffer); + let req_id: u64 = try!(data.val_at(0)); + + let req = request::Headers { + block: { + let rlp = try!(data.at(1)); + (try!(rlp.val_at(0)), try!(rlp.val_at(1))) + }, + max: ::std::cmp::min(MAX_HEADERS, try!(data.val_at(2))), + skip: try!(data.val_at(3)), + reverse: try!(data.val_at(4)), + }; + + let max_cost = self.flow_params.compute_cost(request::Kind::Headers, req.max); + try!(present_buffer.deduct_cost(max_cost)); + + let response = self.provider.block_headers(req); + let actual_cost = self.flow_params.compute_cost(request::Kind::Headers, response.len()); + + let cur_buffer = match self.peers.write().get_mut(peer) { + Some(peer) => { + self.flow_params.recharge(&mut peer.local_buffer); + try!(peer.local_buffer.deduct_cost(actual_cost)); + peer.local_buffer.current() + } + None => { + debug!(target: "les", "peer disconnected during serving of request."); + return Ok(()) + } + }; + + io.respond(packet::BLOCK_HEADERS, { + let mut stream = RlpStream::new_list(response.len() + 2); + stream.append(&req_id).append(&cur_buffer); + + for header in response { + stream.append_raw(&header, 1); + } + + stream.out() + }).map_err(Into::into) + } + + // Receive a response for block headers. + fn block_headers(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> { + unimplemented!() + } + + // Handle a request for block bodies. + fn get_block_bodies(&self, peer: &PeerId, io: &NetworkContext, data: UntrustedRlp) -> Result<(), Error> { + const MAX_BODIES: usize = 256; + + let mut present_buffer = match self.peers.read().get(peer) { + Some(peer) => peer.local_buffer.clone(), + None => { + debug!(target: "les", "Ignoring announcement from unknown peer"); + return Ok(()) + } + }; + + self.flow_params.recharge(&mut present_buffer); + let req_id: u64 = try!(data.val_at(0)); + + let req = request::Bodies { + block_hashes: try!(data.iter().skip(1).take(MAX_BODIES).map(|x| x.as_val()).collect()) + }; + + let max_cost = self.flow_params.compute_cost(request::Kind::Bodies, req.block_hashes.len()); + try!(present_buffer.deduct_cost(max_cost)); + + let response = self.provider.block_bodies(req); + let response_len = response.iter().filter(|x| &x[..] != &::rlp::EMPTY_LIST_RLP).count(); + let actual_cost = self.flow_params.compute_cost(request::Kind::Bodies, response_len); + + let cur_buffer = match self.peers.write().get_mut(peer) { + Some(peer) => { + self.flow_params.recharge(&mut peer.local_buffer); + try!(peer.local_buffer.deduct_cost(actual_cost)); + peer.local_buffer.current() + } + None => { + debug!(target: "les", "peer disconnected during serving of request."); + return Ok(()) + } + }; + + io.respond(packet::BLOCK_BODIES, { + let mut stream = RlpStream::new_list(response.len() + 2); + stream.append(&req_id).append(&cur_buffer); + + for body in response { + stream.append_raw(&body, 1); + } + + stream.out() + }).map_err(Into::into) + } + + // Receive a response for block bodies. + fn block_bodies(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> { + unimplemented!() + } + + // Handle a request for receipts. 
+ fn get_receipts(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> { + unimplemented!() + } + + // Receive a response for receipts. + fn receipts(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> { + unimplemented!() + } + + // Handle a request for proofs. + fn get_proofs(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> { + unimplemented!() + } + + // Receive a response for proofs. + fn proofs(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> { + unimplemented!() + } + + // Handle a request for contract code. + fn get_contract_code(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> { + unimplemented!() + } + + // Receive a response for contract code. + fn contract_code(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> { + unimplemented!() + } + + // Handle a request for header proofs + fn get_header_proofs(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> { + unimplemented!() + } + + // Receive a response for header proofs + fn header_proofs(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> { + unimplemented!() + } + + // Receive a set of transactions to relay. + fn relay_transactions(&self, _: &PeerId, _: &NetworkContext, _: UntrustedRlp) -> Result<(), Error> { + unimplemented!() + } +} + +impl NetworkProtocolHandler for LightProtocol { + fn initialize(&self, io: &NetworkContext) { + io.register_timer(TIMEOUT, TIMEOUT_INTERVAL_MS).expect("Error registering sync timer."); + } + + fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { + let rlp = UntrustedRlp::new(data); + + // handle the packet + let res = match packet_id { + packet::STATUS => self.status(peer, rlp), + packet::ANNOUNCE => self.announcement(peer, rlp), + + packet::GET_BLOCK_HEADERS => self.get_block_headers(peer, io, rlp), + packet::BLOCK_HEADERS => self.block_headers(peer, io, rlp), + + packet::GET_BLOCK_BODIES => self.get_block_bodies(peer, io, rlp), + packet::BLOCK_BODIES => self.block_bodies(peer, io, rlp), + + packet::GET_RECEIPTS => self.get_receipts(peer, io, rlp), + packet::RECEIPTS => self.receipts(peer, io, rlp), + + packet::GET_PROOFS => self.get_proofs(peer, io, rlp), + packet::PROOFS => self.proofs(peer, io, rlp), + + packet::GET_CONTRACT_CODES => self.get_contract_code(peer, io, rlp), + packet::CONTRACT_CODES => self.contract_code(peer, io, rlp), + + packet::GET_HEADER_PROOFS => self.get_header_proofs(peer, io, rlp), + packet::HEADER_PROOFS => self.header_proofs(peer, io, rlp), + + packet::SEND_TRANSACTIONS => self.relay_transactions(peer, io, rlp), + + other => { + Err(Error::UnrecognizedPacket(other)) + } + }; + + // if something went wrong, figure out how much to punish the peer. + if let Err(e) = res { + match e.punishment() { + Punishment::None => {} + Punishment::Disconnect => { + debug!(target: "les", "Disconnecting peer {}: {}", peer, e); + io.disconnect_peer(*peer) + } + Punishment::Disable => { + debug!(target: "les", "Disabling peer {}: {}", peer, e); + io.disable_peer(*peer) + } + } + } + } + + fn connected(&self, io: &NetworkContext, peer: &PeerId) { + self.on_connect(peer, io); + } + + fn disconnected(&self, _io: &NetworkContext, peer: &PeerId) { + self.on_disconnect(*peer); + } + + fn timeout(&self, _io: &NetworkContext, timer: TimerToken) { + match timer { + TIMEOUT => { + // broadcast transactions to peers. 
+ } + _ => warn!(target: "les", "received timeout on unknown token {}", timer), + } + } +} \ No newline at end of file diff --git a/ethcore/light/src/net/status.rs b/ethcore/light/src/net/status.rs new file mode 100644 index 000000000..5aaea9f3a --- /dev/null +++ b/ethcore/light/src/net/status.rs @@ -0,0 +1,539 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Peer status and capabilities. + +use rlp::{DecoderError, RlpDecodable, RlpEncodable, RlpStream, Stream, UntrustedRlp, View}; +use util::{H256, U256}; + +use super::buffer_flow::FlowParams; + +// recognized handshake/announcement keys. +// unknown keys are to be skipped, known keys have a defined order. +// their string values are defined in the LES spec. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)] +enum Key { + ProtocolVersion, + NetworkId, + HeadTD, + HeadHash, + HeadNum, + GenesisHash, + ServeHeaders, + ServeChainSince, + ServeStateSince, + TxRelay, + BufferLimit, + BufferCostTable, + BufferRechargeRate, +} + +impl Key { + // get the string value of this key. + fn as_str(&self) -> &'static str { + match *self { + Key::ProtocolVersion => "protocolVersion", + Key::NetworkId => "networkId", + Key::HeadTD => "headTd", + Key::HeadHash => "headHash", + Key::HeadNum => "headNum", + Key::GenesisHash => "genesisHash", + Key::ServeHeaders => "serveHeaders", + Key::ServeChainSince => "serveChainSince", + Key::ServeStateSince => "serveStateSince", + Key::TxRelay => "txRelay", + Key::BufferLimit => "flowControl/BL", + Key::BufferCostTable => "flowControl/MRC", + Key::BufferRechargeRate => "flowControl/MRR", + } + } + + // try to parse the key value from a string. + fn from_str(s: &str) -> Option { + match s { + "protocolVersion" => Some(Key::ProtocolVersion), + "networkId" => Some(Key::NetworkId), + "headTd" => Some(Key::HeadTD), + "headHash" => Some(Key::HeadHash), + "headNum" => Some(Key::HeadNum), + "genesisHash" => Some(Key::GenesisHash), + "serveHeaders" => Some(Key::ServeHeaders), + "serveChainSince" => Some(Key::ServeChainSince), + "serveStateSince" => Some(Key::ServeStateSince), + "txRelay" => Some(Key::TxRelay), + "flowControl/BL" => Some(Key::BufferLimit), + "flowControl/MRC" => Some(Key::BufferCostTable), + "flowControl/MRR" => Some(Key::BufferRechargeRate), + _ => None + } + } +} + +/// Network ID structure. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[repr(u32)] +pub enum NetworkId { + /// ID for the mainnet + Mainnet = 1, + /// ID for the testnet + Testnet = 0, +} + +impl NetworkId { + fn from_raw(raw: u32) -> Option { + match raw { + 0 => Some(NetworkId::Testnet), + 1 => Some(NetworkId::Mainnet), + _ => None, + } + } +} + +// helper for decoding key-value pairs in the handshake or an announcement. +struct Parser<'a> { + pos: usize, + rlp: UntrustedRlp<'a>, +} + +impl<'a> Parser<'a> { + // expect a specific next key, and decode the value. 
+ // error on unexpected key or invalid value. + fn expect(&mut self, key: Key) -> Result { + self.expect_raw(key).and_then(|item| item.as_val()) + } + + // expect a specific next key, and get the value's RLP. + // if the key isn't found, the position isn't advanced. + fn expect_raw(&mut self, key: Key) -> Result, DecoderError> { + let pre_pos = self.pos; + if let Some((k, val)) = try!(self.get_next()) { + if k == key { return Ok(val) } + } + + self.pos = pre_pos; + Err(DecoderError::Custom("Missing expected key")) + } + + // get the next key and value RLP. + fn get_next(&mut self) -> Result)>, DecoderError> { + while self.pos < self.rlp.item_count() { + let pair = try!(self.rlp.at(self.pos)); + let k: String = try!(pair.val_at(0)); + + self.pos += 1; + match Key::from_str(&k) { + Some(key) => return Ok(Some((key , try!(pair.at(1))))), + None => continue, + } + } + + Ok(None) + } +} + +// Helper for encoding a key-value pair +fn encode_pair(key: Key, val: &T) -> Vec { + let mut s = RlpStream::new_list(2); + s.append(&key.as_str()).append(val); + s.out() +} + +// Helper for encoding a flag. +fn encode_flag(key: Key) -> Vec { + let mut s = RlpStream::new_list(2); + s.append(&key.as_str()).append_empty_data(); + s.out() +} + +/// A peer status message. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Status { + /// Protocol version. + pub protocol_version: u32, + /// Network id of this peer. + pub network_id: NetworkId, + /// Total difficulty of the head of the chain. + pub head_td: U256, + /// Hash of the best block. + pub head_hash: H256, + /// Number of the best block. + pub head_num: u64, + /// Genesis hash + pub genesis_hash: H256, + /// Last announced chain head and reorg depth to common ancestor. + pub last_head: Option<(H256, u64)>, +} + +/// Peer capabilities. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Capabilities { + /// Whether this peer can serve headers + pub serve_headers: bool, + /// Earliest block number it can serve block/receipt requests for. + pub serve_chain_since: Option, + /// Earliest block number it can serve state requests for. + pub serve_state_since: Option, + /// Whether it can relay transactions to the eth network. 
+ pub tx_relay: bool, +} + +impl Default for Capabilities { + fn default() -> Self { + Capabilities { + serve_headers: true, + serve_chain_since: None, + serve_state_since: None, + tx_relay: false, + } + } +} + +/// Attempt to parse a handshake message into its three parts: +/// - chain status +/// - serving capabilities +/// - buffer flow parameters +pub fn parse_handshake(rlp: UntrustedRlp) -> Result<(Status, Capabilities, FlowParams), DecoderError> { + let mut parser = Parser { + pos: 0, + rlp: rlp, + }; + + let status = Status { + protocol_version: try!(parser.expect(Key::ProtocolVersion)), + network_id: try!(parser.expect(Key::NetworkId) + .and_then(|id: u32| NetworkId::from_raw(id).ok_or(DecoderError::Custom("Invalid network ID")))), + head_td: try!(parser.expect(Key::HeadTD)), + head_hash: try!(parser.expect(Key::HeadHash)), + head_num: try!(parser.expect(Key::HeadNum)), + genesis_hash: try!(parser.expect(Key::GenesisHash)), + last_head: None, + }; + + let capabilities = Capabilities { + serve_headers: parser.expect_raw(Key::ServeHeaders).is_ok(), + serve_chain_since: parser.expect(Key::ServeChainSince).ok(), + serve_state_since: parser.expect(Key::ServeStateSince).ok(), + tx_relay: parser.expect_raw(Key::TxRelay).is_ok(), + }; + + let flow_params = FlowParams::new( + try!(parser.expect(Key::BufferLimit)), + try!(parser.expect(Key::BufferCostTable)), + try!(parser.expect(Key::BufferRechargeRate)), + ); + + Ok((status, capabilities, flow_params)) +} + +/// Write a handshake, given status, capabilities, and flow parameters. +pub fn write_handshake(status: &Status, capabilities: &Capabilities, flow_params: &FlowParams) -> Vec { + let mut pairs = Vec::new(); + pairs.push(encode_pair(Key::ProtocolVersion, &status.protocol_version)); + pairs.push(encode_pair(Key::NetworkId, &(status.network_id as u32))); + pairs.push(encode_pair(Key::HeadTD, &status.head_td)); + pairs.push(encode_pair(Key::HeadHash, &status.head_hash)); + pairs.push(encode_pair(Key::HeadNum, &status.head_num)); + pairs.push(encode_pair(Key::GenesisHash, &status.genesis_hash)); + + if capabilities.serve_headers { + pairs.push(encode_flag(Key::ServeHeaders)); + } + if let Some(ref serve_chain_since) = capabilities.serve_chain_since { + pairs.push(encode_pair(Key::ServeChainSince, serve_chain_since)); + } + if let Some(ref serve_state_since) = capabilities.serve_state_since { + pairs.push(encode_pair(Key::ServeStateSince, serve_state_since)); + } + if capabilities.tx_relay { + pairs.push(encode_flag(Key::TxRelay)); + } + + pairs.push(encode_pair(Key::BufferLimit, flow_params.limit())); + pairs.push(encode_pair(Key::BufferCostTable, flow_params.cost_table())); + pairs.push(encode_pair(Key::BufferRechargeRate, flow_params.recharge_rate())); + + let mut stream = RlpStream::new_list(pairs.len()); + + for pair in pairs { + stream.append_raw(&pair, 1); + } + + stream.out() +} + +/// An announcement of new chain head or capabilities made by a peer. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Announcement { + /// Hash of the best block. + pub head_hash: H256, + /// Number of the best block. + pub head_num: u64, + /// Head total difficulty + pub head_td: U256, + /// reorg depth to common ancestor of last announced head. + pub reorg_depth: u64, + /// optional new header-serving capability. 
false means "no change" + pub serve_headers: bool, + /// optional new state-serving capability + pub serve_state_since: Option, + /// optional new chain-serving capability + pub serve_chain_since: Option, + /// optional new transaction-relay capability. false means "no change" + pub tx_relay: bool, + // TODO: changes in buffer flow? +} + +/// Parse an announcement. +pub fn parse_announcement(rlp: UntrustedRlp) -> Result { + let mut last_key = None; + + let mut announcement = Announcement { + head_hash: try!(rlp.val_at(0)), + head_num: try!(rlp.val_at(1)), + head_td: try!(rlp.val_at(2)), + reorg_depth: try!(rlp.val_at(3)), + serve_headers: false, + serve_state_since: None, + serve_chain_since: None, + tx_relay: false, + }; + + let mut parser = Parser { + pos: 4, + rlp: rlp, + }; + + while let Some((key, item)) = try!(parser.get_next()) { + if Some(key) <= last_key { return Err(DecoderError::Custom("Invalid announcement key ordering")) } + last_key = Some(key); + + match key { + Key::ServeHeaders => announcement.serve_headers = true, + Key::ServeStateSince => announcement.serve_state_since = Some(try!(item.as_val())), + Key::ServeChainSince => announcement.serve_chain_since = Some(try!(item.as_val())), + Key::TxRelay => announcement.tx_relay = true, + _ => return Err(DecoderError::Custom("Nonsensical key in announcement")), + } + } + + Ok(announcement) +} + +/// Write an announcement out. +pub fn write_announcement(announcement: &Announcement) -> Vec { + let mut pairs = Vec::new(); + if announcement.serve_headers { + pairs.push(encode_flag(Key::ServeHeaders)); + } + if let Some(ref serve_chain_since) = announcement.serve_chain_since { + pairs.push(encode_pair(Key::ServeChainSince, serve_chain_since)); + } + if let Some(ref serve_state_since) = announcement.serve_state_since { + pairs.push(encode_pair(Key::ServeStateSince, serve_state_since)); + } + if announcement.tx_relay { + pairs.push(encode_flag(Key::TxRelay)); + } + + let mut stream = RlpStream::new_list(4 + pairs.len()); + stream + .append(&announcement.head_hash) + .append(&announcement.head_num) + .append(&announcement.head_td) + .append(&announcement.reorg_depth); + + for item in pairs { + stream.append_raw(&item, 1); + } + + stream.out() +} + +#[cfg(test)] +mod tests { + use super::*; + use super::super::buffer_flow::FlowParams; + use util::{U256, H256, FixedHash}; + use rlp::{RlpStream, Stream ,UntrustedRlp, View}; + + #[test] + fn full_handshake() { + let status = Status { + protocol_version: 1, + network_id: NetworkId::Mainnet, + head_td: U256::default(), + head_hash: H256::default(), + head_num: 10, + genesis_hash: H256::zero(), + last_head: None, + }; + + let capabilities = Capabilities { + serve_headers: true, + serve_chain_since: Some(5), + serve_state_since: Some(8), + tx_relay: true, + }; + + let flow_params = FlowParams::new( + 1_000_000.into(), + Default::default(), + 1000.into(), + ); + + let handshake = write_handshake(&status, &capabilities, &flow_params); + + let (read_status, read_capabilities, read_flow) + = parse_handshake(UntrustedRlp::new(&handshake)).unwrap(); + + assert_eq!(read_status, status); + assert_eq!(read_capabilities, capabilities); + assert_eq!(read_flow, flow_params); + } + + #[test] + fn partial_handshake() { + let status = Status { + protocol_version: 1, + network_id: NetworkId::Mainnet, + head_td: U256::default(), + head_hash: H256::default(), + head_num: 10, + genesis_hash: H256::zero(), + last_head: None, + }; + + let capabilities = Capabilities { + serve_headers: false, + serve_chain_since: 
Some(5), + serve_state_since: None, + tx_relay: true, + }; + + let flow_params = FlowParams::new( + 1_000_000.into(), + Default::default(), + 1000.into(), + ); + + let handshake = write_handshake(&status, &capabilities, &flow_params); + + let (read_status, read_capabilities, read_flow) + = parse_handshake(UntrustedRlp::new(&handshake)).unwrap(); + + assert_eq!(read_status, status); + assert_eq!(read_capabilities, capabilities); + assert_eq!(read_flow, flow_params); + } + + #[test] + fn skip_unknown_keys() { + let status = Status { + protocol_version: 1, + network_id: NetworkId::Mainnet, + head_td: U256::default(), + head_hash: H256::default(), + head_num: 10, + genesis_hash: H256::zero(), + last_head: None, + }; + + let capabilities = Capabilities { + serve_headers: false, + serve_chain_since: Some(5), + serve_state_since: None, + tx_relay: true, + }; + + let flow_params = FlowParams::new( + 1_000_000.into(), + Default::default(), + 1000.into(), + ); + + let handshake = write_handshake(&status, &capabilities, &flow_params); + let interleaved = { + let handshake = UntrustedRlp::new(&handshake); + let mut stream = RlpStream::new_list(handshake.item_count() * 3); + + for item in handshake.iter() { + stream.append_raw(item.as_raw(), 1); + let (mut s1, mut s2) = (RlpStream::new_list(2), RlpStream::new_list(2)); + s1.append(&"foo").append_empty_data(); + s2.append(&"bar").append_empty_data(); + stream.append_raw(&s1.out(), 1); + stream.append_raw(&s2.out(), 1); + } + + stream.out() + }; + + let (read_status, read_capabilities, read_flow) + = parse_handshake(UntrustedRlp::new(&interleaved)).unwrap(); + + assert_eq!(read_status, status); + assert_eq!(read_capabilities, capabilities); + assert_eq!(read_flow, flow_params); + } + + #[test] + fn announcement_roundtrip() { + let announcement = Announcement { + head_hash: H256::random(), + head_num: 100_000, + head_td: 1_000_000.into(), + reorg_depth: 4, + serve_headers: false, + serve_state_since: Some(99_000), + serve_chain_since: Some(1), + tx_relay: true, + }; + + let serialized = write_announcement(&announcement); + let read = parse_announcement(UntrustedRlp::new(&serialized)).unwrap(); + + assert_eq!(read, announcement); + } + + #[test] + fn keys_out_of_order() { + use super::{Key, encode_pair, encode_flag}; + + let mut stream = RlpStream::new_list(6); + stream + .append(&H256::zero()) + .append(&10u64) + .append(&100_000u64) + .append(&2u64) + .append_raw(&encode_pair(Key::ServeStateSince, &44u64), 1) + .append_raw(&encode_flag(Key::ServeHeaders), 1); + + let out = stream.drain(); + assert!(parse_announcement(UntrustedRlp::new(&out)).is_err()); + + let mut stream = RlpStream::new_list(6); + stream + .append(&H256::zero()) + .append(&10u64) + .append(&100_000u64) + .append(&2u64) + .append_raw(&encode_flag(Key::ServeHeaders), 1) + .append_raw(&encode_pair(Key::ServeStateSince, &44u64), 1); + + let out = stream.drain(); + assert!(parse_announcement(UntrustedRlp::new(&out)).is_ok()); + } +} \ No newline at end of file diff --git a/ethcore/light/src/provider.rs b/ethcore/light/src/provider.rs new file mode 100644 index 000000000..b1625f95f --- /dev/null +++ b/ethcore/light/src/provider.rs @@ -0,0 +1,71 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! A provider for the LES protocol. This is typically a full node, who can +//! give as much data as necessary to its peers. + +use ethcore::transaction::SignedTransaction; +use ethcore::blockchain_info::BlockChainInfo; +use util::{Bytes, H256}; + +use request; + +/// Defines the operations that a provider for `LES` must fulfill. +/// +/// These are defined at [1], but may be subject to change. +/// Requests which can't be fulfilled should return an empty RLP list. +/// +/// [1]: https://github.com/ethcore/parity/wiki/Light-Ethereum-Subprotocol-(LES) +pub trait Provider: Send + Sync { + /// Provide current blockchain info. + fn chain_info(&self) -> BlockChainInfo; + + /// Find the depth of a common ancestor between two blocks. + fn reorg_depth(&self, a: &H256, b: &H256) -> Option; + + /// Earliest state. + fn earliest_state(&self) -> Option; + + /// Provide a list of headers starting at the requested block, + /// possibly in reverse and skipping `skip` at a time. + /// + /// The returned vector may have any length in the range [0, `max`], but the + /// results within must adhere to the `skip` and `reverse` parameters. + fn block_headers(&self, req: request::Headers) -> Vec; + + /// Provide as many as possible of the requested blocks (minus the headers) encoded + /// in RLP format. + fn block_bodies(&self, req: request::Bodies) -> Vec; + + /// Provide the receipts as many as possible of the requested blocks. + /// Returns a vector of RLP-encoded lists of receipts. + fn receipts(&self, req: request::Receipts) -> Vec; + + /// Provide a set of merkle proofs, as requested. Each request is a + /// block hash and request parameters. + /// + /// Returns a vector to RLP-encoded lists satisfying the requests. + fn proofs(&self, req: request::StateProofs) -> Vec; + + /// Provide contract code for the specified (block_hash, account_hash) pairs. + fn code(&self, req: request::ContractCodes) -> Vec; + + /// Provide header proofs from the Canonical Hash Tries. + fn header_proofs(&self, req: request::HeaderProofs) -> Vec; + + /// Provide pending transactions. + fn pending_transactions(&self) -> Vec; +} \ No newline at end of file diff --git a/ethcore/light/src/request.rs b/ethcore/light/src/request.rs new file mode 100644 index 000000000..f043f0f25 --- /dev/null +++ b/ethcore/light/src/request.rs @@ -0,0 +1,145 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! LES request types. + +// TODO: make IPC compatible. + +use util::H256; + +/// A request for block headers. 
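For a sense of how a caller drives the `Provider` trait above, here is a short, crate-internal sketch of a headers request built from the `request::Headers` type defined just below. The helper name is hypothetical, and the assumption that `block_headers` hands back RLP-encoded headers as `Vec<Bytes>` is taken from how the network handlers earlier in this patch consume the result; this is not a standalone program.

// Hypothetical helper showing the call shape; `P` is any Provider
// implementation, e.g. a full node client. Crate-internal sketch only.
use util::{Bytes, H256};
use request;

fn fetch_headers<P: Provider + ?Sized>(provider: &P, start: (u64, H256)) -> Vec<Bytes> {
    provider.block_headers(request::Headers {
        block: start,   // (number, hash) of the first header wanted
        max: 512,       // the serving side clamps this to its own maximum
        skip: 0,        // no gaps between returned headers
        reverse: false, // ascending block numbers
    })
}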
+#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Headers { + /// Block information for the request being made. + pub block: (u64, H256), + /// The maximum amount of headers which can be returned. + pub max: usize, + /// The amount of headers to skip between each response entry. + pub skip: usize, + /// Whether the headers should proceed in falling number from the initial block. + pub reverse: bool, +} + +/// A request for specific block bodies. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Bodies { + /// Hashes which bodies are being requested for. + pub block_hashes: Vec +} + +/// A request for transaction receipts. +/// +/// This request is answered with a list of transaction receipts for each block +/// requested. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Receipts { + /// Block hashes to return receipts for. + pub block_hashes: Vec, +} + +/// A request for a state proof +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct StateProof { + /// Block hash to query state from. + pub block: H256, + /// Key of the state trie -- corresponds to account hash. + pub key1: H256, + /// Key in that account's storage trie; if empty, then the account RLP should be + /// returned. + pub key2: Option, + /// if greater than zero, trie nodes beyond this level may be omitted. + pub from_level: u32, // could even safely be u8; trie w/ 32-byte key can be at most 64-levels deep. +} + +/// A request for state proofs. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct StateProofs { + /// All the proof requests. + pub requests: Vec, +} + +/// A request for contract code. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ContractCodes { + /// Block hash and account key (== sha3(address)) pairs to fetch code for. + pub code_requests: Vec<(H256, H256)>, +} + +/// A request for a header proof from the Canonical Hash Trie. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct HeaderProof { + /// Number of the CHT. + pub cht_number: u64, + /// Block number requested. + pub block_number: u64, + /// If greater than zero, trie nodes beyond this level may be omitted. + pub from_level: u32, +} + +/// A request for header proofs from the CHT. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct HeaderProofs { + /// All the proof requests. + pub requests: Vec, +} + +/// Kinds of requests. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum Kind { + /// Requesting headers. + Headers, + /// Requesting block bodies. + Bodies, + /// Requesting transaction receipts. + Receipts, + /// Requesting proofs of state trie nodes. + StateProofs, + /// Requesting contract code by hash. + Codes, + /// Requesting header proofs (from the CHT). + HeaderProofs, +} + +/// Encompasses all possible types of requests in a single structure. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Request { + /// Requesting headers. + Headers(Headers), + /// Requesting block bodies. + Bodies(Bodies), + /// Requesting transaction receipts. + Receipts(Receipts), + /// Requesting state proofs. + StateProofs(StateProofs), + /// Requesting contract codes. + Codes(ContractCodes), + /// Requesting header proofs. + HeaderProofs(HeaderProofs), +} + +impl Request { + /// Get the kind of request this is. 
+ pub fn kind(&self) -> Kind { + match *self { + Request::Headers(_) => Kind::Headers, + Request::Bodies(_) => Kind::Bodies, + Request::Receipts(_) => Kind::Receipts, + Request::StateProofs(_) => Kind::StateProofs, + Request::Codes(_) => Kind::Codes, + Request::HeaderProofs(_) => Kind::HeaderProofs, + } + } +} \ No newline at end of file diff --git a/ethcore/res/ethereum/frontier.json b/ethcore/res/ethereum/frontier.json index ecaefa4c3..be473237c 100644 --- a/ethcore/res/ethereum/frontier.json +++ b/ethcore/res/ethereum/frontier.json @@ -131,10 +131,10 @@ "0x807640a13483f8ac783c557fcdf27be11ea4ac7a" ], "eip150Transition": "0x259518", - "eip155Transition": 2642462, - "eip160Transition": 2642462, - "eip161abcTransition": 2642462, - "eip161dTransition": 2642462 + "eip155Transition": "0x7fffffffffffffff", + "eip160Transition": "0x7fffffffffffffff", + "eip161abcTransition": "0x7fffffffffffffff", + "eip161dTransition": "0x7fffffffffffffff" } } }, @@ -176,7 +176,13 @@ "enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303", "enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303", "enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303", - "enode://4cd540b2c3292e17cff39922e864094bf8b0741fcc8c5dcea14957e389d7944c70278d872902e3d0345927f621547efa659013c400865485ab4bfa0c6596936f@138.201.144.135:30303" + "enode://4cd540b2c3292e17cff39922e864094bf8b0741fcc8c5dcea14957e389d7944c70278d872902e3d0345927f621547efa659013c400865485ab4bfa0c6596936f@138.201.144.135:30303", + + "enode://89d5dc2a81e574c19d0465f497c1af96732d1b61a41de89c2a37f35707689ac416529fae1038809852b235c2d30fd325abdc57c122feeefbeaaf802cc7e9580d@45.55.33.62:30303", + "enode://605e04a43b1156966b3a3b66b980c87b7f18522f7f712035f84576016be909a2798a438b2b17b1a8c58db314d88539a77419ca4be36148c086900fba487c9d39@188.166.255.12:30303", + "enode://016b20125f447a3b203a3cae953b2ede8ffe51290c071e7599294be84317635730c397b8ff74404d6be412d539ee5bb5c3c700618723d3b53958c92bd33eaa82@159.203.210.80:30303", + "enode://01f76fa0561eca2b9a7e224378dd854278735f1449793c46ad0c4e79e8775d080c21dcc455be391e90a98153c3b05dcc8935c8440de7b56fe6d67251e33f4e3c@10.6.6.117:30303", + "enode://fe11ef89fc5ac9da358fc160857855f25bbf9e332c79b9ca7089330c02b728b2349988c6062f10982041702110745e203d26975a6b34bcc97144f9fe439034e8@10.1.72.117:30303" ], "accounts": { "0000000000000000000000000000000000000001": { "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } }, diff --git a/ethcore/res/ethereum/tests b/ethcore/res/ethereum/tests index 853333e7d..9028c4801 160000 --- a/ethcore/res/ethereum/tests +++ b/ethcore/res/ethereum/tests @@ -1 +1 @@ -Subproject commit 853333e7da312775fb8f32f2c2771b8578cd0d79 +Subproject commit 9028c4801fd39fbb71a9796979182549a24e81c8 diff --git a/ethcore/src/account_provider.rs b/ethcore/src/account_provider.rs index 064c3e935..e906aefe9 100644 --- a/ethcore/src/account_provider.rs +++ b/ethcore/src/account_provider.rs @@ -95,6 +95,7 @@ impl KeyDirectory for NullDir { struct AddressBook { path: PathBuf, cache: HashMap, + transient: bool, } impl AddressBook { @@ -106,11 +107,18 @@ impl AddressBook { let mut r = AddressBook { path: path, cache: HashMap::new(), + transient: false, }; r.revert(); r } + pub fn transient() -> Self { + let 
mut book = AddressBook::new(Default::default()); + book.transient = true; + book + } + pub fn get(&self) -> HashMap { self.cache.clone() } @@ -134,6 +142,7 @@ impl AddressBook { } fn revert(&mut self) { + if self.transient { return; } trace!(target: "addressbook", "revert"); let _ = fs::File::open(self.path.clone()) .map_err(|e| trace!(target: "addressbook", "Couldn't open address book: {}", e)) @@ -144,6 +153,7 @@ impl AddressBook { } fn save(&mut self) { + if self.transient { return; } trace!(target: "addressbook", "save"); let _ = fs::File::create(self.path.clone()) .map_err(|e| warn!(target: "addressbook", "Couldn't open address book for writing: {}", e)) @@ -175,7 +185,7 @@ impl AccountProvider { pub fn transient_provider() -> Self { AccountProvider { unlocked: Mutex::new(HashMap::new()), - address_book: Mutex::new(AddressBook::new(Default::default())), + address_book: Mutex::new(AddressBook::transient()), sstore: Box::new(EthStore::open(Box::new(NullDir::default())) .expect("NullDir load always succeeds; qed")) } diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 4f7ba8821..1c5be7509 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -1232,8 +1232,7 @@ impl BlockChainClient for Client { fn uncle_extra_info(&self, id: UncleID) -> Option> { self.uncle(id) - .map(|block| BlockView::new(&block).header()) - .map(|header| self.engine.extra_info(&header)) + .map(|header| self.engine.extra_info(&decode(&header))) } } diff --git a/ethcore/src/json_tests/transaction.rs b/ethcore/src/json_tests/transaction.rs index dd5a4ef14..438852124 100644 --- a/ethcore/src/json_tests/transaction.rs +++ b/ethcore/src/json_tests/transaction.rs @@ -26,8 +26,7 @@ fn do_json_test(json_data: &[u8]) -> Vec { let old_schedule = evm::Schedule::new_frontier(); let new_schedule = evm::Schedule::new_homestead(); for (name, test) in tests.into_iter() { - let mut fail = false; - let mut fail_unless = |cond: bool| if !cond && !fail { failed.push(name.clone()); println!("Transaction failed: {:?}", name); fail = true }; + let mut fail_unless = |cond: bool, title: &str| if !cond { failed.push(name.clone()); println!("Transaction failed: {:?}: {:?}", name, title); }; let number: Option = test.block_number.map(Into::into); let schedule = match number { @@ -35,7 +34,7 @@ fn do_json_test(json_data: &[u8]) -> Vec { Some(x) if x < 1_150_000 => &old_schedule, Some(_) => &new_schedule }; - let allow_network_id_of_one = number.map_or(false, |n| n > 2600000); + let allow_network_id_of_one = number.map_or(false, |n| n >= 3_500_000); let rlp: Vec = test.rlp.into(); let res = UntrustedRlp::new(&rlp) @@ -43,26 +42,26 @@ fn do_json_test(json_data: &[u8]) -> Vec { .map_err(From::from) .and_then(|t: SignedTransaction| t.validate(schedule, schedule.have_delegate_call, allow_network_id_of_one)); - fail_unless(test.transaction.is_none() == res.is_err()); + fail_unless(test.transaction.is_none() == res.is_err(), "Validity different"); if let (Some(tx), Some(sender)) = (test.transaction, test.sender) { let t = res.unwrap(); - fail_unless(t.sender().unwrap() == sender.into()); + fail_unless(t.sender().unwrap() == sender.into(), "sender mismatch"); let is_acceptable_network_id = match t.network_id() { None => true, Some(1) if allow_network_id_of_one => true, _ => false, }; - fail_unless(is_acceptable_network_id); + fail_unless(is_acceptable_network_id, "Network ID unacceptable"); let data: Vec = tx.data.into(); - fail_unless(t.data == data); - fail_unless(t.gas_price == tx.gas_price.into()); 
- fail_unless(t.nonce == tx.nonce.into()); - fail_unless(t.value == tx.value.into()); + fail_unless(t.data == data, "data mismatch"); + fail_unless(t.gas_price == tx.gas_price.into(), "gas_price mismatch"); + fail_unless(t.nonce == tx.nonce.into(), "nonce mismatch"); + fail_unless(t.value == tx.value.into(), "value mismatch"); let to: Option = tx.to.into(); let to: Option

= to.map(Into::into); match t.action { - Action::Call(dest) => fail_unless(Some(dest) == to), - Action::Create => fail_unless(None == to), + Action::Call(dest) => fail_unless(Some(dest) == to, "call/destination mismatch"), + Action::Create => fail_unless(None == to, "create mismatch"), } } } @@ -80,3 +79,7 @@ declare_test!{TransactionTests_Homestead_ttTransactionTest, "TransactionTests/Ho declare_test!{heavy => TransactionTests_Homestead_tt10mbDataField, "TransactionTests/Homestead/tt10mbDataField"} declare_test!{TransactionTests_Homestead_ttWrongRLPTransaction, "TransactionTests/Homestead/ttWrongRLPTransaction"} declare_test!{TransactionTests_RandomTests_tr201506052141PYTHON, "TransactionTests/RandomTests/tr201506052141PYTHON"} +declare_test!{TransactionTests_Homestead_ttTransactionTestEip155VitaliksTests, "TransactionTests/Homestead/ttTransactionTestEip155VitaliksTests"} +declare_test!{TransactionTests_EIP155_ttTransactionTest, "TransactionTests/EIP155/ttTransactionTest"} +declare_test!{TransactionTests_EIP155_ttTransactionTestEip155VitaliksTests, "TransactionTests/EIP155/ttTransactionTestEip155VitaliksTests"} +declare_test!{TransactionTests_EIP155_ttTransactionTestVRule, "TransactionTests/EIP155/ttTransactionTestVRule"} diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index cb3ca29e1..c7c92a6f9 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -137,6 +137,7 @@ pub mod miner; pub mod snapshot; pub mod action_params; pub mod db; +pub mod verification; #[macro_use] pub mod evm; mod cache_manager; @@ -150,7 +151,6 @@ mod account_db; mod builtin; mod executive; mod externalities; -mod verification; mod blockchain; mod types; mod factory; diff --git a/ethcore/src/pod_account.rs b/ethcore/src/pod_account.rs index afee32f94..0882b688c 100644 --- a/ethcore/src/pod_account.rs +++ b/ethcore/src/pod_account.rs @@ -89,7 +89,7 @@ impl From for PodAccount { let key: U256 = key.into(); let value: U256 = value.into(); (H256::from(key), H256::from(value)) - }).collect() + }).collect(), } } } @@ -99,8 +99,12 @@ impl From for PodAccount { PodAccount { balance: a.balance.map_or_else(U256::zero, Into::into), nonce: a.nonce.map_or_else(U256::zero, Into::into), - code: a.code.map(Into::into).or_else(|| Some(Vec::new())), - storage: BTreeMap::new() + code: Some(a.code.map_or_else(Vec::new, Into::into)), + storage: a.storage.map_or_else(BTreeMap::new, |s| s.into_iter().map(|(key, value)| { + let key: U256 = key.into(); + let value: U256 = value.into(); + (H256::from(key), H256::from(value)) + }).collect()), } } } @@ -112,7 +116,7 @@ impl fmt::Display for PodAccount { self.nonce, self.code.as_ref().map_or(0, |c| c.len()), self.code.as_ref().map_or_else(H256::new, |c| c.sha3()), - self.storage.len() + self.storage.len(), ) } } diff --git a/ethcore/src/snapshot/error.rs b/ethcore/src/snapshot/error.rs index d634057dc..d417695f0 100644 --- a/ethcore/src/snapshot/error.rs +++ b/ethcore/src/snapshot/error.rs @@ -45,6 +45,8 @@ pub enum Error { MissingCode(Vec), /// Unrecognized code encoding. UnrecognizedCodeState(u8), + /// Restoration aborted. + RestorationAborted, /// Trie error. Trie(TrieError), /// Decoder error. @@ -67,6 +69,7 @@ impl fmt::Display for Error { a pruned database. 
Please re-run with the --pruning archive flag."), Error::MissingCode(ref missing) => write!(f, "Incomplete snapshot: {} contract codes not found.", missing.len()), Error::UnrecognizedCodeState(state) => write!(f, "Unrecognized code encoding ({})", state), + Error::RestorationAborted => write!(f, "Snapshot restoration aborted."), Error::Io(ref err) => err.fmt(f), Error::Decoder(ref err) => err.fmt(f), Error::Trie(ref err) => err.fmt(f), diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 22c44ba3b..f4d791593 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -407,30 +407,28 @@ impl StateRebuilder { } /// Feed an uncompressed state chunk into the rebuilder. - pub fn feed(&mut self, chunk: &[u8]) -> Result<(), ::error::Error> { + pub fn feed(&mut self, chunk: &[u8], flag: &AtomicBool) -> Result<(), ::error::Error> { let rlp = UntrustedRlp::new(chunk); let empty_rlp = StateAccount::new_basic(U256::zero(), U256::zero()).rlp(); - let account_fat_rlps: Vec<_> = rlp.iter().map(|r| r.as_raw()).collect(); let mut pairs = Vec::with_capacity(rlp.item_count()); // initialize the pairs vector with empty values so we have slots to write into. pairs.resize(rlp.item_count(), (H256::new(), Vec::new())); - let chunk_size = account_fat_rlps.len() / ::num_cpus::get() + 1; + let status = try!(rebuild_accounts( + self.db.as_hashdb_mut(), + rlp, + &mut pairs, + &self.code_map, + flag + )); - // new code contained within this chunk. - let mut chunk_code = HashMap::new(); - - for (account_chunk, out_pairs_chunk) in account_fat_rlps.chunks(chunk_size).zip(pairs.chunks_mut(chunk_size)) { - let code_map = &self.code_map; - let status = try!(rebuild_accounts(self.db.as_hashdb_mut(), account_chunk, out_pairs_chunk, code_map)); - chunk_code.extend(status.new_code); - for (addr_hash, code_hash) in status.missing_code { - self.missing_code.entry(code_hash).or_insert_with(Vec::new).push(addr_hash); - } + for (addr_hash, code_hash) in status.missing_code { + self.missing_code.entry(code_hash).or_insert_with(Vec::new).push(addr_hash); } + // patch up all missing code. must be done after collecting all new missing code entries. - for (code_hash, code) in chunk_code { + for (code_hash, code) in status.new_code { for addr_hash in self.missing_code.remove(&code_hash).unwrap_or_else(Vec::new) { let mut db = AccountDBMut::from_hash(self.db.as_hashdb_mut(), addr_hash); db.emplace(code_hash, DBValue::from_slice(&code)); @@ -450,6 +448,8 @@ impl StateRebuilder { }; for (hash, thin_rlp) in pairs { + if !flag.load(Ordering::SeqCst) { return Err(Error::RestorationAborted.into()) } + if &thin_rlp[..] != &empty_rlp[..] { self.bloom.set(&*hash); } @@ -487,17 +487,18 @@ struct RebuiltStatus { } // rebuild a set of accounts and their storage. -// returns +// returns a status detailing newly-loaded code and accounts missing code. 
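The restoration changes above thread an `AtomicBool` through every `feed` call so that `abort_restore` can stop a long-running state or block import mid-chunk. The pattern itself is small enough to show in a self-contained toy, sketched below with made-up work standing in for real chunk processing; depending on timing it prints either the abort error or the finished sum.

// Toy model of the abort flag threaded through the snapshot `feed` methods:
// the worker polls the flag on every item and bails out once it is cleared.
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;

fn feed(chunk: &[u64], abort_flag: &AtomicBool) -> Result<u64, &'static str> {
    let mut total = 0;
    for item in chunk {
        if !abort_flag.load(Ordering::SeqCst) { return Err("restoration aborted") }
        total += *item; // stand-in for rebuilding one account or block
    }
    Ok(total)
}

fn main() {
    let flag = Arc::new(AtomicBool::new(true));
    let worker_flag = flag.clone();
    let worker = thread::spawn(move || feed(&vec![1u64; 10_000_000], &worker_flag));

    // the equivalent of `abort_restore`: clear the flag and let the worker notice.
    flag.store(false, Ordering::SeqCst);
    println!("{:?}", worker.join().unwrap());
}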
fn rebuild_accounts( db: &mut HashDB, - account_chunk: &[&[u8]], + account_fat_rlps: UntrustedRlp, out_chunk: &mut [(H256, Bytes)], - code_map: &HashMap + code_map: &HashMap, + abort_flag: &AtomicBool ) -> Result { let mut status = RebuiltStatus::default(); - for (account_pair, out) in account_chunk.into_iter().zip(out_chunk) { - let account_rlp = UntrustedRlp::new(account_pair); + for (account_rlp, out) in account_fat_rlps.into_iter().zip(out_chunk) { + if !abort_flag.load(Ordering::SeqCst) { return Err(Error::RestorationAborted.into()) } let hash: H256 = try!(account_rlp.val_at(0)); let fat_rlp = try!(account_rlp.at(1)); @@ -580,7 +581,7 @@ impl BlockRebuilder { /// Feed the rebuilder an uncompressed block chunk. /// Returns the number of blocks fed or any errors. - pub fn feed(&mut self, chunk: &[u8], engine: &Engine) -> Result { + pub fn feed(&mut self, chunk: &[u8], engine: &Engine, abort_flag: &AtomicBool) -> Result { use basic_types::Seal::With; use util::U256; use util::triehash::ordered_trie_root; @@ -601,6 +602,8 @@ impl BlockRebuilder { let parent_total_difficulty = try!(rlp.val_at::(2)); for idx in 3..item_count { + if !abort_flag.load(Ordering::SeqCst) { return Err(Error::RestorationAborted.into()) } + let pair = try!(rlp.at(idx)); let abridged_rlp = try!(pair.at(0)).as_raw().to_owned(); let abridged_block = AbridgedBlock::from_raw(abridged_rlp); diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index 16f7c6ec6..c0d34a6a9 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -118,12 +118,12 @@ impl Restoration { }) } - // feeds a state chunk - fn feed_state(&mut self, hash: H256, chunk: &[u8]) -> Result<(), Error> { + // feeds a state chunk, aborts early if `flag` becomes false. + fn feed_state(&mut self, hash: H256, chunk: &[u8], flag: &AtomicBool) -> Result<(), Error> { if self.state_chunks_left.remove(&hash) { let len = try!(snappy::decompress_into(chunk, &mut self.snappy_buffer)); - try!(self.state.feed(&self.snappy_buffer[..len])); + try!(self.state.feed(&self.snappy_buffer[..len], flag)); if let Some(ref mut writer) = self.writer.as_mut() { try!(writer.write_state_chunk(hash, chunk)); @@ -134,11 +134,11 @@ impl Restoration { } // feeds a block chunk - fn feed_blocks(&mut self, hash: H256, chunk: &[u8], engine: &Engine) -> Result<(), Error> { + fn feed_blocks(&mut self, hash: H256, chunk: &[u8], engine: &Engine, flag: &AtomicBool) -> Result<(), Error> { if self.block_chunks_left.remove(&hash) { let len = try!(snappy::decompress_into(chunk, &mut self.snappy_buffer)); - try!(self.blocks.feed(&self.snappy_buffer[..len], engine)); + try!(self.blocks.feed(&self.snappy_buffer[..len], engine, flag)); if let Some(ref mut writer) = self.writer.as_mut() { try!(writer.write_block_chunk(hash, chunk)); } @@ -224,6 +224,7 @@ pub struct Service { db_restore: Arc, progress: super::Progress, taking_snapshot: AtomicBool, + restoring_snapshot: AtomicBool, } impl Service { @@ -244,6 +245,7 @@ impl Service { db_restore: params.db_restore, progress: Default::default(), taking_snapshot: AtomicBool::new(false), + restoring_snapshot: AtomicBool::new(false), }; // create the root snapshot dir if it doesn't exist. 
@@ -436,6 +438,8 @@ impl Service { state_chunks_done: self.state_chunks.load(Ordering::SeqCst) as u32, block_chunks_done: self.block_chunks.load(Ordering::SeqCst) as u32, }; + + self.restoring_snapshot.store(true, Ordering::SeqCst); Ok(()) } @@ -490,8 +494,8 @@ impl Service { }; (match is_state { - true => rest.feed_state(hash, chunk), - false => rest.feed_blocks(hash, chunk, &*self.engine), + true => rest.feed_state(hash, chunk, &self.restoring_snapshot), + false => rest.feed_blocks(hash, chunk, &*self.engine, &self.restoring_snapshot), }.map(|_| rest.is_done()), rest.db.clone()) }; @@ -573,6 +577,7 @@ impl SnapshotService for Service { } fn abort_restore(&self) { + self.restoring_snapshot.store(false, Ordering::SeqCst); *self.restoration.lock() = None; *self.status.lock() = RestorationStatus::Inactive; } diff --git a/ethcore/src/snapshot/tests/blocks.rs b/ethcore/src/snapshot/tests/blocks.rs index 12efcda77..18637bad1 100644 --- a/ethcore/src/snapshot/tests/blocks.rs +++ b/ethcore/src/snapshot/tests/blocks.rs @@ -17,10 +17,11 @@ //! Block chunker and rebuilder tests. use devtools::RandomTempPath; +use error::Error; use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer}; use blockchain::BlockChain; -use snapshot::{chunk_blocks, BlockRebuilder, Progress}; +use snapshot::{chunk_blocks, BlockRebuilder, Error as SnapshotError, Progress}; use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter}; use util::{Mutex, snappy}; @@ -28,6 +29,7 @@ use util::kvdb::{Database, DatabaseConfig}; use std::collections::HashMap; use std::sync::Arc; +use std::sync::atomic::AtomicBool; fn chunk_and_restore(amount: u64) { let mut canon_chain = ChainGenerator::default(); @@ -75,10 +77,11 @@ fn chunk_and_restore(amount: u64) { let mut rebuilder = BlockRebuilder::new(new_chain, new_db.clone(), &manifest).unwrap(); let reader = PackedReader::new(&snapshot_path).unwrap().unwrap(); let engine = ::engines::NullEngine::new(Default::default(), Default::default()); + let flag = AtomicBool::new(true); for chunk_hash in &reader.manifest().block_hashes { let compressed = reader.chunk(*chunk_hash).unwrap(); let chunk = snappy::decompress(&compressed).unwrap(); - rebuilder.feed(&chunk, &engine).unwrap(); + rebuilder.feed(&chunk, &engine, &flag).unwrap(); } rebuilder.finalize(HashMap::new()).unwrap(); @@ -93,3 +96,46 @@ fn chunk_and_restore_500() { chunk_and_restore(500) } #[test] fn chunk_and_restore_40k() { chunk_and_restore(40000) } + +#[test] +fn checks_flag() { + use ::rlp::{RlpStream, Stream}; + use util::H256; + + let mut stream = RlpStream::new_list(5); + + stream.append(&100u64) + .append(&H256::default()) + .append(&(!0u64)); + + stream.append_empty_data().append_empty_data(); + + let genesis = { + let mut canon_chain = ChainGenerator::default(); + let mut finalizer = BlockFinalizer::default(); + canon_chain.generate(&mut finalizer).unwrap() + }; + + let chunk = stream.out(); + let path = RandomTempPath::create_dir(); + + let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + let db = Arc::new(Database::open(&db_cfg, path.as_str()).unwrap()); + let chain = BlockChain::new(Default::default(), &genesis, db.clone()); + let engine = ::engines::NullEngine::new(Default::default(), Default::default()); + + let manifest = ::snapshot::ManifestData { + state_hashes: Vec::new(), + block_hashes: Vec::new(), + state_root: ::util::sha3::SHA3_NULL_RLP, + block_number: 102, + block_hash: H256::default(), + }; + + let mut rebuilder = BlockRebuilder::new(chain, db.clone(), 
&manifest).unwrap(); + + match rebuilder.feed(&chunk, &engine, &AtomicBool::new(false)) { + Err(Error::Snapshot(SnapshotError::RestorationAborted)) => {} + _ => panic!("Wrong result on abort flag set") + } +} \ No newline at end of file diff --git a/ethcore/src/snapshot/tests/state.rs b/ethcore/src/snapshot/tests/state.rs index e1d4df5f9..05537fa96 100644 --- a/ethcore/src/snapshot/tests/state.rs +++ b/ethcore/src/snapshot/tests/state.rs @@ -16,10 +16,12 @@ //! State snapshotting tests. -use snapshot::{chunk_state, Progress, StateRebuilder}; +use snapshot::{chunk_state, Error as SnapshotError, Progress, StateRebuilder}; use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter}; use super::helpers::{compare_dbs, StateProducer}; +use error::Error; + use rand::{XorShiftRng, SeedableRng}; use util::hash::H256; use util::journaldb::{self, Algorithm}; @@ -29,6 +31,7 @@ use util::Mutex; use devtools::RandomTempPath; use std::sync::Arc; +use std::sync::atomic::AtomicBool; #[test] fn snap_and_restore() { @@ -65,11 +68,13 @@ fn snap_and_restore() { let mut rebuilder = StateRebuilder::new(new_db.clone(), Algorithm::Archive); let reader = PackedReader::new(&snap_file).unwrap().unwrap(); + let flag = AtomicBool::new(true); + for chunk_hash in &reader.manifest().state_hashes { let raw = reader.chunk(*chunk_hash).unwrap(); let chunk = ::util::snappy::decompress(&raw).unwrap(); - rebuilder.feed(&chunk).unwrap(); + rebuilder.feed(&chunk, &flag).unwrap(); } assert_eq!(rebuilder.state_root(), state_root); @@ -82,3 +87,52 @@ fn snap_and_restore() { compare_dbs(&old_db, new_db.as_hashdb()); } + +#[test] +fn checks_flag() { + let mut producer = StateProducer::new(); + let mut rng = XorShiftRng::from_seed([5, 6, 7, 8]); + let mut old_db = MemoryDB::new(); + let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + + for _ in 0..10 { + producer.tick(&mut rng, &mut old_db); + } + + let snap_dir = RandomTempPath::create_dir(); + let mut snap_file = snap_dir.as_path().to_owned(); + snap_file.push("SNAP"); + + let state_root = producer.state_root(); + let writer = Mutex::new(PackedWriter::new(&snap_file).unwrap()); + + let state_hashes = chunk_state(&old_db, &state_root, &writer, &Progress::default()).unwrap(); + + writer.into_inner().finish(::snapshot::ManifestData { + state_hashes: state_hashes, + block_hashes: Vec::new(), + state_root: state_root, + block_number: 0, + block_hash: H256::default(), + }).unwrap(); + + let mut db_path = snap_dir.as_path().to_owned(); + db_path.push("db"); + { + let new_db = Arc::new(Database::open(&db_cfg, &db_path.to_string_lossy()).unwrap()); + let mut rebuilder = StateRebuilder::new(new_db.clone(), Algorithm::Archive); + let reader = PackedReader::new(&snap_file).unwrap().unwrap(); + + let flag = AtomicBool::new(false); + + for chunk_hash in &reader.manifest().state_hashes { + let raw = reader.chunk(*chunk_hash).unwrap(); + let chunk = ::util::snappy::decompress(&raw).unwrap(); + + match rebuilder.feed(&chunk, &flag) { + Err(Error::Snapshot(SnapshotError::RestorationAborted)) => {}, + _ => panic!("unexpected result when feeding with flag off"), + } + } + } +} \ No newline at end of file diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index b498cbd41..20e421999 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -163,7 +163,7 @@ impl Spec { /// Get the configured Network ID. pub fn network_id(&self) -> usize { self.params.network_id } - /// Get the configured Network ID. + /// Get the configured subprotocol name. 
pub fn subprotocol_name(&self) -> String { self.params.subprotocol_name.clone() } /// Get the configured network fork block. diff --git a/ethcore/src/types/mod.rs.in b/ethcore/src/types/mod.rs.in index 1d2cdb3c0..6ef67009a 100644 --- a/ethcore/src/types/mod.rs.in +++ b/ethcore/src/types/mod.rs.in @@ -33,4 +33,4 @@ pub mod transaction_import; pub mod block_import_error; pub mod restoration_status; pub mod snapshot_manifest; -pub mod mode; \ No newline at end of file +pub mod mode; diff --git a/ethcore/src/verification/canon_verifier.rs b/ethcore/src/verification/canon_verifier.rs index cc6bc448a..b5b01279e 100644 --- a/ethcore/src/verification/canon_verifier.rs +++ b/ethcore/src/verification/canon_verifier.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +//! Canonical verifier. + use blockchain::BlockProvider; use engines::Engine; use error::Error; @@ -21,6 +23,7 @@ use header::Header; use super::Verifier; use super::verification; +/// A canonial verifier -- this does full verification. pub struct CanonVerifier; impl Verifier for CanonVerifier { diff --git a/ethcore/src/verification/mod.rs b/ethcore/src/verification/mod.rs index 239c88597..55663052b 100644 --- a/ethcore/src/verification/mod.rs +++ b/ethcore/src/verification/mod.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +//! Block verification utilities. + pub mod verification; pub mod verifier; pub mod queue; @@ -44,6 +46,7 @@ impl Default for VerifierType { } } +/// Create a new verifier based on type. pub fn new(v: VerifierType) -> Box { match v { VerifierType::Canon | VerifierType::CanonNoSeal => Box::new(CanonVerifier), diff --git a/ethcore/src/verification/noop_verifier.rs b/ethcore/src/verification/noop_verifier.rs index fb798be46..7db688a85 100644 --- a/ethcore/src/verification/noop_verifier.rs +++ b/ethcore/src/verification/noop_verifier.rs @@ -14,12 +14,15 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +//! No-op verifier. + use blockchain::BlockProvider; use engines::Engine; use error::Error; use header::Header; use super::Verifier; +/// A no-op verifier -- this will verify everything it's given immediately. #[allow(dead_code)] pub struct NoopVerifier; diff --git a/ethcore/src/verification/verification.rs b/ethcore/src/verification/verification.rs index bb9f042ae..47b2e16de 100644 --- a/ethcore/src/verification/verification.rs +++ b/ethcore/src/verification/verification.rs @@ -14,12 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -/// Block and transaction verification functions -/// -/// Block verification is done in 3 steps -/// 1. Quick verification upon adding to the block queue -/// 2. Signatures verification done in the queue. -/// 3. Final verification against the blockchain done before enactment. +//! Block and transaction verification functions +//! +//! Block verification is done in 3 steps +//! 1. Quick verification upon adding to the block queue +//! 2. Signatures verification done in the queue. +//! 3. Final verification against the blockchain done before enactment. 
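Since the module docs above now spell out the three verification stages, a brief usage sketch may help place the `Verifier` trait (documented further down in this patch) in that flow. This is a crate-internal illustration, not a standalone program: it assumes the `verification::new`/`VerifierType` constructor and the two trait methods exactly as they appear in this diff, and the wrapping function is hypothetical.

// Illustrative only: where the two Verifier methods sit in the 3-step flow.
use blockchain::BlockProvider;
use engines::Engine;
use error::Error;
use header::Header;
use verification::{self, VerifierType};

fn final_checks(header: &Header, bytes: &[u8], expected: &Header, engine: &Engine, chain: &BlockProvider) -> Result<(), Error> {
    // steps 1 and 2 (cheap checks and signature checks) happen in the queue;
    // step 3 runs against the chain just before enactment.
    let verifier = verification::new(VerifierType::Canon);
    try!(verifier.verify_block_family(header, bytes, engine, chain));
    verifier.verify_block_final(expected, header)
}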
use util::*; use engines::Engine; diff --git a/ethcore/src/verification/verifier.rs b/ethcore/src/verification/verifier.rs index 7f57407f7..05d488f95 100644 --- a/ethcore/src/verification/verifier.rs +++ b/ethcore/src/verification/verifier.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +//! A generic verifier trait. + use blockchain::BlockProvider; use engines::Engine; use error::Error; @@ -21,6 +23,8 @@ use header::Header; /// Should be used to verify blocks. pub trait Verifier: Send + Sync { + /// Verify a block relative to its parent and uncles. fn verify_block_family(&self, header: &Header, bytes: &[u8], engine: &Engine, bc: &BlockProvider) -> Result<(), Error>; + /// Do a final verification check for an enacted header vs its expected counterpart. fn verify_block_final(&self, expected: &Header, got: &Header) -> Result<(), Error>; } diff --git a/js/.gitignore b/js/.gitignore index acb73a82a..c1e496d91 100644 --- a/js/.gitignore +++ b/js/.gitignore @@ -6,3 +6,4 @@ build .dist .happypack .npmjs +.eslintcache diff --git a/js/README.md b/js/README.md index 41252bc91..5ed26e0cf 100644 --- a/js/README.md +++ b/js/README.md @@ -7,6 +7,6 @@ JavaScript APIs and UIs for Parity. 0. Install [Node](https://nodejs.org/) if not already available 0. Change to the `js` directory inside `parity/` 0. Install the npm modules via `npm install` -0. Parity should be run with `parity --signer-no-validation [...options]` (where `options` can be `--chain testnet`) +0. Parity should be run with `parity --ui-no-validation [...options]` (where `options` can be `--chain testnet`) 0. Start the development environment via `npm start` 0. Connect to the [UI](http://localhost:3000) diff --git a/js/package.json b/js/package.json index 7a7748ad6..137cbd642 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "0.2.21", + "version": "0.2.37", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team ", @@ -39,9 +39,11 @@ "clean": "rm -rf ./build ./coverage", "coveralls": "npm run testCoverage && coveralls < coverage/lcov.info", "lint": "eslint --ignore-path .gitignore ./src/", + "lint:cached": "eslint --cache --ignore-path .gitignore ./src/", "test": "mocha 'src/**/*.spec.js'", "test:coverage": "istanbul cover _mocha -- 'src/**/*.spec.js'", - "test:e2e": "mocha 'src/**/*.e2e.js'" + "test:e2e": "mocha 'src/**/*.e2e.js'", + "prepush": "npm run lint:cached" }, "devDependencies": { "babel-cli": "^6.10.1", @@ -84,6 +86,7 @@ "happypack": "^2.2.1", "history": "^2.0.0", "html-loader": "^0.4.4", + "husky": "^0.11.9", "ignore-styles": "2.0.0", "image-webpack-loader": "^1.8.0", "istanbul": "^1.0.0-alpha.2", @@ -97,6 +100,7 @@ "postcss-loader": "^0.8.1", "postcss-nested": "^1.0.0", "postcss-simple-vars": "^3.0.0", + "raw-loader": "^0.5.1", "react-addons-test-utils": "^15.3.0", "react-copy-to-clipboard": "^4.2.3", "react-hot-loader": "^1.3.0", @@ -115,9 +119,11 @@ "dependencies": { "bignumber.js": "^2.3.0", "blockies": "0.0.2", + "brace": "^0.9.0", "bytes": "^2.4.0", "chart.js": "^2.3.0", "es6-promise": "^3.2.1", + "ethereumjs-tx": "^1.1.2", "file-saver": "^1.3.3", "format-json": "^1.0.3", "format-number": "^2.0.1", @@ -134,9 +140,11 @@ "moment": "^2.14.1", "qs": "^6.3.0", "react": "^15.2.1", + "react-ace": "^4.0.0", "react-addons-css-transition-group": "^15.2.1", "react-chartjs-2": "^1.5.0", "react-dom": "^15.2.1", + "react-dropzone": "^3.7.3", "react-redux": "^4.4.5", "react-router": 
"^2.6.1", "react-router-redux": "^4.0.5", @@ -147,10 +155,14 @@ "redux-actions": "^0.10.1", "redux-thunk": "^2.1.0", "rlp": "^2.0.0", + "scryptsy": "^2.0.0", + "solc": "ngotchac/solc-js", "store": "^1.3.20", "utf8": "^2.1.1", + "valid-url": "^1.0.9", "validator": "^5.7.0", "web3": "^0.17.0-beta", - "whatwg-fetch": "^1.0.0" + "whatwg-fetch": "^1.0.0", + "worker-loader": "^0.7.1" } } diff --git a/js/scripts/release.sh b/js/scripts/release.sh index fd95e00b8..5e631cf98 100755 --- a/js/scripts/release.sh +++ b/js/scripts/release.sh @@ -63,7 +63,7 @@ if [ "$BRANCH" == "master" ]; then echo "*** Publishing $PACKAGE to npmjs" cd .npmjs - npm publish --access public + npm publish --access public || true cd ../.. fi diff --git a/js/src/api/api.spec.js b/js/src/api/api.spec.js index bbd140d4d..16831ea66 100644 --- a/js/src/api/api.spec.js +++ b/js/src/api/api.spec.js @@ -34,7 +34,7 @@ describe('api/Api', () => { }); describe('interface', () => { - const api = new Api(new Api.Transport.Http(TEST_HTTP_URL)); + const api = new Api(new Api.Transport.Http(TEST_HTTP_URL, -1)); Object.keys(ethereumRpc).sort().forEach((endpoint) => { describe(endpoint, () => { diff --git a/js/src/api/contract/contract.js b/js/src/api/contract/contract.js index bb6c15d8d..8d556a118 100644 --- a/js/src/api/contract/contract.js +++ b/js/src/api/contract/contract.js @@ -136,27 +136,30 @@ export default class Contract { } parseEventLogs (logs) { - return logs.map((log) => { - const signature = log.topics[0].substr(2); - const event = this.events.find((evt) => evt.signature === signature); + return logs + .map((log) => { + const signature = log.topics[0].substr(2); + const event = this.events.find((evt) => evt.signature === signature); - if (!event) { - throw new Error(`Unable to find event matching signature ${signature}`); - } + if (!event) { + console.warn(`Unable to find event matching signature ${signature}`); + return null; + } - const decoded = event.decodeLog(log.topics, log.data); + const decoded = event.decodeLog(log.topics, log.data); - log.params = {}; - log.event = event.name; + log.params = {}; + log.event = event.name; - decoded.params.forEach((param) => { - const { type, value } = param.token; + decoded.params.forEach((param) => { + const { type, value } = param.token; - log.params[param.name] = { type, value }; - }); + log.params[param.name] = { type, value }; + }); - return log; - }); + return log; + }) + .filter((log) => log); } parseTransactionEvents (receipt) { @@ -306,7 +309,6 @@ export default class Contract { try { subscriptions[idx].callback(null, this.parseEventLogs(logs)); } catch (error) { - this.unsubscribe(idx); console.error('_sendSubscriptionChanges', error); } }); diff --git a/js/src/api/contract/contract.spec.js b/js/src/api/contract/contract.spec.js index 9065b4fad..3d57c2afa 100644 --- a/js/src/api/contract/contract.spec.js +++ b/js/src/api/contract/contract.spec.js @@ -25,7 +25,7 @@ import Api from '../api'; import Contract from './contract'; import { isInstanceOf, isFunction } from '../util/types'; -const transport = new Api.Transport.Http(TEST_HTTP_URL); +const transport = new Api.Transport.Http(TEST_HTTP_URL, -1); const eth = new Api(transport); describe('api/contract/Contract', () => { @@ -119,19 +119,6 @@ describe('api/contract/Contract', () => { }); describe('parseTransactionEvents', () => { - it('checks for unmatched signatures', () => { - const contract = new Contract(eth, [{ anonymous: false, name: 'Message', type: 'event' }]); - expect(() => contract.parseTransactionEvents({ - logs: 
[{ - data: '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000063cf90d3f0410092fc0fca41846f5962239791950000000000000000000000000000000000000000000000000000000056e6c85f0000000000000000000000000000000000000000000000000001000000004fcd00000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000d706f7374286d6573736167652900000000000000000000000000000000000000', - topics: [ - '0x954ba6c157daf8a26539574ffa64203c044691aa57251af95f4b48d85ec00dd5', - '0x0000000000000000000000000000000000000000000000000001000000004fe0' - ] - }] - })).to.throw(/event matching signature/); - }); - it('parses a transaction log into the data', () => { const contract = new Contract(eth, [ { diff --git a/js/src/api/format/input.js b/js/src/api/format/input.js index b46148cdc..830ca0e21 100644 --- a/js/src/api/format/input.js +++ b/js/src/api/format/input.js @@ -93,6 +93,10 @@ export function inFilter (options) { } export function inHex (str) { + if (str && str.toString) { + str = str.toString(16); + } + if (str && str.substr(0, 2) === '0x') { return str.toLowerCase(); } diff --git a/js/src/api/rpc/db/db.spec.js b/js/src/api/rpc/db/db.spec.js index 4379b51c4..4a11fc416 100644 --- a/js/src/api/rpc/db/db.spec.js +++ b/js/src/api/rpc/db/db.spec.js @@ -19,7 +19,7 @@ import { TEST_HTTP_URL, mockHttp } from '../../../../test/mockRpc'; import Http from '../../transport/http'; import Db from './db'; -const instance = new Db(new Http(TEST_HTTP_URL)); +const instance = new Db(new Http(TEST_HTTP_URL, -1)); describe('api/rpc/Db', () => { let scope; diff --git a/js/src/api/rpc/eth/eth.spec.js b/js/src/api/rpc/eth/eth.spec.js index 65377db50..85d22f4bd 100644 --- a/js/src/api/rpc/eth/eth.spec.js +++ b/js/src/api/rpc/eth/eth.spec.js @@ -20,7 +20,7 @@ import { isBigNumber } from '../../../../test/types'; import Http from '../../transport/http'; import Eth from './eth'; -const instance = new Eth(new Http(TEST_HTTP_URL)); +const instance = new Eth(new Http(TEST_HTTP_URL, -1)); describe('rpc/Eth', () => { const address = '0x63Cf90D3f0410092FC0fca41846f596223979195'; diff --git a/js/src/api/rpc/net/net.spec.js b/js/src/api/rpc/net/net.spec.js index 55029a29e..4903a0cde 100644 --- a/js/src/api/rpc/net/net.spec.js +++ b/js/src/api/rpc/net/net.spec.js @@ -20,7 +20,7 @@ import { isBigNumber } from '../../../../test/types'; import Http from '../../transport/http'; import Net from './net'; -const instance = new Net(new Http(TEST_HTTP_URL)); +const instance = new Net(new Http(TEST_HTTP_URL, -1)); describe('api/rpc/Net', () => { describe('peerCount', () => { diff --git a/js/src/api/rpc/parity/parity.js b/js/src/api/rpc/parity/parity.js index f1739f848..a33828b80 100644 --- a/js/src/api/rpc/parity/parity.js +++ b/js/src/api/rpc/parity/parity.js @@ -60,6 +60,11 @@ export default class Parity { .then(outNumber); } + dappsInterface () { + return this._transport + .execute('parity_dappsInterface'); + } + defaultExtraData () { return this._transport .execute('parity_defaultExtraData'); @@ -176,6 +181,12 @@ export default class Parity { .then(outAddress); } + nextNonce (account) { + return this._transport + .execute('parity_nextNonce', inAddress(account)) + .then(outNumber); + } + nodeName () { return this._transport .execute('parity_nodeName'); diff --git a/js/src/api/rpc/parity/parity.spec.js b/js/src/api/rpc/parity/parity.spec.js index ea0dd8d8c..557314e5c 100644 --- a/js/src/api/rpc/parity/parity.spec.js +++ b/js/src/api/rpc/parity/parity.spec.js @@ 
-20,7 +20,7 @@ import { isBigNumber } from '../../../../test/types'; import Http from '../../transport/http'; import Parity from './parity'; -const instance = new Parity(new Http(TEST_HTTP_URL)); +const instance = new Parity(new Http(TEST_HTTP_URL, -1)); describe('api/rpc/parity', () => { describe('accountsInfo', () => { diff --git a/js/src/api/rpc/personal/personal.spec.js b/js/src/api/rpc/personal/personal.spec.js index a9bf4f644..70c8cf4c2 100644 --- a/js/src/api/rpc/personal/personal.spec.js +++ b/js/src/api/rpc/personal/personal.spec.js @@ -19,7 +19,7 @@ import { TEST_HTTP_URL, mockHttp } from '../../../../test/mockRpc'; import Http from '../../transport/http'; import Personal from './personal'; -const instance = new Personal(new Http(TEST_HTTP_URL)); +const instance = new Personal(new Http(TEST_HTTP_URL, -1)); describe('rpc/Personal', () => { const account = '0x63cf90d3f0410092fc0fca41846f596223979195'; diff --git a/js/src/api/rpc/signer/signer.js b/js/src/api/rpc/signer/signer.js index 7f905cf50..126ce651a 100644 --- a/js/src/api/rpc/signer/signer.js +++ b/js/src/api/rpc/signer/signer.js @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -import { inNumber16 } from '../../format/input'; +import { inNumber16, inData } from '../../format/input'; import { outSignerRequest } from '../../format/output'; export default class Signer { @@ -27,6 +27,11 @@ export default class Signer { .execute('signer_confirmRequest', inNumber16(requestId), options, password); } + confirmRequestRaw (requestId, data) { + return this._transport + .execute('signer_confirmRequestRaw', inNumber16(requestId), inData(data)); + } + generateAuthorizationToken () { return this._transport .execute('signer_generateAuthorizationToken'); diff --git a/js/src/api/rpc/trace/trace.spec.js b/js/src/api/rpc/trace/trace.spec.js index 4a38f7a3f..f36e5537c 100644 --- a/js/src/api/rpc/trace/trace.spec.js +++ b/js/src/api/rpc/trace/trace.spec.js @@ -19,7 +19,7 @@ import { TEST_HTTP_URL, mockHttp } from '../../../../test/mockRpc'; import Http from '../../transport/http'; import Trace from './trace'; -const instance = new Trace(new Http(TEST_HTTP_URL)); +const instance = new Trace(new Http(TEST_HTTP_URL, -1)); describe('api/rpc/Trace', () => { let scope; diff --git a/js/src/api/rpc/web3/web3.spec.js b/js/src/api/rpc/web3/web3.spec.js index eb4a59cd1..b933e805b 100644 --- a/js/src/api/rpc/web3/web3.spec.js +++ b/js/src/api/rpc/web3/web3.spec.js @@ -19,7 +19,7 @@ import { TEST_HTTP_URL, mockHttp } from '../../../../test/mockRpc'; import Http from '../../transport/http'; import Web3 from './web3'; -const instance = new Web3(new Http(TEST_HTTP_URL)); +const instance = new Web3(new Http(TEST_HTTP_URL, -1)); describe('api/rpc/Web3', () => { let scope; diff --git a/js/src/api/subscriptions/manager.js b/js/src/api/subscriptions/manager.js index 08f1a9e53..bc9632592 100644 --- a/js/src/api/subscriptions/manager.js +++ b/js/src/api/subscriptions/manager.js @@ -107,7 +107,6 @@ export default class Manager { callback(error, data); } catch (error) { console.error(`Unable to update callback for subscriptionId ${subscriptionId}`, error); - this.unsubscribe(subscriptionId); } } diff --git a/js/src/api/transport/http/http.js b/js/src/api/transport/http/http.js index 08d9422f8..8ea59f0fb 100644 --- a/js/src/api/transport/http/http.js +++ b/js/src/api/transport/http/http.js @@ -19,11 +19,14 @@ import JsonRpcBase from '../jsonRpcBase'; /* global fetch */ export default class Http 
extends JsonRpcBase { - constructor (url) { + constructor (url, connectTimeout = 1000) { super(); this._connected = true; this._url = url; + this._connectTimeout = connectTimeout; + + this._pollConnection(); } _encodeOptions (method, params) { @@ -77,4 +80,17 @@ export default class Http extends JsonRpcBase { return response.result; }); } + + _pollConnection = () => { + if (this._connectTimeout <= 0) { + return; + } + + const nextTimeout = () => setTimeout(this._pollConnection, this._connectTimeout); + + this + .execute('net_listening') + .then(nextTimeout) + .catch(nextTimeout); + } } diff --git a/js/src/api/transport/http/http.spec.js b/js/src/api/transport/http/http.spec.js index 94441bc51..718a7e66b 100644 --- a/js/src/api/transport/http/http.spec.js +++ b/js/src/api/transport/http/http.spec.js @@ -17,7 +17,7 @@ import { TEST_HTTP_URL, mockHttp } from '../../../../test/mockRpc'; import Http from './http'; -const transport = new Http(TEST_HTTP_URL); +const transport = new Http(TEST_HTTP_URL, -1); describe('api/transport/Http', () => { describe('instance', () => { diff --git a/js/src/contracts/abi/index.js b/js/src/contracts/abi/index.js index 599f8a13b..80f49dc5b 100644 --- a/js/src/contracts/abi/index.js +++ b/js/src/contracts/abi/index.js @@ -24,6 +24,7 @@ import owned from './owned.json'; import registry from './registry.json'; import signaturereg from './signaturereg.json'; import tokenreg from './tokenreg.json'; +import wallet from './wallet.json'; export { basiccoin, @@ -35,5 +36,6 @@ export { owned, registry, signaturereg, - tokenreg + tokenreg, + wallet }; diff --git a/js/src/contracts/abi/wallet.json b/js/src/contracts/abi/wallet.json new file mode 100644 index 000000000..8048d239c --- /dev/null +++ b/js/src/contracts/abi/wallet.json @@ -0,0 +1 @@ 
+[{"constant":false,"inputs":[{"name":"_owner","type":"address"}],"name":"removeOwner","outputs":[],"type":"function"},{"constant":false,"inputs":[{"name":"_addr","type":"address"}],"name":"isOwner","outputs":[{"name":"","type":"bool"}],"type":"function"},{"constant":true,"inputs":[],"name":"m_numOwners","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"constant":true,"inputs":[],"name":"m_lastDay","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"constant":false,"inputs":[],"name":"resetSpentToday","outputs":[],"type":"function"},{"constant":true,"inputs":[],"name":"m_spentToday","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"constant":false,"inputs":[{"name":"_owner","type":"address"}],"name":"addOwner","outputs":[],"type":"function"},{"constant":true,"inputs":[],"name":"m_required","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"constant":false,"inputs":[{"name":"_h","type":"bytes32"}],"name":"confirm","outputs":[{"name":"","type":"bool"}],"type":"function"},{"constant":false,"inputs":[{"name":"_newLimit","type":"uint256"}],"name":"setDailyLimit","outputs":[],"type":"function"},{"constant":false,"inputs":[{"name":"_to","type":"address"},{"name":"_value","type":"uint256"},{"name":"_data","type":"bytes"}],"name":"execute","outputs":[{"name":"_r","type":"bytes32"}],"type":"function"},{"constant":false,"inputs":[{"name":"_operation","type":"bytes32"}],"name":"revoke","outputs":[],"type":"function"},{"constant":false,"inputs":[{"name":"_newRequired","type":"uint256"}],"name":"changeRequirement","outputs":[],"type":"function"},{"constant":true,"inputs":[{"name":"_operation","type":"bytes32"},{"name":"_owner","type":"address"}],"name":"hasConfirmed","outputs":[{"name":"","type":"bool"}],"type":"function"},{"constant":true,"inputs":[{"name":"ownerIndex","type":"uint256"}],"name":"getOwner","outputs":[{"name":"","type":"address"}],"type":"function"},{"constant":false,"inputs":[{"name":"_to","type":"address"}],"name":"kill","outputs":[],"type":"function"},{"constant":false,"inputs":[{"name":"_from","type":"address"},{"name":"_to","type":"address"}],"name":"changeOwner","outputs":[],"type":"function"},{"constant":true,"inputs":[],"name":"m_dailyLimit","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"inputs":[{"name":"_owners","type":"address[]"},{"name":"_required","type":"uint256"},{"name":"_daylimit","type":"uint256"}],"type":"constructor"},{"anonymous":false,"inputs":[{"indexed":false,"name":"owner","type":"address"},{"indexed":false,"name":"operation","type":"bytes32"}],"name":"Confirmation","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"owner","type":"address"},{"indexed":false,"name":"operation","type":"bytes32"}],"name":"Revoke","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"oldOwner","type":"address"},{"indexed":false,"name":"newOwner","type":"address"}],"name":"OwnerChanged","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"newOwner","type":"address"}],"name":"OwnerAdded","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"oldOwner","type":"address"}],"name":"OwnerRemoved","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"newRequirement","type":"uint256"}],"name":"RequirementChanged","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"_from","type":"address"},{"indexed":false,"name":"value","type":"uint256"}],"name":"Deposit","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"owner","ty
pe":"address"},{"indexed":false,"name":"value","type":"uint256"},{"indexed":false,"name":"to","type":"address"},{"indexed":false,"name":"data","type":"bytes"}],"name":"SingleTransact","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"owner","type":"address"},{"indexed":false,"name":"operation","type":"bytes32"},{"indexed":false,"name":"value","type":"uint256"},{"indexed":false,"name":"to","type":"address"},{"indexed":false,"name":"data","type":"bytes"}],"name":"MultiTransact","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"operation","type":"bytes32"},{"indexed":false,"name":"initiator","type":"address"},{"indexed":false,"name":"value","type":"uint256"},{"indexed":false,"name":"to","type":"address"},{"indexed":false,"name":"data","type":"bytes"}],"name":"ConfirmationNeeded","type":"event"}] diff --git a/js/src/contracts/snippets/human-standard-token.sol b/js/src/contracts/snippets/human-standard-token.sol new file mode 100644 index 000000000..db05bbc7d --- /dev/null +++ b/js/src/contracts/snippets/human-standard-token.sol @@ -0,0 +1,60 @@ +/* +This Token Contract implements the standard token functionality (https://github.com/ethereum/EIPs/issues/20) as well as the following OPTIONAL extras intended for use by humans. + +In other words. This is intended for deployment in something like a Token Factory or Mist wallet, and then used by humans. +Imagine coins, currencies, shares, voting weight, etc. +Machine-based, rapid creation of many tokens would not necessarily need these extra features or will be minted in other manners. + +1) Initial Finite Supply (upon creation one specifies how much is minted). +2) In the absence of a token registry: Optional Decimal, Symbol & Name. +3) Optional approveAndCall() functionality to notify a contract if an approval() has occurred. + +.*/ + +import "StandardToken.sol"; + +contract HumanStandardToken is StandardToken { + + function () { + //if ether is sent to this address, send it back. + throw; + } + + /* Public variables of the token */ + + /* + NOTE: + The following variables are OPTIONAL vanities. One does not have to include them. + They allow one to customise the token contract & in no way influences the core functionality. + Some wallets/interfaces might not even bother to look at this information. + */ + string public name; //fancy name: eg Simon Bucks + uint8 public decimals; //How many decimals to show. ie. There could 1000 base units with 3 decimals. Meaning 0.980 SBX = 980 base units. It's like comparing 1 wei to 1 ether. + string public symbol; //An identifier: eg SBX + string public version = 'H0.1'; //human 0.1 standard. Just an arbitrary versioning scheme. + + function HumanStandardToken( + uint256 _initialAmount, + string _tokenName, + uint8 _decimalUnits, + string _tokenSymbol + ) { + balances[msg.sender] = _initialAmount; // Give the creator all initial tokens + totalSupply = _initialAmount; // Update total supply + name = _tokenName; // Set the name for display purposes + decimals = _decimalUnits; // Amount of decimals for display purposes + symbol = _tokenSymbol; // Set the symbol for display purposes + } + + /* Approves and then calls the receiving contract */ + function approveAndCall(address _spender, uint256 _value, bytes _extraData) returns (bool success) { + allowed[msg.sender][_spender] = _value; + Approval(msg.sender, _spender, _value); + + //call the receiveApproval function on the contract you want to be notified. 
This crafts the function signature manually so one doesn't have to include a contract in here just for this. + //receiveApproval(address _from, uint256 _value, address _tokenContract, bytes _extraData) + //it is assumed that when does this that the call *should* succeed, otherwise one would use vanilla approve instead. + if(!_spender.call(bytes4(bytes32(sha3("receiveApproval(address,uint256,address,bytes)"))), msg.sender, _value, this, _extraData)) { throw; } + return true; + } +} diff --git a/js/src/contracts/snippets/standard-token.sol b/js/src/contracts/snippets/standard-token.sol new file mode 100644 index 000000000..3d91e5510 --- /dev/null +++ b/js/src/contracts/snippets/standard-token.sol @@ -0,0 +1,55 @@ +/* +You should inherit from StandardToken or, for a token like you would want to +deploy in something like Mist, see HumanStandardToken.sol. +(This implements ONLY the standard functions and NOTHING else. +If you deploy this, you won't have anything useful.) + +Implements ERC 20 Token standard: https://github.com/ethereum/EIPs/issues/20 +.*/ + +import "Token.sol"; + +contract StandardToken is Token { + + function transfer(address _to, uint256 _value) returns (bool success) { + //Default assumes totalSupply can't be over max (2^256 - 1). + //If your token leaves out totalSupply and can issue more tokens as time goes on, you need to check if it doesn't wrap. + //Replace the if with this one instead. + //if (balances[msg.sender] >= _value && balances[_to] + _value > balances[_to]) { + if (balances[msg.sender] >= _value && _value > 0) { + balances[msg.sender] -= _value; + balances[_to] += _value; + Transfer(msg.sender, _to, _value); + return true; + } else { return false; } + } + + function transferFrom(address _from, address _to, uint256 _value) returns (bool success) { + //same as above. Replace this line with the following if you want to protect against wrapping uints. + //if (balances[_from] >= _value && allowed[_from][msg.sender] >= _value && balances[_to] + _value > balances[_to]) { + if (balances[_from] >= _value && allowed[_from][msg.sender] >= _value && _value > 0) { + balances[_to] += _value; + balances[_from] -= _value; + allowed[_from][msg.sender] -= _value; + Transfer(_from, _to, _value); + return true; + } else { return false; } + } + + function balanceOf(address _owner) constant returns (uint256 balance) { + return balances[_owner]; + } + + function approve(address _spender, uint256 _value) returns (bool success) { + allowed[msg.sender][_spender] = _value; + Approval(msg.sender, _spender, _value); + return true; + } + + function allowance(address _owner, address _spender) constant returns (uint256 remaining) { + return allowed[_owner][_spender]; + } + + mapping (address => uint256) balances; + mapping (address => mapping (address => uint256)) allowed; +} diff --git a/js/src/contracts/snippets/token.sol b/js/src/contracts/snippets/token.sol new file mode 100644 index 000000000..d54c5c424 --- /dev/null +++ b/js/src/contracts/snippets/token.sol @@ -0,0 +1,47 @@ +// Abstract contract for the full ERC 20 Token standard +// https://github.com/ethereum/EIPs/issues/20 + +contract Token { + /* This is a slight change to the ERC20 base standard. + function totalSupply() constant returns (uint256 supply); + is replaced with: + uint256 public totalSupply; + This automatically creates a getter function for the totalSupply. 
+ This is moved to the base contract since public getter functions are not + currently recognised as an implementation of the matching abstract + function by the compiler. + */ + /// total amount of tokens + uint256 public totalSupply; + + /// @param _owner The address from which the balance will be retrieved + /// @return The balance + function balanceOf(address _owner) constant returns (uint256 balance); + + /// @notice send `_value` token to `_to` from `msg.sender` + /// @param _to The address of the recipient + /// @param _value The amount of token to be transferred + /// @return Whether the transfer was successful or not + function transfer(address _to, uint256 _value) returns (bool success); + + /// @notice send `_value` token to `_to` from `_from` on the condition it is approved by `_from` + /// @param _from The address of the sender + /// @param _to The address of the recipient + /// @param _value The amount of token to be transferred + /// @return Whether the transfer was successful or not + function transferFrom(address _from, address _to, uint256 _value) returns (bool success); + + /// @notice `msg.sender` approves `_addr` to spend `_value` tokens + /// @param _spender The address of the account able to transfer the tokens + /// @param _value The amount of wei to be approved for transfer + /// @return Whether the approval was successful or not + function approve(address _spender, uint256 _value) returns (bool success); + + /// @param _owner The address of the account owning tokens + /// @param _spender The address of the account able to transfer the tokens + /// @return Amount of remaining tokens allowed to spent + function allowance(address _owner, address _spender) constant returns (uint256 remaining); + + event Transfer(address indexed _from, address indexed _to, uint256 _value); + event Approval(address indexed _owner, address indexed _spender, uint256 _value); +} diff --git a/js/src/dapps/basiccoin/Deploy/Deployment/deployment.js b/js/src/dapps/basiccoin/Deploy/Deployment/deployment.js index 0fa7dc863..be08d616d 100644 --- a/js/src/dapps/basiccoin/Deploy/Deployment/deployment.js +++ b/js/src/dapps/basiccoin/Deploy/Deployment/deployment.js @@ -148,7 +148,7 @@ export default class Deployment extends Component { addresses={ addresses } onChange={ this.onChangeFrom } />
- the owner account to eploy from + the owner account to deploy from
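Illustrative usage sketch (not part of the changeset): the snippets added under js/src/contracts/snippets spell out the standard ERC 20 interface, an eip20 ABI already ships in js/src/contracts/abi (it is imported by the new AddContract modal further down) alongside the new wallet ABI, and the Http transport above now takes a connectTimeout argument whose non-positive values (the -1 passed throughout the updated *.spec.js files) disable the periodic net_listening poll. A minimal parity.js sketch tying these together; the endpoint, import paths and addresses are placeholders, not part of the diff.

// Sketch only -- the endpoint, import paths and addresses below are assumptions.
import Api from './api';                   // js/src/api
import { eip20 } from './contracts/abi';   // js/src/contracts/abi

// The second constructor argument is the new connectTimeout (ms); pass -1,
// as the updated specs do, to skip the recurring net_listening check.
const transport = new Api.Transport.Http('http://127.0.0.1:8545', -1);
const api = new Api(transport);

// Attach the ERC 20 ABI to a hypothetical token address and read a balance.
const token = api.newContract(eip20, '0x0000000000000000000000000000000000000000');

token.instance.balanceOf
  .call({}, ['0x0000000000000000000000000000000000000001'])
  .then((balance) => console.log('balance', balance.toFormat()));

// The approveAndCall snippet hand-builds its call data from a 4-byte selector;
// the equivalent value can be derived in JS with api.util.sha3.
const selector = api.util.sha3('receiveApproval(address,uint256,address,bytes)').slice(0, 10);
console.log('selector', selector);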
diff --git a/js/src/dapps/githubhint/services.js b/js/src/dapps/githubhint/services.js index c9d260b73..d4f7fc6b2 100644 --- a/js/src/dapps/githubhint/services.js +++ b/js/src/dapps/githubhint/services.js @@ -28,26 +28,26 @@ export function attachInterface () { return Promise .all([ registry.getAddress.call({}, [api.util.sha3('githubhint'), 'A']), - api.eth.accounts(), api.parity.accounts() ]); }) - .then(([address, addresses, accountsInfo]) => { - accountsInfo = accountsInfo || {}; + .then(([address, accountsInfo]) => { console.log(`githubhint was found at ${address}`); const contract = api.newContract(abis.githubhint, address); - const accounts = addresses.reduce((obj, address) => { - const info = accountsInfo[address] || {}; + const accounts = Object + .keys(accountsInfo) + .filter((address) => accountsInfo[address].uuid) + .reduce((obj, address) => { + const account = accountsInfo[address]; - return Object.assign(obj, { - [address]: { - address, - name: info.name, - uuid: info.uuid - } - }); - }, {}); + return Object.assign(obj, { + [address]: { + address, + name: account.name + } + }); + }, {}); const fromAddress = Object.keys(accounts)[0]; return { diff --git a/js/src/dapps/registry/Application/application.css b/js/src/dapps/registry/Application/application.css index ebdb23baa..b46afbcf7 100644 --- a/js/src/dapps/registry/Application/application.css +++ b/js/src/dapps/registry/Application/application.css @@ -49,3 +49,15 @@ padding-bottom: 0 !important; } } + +.warning { + background: #f80; + bottom: 0; + color: #fff; + left: 0; + opacity: 1; + padding: 1.5em; + position: fixed; + right: 50%; + z-index: 100; +} diff --git a/js/src/dapps/registry/Application/application.js b/js/src/dapps/registry/Application/application.js index e763069a5..5102e5d57 100644 --- a/js/src/dapps/registry/Application/application.js +++ b/js/src/dapps/registry/Application/application.js @@ -53,6 +53,7 @@ export default class Application extends Component { }; render () { + const { api } = window.parity; const { actions, accounts, contacts, @@ -60,9 +61,11 @@ export default class Application extends Component { lookup, events } = this.props; + let warning = null; return (
+ { warning }

RΞgistry

@@ -70,13 +73,11 @@ export default class Application extends Component { { contract && fee ? (
- { this.renderActions() } - -

- The Registry is provided by the contract at { contract.address }. -

+
+ WARNING: The name registry is experimental. Please ensure that you understand the risks, benefits & consequences of registering a name before doing so. A non-refundable fee of { api.util.fromWei(fee).toFormat(3) }ETH is required for all registrations. +
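A pattern repeated across the dapps in this changeset (githubhint above; the registry addresses store, signaturereg and tokenreg further down) is to derive account data from parity_accounts alone rather than combining eth_accounts with parity_accounts, using the presence of a uuid to identify locally owned accounts. A condensed sketch of that pattern, assuming an attached api instance; the final call uses the parity_nextNonce wrapper added to js/src/api/rpc/parity earlier in the diff.

// Sketch only: derive local accounts from parity_accounts and pick a sender.
api.parity
  .accounts()
  .then((accountsInfo) => {
    const accounts = Object
      .keys(accountsInfo)
      .filter((address) => accountsInfo[address].uuid)   // uuid => locally owned
      .reduce((obj, address) => {
        obj[address] = { address, name: accountsInfo[address].name };
        return obj;
      }, {});

    const fromAddress = Object.keys(accounts)[0];

    // parity_nextNonce takes the pending block and the transaction queue into account.
    return api.parity.nextNonce(fromAddress);
  })
  .then((nonce) => console.log('next nonce', nonce.toFormat()));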
) : ( diff --git a/js/src/dapps/registry/addresses/actions.js b/js/src/dapps/registry/addresses/actions.js index 2341d716c..666196e88 100644 --- a/js/src/dapps/registry/addresses/actions.js +++ b/js/src/dapps/registry/addresses/actions.js @@ -19,18 +19,16 @@ import { api } from '../parity'; export const set = (addresses) => ({ type: 'addresses set', addresses }); export const fetch = () => (dispatch) => { - return Promise - .all([ - api.eth.accounts(), - api.parity.accounts() - ]) - .then(([ accounts, data ]) => { - data = data || {}; - const addresses = Object.keys(data) - .filter((address) => data[address] && !data[address].meta.deleted) + return api.parity + .accounts() + .then((accountsInfo) => { + const addresses = Object + .keys(accountsInfo) + .filter((address) => accountsInfo[address] && !accountsInfo[address].meta.deleted) .map((address) => ({ - ...data[address], address, - isAccount: accounts.includes(address) + ...accountsInfo[address], + address, + isAccount: !!accountsInfo[address].uuid })); dispatch(set(addresses)); }) diff --git a/js/src/dapps/signaturereg/Import/import.js b/js/src/dapps/signaturereg/Import/import.js index dcf2b3f98..90edf9415 100644 --- a/js/src/dapps/signaturereg/Import/import.js +++ b/js/src/dapps/signaturereg/Import/import.js @@ -146,7 +146,7 @@ export default class Import extends Component { } sortFunctions = (a, b) => { - return a.name.localeCompare(b.name); + return (a.name || '').localeCompare(b.name || ''); } countFunctions () { diff --git a/js/src/dapps/signaturereg/services.js b/js/src/dapps/signaturereg/services.js index 54394c4b8..eab498fc4 100644 --- a/js/src/dapps/signaturereg/services.js +++ b/js/src/dapps/signaturereg/services.js @@ -49,26 +49,26 @@ export function attachInterface (callback) { return Promise .all([ registry.getAddress.call({}, [api.util.sha3('signaturereg'), 'A']), - api.eth.accounts(), api.parity.accounts() ]); }) - .then(([address, addresses, accountsInfo]) => { - accountsInfo = accountsInfo || {}; + .then(([address, accountsInfo]) => { console.log(`signaturereg was found at ${address}`); const contract = api.newContract(abis.signaturereg, address); - const accounts = addresses.reduce((obj, address) => { - const info = accountsInfo[address] || {}; + const accounts = Object + .keys(accountsInfo) + .filter((address) => accountsInfo[address].uuid) + .reduce((obj, address) => { + const info = accountsInfo[address] || {}; - return Object.assign(obj, { - [address]: { - address, - name: info.name || 'Unnamed', - uuid: info.uuid - } - }); - }, {}); + return Object.assign(obj, { + [address]: { + address, + name: info.name || 'Unnamed' + } + }); + }, {}); const fromAddress = Object.keys(accounts)[0]; return { diff --git a/js/src/dapps/tokenreg/Accounts/AccountSelector/account-selector.js b/js/src/dapps/tokenreg/Accounts/AccountSelector/account-selector.js index 4c8525d7e..4d29a6692 100644 --- a/js/src/dapps/tokenreg/Accounts/AccountSelector/account-selector.js +++ b/js/src/dapps/tokenreg/Accounts/AccountSelector/account-selector.js @@ -70,7 +70,8 @@ export default class AccountSelector extends Component { static propTypes = { list: PropTypes.array.isRequired, selected: PropTypes.object.isRequired, - handleSetSelected: PropTypes.func.isRequired + handleSetSelected: PropTypes.func.isRequired, + onAccountChange: PropTypes.func }; state = { @@ -85,7 +86,8 @@ export default class AccountSelector extends Component { nestedItems={ nestedAccounts } open={ this.state.open } onSelectAccount={ this.onToggleOpen } - 
autoGenerateNestedIndicator={ false } /> + autoGenerateNestedIndicator={ false } + nestedListStyle={ { maxHeight: '14em', overflow: 'auto' } } /> ); return ( @@ -110,6 +112,10 @@ export default class AccountSelector extends Component { onToggleOpen = () => { this.setState({ open: !this.state.open }); + + if (typeof this.props.onAccountChange === 'function') { + this.props.onAccountChange(); + } } onSelectAccount = (address) => { diff --git a/js/src/dapps/tokenreg/Accounts/actions.js b/js/src/dapps/tokenreg/Accounts/actions.js index 58a74dfd8..a310baf7d 100644 --- a/js/src/dapps/tokenreg/Accounts/actions.js +++ b/js/src/dapps/tokenreg/Accounts/actions.js @@ -35,16 +35,13 @@ export const setSelectedAccount = (address) => ({ }); export const loadAccounts = () => (dispatch) => { - Promise - .all([ - api.eth.accounts(), - api.parity.accounts() - ]) - .then(([ accounts, accountsInfo ]) => { - accountsInfo = accountsInfo || {}; - - const accountsList = accounts - .map(address => ({ + api.parity + .accounts() + .then((accountsInfo) => { + const accountsList = Object + .keys(accountsInfo) + .filter((address) => accountsInfo[address].uuid) + .map((address) => ({ ...accountsInfo[address], address })); diff --git a/js/src/dapps/tokenreg/Actions/Register/register.js b/js/src/dapps/tokenreg/Actions/Register/register.js index 5eb2d9e1b..8ae042494 100644 --- a/js/src/dapps/tokenreg/Actions/Register/register.js +++ b/js/src/dapps/tokenreg/Actions/Register/register.js @@ -81,6 +81,7 @@ export default class RegisterAction extends Component { className={ styles.dialog } onRequestClose={ this.onClose } actions={ this.renderActions() } + ref='dialog' autoScrollBodyContent > { this.renderContent() } @@ -149,7 +150,9 @@ export default class RegisterAction extends Component { renderForm () { return (
- + { this.renderInputs() }
); @@ -175,6 +178,11 @@ export default class RegisterAction extends Component { }); } + onAccountChange = () => { + const { dialog } = this.refs; + dialog.forceUpdate(); + } + onChange (fieldKey, valid, value) { const { fields } = this.state; const field = fields[fieldKey]; diff --git a/js/src/dapps/tokenreg/Application/application.css b/js/src/dapps/tokenreg/Application/application.css index 07bc74b40..033147ae3 100644 --- a/js/src/dapps/tokenreg/Application/application.css +++ b/js/src/dapps/tokenreg/Application/application.css @@ -20,3 +20,15 @@ flex-direction: column; align-items: center; } + +.warning { + background: #f80; + bottom: 0; + color: #fff; + left: 0; + opacity: 1; + padding: 1.5em; + position: fixed; + right: 50%; + z-index: 100; +} diff --git a/js/src/dapps/tokenreg/Application/application.js b/js/src/dapps/tokenreg/Application/application.js index e48922b05..6a94f5c9c 100644 --- a/js/src/dapps/tokenreg/Application/application.js +++ b/js/src/dapps/tokenreg/Application/application.js @@ -17,6 +17,8 @@ import React, { Component, PropTypes } from 'react'; import getMuiTheme from 'material-ui/styles/getMuiTheme'; +import { api } from '../parity'; + import Loading from '../Loading'; import Status from '../Status'; import Tokens from '../Tokens'; @@ -59,6 +61,9 @@ export default class Application extends Component { +
+ WARNING: The token registry is experimental. Please ensure that you understand the steps, risks, benefits & consequences of registering a token before doing so. A non-refundable fee of { api.util.fromWei(contract.fee).toFormat(3) }ETH is required for all registrations. +
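Both warnings format the registration fee with api.util.fromWei(fee).toFormat(3), where fee is a wei-denominated BigNumber read from the respective registry contract. A hedged sketch of that display logic, assuming contract is a parity.js Contract attached to a registry exposing a public fee accessor (an assumption for illustration, not something this diff adds).

// Sketch only -- assumes the attached registry contract has a public `fee` getter.
contract.instance.fee
  .call({}, [])
  .then((fee) => {
    // fromWei converts wei to ether; toFormat(3) renders three decimal places,
    // e.g. '0.050' for a 0.05 ETH fee.
    console.log(`A non-refundable fee of ${api.util.fromWei(fee).toFormat(3)} ETH is required.`);
  });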
); } diff --git a/js/src/dapps/tokenreg/Inputs/validation.js b/js/src/dapps/tokenreg/Inputs/validation.js index b2e0688a8..38eba5ef1 100644 --- a/js/src/dapps/tokenreg/Inputs/validation.js +++ b/js/src/dapps/tokenreg/Inputs/validation.js @@ -75,7 +75,7 @@ const validateTokenAddress = (address, contract, simple) => { return getTokenTotalSupply(address) .then(balance => { - if (balance === null) { + if (balance === null || balance.equals(0)) { return { error: ERRORS.invalidTokenAddress, valid: false diff --git a/js/src/dapps/tokenreg/Status/status.css b/js/src/dapps/tokenreg/Status/status.css index 27ef53607..7333194b7 100644 --- a/js/src/dapps/tokenreg/Status/status.css +++ b/js/src/dapps/tokenreg/Status/status.css @@ -31,6 +31,12 @@ .title { font-size: 3rem; font-weight: 300; - margin-top: 0; + margin: 0; text-transform: uppercase; } + +.byline { + font-size: 1.25em; + opacity: 0.75; + margin: 0 0 1.75em 0; +} diff --git a/js/src/dapps/tokenreg/Status/status.js b/js/src/dapps/tokenreg/Status/status.js index f8c7b347a..4ca47a6ea 100644 --- a/js/src/dapps/tokenreg/Status/status.js +++ b/js/src/dapps/tokenreg/Status/status.js @@ -29,17 +29,12 @@ export default class Status extends Component { }; render () { - const { address, fee } = this.props; + const { fee } = this.props; return (

Token Registry

- - - +

A global registry of all recognised tokens on the network

{ if (!token || !token.tla) { @@ -61,7 +61,8 @@ export default class Tokens extends Component { handleMetaLookup={ this.props.handleMetaLookup } handleAddMeta={ this.props.handleAddMeta } key={ index } - isTokenOwner={ isTokenOwner } /> + isTokenOwner={ isTokenOwner } + isContractOwner={ isOwner } /> ); }); } diff --git a/js/src/index.js b/js/src/index.js index 966e2708e..c0f4f94ad 100644 --- a/js/src/index.js +++ b/js/src/index.js @@ -31,7 +31,7 @@ import ContractInstances from './contracts'; import { initStore } from './redux'; import { ContextProvider, muiTheme } from './ui'; -import { Accounts, Account, Addresses, Address, Application, Contract, Contracts, Dapp, Dapps, Settings, SettingsBackground, SettingsParity, SettingsProxy, SettingsViews, Signer, Status } from './views'; +import { Accounts, Account, Addresses, Address, Application, Contract, Contracts, WriteContract, Dapp, Dapps, Settings, SettingsBackground, SettingsParity, SettingsProxy, SettingsViews, Signer, Status } from './views'; import { setApi } from './redux/providers/apiActions'; @@ -76,6 +76,7 @@ ReactDOM.render( + diff --git a/js/src/jsonrpc/interfaces/parity.js b/js/src/jsonrpc/interfaces/parity.js index 66a8ea962..5dd313e00 100644 --- a/js/src/jsonrpc/interfaces/parity.js +++ b/js/src/jsonrpc/interfaces/parity.js @@ -109,6 +109,15 @@ export default { } }, + dappsInterface: { + desc: 'Returns the interface the dapps are running on, error if not enabled', + params: [], + returns: { + type: String, + desc: 'The interface' + } + }, + defaultExtraData: { desc: 'Returns the default extra data', params: [], @@ -347,6 +356,20 @@ export default { } }, + nextNonce: { + desc: 'Returns next available nonce for transaction from given account. Includes pending block and transaction queue.', + params: [ + { + type: Address, + desc: 'Account' + } + ], + returns: { + type: Quantity, + desc: 'Next valid nonce' + } + }, + nodeName: { desc: 'Returns node name (identity)', params: [], diff --git a/js/src/jsonrpc/interfaces/signer.js b/js/src/jsonrpc/interfaces/signer.js index f394dbb61..f50bb1115 100644 --- a/js/src/jsonrpc/interfaces/signer.js +++ b/js/src/jsonrpc/interfaces/signer.js @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -import { Quantity } from '../types'; +import { Quantity, Data } from '../types'; export default { generateAuthorizationToken: { @@ -57,6 +57,24 @@ export default { } }, + confirmRequestRaw: { + desc: 'Confirm a request in the signer queue providing signed request.', + params: [ + { + type: Quantity, + desc: 'The request id' + }, + { + type: Data, + desc: 'Signed request (transaction RLP)' + } + ], + returns: { + type: Boolean, + desc: 'The status of the confirmation' + } + }, + rejectRequest: { desc: 'Rejects a request in the signer queue', params: [ diff --git a/js/src/modals/AddContract/addContract.css b/js/src/modals/AddContract/addContract.css new file mode 100644 index 000000000..ed92a86d5 --- /dev/null +++ b/js/src/modals/AddContract/addContract.css @@ -0,0 +1,32 @@ +/* Copyright 2015, 2016 Ethcore (UK) Ltd. +/* This file is part of Parity. +/* +/* Parity is free software: you can redistribute it and/or modify +/* it under the terms of the GNU General Public License as published by +/* the Free Software Foundation, either version 3 of the License, or +/* (at your option) any later version. 
+/* +/* Parity is distributed in the hope that it will be useful, +/* but WITHOUT ANY WARRANTY; without even the implied warranty of +/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +/* GNU General Public License for more details. +/* +/* You should have received a copy of the GNU General Public License +/* along with Parity. If not, see . +*/ + +.spaced { + margin: 0.25em 0; +} + +.typeContainer { + display: flex; + flex-direction: column; + + .desc { + font-size: 0.8em; + margin-bottom: 0.5em; + color: #ccc; + z-index: 2; + } +} diff --git a/js/src/modals/AddContract/addContract.js b/js/src/modals/AddContract/addContract.js index 418378136..57773d2dc 100644 --- a/js/src/modals/AddContract/addContract.js +++ b/js/src/modals/AddContract/addContract.js @@ -17,10 +17,38 @@ import React, { Component, PropTypes } from 'react'; import ContentAdd from 'material-ui/svg-icons/content/add'; import ContentClear from 'material-ui/svg-icons/content/clear'; +import NavigationArrowForward from 'material-ui/svg-icons/navigation/arrow-forward'; +import NavigationArrowBack from 'material-ui/svg-icons/navigation/arrow-back'; + +import { RadioButton, RadioButtonGroup } from 'material-ui/RadioButton'; import { Button, Modal, Form, Input, InputAddress } from '../../ui'; import { ERRORS, validateAbi, validateAddress, validateName } from '../../util/validation'; +import { eip20, wallet } from '../../contracts/abi'; +import styles from './addContract.css'; + +const ABI_TYPES = [ + { + label: 'Token', readOnly: true, value: JSON.stringify(eip20), + type: 'token', + description: (A standard
ERC 20 token) + }, + { + label: 'Multisig Wallet', readOnly: true, + type: 'multisig', + value: JSON.stringify(wallet), + description: (Official Multisig contract: see contract code) + }, + { + label: 'Custom Contract', value: '', + type: 'custom', + description: 'Contract created from custom ABI' + } +]; + +const STEPS = [ 'choose a contract type', 'enter contract details' ]; + export default class AddContract extends Component { static contextTypes = { api: PropTypes.object.isRequired @@ -34,44 +62,101 @@ export default class AddContract extends Component { state = { abi: '', abiError: ERRORS.invalidAbi, + abiType: ABI_TYPES[2], + abiTypeIndex: 2, abiParsed: null, address: '', addressError: ERRORS.invalidAddress, name: '', nameError: ERRORS.invalidName, - description: '' + description: '', + step: 0 }; + componentDidMount () { + this.onChangeABIType(null, this.state.abiTypeIndex); + } + render () { + const { step } = this.state; + return ( - { this.renderFields() } + steps={ STEPS } + current={ step } + > + { this.renderStep(step) } ); } + renderStep (step) { + switch (step) { + case 0: + return this.renderContractTypeSelector(); + default: + return this.renderFields(); + } + } + + renderContractTypeSelector () { + const { abiTypeIndex } = this.state; + + return ( + + { this.renderAbiTypes() } + + ); + } + renderDialogActions () { - const { addressError, nameError } = this.state; + const { addressError, nameError, step } = this.state; const hasError = !!(addressError || nameError); - return ([ + const cancelBtn = (
+ ) } + key={ index } + /> + )); + } - this.setState(validateAbi(abi, api)); + onNext = () => { + this.setState({ step: this.state.step + 1 }); + } + + onPrev = () => { + this.setState({ step: this.state.step - 1 }); + } + + onChangeABIType = (event, index) => { + const abiType = ABI_TYPES[index]; + this.setState({ abiTypeIndex: index, abiType }); + this.onEditAbi(abiType.value); + } + + onEditAbi = (abiIn) => { + const { api } = this.context; + const { abi, abiError, abiParsed } = validateAbi(abiIn, api); + this.setState({ abi, abiError, abiParsed }); + } + + onChangeAddress = (event, value) => { + this.onEditAddress(value); } onEditAddress = (_address) => { @@ -138,7 +262,7 @@ export default class AddContract extends Component { onAdd = () => { const { api } = this.context; - const { abiParsed, address, name, description } = this.state; + const { abiParsed, address, name, description, abiType } = this.state; Promise.all([ api.parity.setAccountName(address, name), @@ -147,6 +271,7 @@ export default class AddContract extends Component { deleted: false, timestamp: Date.now(), abi: abiParsed, + type: abiType.type, description }) ]).catch((error) => { diff --git a/js/src/modals/CreateAccount/AccountDetails/accountDetails.js b/js/src/modals/CreateAccount/AccountDetails/accountDetails.js index 646c7c180..14c858c06 100644 --- a/js/src/modals/CreateAccount/AccountDetails/accountDetails.js +++ b/js/src/modals/CreateAccount/AccountDetails/accountDetails.js @@ -58,7 +58,7 @@ export default class AccountDetails extends Component { readOnly allowCopy hint='the account recovery phrase' - label='account recovery phrase (keep safe)' + label='owner recovery phrase (keep private and secure, it allows full and unlimited access to the account)' value={ phrase } /> ); } diff --git a/js/src/modals/DeployContract/DetailsStep/detailsStep.js b/js/src/modals/DeployContract/DetailsStep/detailsStep.js index e0f02bc70..854715396 100644 --- a/js/src/modals/DeployContract/DetailsStep/detailsStep.js +++ b/js/src/modals/DeployContract/DetailsStep/detailsStep.js @@ -25,7 +25,7 @@ import styles from '../deployContract.css'; export default class DetailsStep extends Component { static contextTypes = { api: PropTypes.object.isRequired - } + }; static propTypes = { accounts: PropTypes.object.isRequired, @@ -46,16 +46,33 @@ export default class DetailsStep extends Component { onFromAddressChange: PropTypes.func.isRequired, onDescriptionChange: PropTypes.func.isRequired, onNameChange: PropTypes.func.isRequired, - onParamsChange: PropTypes.func.isRequired - } + onParamsChange: PropTypes.func.isRequired, + readOnly: PropTypes.bool + }; + + static defaultProps = { + readOnly: false + }; state = { inputs: [] } + componentDidMount () { + const { abi, code } = this.props; + + if (abi) { + this.onAbiChange(abi); + } + + if (code) { + this.onCodeChange(code); + } + } + render () { const { accounts } = this.props; - const { abi, abiError, code, codeError, fromAddress, fromAddressError, name, nameError } = this.props; + const { abi, abiError, code, codeError, fromAddress, fromAddressError, name, nameError, readOnly } = this.props; return (
@@ -77,13 +94,15 @@ export default class DetailsStep extends Component { hint='the abi of the contract to deploy' error={ abiError } value={ abi } - onSubmit={ this.onAbiChange } /> + onSubmit={ this.onAbiChange } + readOnly={ readOnly } /> + onSubmit={ this.onCodeChange } + readOnly={ readOnly } /> { this.renderConstructorInputs() }
); diff --git a/js/src/modals/DeployContract/deployContract.js b/js/src/modals/DeployContract/deployContract.js index 588d16f6a..768723d1f 100644 --- a/js/src/modals/DeployContract/deployContract.js +++ b/js/src/modals/DeployContract/deployContract.js @@ -36,8 +36,17 @@ export default class DeployContract extends Component { static propTypes = { accounts: PropTypes.object.isRequired, - onClose: PropTypes.func.isRequired - } + onClose: PropTypes.func.isRequired, + abi: PropTypes.string, + code: PropTypes.string, + readOnly: PropTypes.bool, + source: PropTypes.string + }; + + static defaultProps = { + readOnly: false, + source: '' + }; state = { abi: '', @@ -57,6 +66,31 @@ export default class DeployContract extends Component { deployError: null } + componentWillMount () { + const { abi, code } = this.props; + + if (abi && code) { + this.setState({ abi, code }); + } + } + + componentWillReceiveProps (nextProps) { + const { abi, code } = nextProps; + const newState = {}; + + if (abi !== this.props.abi) { + newState.abi = abi; + } + + if (code !== this.props.code) { + newState.code = code; + } + + if (Object.keys(newState).length) { + this.setState(newState); + } + } + render () { const { step, deployError } = this.state; @@ -115,7 +149,7 @@ export default class DeployContract extends Component { } renderStep () { - const { accounts } = this.props; + const { accounts, readOnly } = this.props; const { address, deployError, step, deployState, txhash } = this.state; if (deployError) { @@ -129,6 +163,7 @@ export default class DeployContract extends Component { return ( { const { api, store } = this.context; + const { source } = this.props; const { abiParsed, code, description, name, params, fromAddress } = this.state; const options = { data: code, @@ -219,6 +255,7 @@ export default class DeployContract extends Component { contract: true, timestamp: Date.now(), deleted: false, + source, description }) ]) diff --git a/js/src/modals/LoadContract/index.js b/js/src/modals/LoadContract/index.js new file mode 100644 index 000000000..be5e62af5 --- /dev/null +++ b/js/src/modals/LoadContract/index.js @@ -0,0 +1,17 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +export default from './loadContract'; diff --git a/js/src/modals/LoadContract/loadContract.css b/js/src/modals/LoadContract/loadContract.css new file mode 100644 index 000000000..f3144eeb8 --- /dev/null +++ b/js/src/modals/LoadContract/loadContract.css @@ -0,0 +1,52 @@ +/* Copyright 2015, 2016 Ethcore (UK) Ltd. +/* This file is part of Parity. +/* +/* Parity is free software: you can redistribute it and/or modify +/* it under the terms of the GNU General Public License as published by +/* the Free Software Foundation, either version 3 of the License, or +/* (at your option) any later version. 
+/* +/* Parity is distributed in the hope that it will be useful, +/* but WITHOUT ANY WARRANTY; without even the implied warranty of +/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +/* GNU General Public License for more details. +/* +/* You should have received a copy of the GNU General Public License +/* along with Parity. If not, see . +*/ + +.loadContainer { + display: flex; + flex-direction: row; + + > * { + flex: 50%; + width: 0; + } +} + +.editor { + display: flex; + flex-direction: column; + padding-left: 1em; + + p { + line-height: 48px; + height: 48px; + overflow: hidden; + white-space: nowrap; + text-overflow: ellipsis; + + margin: 0; + font-size: 1.2em; + } +} + +.confirmRemoval { + text-align: center; + + .editor { + text-align: left; + margin-top: 0.5em; + } +} diff --git a/js/src/modals/LoadContract/loadContract.js b/js/src/modals/LoadContract/loadContract.js new file mode 100644 index 000000000..3de55561a --- /dev/null +++ b/js/src/modals/LoadContract/loadContract.js @@ -0,0 +1,284 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +import React, { Component, PropTypes } from 'react'; + +import ContentClear from 'material-ui/svg-icons/content/clear'; +import CheckIcon from 'material-ui/svg-icons/navigation/check'; +import DeleteIcon from 'material-ui/svg-icons/action/delete'; + +import { List, ListItem, makeSelectable } from 'material-ui/List'; +import { Subheader, IconButton, Tabs, Tab } from 'material-ui'; +import moment from 'moment'; + +import { Button, Modal, Editor } from '../../ui'; + +import styles from './loadContract.css'; + +const SelectableList = makeSelectable(List); + +const SELECTED_STYLE = { + backgroundColor: 'rgba(255, 255, 255, 0.1)' +}; + +export default class LoadContract extends Component { + + static propTypes = { + onClose: PropTypes.func.isRequired, + onLoad: PropTypes.func.isRequired, + onDelete: PropTypes.func.isRequired, + contracts: PropTypes.object.isRequired, + snippets: PropTypes.object.isRequired + }; + + state = { + selected: -1, + deleteRequest: false, + deleteId: -1 + }; + + render () { + const { deleteRequest } = this.state; + + const title = deleteRequest + ? 'confirm removal' + : 'view contracts'; + + return ( + + { this.renderBody() } + + ); + } + + renderBody () { + if (this.state.deleteRequest) { + return this.renderConfirmRemoval(); + } + + const { contracts, snippets } = this.props; + + const contractsTab = Object.keys(contracts).length === 0 + ? null + : ( + + { this.renderEditor() } + + + Saved Contracts + { this.renderContracts(contracts) } + + + ); + + return ( +
+ + { contractsTab } + + + { this.renderEditor() } + + + Contract Snippets + { this.renderContracts(snippets, false) } + + + +
+ ); + } + + renderConfirmRemoval () { + const { deleteId } = this.state; + const { name, timestamp, sourcecode } = this.props.contracts[deleteId]; + + return ( +
+

+ Are you sure you want to remove the following + contract from your saved contracts? +

+ + +
+ +
+
+ ); + } + + renderEditor () { + const { contracts, snippets } = this.props; + const { selected } = this.state; + + const mergedContracts = Object.assign({}, contracts, snippets); + + if (selected === -1 || !mergedContracts[selected]) { + return null; + } + + const { sourcecode, name } = mergedContracts[selected]; + + return ( +
+

{ name }

+ +
+ ); + } + + renderContracts (contracts, removable = true) { + const { selected } = this.state; + + return Object + .values(contracts) + .map((contract) => { + const { id, name, timestamp, description } = contract; + const onDelete = () => this.onDeleteRequest(id); + + const secondaryText = description || `Saved ${moment(timestamp).fromNow()}`; + const remove = removable + ? ( + + + + ) + : null; + + return ( + + ); + }); + } + + renderDialogActions () { + const { deleteRequest } = this.state; + + if (deleteRequest) { + return [ +
+ ); + } + + renderModal () { + const { title, renderValidation } = this.props; + const { show, step } = this.state; + + if (!show) { + return null; + } + + const hasSteps = typeof renderValidation === 'function'; + + const steps = hasSteps ? [ 'select a file', 'validate' ] : null; + + return ( + + { this.renderBody() } + + ); + } + + renderActions () { + const { validate } = this.state; + + const cancelBtn = ( +
); } + renderDetails (contract) { + const { showDetailsDialog } = this.state; + + if (!showDetailsDialog) { + return null; + } + + const cancelBtn = ( +