diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f929aedda..58036eb88 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,7 +1,6 @@ stages: - build - test - - deploy variables: GIT_DEPTH: "3" SIMPLECOV: "true" @@ -18,18 +17,39 @@ linux-stable: - tags - stable script: - - export - cargo build --release --verbose - strip target/release/parity - - mkdir -p x86_64-unknown-linux-gnu/stable - - cp target/release/parity x86_64-unknown-linux-gnu/stable/parity + - aws configure set aws_access_key_id $s3_key + - aws configure set aws_secret_access_key $s3_secret + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-linux-gnu/parity --body target/release/parity tags: - rust - rust-stable artifacts: paths: - - x86_64-unknown-linux-gnu/stable/parity + - target/release/parity name: "stable-x86_64-unknown-linux-gnu_parity" +linux-stable-14.04: + stage: build + image: ethcore/rust-14.04:latest + only: + - master + - beta + - tags + - stable + script: + - cargo build --release --verbose + - strip target/release/parity + - aws configure set aws_access_key_id $s3_key + - aws configure set aws_secret_access_key $s3_secret + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-ubuntu_14_04-gnu/parity --body target/release/parity + tags: + - rust + - rust-14.04 + artifacts: + paths: + - target/release/parity + name: "stable-x86_64-unknown-ubuntu_14_04-gnu_parity" linux-beta: stage: build image: ethcore/rust:beta @@ -39,17 +59,14 @@ linux-beta: - tags - stable script: - - export - cargo build --release --verbose - strip target/release/parity - - mkdir -p x86_64-unknown-linux-gnu/beta - - cp target/release/parity x86_64-unknown-linux-gnu/beta/parity tags: - rust - rust-beta artifacts: paths: - - x86_64-unknown-linux-gnu/beta/parity + - target/release/parity name: "beta-x86_64-unknown-linux-gnu_parity" allow_failure: true linux-nightly: @@ -63,14 +80,12 @@ linux-nightly: script: - cargo build --release --verbose - strip target/release/parity - - mkdir -p x86_64-unknown-linux-gnu/nightly - - cp target/release/parity x86_64-unknown-linux-gnu/nigthly/parity tags: - rust - rust-nightly artifacts: paths: - - x86_64-unknown-linux-gnu/nigthly/parity + - target/release/parity name: "nigthly-x86_64-unknown-linux-gnu_parity" allow_failure: true linux-centos: @@ -86,25 +101,25 @@ linux-centos: - export CC="gcc" - cargo build --release --verbose - strip target/release/parity - - mkdir -p x86_64-unknown-linux-gnu/centos - - cp target/release/parity x86_64-unknown-linux-gnu/centos/parity + - aws configure set aws_access_key_id $s3_key + - aws configure set aws_secret_access_key $s3_secret + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-unknown-centos-gnu/parity --body target/release/parity tags: - rust - rust-centos artifacts: paths: - - x86_64-unknown-linux-gnu/centos/parity - name: "centos-x86_64-unknown-linux-gnu_parity" + - target/release/parity + name: "x86_64-unknown-centos-gnu_parity" linux-armv7: stage: build - image: ethcore/rust-arm:latest + image: ethcore/rust-armv7:latest only: - master - beta - tags - stable script: - - export - rm -rf .cargo - mkdir -p .cargo - echo "[target.armv7-unknown-linux-gnueabihf]" >> .cargo/config @@ -112,14 +127,15 @@ linux-armv7: - cat .cargo/config - cargo build --target armv7-unknown-linux-gnueabihf --release --verbose - arm-linux-gnueabihf-strip target/armv7-unknown-linux-gnueabihf/release/parity - - mkdir -p armv7_unknown_linux_gnueabihf - - cp target/release/party 
armv7_unknown_linux_gnueabihf/parity + - aws configure set aws_access_key_id $s3_key + - aws configure set aws_secret_access_key $s3_secret + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/armv7-unknown-linux-gnueabihf/parity --body target/armv7-unknown-linux-gnueabihf/release/parity tags: - rust - rust-arm artifacts: paths: - - armv7-unknown-linux-gnueabihf/parity + - target/armv7-unknown-linux-gnueabihf/release/parity name: "armv7_unknown_linux_gnueabihf_parity" allow_failure: true linux-arm: @@ -131,7 +147,6 @@ linux-arm: - tags - stable script: - - export - rm -rf .cargo - mkdir -p .cargo - echo "[target.arm-unknown-linux-gnueabihf]" >> .cargo/config @@ -139,26 +154,26 @@ linux-arm: - cat .cargo/config - cargo build --target arm-unknown-linux-gnueabihf --release --verbose - arm-linux-gnueabihf-strip target/arm-unknown-linux-gnueabihf/release/parity - - mkdir -p arm-unknown-linux-gnueabihf - - cp target/release/parity arm-unknown-linux-gnueabihf/parity + - aws configure set aws_access_key_id $s3_key + - aws configure set aws_secret_access_key $s3_secret + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/arm-unknown-linux-gnueabihf/parity --body target/arm-unknown-linux-gnueabihf/release/parity tags: - rust - rust-arm artifacts: paths: - - arm-unknown-linux-gnueabihf/parity + - target/arm-unknown-linux-gnueabihf/release/parity name: "arm-unknown-linux-gnueabihf_parity" allow_failure: true linux-armv6: stage: build - image: ethcore/rust-arm:latest + image: ethcore/rust-armv6:latest only: - master - beta - tags - stable script: - - export - rm -rf .cargo - mkdir -p .cargo - echo "[target.arm-unknown-linux-gnueabi]" >> .cargo/config @@ -166,26 +181,26 @@ linux-armv6: - cat .cargo/config - cargo build --target arm-unknown-linux-gnueabi --release --verbose - arm-linux-gnueabi-strip target/arm-unknown-linux-gnueabi/release/parity - - mkdir -p arm-unknown-linux-gnueabi - - cp target/release/parity arm-unknown-linux-gnueabi/parity + - aws configure set aws_access_key_id $s3_key + - aws configure set aws_secret_access_key $s3_secret + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/arm-unknown-linux-gnueabi/parity --body target/arm-unknown-linux-gnueabi/release/parity tags: - rust - rust-arm artifacts: paths: - - arm-unknown-linux-gnueabi/parity + - target/arm-unknown-linux-gnueabi/release/parity name: "arm-unknown-linux-gnueabi_parity" allow_failure: true linux-aarch64: stage: build - image: ethcore/rust-arm:latest + image: ethcore/rust-aarch64:latest only: - master - beta - tags - stable script: - - export - rm -rf .cargo - mkdir -p .cargo - echo "[target.aarch64-unknown-linux-gnu]" >> .cargo/config @@ -193,14 +208,15 @@ linux-aarch64: - cat .cargo/config - cargo build --target aarch64-unknown-linux-gnu --release --verbose - aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/parity - - mkdir -p aarch64-unknown-linux-gnu - - cp target/release/parity aarch64-unknown-linux-gnu/parity + - aws configure set aws_access_key_id $s3_key + - aws configure set aws_secret_access_key $s3_secret + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/aarch64-unknown-linux-gnu/parity --body target/aarch64-unknown-linux-gnu/release/parity tags: - rust - rust-arm artifacts: paths: - - aarch64-unknown-linux-gnu/parity + - target/aarch64-unknown-linux-gnu/release/parity name: "aarch64-unknown-linux-gnu_parity" allow_failure: true darwin: @@ -212,13 +228,14 @@ darwin: - stable script: - cargo build --release 
--verbose - - mkdir -p x86_64-apple-darwin - - cp target/release/parity x86_64-apple-darwin/parity + - aws configure set aws_access_key_id $s3_key + - aws configure set aws_secret_access_key $s3_secret + - aws s3api put-object --bucket builds-parity --key $CI_BUILD_REF_NAME/x86_64-apple-darwin/parity --body target/release/parity tags: - osx artifacts: paths: - - x86_64-apple-darwin/parity + - target/release/parity name: "x86_64-apple-darwin_parity" windows: stage: build @@ -233,6 +250,10 @@ windows: - set RUST_BACKTRACE=1 - rustup default stable-x86_64-pc-windows-msvc - cargo build --release --verbose + - aws configure set aws_access_key_id %s3_key% + - aws configure set aws_secret_access_key %s3_secret% + - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity --body target/release/parity.exe + - aws s3api put-object --bucket builds-parity --key %CI_BUILD_REF_NAME%/x86_64-pc-windows-msvc/parity --body target/release/parity.pdb tags: - rust-windows artifacts: @@ -250,12 +271,3 @@ test-linux: - rust-test dependencies: - linux-stable -deploy-binaries: - stage: deploy - only: - - master - - beta - - tags - - stable - script: - - scripts/deploy.sh diff --git a/Cargo.lock b/Cargo.lock index 4b373f2f2..52bdee494 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -37,6 +37,8 @@ dependencies = [ "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", "toml 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -281,6 +283,7 @@ dependencies = [ "ethjson 0.1.0", "ethkey 0.2.0", "ethstore 0.1.0", + "evmjit 1.4.0", "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.9.4 (git+https://github.com/ethcore/hyper)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -458,6 +461,7 @@ dependencies = [ "ethcore-io 1.4.0", "ethcore-ipc 1.4.0", "ethcore-util 1.4.0", + "ethcrypto 0.1.0", "ethjson 0.1.0", "ethkey 0.2.0", "ethstore 0.1.0", @@ -621,6 +625,13 @@ dependencies = [ "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "evmjit" +version = "1.4.0" +dependencies = [ + "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "fdlimit" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 913b4c5f2..84edb6c1e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -44,6 +44,8 @@ json-ipc-server = { git = "https://github.com/ethcore/json-ipc-server.git" } ethcore-dapps = { path = "dapps", optional = true } clippy = { version = "0.0.90", optional = true} ethcore-stratum = { path = "stratum" } +serde = "0.8.0" +serde_json = "0.8.0" [target.'cfg(windows)'.dependencies] winapi = "0.2" @@ -61,11 +63,13 @@ ui = ["dapps", "ethcore-signer/ui"] use-precompiled-js = ["ethcore-dapps/use-precompiled-js", "ethcore-signer/use-precompiled-js"] dapps = ["ethcore-dapps"] ipc = ["ethcore/ipc"] +jit = ["ethcore/jit"] dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethcore-dapps/dev", "ethcore-signer/dev"] json-tests = ["ethcore/json-tests"] 
stratum = ["ipc"] ethkey-cli = ["ethcore/ethkey-cli"] ethstore-cli = ["ethcore/ethstore-cli"] +evm-debug = ["ethcore/evm-debug"] [[bin]] path = "parity/main.rs" diff --git a/README.md b/README.md index 26913183c..d5fb5f044 100644 --- a/README.md +++ b/README.md @@ -14,8 +14,8 @@ Be sure to check out [our wiki][wiki-url] for more information. [gitter-image]: https://badges.gitter.im/Join%20Chat.svg [gitter-url]: https://gitter.im/ethcore/parity?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge [license-image]: https://img.shields.io/badge/license-GPL%20v3-green.svg -[license-url]: http://www.gnu.org/licenses/gpl-3.0.en.html -[doc-url]: http://ethcore.github.io/parity/ethcore/index.html +[license-url]: https://www.gnu.org/licenses/gpl-3.0.en.html +[doc-url]: https://ethcore.github.io/parity/ethcore/index.html [wiki-url]: https://github.com/ethcore/parity/wiki ---- diff --git a/dapps/src/api/api.rs b/dapps/src/api/api.rs index 84db93f63..9a8dfef95 100644 --- a/dapps/src/api/api.rs +++ b/dapps/src/api/api.rs @@ -15,23 +15,26 @@ // along with Parity. If not, see . use std::sync::Arc; -use hyper::{server, net, Decoder, Encoder, Next}; +use hyper::{server, net, Decoder, Encoder, Next, Control}; use api::types::{App, ApiError}; use api::response::{as_json, as_json_error, ping_response}; use handlers::extract_url; use endpoint::{Endpoint, Endpoints, Handler, EndpointPath}; +use apps::fetcher::ContentFetcher; #[derive(Clone)] pub struct RestApi { local_domain: String, endpoints: Arc, + fetcher: Arc, } impl RestApi { - pub fn new(local_domain: String, endpoints: Arc) -> Box { + pub fn new(local_domain: String, endpoints: Arc, fetcher: Arc) -> Box { Box::new(RestApi { local_domain: local_domain, endpoints: endpoints, + fetcher: fetcher, }) } @@ -43,23 +46,42 @@ impl RestApi { } impl Endpoint for RestApi { - fn to_handler(&self, _path: EndpointPath) -> Box { - Box::new(RestApiRouter { - api: self.clone(), - handler: as_json_error(&ApiError { - code: "404".into(), - title: "Not Found".into(), - detail: "Resource you requested has not been found.".into(), - }), - }) + fn to_async_handler(&self, path: EndpointPath, control: Control) -> Box { + Box::new(RestApiRouter::new(self.clone(), path, control)) } } struct RestApiRouter { api: RestApi, + path: Option, + control: Option, handler: Box, } +impl RestApiRouter { + fn new(api: RestApi, path: EndpointPath, control: Control) -> Self { + RestApiRouter { + path: Some(path), + control: Some(control), + api: api, + handler: as_json_error(&ApiError { + code: "404".into(), + title: "Not Found".into(), + detail: "Resource you requested has not been found.".into(), + }), + } + } + + fn resolve_content(&self, hash: Option<&str>, path: EndpointPath, control: Control) -> Option> { + match hash { + Some(hash) if self.api.fetcher.contains(hash) => { + Some(self.api.fetcher.to_async_handler(path, control)) + }, + _ => None + } + } +} + impl server::Handler for RestApiRouter { fn on_request(&mut self, request: server::Request) -> Next { @@ -69,13 +91,18 @@ impl server::Handler for RestApiRouter { return Next::write(); } - let url = url.expect("Check for None is above; qed"); + let url = url.expect("Check for None early-exists above; qed"); + let path = self.path.take().expect("on_request called only once, and path is always defined in new; qed"); + let control = self.control.take().expect("on_request called only once, and control is always defined in new; qed"); + let endpoint = url.path.get(1).map(|v| v.as_str()); + let hash = 
url.path.get(2).map(|v| v.as_str()); let handler = endpoint.and_then(|v| match v { "apps" => Some(as_json(&self.api.list_apps())), "ping" => Some(ping_response(&self.api.local_domain)), - _ => None, + "content" => self.resolve_content(hash, path, control), + _ => None }); // Overwrite default diff --git a/dapps/src/endpoint.rs b/dapps/src/endpoint.rs index 51e863f19..eea7a872f 100644 --- a/dapps/src/endpoint.rs +++ b/dapps/src/endpoint.rs @@ -42,7 +42,9 @@ pub type Handler = server::Handler + Send; pub trait Endpoint : Send + Sync { fn info(&self) -> Option<&EndpointInfo> { None } - fn to_handler(&self, path: EndpointPath) -> Box; + fn to_handler(&self, _path: EndpointPath) -> Box { + panic!("This Endpoint is asynchronous and requires Control object."); + } fn to_async_handler(&self, path: EndpointPath, _control: hyper::Control) -> Box { self.to_handler(path) diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index 4dcf53a44..edc0bebe5 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -196,8 +196,11 @@ impl Server { let special = Arc::new({ let mut special = HashMap::new(); special.insert(router::SpecialEndpoint::Rpc, rpc::rpc(handler, panic_handler.clone())); - special.insert(router::SpecialEndpoint::Api, api::RestApi::new(format!("{}", addr), endpoints.clone())); special.insert(router::SpecialEndpoint::Utils, apps::utils()); + special.insert( + router::SpecialEndpoint::Api, + api::RestApi::new(format!("{}", addr), endpoints.clone(), content_fetcher.clone()) + ); special }); let hosts = Self::allowed_hosts(hosts, format!("{}", addr)); diff --git a/dapps/src/router/mod.rs b/dapps/src/router/mod.rs index f54d6bf3d..e3ff6e64f 100644 --- a/dapps/src/router/mod.rs +++ b/dapps/src/router/mod.rs @@ -91,7 +91,7 @@ impl server::Handler for Router { (Some(ref path), _) if self.fetch.contains(&path.app_id) => { self.fetch.to_async_handler(path.clone(), control) }, - // Redirection to main page (maybe 404 instead?) + // 404 for non-existent content (Some(ref path), _) if *req.method() == hyper::method::Method::Get => { let address = apps::redirection_address(path.using_dapps_domains, self.main_page); Box::new(ContentHandler::error( @@ -143,7 +143,7 @@ impl Router { allowed_hosts: Option>, ) -> Self { - let handler = special.get(&SpecialEndpoint::Api).unwrap().to_handler(EndpointPath::default()); + let handler = special.get(&SpecialEndpoint::Utils).unwrap().to_handler(EndpointPath::default()); Router { control: Some(control), main_page: main_page, diff --git a/dapps/src/rpc.rs b/dapps/src/rpc.rs index 4fbc37772..649d283ce 100644 --- a/dapps/src/rpc.rs +++ b/dapps/src/rpc.rs @@ -38,10 +38,6 @@ struct RpcEndpoint { } impl Endpoint for RpcEndpoint { - fn to_handler(&self, _path: EndpointPath) -> Box { - panic!("RPC Endpoint is asynchronous and requires Control object."); - } - fn to_async_handler(&self, _path: EndpointPath, control: hyper::Control) -> Box { let panic_handler = PanicHandler { handler: self.panic_handler.clone() }; Box::new(ServerHandler::new( diff --git a/dapps/src/tests/api.rs b/dapps/src/tests/api.rs index ab0d33726..fc255ec20 100644 --- a/dapps/src/tests/api.rs +++ b/dapps/src/tests/api.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use tests::helpers::{serve, request}; +use tests::helpers::{serve, serve_with_registrar, request}; #[test] fn should_return_error() { @@ -82,3 +82,24 @@ fn should_handle_ping() { assert_eq!(response.body, "0\n\n".to_owned()); } + +#[test] +fn should_try_to_resolve_dapp() { + // given + let (server, registrar) = serve_with_registrar(); + + // when + let response = request(server, + "\ + GET /api/content/1472a9e190620cdf6b31f383373e45efcfe869a820c91f9ccd7eb9fb45e4985d HTTP/1.1\r\n\ + Host: home.parity\r\n\ + Connection: close\r\n\ + \r\n\ + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 404 Not Found".to_owned()); + assert_eq!(registrar.calls.lock().len(), 2); +} + diff --git a/devtools/src/http_client.rs b/devtools/src/http_client.rs index 27fa6ec50..f194c4004 100644 --- a/devtools/src/http_client.rs +++ b/devtools/src/http_client.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use std::time::Duration; use std::io::{Read, Write}; use std::str::{self, Lines}; use std::net::{TcpStream, SocketAddr}; @@ -43,10 +44,11 @@ pub fn read_block(lines: &mut Lines, all: bool) -> String { pub fn request(address: &SocketAddr, request: &str) -> Response { let mut req = TcpStream::connect(address).unwrap(); + req.set_read_timeout(Some(Duration::from_secs(1))).unwrap(); req.write_all(request.as_bytes()).unwrap(); let mut response = String::new(); - req.read_to_string(&mut response).unwrap(); + let _ = req.read_to_string(&mut response); let mut lines = response.lines(); let status = lines.next().unwrap().to_owned(); diff --git a/devtools/src/random_path.rs b/devtools/src/random_path.rs index d58042512..9c6c261a2 100644 --- a/devtools/src/random_path.rs +++ b/devtools/src/random_path.rs @@ -23,7 +23,8 @@ use std::ops::{Deref, DerefMut}; use rand::random; pub struct RandomTempPath { - path: PathBuf + path: PathBuf, + pub panic_on_drop_failure: bool, } pub fn random_filename() -> String { @@ -39,7 +40,8 @@ impl RandomTempPath { let mut dir = env::temp_dir(); dir.push(random_filename()); RandomTempPath { - path: dir.clone() + path: dir.clone(), + panic_on_drop_failure: true, } } @@ -48,7 +50,8 @@ impl RandomTempPath { dir.push(random_filename()); fs::create_dir_all(dir.as_path()).unwrap(); RandomTempPath { - path: dir.clone() + path: dir.clone(), + panic_on_drop_failure: true, } } @@ -72,12 +75,20 @@ impl AsRef for RandomTempPath { self.as_path() } } +impl Deref for RandomTempPath { + type Target = Path; + fn deref(&self) -> &Self::Target { + self.as_path() + } +} impl Drop for RandomTempPath { fn drop(&mut self) { if let Err(_) = fs::remove_dir_all(&self) { if let Err(e) = fs::remove_file(&self) { - panic!("Failed to remove temp directory. Here's what prevented this from happening: ({})", e); + if self.panic_on_drop_failure { + panic!("Failed to remove temp directory. Here's what prevented this from happening: ({})", e); + } } } } diff --git a/ethcore/src/account_provider.rs b/ethcore/src/account_provider.rs index c2379d09e..851d015ba 100644 --- a/ethcore/src/account_provider.rs +++ b/ethcore/src/account_provider.rs @@ -322,6 +322,26 @@ impl AccountProvider { Ok(signature) } + /// Decrypts a message. Account must be unlocked. 
+ pub fn decrypt(&self, account: Address, shared_mac: &[u8], message: &[u8]) -> Result, Error> { + let data = { + let mut unlocked = self.unlocked.lock(); + let data = try!(unlocked.get(&account).ok_or(Error::NotUnlocked)).clone(); + if let Unlock::Temp = data.unlock { + unlocked.remove(&account).expect("data exists: so key must exist: qed"); + } + if let Unlock::Timed((ref start, ref duration)) = data.unlock { + if start.elapsed() > Duration::from_millis(*duration as u64) { + unlocked.remove(&account).expect("data exists: so key must exist: qed"); + return Err(Error::NotUnlocked); + } + } + data + }; + + Ok(try!(self.sstore.decrypt(&account, &data.password, shared_mac, message))) + } + /// Unlocks an account, signs the message, and locks it again. pub fn sign_with_password(&self, account: Address, password: String, message: Message) -> Result { let signature = try!(self.sstore.sign(&account, &password, &message)); diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index 88894618c..b35b4dc1a 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -205,7 +205,6 @@ pub struct ClosedBlock { block: ExecutedBlock, uncle_bytes: Bytes, last_hashes: Arc, - unclosed_state: State, } /// Just like `ClosedBlock` except that we can't reopen it and it's faster. @@ -343,18 +342,19 @@ impl<'x> OpenBlock<'x> { } } - /// Turn this into a `ClosedBlock`. A `BlockChain` must be provided in order to figure out the uncles. + /// Turn this into a `ClosedBlock`. pub fn close(self) -> ClosedBlock { let mut s = self; - let unclosed_state = s.block.state.clone(); + // take a snapshot so the engine's changes can be rolled back. + s.block.state.snapshot(); s.engine.on_close_block(&mut s.block); - s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()).collect())); + s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()))); let uncle_bytes = s.block.base.uncles.iter().fold(RlpStream::new_list(s.block.base.uncles.len()), |mut s, u| {s.append_raw(&u.rlp(Seal::With), 1); s} ).out(); s.block.base.header.set_uncles_hash(uncle_bytes.sha3()); s.block.base.header.set_state_root(s.block.state.root().clone()); - s.block.base.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec()).collect())); + s.block.base.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec()))); s.block.base.header.set_log_bloom(s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b = &b | &r.log_bloom; b})); //TODO: use |= operator s.block.base.header.set_gas_used(s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used)); @@ -362,33 +362,37 @@ impl<'x> OpenBlock<'x> { block: s.block, uncle_bytes: uncle_bytes, last_hashes: s.last_hashes, - unclosed_state: unclosed_state, } } - /// Turn this into a `LockedBlock`. A BlockChain must be provided in order to figure out the uncles. + /// Turn this into a `LockedBlock`. pub fn close_and_lock(self) -> LockedBlock { let mut s = self; + // take a snapshot so the engine's changes can be rolled back. 
+ s.block.state.snapshot(); + s.engine.on_close_block(&mut s.block); if s.block.base.header.transactions_root().is_zero() || s.block.base.header.transactions_root() == &SHA3_NULL_RLP { - s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()).collect())); + s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()))); } let uncle_bytes = s.block.base.uncles.iter().fold(RlpStream::new_list(s.block.base.uncles.len()), |mut s, u| {s.append_raw(&u.rlp(Seal::With), 1); s} ).out(); if s.block.base.header.uncles_hash().is_zero() { s.block.base.header.set_uncles_hash(uncle_bytes.sha3()); } if s.block.base.header.receipts_root().is_zero() || s.block.base.header.receipts_root() == &SHA3_NULL_RLP { - s.block.base.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec()).collect())); + s.block.base.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec()))); } + s.block.base.header.set_state_root(s.block.state.root().clone()); s.block.base.header.set_log_bloom(s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b = &b | &r.log_bloom; b})); //TODO: use |= operator s.block.base.header.set_gas_used(s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used)); - LockedBlock { + ClosedBlock { block: s.block, uncle_bytes: uncle_bytes, - } + last_hashes: s.last_hashes, + }.lock() } } @@ -409,7 +413,17 @@ impl ClosedBlock { pub fn hash(&self) -> H256 { self.header().rlp_sha3(Seal::Without) } /// Turn this into a `LockedBlock`, unable to be reopened again. - pub fn lock(self) -> LockedBlock { + pub fn lock(mut self) -> LockedBlock { + // finalize the changes made by the engine. + self.block.state.clear_snapshot(); + if let Err(e) = self.block.state.commit() { + warn!("Error committing closed block's state: {:?}", e); + } + + // set the state root here, after commit recalculates with the block + // rewards. + self.block.base.header.set_state_root(self.block.state.root().clone()); + LockedBlock { block: self.block, uncle_bytes: self.uncle_bytes, @@ -417,12 +431,12 @@ impl ClosedBlock { } /// Given an engine reference, reopen the `ClosedBlock` into an `OpenBlock`. - pub fn reopen(self, engine: &Engine) -> OpenBlock { + pub fn reopen(mut self, engine: &Engine) -> OpenBlock { // revert rewards (i.e. set state back at last transaction's state). - let mut block = self.block; - block.state = self.unclosed_state; + self.block.state.revert_snapshot(); + OpenBlock { - block: block, + block: self.block, engine: engine, last_hashes: self.last_hashes, } diff --git a/ethcore/src/builtin.rs b/ethcore/src/builtin.rs index 2825a2a12..e8eb0ed68 100644 --- a/ethcore/src/builtin.rs +++ b/ethcore/src/builtin.rs @@ -17,14 +17,15 @@ use crypto::sha2::Sha256 as Sha256Digest; use crypto::ripemd160::Ripemd160 as Ripemd160Digest; use crypto::digest::Digest; -use util::*; +use std::cmp::min; +use util::{U256, H256, Hashable, FixedHash, BytesRef}; use ethkey::{Signature, recover as ec_recover}; use ethjson; /// Native implementation of a built-in contract. pub trait Impl: Send + Sync { /// execute this built-in on the given input, writing to the given output. - fn execute(&self, input: &[u8], out: &mut [u8]); + fn execute(&self, input: &[u8], output: &mut BytesRef); } /// A gas pricing scheme for built-in contracts. 
@@ -56,7 +57,7 @@ impl Builtin { pub fn cost(&self, s: usize) -> U256 { self.pricer.cost(s) } /// Simple forwarder for execute. - pub fn execute(&self, input: &[u8], output: &mut[u8]) { self.native.execute(input, output) } + pub fn execute(&self, input: &[u8], output: &mut BytesRef) { self.native.execute(input, output) } } impl From for Builtin { @@ -108,14 +109,13 @@ struct Sha256; struct Ripemd160; impl Impl for Identity { - fn execute(&self, input: &[u8], output: &mut [u8]) { - let len = min(input.len(), output.len()); - output[..len].copy_from_slice(&input[..len]); + fn execute(&self, input: &[u8], output: &mut BytesRef) { + output.write(0, input); } } impl Impl for EcRecover { - fn execute(&self, i: &[u8], output: &mut [u8]) { + fn execute(&self, i: &[u8], output: &mut BytesRef) { let len = min(i.len(), 128); let mut input = [0; 128]; @@ -135,58 +135,34 @@ impl Impl for EcRecover { if s.is_valid() { if let Ok(p) = ec_recover(&s, &hash) { let r = p.sha3(); - - let out_len = min(output.len(), 32); - - for x in &mut output[0.. min(12, out_len)] { - *x = 0; - } - - if out_len > 12 { - output[12..out_len].copy_from_slice(&r[12..out_len]); - } + output.write(0, &[0; 12]); + output.write(12, &r[12..r.len()]); } } } } impl Impl for Sha256 { - fn execute(&self, input: &[u8], output: &mut [u8]) { - let out_len = min(output.len(), 32); - + fn execute(&self, input: &[u8], output: &mut BytesRef) { let mut sha = Sha256Digest::new(); sha.input(input); - if out_len == 32 { - sha.result(&mut output[0..32]); - } else { - let mut out = [0; 32]; - sha.result(&mut out); + let mut out = [0; 32]; + sha.result(&mut out); - output.copy_from_slice(&out[..out_len]) - } + output.write(0, &out); } } impl Impl for Ripemd160 { - fn execute(&self, input: &[u8], output: &mut [u8]) { - let out_len = min(output.len(), 32); - + fn execute(&self, input: &[u8], output: &mut BytesRef) { let mut sha = Ripemd160Digest::new(); sha.input(input); - for x in &mut output[0.. 
min(12, out_len)] { - *x = 0; - } + let mut out = [0; 32]; + sha.result(&mut out[12..32]); - if out_len >= 32 { - sha.result(&mut output[12..32]); - } else if out_len > 12 { - let mut out = [0; 20]; - sha.result(&mut out); - - output.copy_from_slice(&out[12..out_len]) - } + output.write(0, &out); } } @@ -194,7 +170,7 @@ impl Impl for Ripemd160 { mod tests { use super::{Builtin, Linear, ethereum_builtin, Pricer}; use ethjson; - use util::U256; + use util::{U256, BytesRef}; #[test] fn identity() { @@ -203,15 +179,15 @@ mod tests { let i = [0u8, 1, 2, 3]; let mut o2 = [255u8; 2]; - f.execute(&i[..], &mut o2[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o2[..])); assert_eq!(i[0..2], o2); let mut o4 = [255u8; 4]; - f.execute(&i[..], &mut o4[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o4[..])); assert_eq!(i, o4); let mut o8 = [255u8; 8]; - f.execute(&i[..], &mut o8[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..])); assert_eq!(i, o8[..4]); assert_eq!([255u8; 4], o8[4..]); } @@ -224,16 +200,20 @@ mod tests { let i = [0u8; 0]; let mut o = [255u8; 32]; - f.execute(&i[..], &mut o[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(&o[..], &(FromHex::from_hex("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855").unwrap())[..]); let mut o8 = [255u8; 8]; - f.execute(&i[..], &mut o8[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..])); assert_eq!(&o8[..], &(FromHex::from_hex("e3b0c44298fc1c14").unwrap())[..]); let mut o34 = [255u8; 34]; - f.execute(&i[..], &mut o34[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o34[..])); assert_eq!(&o34[..], &(FromHex::from_hex("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855ffff").unwrap())[..]); + + let mut ov = vec![]; + f.execute(&i[..], &mut BytesRef::Flexible(&mut ov)); + assert_eq!(&ov[..], &(FromHex::from_hex("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855").unwrap())[..]); } #[test] @@ -244,15 +224,15 @@ mod tests { let i = [0u8; 0]; let mut o = [255u8; 32]; - f.execute(&i[..], &mut o[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(&o[..], &(FromHex::from_hex("0000000000000000000000009c1185a5c5e9fc54612808977ee8f548b2258d31").unwrap())[..]); let mut o8 = [255u8; 8]; - f.execute(&i[..], &mut o8[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..])); assert_eq!(&o8[..], &(FromHex::from_hex("0000000000000000").unwrap())[..]); let mut o34 = [255u8; 34]; - f.execute(&i[..], &mut o34[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o34[..])); assert_eq!(&o34[..], &(FromHex::from_hex("0000000000000000000000009c1185a5c5e9fc54612808977ee8f548b2258d31ffff").unwrap())[..]); } @@ -272,46 +252,46 @@ mod tests { let i = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap(); let mut o = [255u8; 32]; - f.execute(&i[..], &mut o[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(&o[..], &(FromHex::from_hex("000000000000000000000000c08b5542d177ac6686946920409741463a15dddb").unwrap())[..]); let mut o8 = [255u8; 8]; - f.execute(&i[..], &mut o8[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o8[..])); assert_eq!(&o8[..], &(FromHex::from_hex("0000000000000000").unwrap())[..]); let mut o34 = [255u8; 34]; - f.execute(&i[..], &mut o34[..]); + f.execute(&i[..], &mut BytesRef::Fixed(&mut o34[..])); 
assert_eq!(&o34[..], &(FromHex::from_hex("000000000000000000000000c08b5542d177ac6686946920409741463a15dddbffff").unwrap())[..]); let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001a650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap(); let mut o = [255u8; 32]; - f.execute(&i_bad[..], &mut o[..]); + f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]); let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b000000000000000000000000000000000000000000000000000000000000001b0000000000000000000000000000000000000000000000000000000000000000").unwrap(); let mut o = [255u8; 32]; - f.execute(&i_bad[..], &mut o[..]); + f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]); let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b").unwrap(); let mut o = [255u8; 32]; - f.execute(&i_bad[..], &mut o[..]); + f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]); let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001bffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000000000000000000000000000000000000000000000000000000000001b").unwrap(); let mut o = [255u8; 32]; - f.execute(&i_bad[..], &mut o[..]); + f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]); let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b000000000000000000000000000000000000000000000000000000000000001bffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(); let mut o = [255u8; 32]; - f.execute(&i_bad[..], &mut o[..]); + f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]); // TODO: Should this (corrupted version of the above) fail rather than returning some address? 
/* let i_bad = FromHex::from_hex("48173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap(); let mut o = [255u8; 32]; - f.execute(&i_bad[..], &mut o[..]); + f.execute(&i_bad[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);*/ } @@ -336,7 +316,7 @@ mod tests { let i = [0u8, 1, 2, 3]; let mut o = [255u8; 4]; - b.execute(&i[..], &mut o[..]); + b.execute(&i[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(i, o); } @@ -357,7 +337,7 @@ mod tests { let i = [0u8, 1, 2, 3]; let mut o = [255u8; 4]; - b.execute(&i[..], &mut o[..]); + b.execute(&i[..], &mut BytesRef::Fixed(&mut o[..])); assert_eq!(i, o); } -} \ No newline at end of file +} diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 863130699..445ec37f7 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -144,7 +144,9 @@ pub struct Client { factories: Factories, } -const HISTORY: u64 = 1200; +/// The pruning constant -- how old blocks must be before we +/// assume finality of a given candidate. +pub const HISTORY: u64 = 1200; /// Append a path element to the given path and return the string. pub fn append_path
<P>
(path: P, item: &str) -> String where P: AsRef { @@ -168,7 +170,7 @@ impl Client { let db = Arc::new(try!(Database::open(&db_config, &path.to_str().unwrap()).map_err(ClientError::Database))); let chain = Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone())); - let tracedb = RwLock::new(try!(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone()))); + let tracedb = RwLock::new(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone())); let mut state_db = journaldb::new(db.clone(), config.pruning, ::db::COL_STATE); if state_db.is_empty() && try!(spec.ensure_db_good(state_db.as_hashdb_mut())) { @@ -685,7 +687,7 @@ impl snapshot::DatabaseRestore for Client { *state_db = journaldb::new(db.clone(), self.pruning, ::db::COL_STATE); *chain = Arc::new(BlockChain::new(self.config.blockchain.clone(), &[], db.clone())); - *tracedb = try!(TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone()).map_err(ClientError::from)); + *tracedb = TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone()); Ok(()) } } @@ -957,7 +959,7 @@ impl BlockChainClient for Client { } } - fn logs(&self, filter: Filter, limit: Option) -> Vec { + fn logs(&self, filter: Filter) -> Vec { let blocks = filter.bloom_possibilities().iter() .filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone())) .flat_map(|m| m) @@ -966,7 +968,7 @@ impl BlockChainClient for Client { .into_iter() .collect::>(); - self.chain.read().logs(blocks, |entry| filter.matches(entry), limit) + self.chain.read().logs(blocks, |entry| filter.matches(entry), filter.limit) } fn filter_traces(&self, filter: TraceFilter) -> Option> { diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs index bb70de6cd..0146293df 100644 --- a/ethcore/src/client/config.rs +++ b/ethcore/src/client/config.rs @@ -18,7 +18,7 @@ use std::str::FromStr; pub use std::time::Duration; pub use block_queue::BlockQueueConfig; pub use blockchain::Config as BlockChainConfig; -pub use trace::{Config as TraceConfig, Switch}; +pub use trace::Config as TraceConfig; pub use evm::VMType; pub use verification::VerifierType; use util::{journaldb, CompactionProfile}; @@ -102,7 +102,7 @@ pub struct ClientConfig { /// State db compaction profile pub db_compaction: DatabaseCompactionProfile, /// Should db have WAL enabled? - pub db_wal: bool, + pub db_wal: bool, /// Operating mode pub mode: Mode, /// Type of block verifier used by client. diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index 32582ddf2..a5ff89c47 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -23,7 +23,7 @@ mod trace; mod client; pub use self::client::*; -pub use self::config::{Mode, ClientConfig, DatabaseCompactionProfile, BlockQueueConfig, BlockChainConfig, Switch, VMType}; +pub use self::config::{Mode, ClientConfig, DatabaseCompactionProfile, BlockQueueConfig, BlockChainConfig, VMType}; pub use self::error::Error; pub use types::ids::*; pub use self::test_client::{TestBlockChainClient, EachBlockWith}; diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 0c0a443d6..c0d7e35ba 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -67,6 +67,8 @@ pub struct TestBlockChainClient { pub execution_result: RwLock>>, /// Transaction receipts. pub receipts: RwLock>, + /// Logs + pub logs: RwLock>, /// Block queue size. 
pub queue_size: AtomicUsize, /// Miner @@ -114,6 +116,7 @@ impl TestBlockChainClient { code: RwLock::new(HashMap::new()), execution_result: RwLock::new(None), receipts: RwLock::new(HashMap::new()), + logs: RwLock::new(Vec::new()), queue_size: AtomicUsize::new(0), miner: Arc::new(Miner::with_spec(&spec)), spec: spec, @@ -165,6 +168,11 @@ impl TestBlockChainClient { *self.latest_block_timestamp.write() = ts; } + /// Set logs to return for each logs call. + pub fn set_logs(&self, logs: Vec) { + *self.logs.write() = logs; + } + /// Add blocks to test client. pub fn add_blocks(&self, count: usize, with: EachBlockWith) { let len = self.numbers.read().len(); @@ -390,8 +398,13 @@ impl BlockChainClient for TestBlockChainClient { unimplemented!(); } - fn logs(&self, _filter: Filter, _limit: Option) -> Vec { - Vec::new() + fn logs(&self, filter: Filter) -> Vec { + let mut logs = self.logs.read().clone(); + let len = logs.len(); + match filter.limit { + Some(limit) if limit <= len => logs.split_off(len - limit), + _ => logs, + } } fn last_hashes(&self) -> LastHashes { diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index f262aabbd..45f7322fd 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -156,7 +156,7 @@ pub trait BlockChainClient : Sync + Send { fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockID, to_block: BlockID) -> Option>; /// Returns logs matching given filter. - fn logs(&self, filter: Filter, limit: Option) -> Vec; + fn logs(&self, filter: Filter) -> Vec; /// Makes a non-persistent transaction call. fn call(&self, t: &SignedTransaction, block: BlockID, analytics: CallAnalytics) -> Result; @@ -215,8 +215,11 @@ pub trait BlockChainClient : Sync + Send { /// Extended client interface used for mining pub trait MiningBlockChainClient : BlockChainClient { /// Returns OpenBlock prepared for closing. - fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) - -> OpenBlock; + fn prepare_open_block(&self, + author: Address, + gas_range_target: (U256, U256), + extra_data: Bytes + ) -> OpenBlock; /// Returns EvmFactory. fn vm_factory(&self) -> &EvmFactory; diff --git a/ethcore/src/engines/mod.rs b/ethcore/src/engines/mod.rs index 599721f86..5234553ef 100644 --- a/ethcore/src/engines/mod.rs +++ b/ethcore/src/engines/mod.rs @@ -45,7 +45,7 @@ pub trait Engine : Sync + Send { fn extra_info(&self, _header: &Header) -> HashMap { HashMap::new() } /// Additional information. - fn additional_params(&self) -> HashMap { HashMap::new() } + fn additional_params(&self) -> HashMap { HashMap::new() } /// Get the general parameters of the chain. fn params(&self) -> &CommonParams; @@ -126,7 +126,7 @@ pub trait Engine : Sync + Send { fn cost_of_builtin(&self, a: &Address, input: &[u8]) -> U256 { self.builtins().get(a).unwrap().cost(input.len()) } /// Execution the builtin contract `a` on `input` and return `output`. /// Panics if `is_builtin(a)` is not true. - fn execute_builtin(&self, a: &Address, input: &[u8], output: &mut [u8]) { self.builtins().get(a).unwrap().execute(input, output); } + fn execute_builtin(&self, a: &Address, input: &[u8], output: &mut BytesRef) { self.builtins().get(a).unwrap().execute(input, output); } // TODO: sealing stuff - though might want to leave this for later. 
} diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index 216ebed17..734acb758 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -167,9 +167,7 @@ impl Engine for Ethash { for u in fields.uncles.iter() { fields.state.add_balance(u.author(), &(reward * U256::from(8 + u.number() - current_number) / U256::from(8))); } - if let Err(e) = fields.state.commit() { - warn!("Encountered error on state commit: {}", e); - } + } fn verify_block_basic(&self, header: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> { diff --git a/ethcore/src/evm/evm.rs b/ethcore/src/evm/evm.rs index 813819250..8d2202480 100644 --- a/ethcore/src/evm/evm.rs +++ b/ethcore/src/evm/evm.rs @@ -113,7 +113,10 @@ impl<'a> Finalize for Result> { } /// Cost calculation type. For low-gas usage we calculate costs using usize instead of U256 -pub trait CostType: ops::Mul + ops::Div + ops::Add + ops::Sub + ops::Shr + ops::Shl + cmp::Ord + Sized + From + Copy { +pub trait CostType: Sized + From + Copy + + ops::Mul + ops::Div + ops::Add +ops::Sub + + ops::Shr + ops::Shl + + cmp::Ord + fmt::Debug { /// Converts this cost into `U256` fn as_u256(&self) -> U256; /// Tries to fit `U256` into this `Cost` type diff --git a/ethcore/src/evm/ext.rs b/ethcore/src/evm/ext.rs index 0db789507..2bbc7035b 100644 --- a/ethcore/src/evm/ext.rs +++ b/ethcore/src/evm/ext.rs @@ -83,6 +83,9 @@ pub trait Ext { /// Returns code at given address fn extcode(&self, address: &Address) -> Bytes; + /// Returns code size at given address + fn extcodesize(&self, address: &Address) -> usize; + /// Creates log entry with given topics and data fn log(&mut self, topics: Vec, data: &[u8]); diff --git a/ethcore/src/evm/interpreter/mod.rs b/ethcore/src/evm/interpreter/mod.rs index d1b9b18bc..ad2d5cd34 100644 --- a/ethcore/src/evm/interpreter/mod.rs +++ b/ethcore/src/evm/interpreter/mod.rs @@ -53,6 +53,17 @@ fn color(instruction: Instruction, name: &'static str) -> String { type CodePosition = usize; type ProgramCounter = usize; +const ONE: U256 = U256([1, 0, 0, 0]); +const TWO: U256 = U256([2, 0, 0, 0]); +const TWO_POW_5: U256 = U256([0x20, 0, 0, 0]); +const TWO_POW_8: U256 = U256([0x100, 0, 0, 0]); +const TWO_POW_16: U256 = U256([0x10000, 0, 0, 0]); +const TWO_POW_24: U256 = U256([0x1000000, 0, 0, 0]); +const TWO_POW_64: U256 = U256([0, 0x1, 0, 0]); // 0x1 00000000 00000000 +const TWO_POW_96: U256 = U256([0, 0x100000000, 0, 0]); //0x1 00000000 00000000 00000000 +const TWO_POW_224: U256 = U256([0, 0, 0, 0x100000000]); //0x1 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +const TWO_POW_248: U256 = U256([0, 0, 0, 0x100000000000000]); //0x1 00000000 00000000 00000000 00000000 00000000 00000000 00000000 000000 + /// Abstraction over raw vector of Bytes. Easier state management of PC. 
struct CodeReader<'a> { position: ProgramCounter, @@ -126,7 +137,7 @@ impl evm::Evm for Interpreter { gasometer.current_gas = gasometer.current_gas - gas_cost; evm_debug!({ - println!("[0x{:x}][{}(0x{:x}) Gas: {:x}\n Gas Before: {:x}", + println!("[0x{:x}][{}(0x{:x}) Gas: {:?}\n Gas Before: {:?}", reader.position, color(instruction, info.name), instruction, @@ -471,7 +482,7 @@ impl Interpreter { }, instructions::EXTCODESIZE => { let address = u256_to_address(&stack.pop_back()); - let len = ext.extcode(&address).len(); + let len = ext.extcodesize(&address); stack.push(U256::from(len)); }, instructions::CALLDATACOPY => { @@ -599,7 +610,19 @@ impl Interpreter { let a = stack.pop_back(); let b = stack.pop_back(); stack.push(if !self.is_zero(&b) { - a.overflowing_div(b).0 + match b { + ONE => a, + TWO => a >> 1, + TWO_POW_5 => a >> 5, + TWO_POW_8 => a >> 8, + TWO_POW_16 => a >> 16, + TWO_POW_24 => a >> 24, + TWO_POW_64 => a >> 64, + TWO_POW_96 => a >> 96, + TWO_POW_224 => a >> 224, + TWO_POW_248 => a >> 248, + _ => a.overflowing_div(b).0, + } } else { U256::zero() }); diff --git a/ethcore/src/evm/jit.rs b/ethcore/src/evm/jit.rs index 4f43d327b..c62f87ab7 100644 --- a/ethcore/src/evm/jit.rs +++ b/ethcore/src/evm/jit.rs @@ -18,6 +18,7 @@ use common::*; use evmjit; use evm::{self, GasLeft}; +use types::executed::CallType; /// Should be used to convert jit types to ethcore trait FromJit: Sized { @@ -77,10 +78,11 @@ impl IntoJit for U256 { impl IntoJit for H256 { fn into_jit(self) -> evmjit::I256 { let mut ret = [0; 4]; - for i in 0..self.bytes().len() { - let rev = self.bytes().len() - 1 - i; + let len = self.len(); + for i in 0..len { + let rev = len - 1 - i; let pos = rev / 8; - ret[pos] += (self.bytes()[i] as u64) << ((rev % 8) * 8); + ret[pos] += (self[i] as u64) << ((rev % 8) * 8); } evmjit::I256 { words: ret } } @@ -206,6 +208,7 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> { let sender_address = unsafe { Address::from_jit(&*sender_address) }; let receive_address = unsafe { Address::from_jit(&*receive_address) }; let code_address = unsafe { Address::from_jit(&*code_address) }; + // TODO Is it always safe in case of DELEGATE_CALL? let transfer_value = unsafe { U256::from_jit(&*transfer_value) }; let value = Some(transfer_value); @@ -239,6 +242,12 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> { } } + // TODO [ToDr] Any way to detect DelegateCall? 
+ let call_type = match is_callcode { + true => CallType::CallCode, + false => CallType::Call, + }; + match self.ext.call( &call_gas, &sender_address, @@ -246,7 +255,9 @@ impl<'a> evmjit::Ext for ExtAdapter<'a> { value, unsafe { slice::from_raw_parts(in_beg, in_size as usize) }, &code_address, - unsafe { slice::from_raw_parts_mut(out_beg, out_size as usize) }) { + unsafe { slice::from_raw_parts_mut(out_beg, out_size as usize) }, + call_type, + ) { evm::MessageCallResult::Success(gas_left) => unsafe { *io_gas = (gas + gas_left).low_u64(); true diff --git a/ethcore/src/evm/tests.rs b/ethcore/src/evm/tests.rs index bdb1f1ddb..ec217b6c5 100644 --- a/ethcore/src/evm/tests.rs +++ b/ethcore/src/evm/tests.rs @@ -140,6 +140,10 @@ impl Ext for FakeExt { self.codes.get(address).unwrap_or(&Bytes::new()).clone() } + fn extcodesize(&self, address: &Address) -> usize { + self.codes.get(address).map(|v| v.len()).unwrap_or(0) + } + fn log(&mut self, topics: Vec, data: &[u8]) { self.logs.push(FakeLogEntry { topics: topics, diff --git a/ethcore/src/executive.rs b/ethcore/src/executive.rs index 332eda190..8f8b534ee 100644 --- a/ethcore/src/executive.rs +++ b/ethcore/src/executive.rs @@ -193,7 +193,6 @@ impl<'a> Executive<'a> { data: Some(t.data.clone()), call_type: CallType::Call, }; - // TODO: move output upstream let mut out = vec![]; (self.call(params, &mut substate, BytesRef::Flexible(&mut out), &mut tracer, &mut vm_tracer), out) } diff --git a/ethcore/src/externalities.rs b/ethcore/src/externalities.rs index 09c4b4e11..7395522c3 100644 --- a/ethcore/src/externalities.rs +++ b/ethcore/src/externalities.rs @@ -205,6 +205,11 @@ impl<'a, T, V> Ext for Externalities<'a, T, V> where T: 'a + Tracer, V: 'a + VMT self.state.code(address).unwrap_or_else(|| vec![]) } + fn extcodesize(&self, address: &Address) -> usize { + self.state.code_size(address).unwrap_or(0) + } + + #[cfg_attr(feature="dev", allow(match_ref_pats))] fn ret(mut self, gas: &U256, data: &[u8]) -> evm::Result where Self: Sized { diff --git a/ethcore/src/json_tests/executive.rs b/ethcore/src/json_tests/executive.rs index 7304f5931..1fe98acdb 100644 --- a/ethcore/src/json_tests/executive.rs +++ b/ethcore/src/json_tests/executive.rs @@ -131,6 +131,10 @@ impl<'a, T, V> Ext for TestExt<'a, T, V> where T: Tracer, V: VMTracer { self.ext.extcode(address) } + fn extcodesize(&self, address: &Address) -> usize { + self.ext.extcodesize(address) + } + fn log(&mut self, topics: Vec, data: &[u8]) { self.ext.log(topics, data) } diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index 73c1c9cf9..182234280 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -292,6 +292,7 @@ impl Miner { }; let mut invalid_transactions = HashSet::new(); + let mut transactions_to_penalize = HashSet::new(); let block_number = open_block.block().fields().header.number(); // TODO: push new uncles, too. for tx in transactions { @@ -299,6 +300,12 @@ impl Miner { match open_block.push_transaction(tx, None) { Err(Error::Execution(ExecutionError::BlockGasLimitReached { gas_limit, gas_used, gas })) => { debug!(target: "miner", "Skipping adding transaction to block because of gas limit: {:?} (limit: {:?}, used: {:?}, gas: {:?})", hash, gas_limit, gas_used, gas); + + // Penalize transaction if it's above current gas limit + if gas > gas_limit { + transactions_to_penalize.insert(hash); + } + // Exit early if gas left is smaller then min_tx_gas let min_tx_gas: U256 = 21000.into(); // TODO: figure this out properly. 
if gas_limit - gas_used < min_tx_gas { @@ -334,6 +341,9 @@ impl Miner { for hash in invalid_transactions.into_iter() { queue.remove_invalid(&hash, &fetch_account); } + for hash in transactions_to_penalize { + queue.penalize(&hash); + } } (block, original_work_hash) } diff --git a/ethcore/src/miner/transaction_queue.rs b/ethcore/src/miner/transaction_queue.rs index af054aa98..7db65eacb 100644 --- a/ethcore/src/miner/transaction_queue.rs +++ b/ethcore/src/miner/transaction_queue.rs @@ -134,6 +134,8 @@ struct TransactionOrder { hash: H256, /// Origin of the transaction origin: TransactionOrigin, + /// Penalties + penalties: usize, } @@ -144,6 +146,7 @@ impl TransactionOrder { gas_price: tx.transaction.gas_price, hash: tx.hash(), origin: tx.origin, + penalties: 0, } } @@ -151,6 +154,11 @@ impl TransactionOrder { self.nonce_height = nonce - base_nonce; self } + + fn penalize(mut self) -> Self { + self.penalties = self.penalties.saturating_add(1); + self + } } impl Eq for TransactionOrder {} @@ -167,6 +175,11 @@ impl PartialOrd for TransactionOrder { impl Ord for TransactionOrder { fn cmp(&self, b: &TransactionOrder) -> Ordering { + // First check number of penalties + if self.penalties != b.penalties { + return self.penalties.cmp(&b.penalties); + } + // First check nonce_height if self.nonce_height != b.nonce_height { return self.nonce_height.cmp(&b.nonce_height); @@ -387,7 +400,7 @@ pub struct AccountDetails { } /// Transactions with `gas > (gas_limit + gas_limit * Factor(in percents))` are not imported to the queue. -const GAS_LIMIT_HYSTERESIS: usize = 10; // % +const GAS_LIMIT_HYSTERESIS: usize = 10; // (100/GAS_LIMIT_HYSTERESIS) % /// `TransactionQueue` implementation pub struct TransactionQueue { @@ -506,8 +519,6 @@ impl TransactionQueue { pub fn add(&mut self, tx: SignedTransaction, fetch_account: &T, origin: TransactionOrigin) -> Result where T: Fn(&Address) -> AccountDetails { - trace!(target: "txqueue", "Importing: {:?}", tx.hash()); - if tx.gas_price < self.minimal_gas_price && origin != TransactionOrigin::Local { trace!(target: "txqueue", "Dropping transaction below minimal gas price threshold: {:?} (gp: {} < {})", @@ -593,6 +604,39 @@ impl TransactionQueue { assert_eq!(self.future.by_priority.len() + self.current.by_priority.len(), self.by_hash.len()); } + /// Penalize transactions from sender of transaction with given hash. + /// I.e. it should change the priority of the transaction in the queue. + /// + /// NOTE: We need to penalize all transactions from particular sender + /// to avoid breaking invariants in queue (ordered by nonces). + /// Consecutive transactions from this sender would fail otherwise (because of invalid nonce). 
+ pub fn penalize(&mut self, transaction_hash: &H256) { + let transaction = match self.by_hash.get(transaction_hash) { + None => return, + Some(t) => t, + }; + let sender = transaction.sender(); + + // Penalize all transactions from this sender + let nonces_from_sender = match self.current.by_address.row(&sender) { + Some(row_map) => row_map.keys().cloned().collect::>(), + None => vec![], + }; + for k in nonces_from_sender { + let order = self.current.drop(&sender, &k).unwrap(); + self.current.insert(sender, k, order.penalize()); + } + // Same thing for future + let nonces_from_sender = match self.future.by_address.row(&sender) { + Some(row_map) => row_map.keys().cloned().collect::>(), + None => vec![], + }; + for k in nonces_from_sender { + let order = self.future.drop(&sender, &k).unwrap(); + self.current.insert(sender, k, order.penalize()); + } + } + /// Removes invalid transaction identified by hash from queue. /// Assumption is that this transaction nonce is not related to client nonce, /// so transactions left in queue are processed according to client nonce. @@ -764,6 +808,7 @@ impl TransactionQueue { let address = tx.sender(); let nonce = tx.nonce(); + let hash = tx.hash(); let next_nonce = self.last_nonces .get(&address) @@ -785,6 +830,9 @@ impl TransactionQueue { try!(check_too_cheap(Self::replace_transaction(tx, state_nonce, &mut self.future, &mut self.by_hash))); // Return an error if this transaction is not imported because of limit. try!(check_if_removed(&address, &nonce, self.future.enforce_limit(&mut self.by_hash))); + + debug!(target: "txqueue", "Importing transaction to future: {:?}", hash); + debug!(target: "txqueue", "status: {:?}", self.status()); return Ok(TransactionImportResult::Future); } try!(check_too_cheap(Self::replace_transaction(tx, state_nonce, &mut self.current, &mut self.by_hash))); @@ -811,7 +859,8 @@ impl TransactionQueue { // Trigger error if the transaction we are importing was removed. 
try!(check_if_removed(&address, &nonce, removed)); - trace!(target: "txqueue", "status: {:?}", self.status()); + debug!(target: "txqueue", "Imported transaction to current: {:?}", hash); + debug!(target: "txqueue", "status: {:?}", self.status()); Ok(TransactionImportResult::Current) } @@ -945,6 +994,17 @@ mod test { (tx1.sign(secret), tx2.sign(secret)) } + /// Returns two consecutive transactions, both with increased gas price + fn new_tx_pair_with_gas_price_increment(gas_price_increment: U256) -> (SignedTransaction, SignedTransaction) { + let gas = default_gas_price() + gas_price_increment; + let tx1 = new_unsigned_tx(default_nonce(), gas); + let tx2 = new_unsigned_tx(default_nonce() + 1.into(), gas); + + let keypair = Random.generate().unwrap(); + let secret = &keypair.secret(); + (tx1.sign(secret), tx2.sign(secret)) + } + fn new_tx_pair_default(nonce_increment: U256, gas_price_increment: U256) -> (SignedTransaction, SignedTransaction) { new_tx_pair(default_nonce(), default_gas_price(), nonce_increment, gas_price_increment) } @@ -1332,6 +1392,39 @@ mod test { assert_eq!(top.len(), 2); } + #[test] + fn should_penalize_transactions_from_sender() { + // given + let mut txq = TransactionQueue::new(); + // txa, txb - slightly bigger gas price to have consistent ordering + let (txa, txb) = new_tx_pair_default(1.into(), 0.into()); + let (tx1, tx2) = new_tx_pair_with_gas_price_increment(3.into()); + + // insert everything + txq.add(txa.clone(), &default_account_details, TransactionOrigin::External).unwrap(); + txq.add(txb.clone(), &default_account_details, TransactionOrigin::External).unwrap(); + txq.add(tx1.clone(), &default_account_details, TransactionOrigin::External).unwrap(); + txq.add(tx2.clone(), &default_account_details, TransactionOrigin::External).unwrap(); + + let top = txq.top_transactions(); + assert_eq!(top[0], tx1); + assert_eq!(top[1], txa); + assert_eq!(top[2], tx2); + assert_eq!(top[3], txb); + assert_eq!(top.len(), 4); + + // when + txq.penalize(&tx1.hash()); + + // then + let top = txq.top_transactions(); + assert_eq!(top[0], txa); + assert_eq!(top[1], txb); + assert_eq!(top[2], tx1); + assert_eq!(top[3], tx2); + assert_eq!(top.len(), 4); + } + #[test] fn should_return_pending_hashes() { // given diff --git a/ethcore/src/snapshot/account.rs b/ethcore/src/snapshot/account.rs index 16c59db2e..8cfc4c96b 100644 --- a/ethcore/src/snapshot/account.rs +++ b/ethcore/src/snapshot/account.rs @@ -92,7 +92,8 @@ impl Account { let mut pairs = Vec::new(); - for (k, v) in db.iter() { + for item in try!(db.iter()) { + let (k, v) = try!(item); pairs.push((k, v)); } diff --git a/ethcore/src/snapshot/block.rs b/ethcore/src/snapshot/block.rs index 05b3281c8..4f7f912ca 100644 --- a/ethcore/src/snapshot/block.rs +++ b/ethcore/src/snapshot/block.rs @@ -21,10 +21,10 @@ use header::Header; use views::BlockView; use rlp::{DecoderError, RlpStream, Stream, UntrustedRlp, View}; -use rlp::{Compressible, RlpType}; use util::{Bytes, Hashable, H256}; +use util::triehash::ordered_trie_root; -const HEADER_FIELDS: usize = 10; +const HEADER_FIELDS: usize = 8; const BLOCK_FIELDS: usize = 2; pub struct AbridgedBlock { @@ -61,8 +61,6 @@ impl AbridgedBlock { stream .append(&header.author()) .append(&header.state_root()) - .append(&header.transactions_root()) - .append(&header.receipts_root()) .append(&header.log_bloom()) .append(&header.difficulty()) .append(&header.gas_limit()) @@ -79,33 +77,35 @@ impl AbridgedBlock { } AbridgedBlock { - rlp: UntrustedRlp::new(stream.as_raw()).compress(RlpType::Blocks).to_vec(), + 
rlp: stream.out(), } } /// Flesh out an abridged block view with the provided parent hash and block number. /// /// Will fail if contains invalid rlp. - pub fn to_block(&self, parent_hash: H256, number: u64) -> Result { - let rlp = UntrustedRlp::new(&self.rlp).decompress(RlpType::Blocks); - let rlp = UntrustedRlp::new(&rlp); + pub fn to_block(&self, parent_hash: H256, number: u64, receipts_root: H256) -> Result { + let rlp = UntrustedRlp::new(&self.rlp); let mut header: Header = Default::default(); header.set_parent_hash(parent_hash); header.set_author(try!(rlp.val_at(0))); header.set_state_root(try!(rlp.val_at(1))); - header.set_transactions_root(try!(rlp.val_at(2))); - header.set_receipts_root(try!(rlp.val_at(3))); - header.set_log_bloom(try!(rlp.val_at(4))); - header.set_difficulty(try!(rlp.val_at(5))); + header.set_log_bloom(try!(rlp.val_at(2))); + header.set_difficulty(try!(rlp.val_at(3))); header.set_number(number); - header.set_gas_limit(try!(rlp.val_at(6))); - header.set_gas_used(try!(rlp.val_at(7))); - header.set_timestamp(try!(rlp.val_at(8))); - header.set_extra_data(try!(rlp.val_at(9))); + header.set_gas_limit(try!(rlp.val_at(4))); + header.set_gas_used(try!(rlp.val_at(5))); + header.set_timestamp(try!(rlp.val_at(6))); + header.set_extra_data(try!(rlp.val_at(7))); - let transactions = try!(rlp.val_at(10)); - let uncles: Vec

<Header> = try!(rlp.val_at(11)); + let transactions = try!(rlp.val_at(8)); + let uncles: Vec<Header>
= try!(rlp.val_at(9)); + + header.set_transactions_root(ordered_trie_root( + try!(rlp.at(8)).iter().map(|r| r.as_raw().to_owned()) + )); + header.set_receipts_root(receipts_root); let mut uncles_rlp = RlpStream::new(); uncles_rlp.append(&uncles); @@ -143,20 +143,22 @@ mod tests { #[test] fn empty_block_abridging() { let b = Block::default(); + let receipts_root = b.header.receipts_root().clone(); let encoded = encode_block(&b); let abridged = AbridgedBlock::from_block_view(&BlockView::new(&encoded)); - assert_eq!(abridged.to_block(H256::new(), 0).unwrap(), b); + assert_eq!(abridged.to_block(H256::new(), 0, receipts_root).unwrap(), b); } #[test] #[should_panic] fn wrong_number() { let b = Block::default(); + let receipts_root = b.header.receipts_root().clone(); let encoded = encode_block(&b); let abridged = AbridgedBlock::from_block_view(&BlockView::new(&encoded)); - assert_eq!(abridged.to_block(H256::new(), 2).unwrap(), b); + assert_eq!(abridged.to_block(H256::new(), 2, receipts_root).unwrap(), b); } #[test] @@ -184,9 +186,14 @@ mod tests { b.transactions.push(t1); b.transactions.push(t2); + let receipts_root = b.header.receipts_root().clone(); + b.header.set_transactions_root(::util::triehash::ordered_trie_root( + b.transactions.iter().map(::rlp::encode).map(|out| out.to_vec()) + )); + let encoded = encode_block(&b); let abridged = AbridgedBlock::from_block_view(&BlockView::new(&encoded[..])); - assert_eq!(abridged.to_block(H256::new(), 0).unwrap(), b); + assert_eq!(abridged.to_block(H256::new(), 0, receipts_root).unwrap(), b); } } diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 43622fc51..2074f8174 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -15,6 +15,9 @@ // along with Parity. If not, see . //! Snapshot creation, restoration, and network service. +//! +//! Documentation of the format can be found at +//! 
https://github.com/ethcore/parity/wiki/%22PV64%22-Snapshot-Format use std::collections::{HashMap, HashSet, VecDeque}; use std::sync::Arc; @@ -34,7 +37,7 @@ use util::journaldb::{self, Algorithm, JournalDB}; use util::kvdb::Database; use util::trie::{TrieDB, TrieDBMut, Trie, TrieMut}; use util::sha3::SHA3_NULL_RLP; -use rlp::{RlpStream, Stream, UntrustedRlp, View, Compressible, RlpType}; +use rlp::{RlpStream, Stream, UntrustedRlp, View}; use self::account::Account; use self::block::AbridgedBlock; @@ -358,15 +361,15 @@ pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex Result { use basic_types::Seal::With; use util::U256; + use util::triehash::ordered_trie_root; let rlp = UntrustedRlp::new(chunk); let item_count = rlp.item_count(); @@ -585,7 +588,11 @@ impl BlockRebuilder { let abridged_rlp = try!(pair.at(0)).as_raw().to_owned(); let abridged_block = AbridgedBlock::from_raw(abridged_rlp); let receipts: Vec<::receipt::Receipt> = try!(pair.val_at(1)); - let block = try!(abridged_block.to_block(parent_hash, cur_number)); + let receipts_root = ordered_trie_root( + try!(pair.at(1)).iter().map(|r| r.as_raw().to_owned()) + ); + + let block = try!(abridged_block.to_block(parent_hash, cur_number, receipts_root)); let block_bytes = block.rlp_bytes(With); if self.rng.gen::() <= POW_VERIFY_RATE { diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index 78a065958..5243a4792 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -27,7 +27,7 @@ use super::{ManifestData, StateRebuilder, BlockRebuilder, RestorationStatus, Sna use super::io::{SnapshotReader, LooseReader, SnapshotWriter, LooseWriter}; use blockchain::BlockChain; -use client::Client; +use client::{BlockChainClient, Client}; use engines::Engine; use error::Error; use ids::BlockID; @@ -345,7 +345,17 @@ impl Service { let res = client.take_snapshot(writer, BlockID::Number(num), &self.progress); self.taking_snapshot.store(false, Ordering::SeqCst); - try!(res); + if let Err(e) = res { + if client.chain_info().best_block_number >= num + ::client::HISTORY { + // "Cancelled" is mincing words a bit -- what really happened + // is that the state we were snapshotting got pruned out + // before we could finish. + info!("Cancelled prematurely-started periodic snapshot."); + return Ok(()) + } else { + return Err(e); + } + } info!("Finished taking snapshot at #{}", num); diff --git a/ethcore/src/snapshot/tests/helpers.rs b/ethcore/src/snapshot/tests/helpers.rs index 42fb68220..cb928346e 100644 --- a/ethcore/src/snapshot/tests/helpers.rs +++ b/ethcore/src/snapshot/tests/helpers.rs @@ -52,8 +52,9 @@ impl StateProducer { // modify existing accounts. let mut accounts_to_modify: Vec<_> = { let trie = TrieDB::new(&*db, &self.state_root).unwrap(); - let temp = trie.iter() // binding required due to complicated lifetime stuff + let temp = trie.iter().unwrap() // binding required due to complicated lifetime stuff .filter(|_| rng.gen::() < ACCOUNT_CHURN) + .map(Result::unwrap) .map(|(k, v)| (H256::from_slice(&k), v.to_owned())) .collect(); diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index 8c7e49ba3..e1299d1dc 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -191,10 +191,16 @@ impl State { })) } - /// Mutate storage of account `a` so that it is `value` for `key`. + /// Get the code of account `a`. 
pub fn code(&self, a: &Address) -> Option { self.ensure_cached(a, true, - |a| a.as_ref().map_or(None, |a|a.code().map(|x|x.to_vec()))) + |a| a.as_ref().map_or(None, |a| a.code().map(|x| x.to_vec()))) + } + + /// Get the code size of account `a`. + pub fn code_size(&self, a: &Address) -> Option { + self.ensure_cached(a, true, + |a| a.as_ref().map_or(None, |a| a.code().map(|x| x.len()))) } /// Add `incr` to the balance of account `a`. @@ -420,10 +426,27 @@ impl fmt::Debug for State { impl Clone for State { fn clone(&self) -> State { + let cache = { + let mut cache = HashMap::new(); + for (key, val) in self.cache.borrow().iter() { + let key = key.clone(); + match *val { + Some(ref acc) if acc.is_dirty() => { + cache.insert(key, Some(acc.clone())); + }, + None => { + cache.insert(key, None); + }, + _ => {}, + } + } + cache + }; + State { db: self.db.boxed_clone(), root: self.root.clone(), - cache: RefCell::new(self.cache.borrow().clone()), + cache: RefCell::new(cache), snapshots: RefCell::new(self.snapshots.borrow().clone()), account_start_nonce: self.account_start_nonce.clone(), factories: self.factories.clone(), diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs index b10b56d95..dc95e8267 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -142,7 +142,8 @@ fn returns_logs() { to_block: BlockID::Latest, address: None, topics: vec![], - }, None); + limit: None, + }); assert_eq!(logs.len(), 0); } @@ -156,7 +157,8 @@ fn returns_logs_with_limit() { to_block: BlockID::Latest, address: None, topics: vec![], - }, Some(2)); + limit: Some(2), + }); assert_eq!(logs.len(), 0); } diff --git a/ethcore/src/trace/config.rs b/ethcore/src/trace/config.rs index ff96cea74..9dab7524d 100644 --- a/ethcore/src/trace/config.rs +++ b/ethcore/src/trace/config.rs @@ -15,57 +15,14 @@ // along with Parity. If not, see . //! Traces config. -use std::str::FromStr; use bloomchain::Config as BloomConfig; -use trace::Error; - -/// 3-value enum. -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum Switch { - /// True. - On, - /// False. - Off, - /// Auto. - Auto, -} - -impl Default for Switch { - fn default() -> Self { - Switch::Auto - } -} - -impl FromStr for Switch { - type Err = String; - - fn from_str(s: &str) -> Result { - match s { - "on" => Ok(Switch::On), - "off" => Ok(Switch::Off), - "auto" => Ok(Switch::Auto), - other => Err(format!("Invalid switch value: {}", other)) - } - } -} - -impl Switch { - /// Tries to turn old switch to new value. - pub fn turn_to(&self, to: Switch) -> Result { - match (*self, to) { - (Switch::On, Switch::On) | (Switch::On, Switch::Auto) | (Switch::Auto, Switch::On) => Ok(true), - (Switch::Off, Switch::On) => Err(Error::ResyncRequired), - _ => Ok(false), - } - } -} /// Traces config. #[derive(Debug, PartialEq, Clone)] pub struct Config { /// Indicates if tracing should be enabled or not. /// If it's None, it will be automatically configured. - pub enabled: Switch, + pub enabled: bool, /// Traces blooms configuration. pub blooms: BloomConfig, /// Preferef cache-size. 
@@ -77,7 +34,7 @@ pub struct Config { impl Default for Config { fn default() -> Self { Config { - enabled: Switch::default(), + enabled: false, blooms: BloomConfig { levels: 3, elements_per_index: 16, @@ -87,20 +44,3 @@ impl Default for Config { } } } - -#[cfg(test)] -mod tests { - use super::Switch; - - #[test] - fn test_switch_parsing() { - assert_eq!(Switch::On, "on".parse().unwrap()); - assert_eq!(Switch::Off, "off".parse().unwrap()); - assert_eq!(Switch::Auto, "auto".parse().unwrap()); - } - - #[test] - fn test_switch_default() { - assert_eq!(Switch::default(), Switch::Auto); - } -} diff --git a/ethcore/src/trace/db.rs b/ethcore/src/trace/db.rs index e7bd7c825..b608ad685 100644 --- a/ethcore/src/trace/db.rs +++ b/ethcore/src/trace/db.rs @@ -22,7 +22,7 @@ use bloomchain::{Number, Config as BloomConfig}; use bloomchain::group::{BloomGroupDatabase, BloomGroupChain, GroupPosition, BloomGroup}; use util::{H256, H264, Database, DBTransaction, RwLock, HeapSizeOf}; use header::BlockNumber; -use trace::{LocalizedTrace, Config, Switch, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras, Error}; +use trace::{LocalizedTrace, Config, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras}; use db::{self, Key, Writable, Readable, CacheUpdatePolicy}; use blooms; use super::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces}; @@ -126,38 +126,20 @@ impl BloomGroupDatabase for TraceDB where T: DatabaseExtras { impl TraceDB where T: DatabaseExtras { /// Creates new instance of `TraceDB`. - pub fn new(config: Config, tracesdb: Arc, extras: Arc) -> Result { - // check if in previously tracing was enabled - let old_tracing = match tracesdb.get(db::COL_TRACE, b"enabled").unwrap() { - Some(ref value) if value as &[u8] == &[0x1] => Switch::On, - Some(ref value) if value as &[u8] == &[0x0] => Switch::Off, - Some(_) => { panic!("tracesdb is corrupted") }, - None => Switch::Auto, - }; - - let enabled = try!(old_tracing.turn_to(config.enabled)); - - let encoded_tracing = match enabled { - true => [0x1], - false => [0x0] - }; - + pub fn new(config: Config, tracesdb: Arc, extras: Arc) -> Self { let mut batch = DBTransaction::new(&tracesdb); - batch.put(db::COL_TRACE, b"enabled", &encoded_tracing); batch.put(db::COL_TRACE, b"version", TRACE_DB_VER); tracesdb.write(batch).unwrap(); - let db = TraceDB { + TraceDB { traces: RwLock::new(HashMap::new()), blooms: RwLock::new(HashMap::new()), cache_manager: RwLock::new(CacheManager::new(config.pref_cache_size, config.max_cache_size, 10 * 1024)), tracesdb: tracesdb, bloom_config: config.blooms, - enabled: enabled, + enabled: config.enabled, extras: extras, - }; - - Ok(db) + } } fn cache_size(&self) -> usize { @@ -419,7 +401,7 @@ mod tests { use util::{Address, U256, H256, Database, DatabaseConfig, DBTransaction}; use devtools::RandomTempPath; use header::BlockNumber; - use trace::{Config, Switch, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest}; + use trace::{Config, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest}; use trace::{Filter, LocalizedTrace, AddressesFilter, TraceError}; use trace::trace::{Call, Action, Res}; use trace::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces}; @@ -474,22 +456,10 @@ mod tests { let mut config = Config::default(); // set autotracing - config.enabled = Switch::Auto; + config.enabled = false; { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); - assert_eq!(tracedb.tracing_enabled(), false); - } - - { - let tracedb = 
TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); - assert_eq!(tracedb.tracing_enabled(), false); - } - - config.enabled = Switch::Off; - - { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); + let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)); assert_eq!(tracedb.tracing_enabled(), false); } } @@ -501,50 +471,12 @@ mod tests { let mut config = Config::default(); // set tracing on - config.enabled = Switch::On; + config.enabled = true; { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); + let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)); assert_eq!(tracedb.tracing_enabled(), true); } - - { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); - assert_eq!(tracedb.tracing_enabled(), true); - } - - config.enabled = Switch::Auto; - - { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); - assert_eq!(tracedb.tracing_enabled(), true); - } - - config.enabled = Switch::Off; - - { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); - assert_eq!(tracedb.tracing_enabled(), false); - } - } - - #[test] - #[should_panic] - fn test_invalid_reopening_db() { - let temp = RandomTempPath::new(); - let db = new_db(temp.as_str()); - let mut config = Config::default(); - - // set tracing on - config.enabled = Switch::Off; - - { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); - assert_eq!(tracedb.tracing_enabled(), true); - } - - config.enabled = Switch::On; - TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); // should panic! } fn create_simple_import_request(block_number: BlockNumber, block_hash: H256) -> ImportRequest { @@ -595,7 +527,7 @@ mod tests { let temp = RandomTempPath::new(); let db = Arc::new(Database::open(&DatabaseConfig::with_columns(::db::NUM_COLUMNS), temp.as_str()).unwrap()); let mut config = Config::default(); - config.enabled = Switch::On; + config.enabled = true; let block_0 = H256::from(0xa1); let block_1 = H256::from(0xa2); let tx_0 = H256::from(0xff); @@ -607,7 +539,7 @@ mod tests { extras.transaction_hashes.insert(0, vec![tx_0.clone()]); extras.transaction_hashes.insert(1, vec![tx_1.clone()]); - let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras)).unwrap(); + let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras)); // import block 0 let request = create_simple_import_request(0, block_0.clone()); @@ -679,10 +611,10 @@ mod tests { extras.transaction_hashes.insert(0, vec![tx_0.clone()]); // set tracing on - config.enabled = Switch::On; + config.enabled = true; { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras.clone())).unwrap(); + let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras.clone())); // import block 0 let request = create_simple_import_request(0, block_0.clone()); @@ -692,7 +624,7 @@ mod tests { } { - let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras)).unwrap(); + let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras)); let traces = tracedb.transaction_traces(0, 0); assert_eq!(traces.unwrap(), vec![create_simple_localized_trace(0, block_0, tx_0)]); } diff --git a/ethcore/src/trace/mod.rs b/ethcore/src/trace/mod.rs index 06604450f..da3bbc02b 100644 --- a/ethcore/src/trace/mod.rs +++ b/ethcore/src/trace/mod.rs @@ -26,7 +26,7 @@ mod noop_tracer; pub use 
types::trace_types::{filter, flat, localized, trace}; pub use types::trace_types::error::Error as TraceError; -pub use self::config::{Config, Switch}; +pub use self::config::Config; pub use self::db::TraceDB; pub use self::error::Error; pub use types::trace_types::trace::{VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff}; diff --git a/ethcore/src/types/filter.rs b/ethcore/src/types/filter.rs index 91338899f..6274d63f4 100644 --- a/ethcore/src/types/filter.rs +++ b/ethcore/src/types/filter.rs @@ -41,6 +41,12 @@ pub struct Filter { /// If None, match all. /// If specified, log must contain one of these topics. pub topics: Vec>>, + + /// Logs limit + /// + /// If None, return all logs + /// If specified, should only return *last* `n` logs. + pub limit: Option, } impl Clone for Filter { @@ -59,7 +65,8 @@ impl Clone for Filter { from_block: self.from_block.clone(), to_block: self.to_block.clone(), address: self.address.clone(), - topics: topics[..].to_vec() + topics: topics[..].to_vec(), + limit: self.limit, } } } @@ -117,6 +124,7 @@ mod tests { to_block: BlockID::Latest, address: None, topics: vec![None, None, None, None], + limit: None, }; let possibilities = none_filter.bloom_possibilities(); @@ -136,7 +144,8 @@ mod tests { None, None, None, - ] + ], + limit: None, }; let possibilities = filter.bloom_possibilities(); @@ -154,7 +163,8 @@ mod tests { Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into()]), None, None, - ] + ], + limit: None, }; let possibilities = filter.bloom_possibilities(); @@ -181,7 +191,8 @@ mod tests { ]), Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23f9".into()]), None - ] + ], + limit: None, }; // number of possibilites should be equal 2 * 2 * 2 * 1 = 8 @@ -201,7 +212,8 @@ mod tests { Some(vec!["ff74e91598aed6ae5d2fdcf8b24cd2c7be49a0808112a305069355b7160f23fa".into()]), None, None, - ] + ], + limit: None, }; let entry0 = LogEntry { diff --git a/ethcore/src/types/transaction.rs b/ethcore/src/types/transaction.rs index f7e582f11..386b85f7e 100644 --- a/ethcore/src/types/transaction.rs +++ b/ethcore/src/types/transaction.rs @@ -21,7 +21,7 @@ use std::cell::*; use rlp::*; use util::sha3::Hashable; use util::{H256, Address, U256, Bytes}; -use ethkey::{Signature, sign, Secret, recover, public_to_address, Error as EthkeyError}; +use ethkey::{Signature, sign, Secret, Public, recover, public_to_address, Error as EthkeyError}; use error::*; use evm::Schedule; use header::BlockNumber; @@ -305,13 +305,18 @@ impl SignedTransaction { match sender { Some(s) => Ok(s), None => { - let s = public_to_address(&try!(recover(&self.signature(), &self.unsigned.hash()))); + let s = public_to_address(&try!(self.public_key())); self.sender.set(Some(s)); Ok(s) } } } + /// Returns the public key of the sender. + pub fn public_key(&self) -> Result { + Ok(try!(recover(&self.signature(), &self.unsigned.hash()))) + } + /// Do basic validation, checking for valid signature and minimum gas, // TODO: consider use in block validation. 
#[cfg(test)] diff --git a/ethcore/src/verification/verification.rs b/ethcore/src/verification/verification.rs index 86d3623c2..4e1305a33 100644 --- a/ethcore/src/verification/verification.rs +++ b/ethcore/src/verification/verification.rs @@ -215,7 +215,7 @@ fn verify_parent(header: &Header, parent: &Header) -> Result<(), Error> { fn verify_block_integrity(block: &[u8], transactions_root: &H256, uncles_hash: &H256) -> Result<(), Error> { let block = UntrustedRlp::new(block); let tx = try!(block.at(1)); - let expected_root = &ordered_trie_root(tx.iter().map(|r| r.as_raw().to_vec()).collect()); //TODO: get rid of vectors here + let expected_root = &ordered_trie_root(tx.iter().map(|r| r.as_raw().to_vec())); //TODO: get rid of vectors here if expected_root != transactions_root { return Err(From::from(BlockError::InvalidTransactionsRoot(Mismatch { expected: expected_root.clone(), found: transactions_root.clone() }))) } @@ -422,7 +422,7 @@ mod tests { let mut uncles_rlp = RlpStream::new(); uncles_rlp.append(&good_uncles); let good_uncles_hash = uncles_rlp.as_raw().sha3(); - let good_transactions_root = ordered_trie_root(good_transactions.iter().map(|t| ::rlp::encode::(t).to_vec()).collect()); + let good_transactions_root = ordered_trie_root(good_transactions.iter().map(|t| ::rlp::encode::(t).to_vec())); let mut parent = good.clone(); parent.set_number(9); diff --git a/ethcrypto/src/lib.rs b/ethcrypto/src/lib.rs index 680718d12..7a1aba48c 100644 --- a/ethcrypto/src/lib.rs +++ b/ethcrypto/src/lib.rs @@ -22,6 +22,7 @@ extern crate crypto as rcrypto; extern crate secp256k1; extern crate ethkey; +use std::fmt; use tiny_keccak::Keccak; use rcrypto::pbkdf2::pbkdf2; use rcrypto::scrypt::{scrypt, ScryptParams}; @@ -39,6 +40,17 @@ pub enum Error { InvalidMessage, } +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + let s = match *self { + Error::Secp(ref err) => err.to_string(), + Error::InvalidMessage => "Invalid message".into(), + }; + + write!(f, "{}", s) + } +} + impl From for Error { fn from(e: SecpError) -> Self { Error::Secp(e) diff --git a/ethkey/Cargo.toml b/ethkey/Cargo.toml index 8e95d8519..319a38b20 100644 --- a/ethkey/Cargo.toml +++ b/ethkey/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "ethkey" version = "0.2.0" -authors = ["debris "] +authors = ["Ethcore "] [dependencies] rand = "0.3.14" diff --git a/ethstore/src/account/safe_account.rs b/ethstore/src/account/safe_account.rs index 56e8494f7..38069a718 100644 --- a/ethstore/src/account/safe_account.rs +++ b/ethstore/src/account/safe_account.rs @@ -16,7 +16,7 @@ use ethkey::{KeyPair, sign, Address, Secret, Signature, Message}; use {json, Error, crypto}; -use crypto::Keccak256; +use crypto::{Keccak256}; use random::Random; use account::{Version, Cipher, Kdf, Aes128Ctr, Pbkdf2, Prf}; @@ -170,6 +170,11 @@ impl SafeAccount { sign(&secret, message).map_err(From::from) } + pub fn decrypt(&self, password: &str, shared_mac: &[u8], message: &[u8]) -> Result, Error> { + let secret = try!(self.crypto.secret(password)); + crypto::ecies::decrypt(&secret, shared_mac, message).map_err(From::from) + } + pub fn change_password(&self, old_password: &str, new_password: &str, iterations: u32) -> Result { let secret = try!(self.crypto.secret(old_password)); let result = SafeAccount { diff --git a/ethstore/src/error.rs b/ethstore/src/error.rs index a5d3de745..cee689b24 100644 --- a/ethstore/src/error.rs +++ b/ethstore/src/error.rs @@ -17,6 +17,7 @@ use std::fmt; use std::io::Error as IoError; use ethkey::Error as 
EthKeyError; +use crypto::Error as EthCryptoError; #[derive(Debug)] pub enum Error { @@ -28,6 +29,7 @@ pub enum Error { InvalidKeyFile(String), CreationFailed, EthKey(EthKeyError), + EthCrypto(EthCryptoError), Custom(String), } @@ -42,6 +44,7 @@ impl fmt::Display for Error { Error::InvalidKeyFile(ref reason) => format!("Invalid key file: {}", reason), Error::CreationFailed => "Account creation failed".into(), Error::EthKey(ref err) => err.to_string(), + Error::EthCrypto(ref err) => err.to_string(), Error::Custom(ref s) => s.clone(), }; @@ -60,3 +63,9 @@ impl From for Error { Error::EthKey(err) } } + +impl From for Error { + fn from(err: EthCryptoError) -> Self { + Error::EthCrypto(err) + } +} diff --git a/ethstore/src/ethstore.rs b/ethstore/src/ethstore.rs index 29f4c757c..8de988b9a 100644 --- a/ethstore/src/ethstore.rs +++ b/ethstore/src/ethstore.rs @@ -144,6 +144,11 @@ impl SecretStore for EthStore { account.sign(password, message) } + fn decrypt(&self, account: &Address, password: &str, shared_mac: &[u8], message: &[u8]) -> Result, Error> { + let account = try!(self.get(account)); + account.decrypt(password, shared_mac, message) + } + fn uuid(&self, address: &Address) -> Result { let account = try!(self.get(address)); Ok(account.id.into()) diff --git a/ethstore/src/secret_store.rs b/ethstore/src/secret_store.rs index 90ed79fb5..aa79cb8b6 100644 --- a/ethstore/src/secret_store.rs +++ b/ethstore/src/secret_store.rs @@ -20,33 +20,24 @@ use json::UUID; pub trait SecretStore: Send + Sync { fn insert_account(&self, secret: Secret, password: &str) -> Result; - fn import_presale(&self, json: &[u8], password: &str) -> Result; - fn import_wallet(&self, json: &[u8], password: &str) -> Result; - - fn accounts(&self) -> Result, Error>; - fn change_password(&self, account: &Address, old_password: &str, new_password: &str) -> Result<(), Error>; - fn remove_account(&self, account: &Address, password: &str) -> Result<(), Error>; fn sign(&self, account: &Address, password: &str, message: &Message) -> Result; + fn decrypt(&self, account: &Address, password: &str, shared_mac: &[u8], message: &[u8]) -> Result, Error>; + fn accounts(&self) -> Result, Error>; fn uuid(&self, account: &Address) -> Result; - fn name(&self, account: &Address) -> Result; - fn meta(&self, account: &Address) -> Result; fn set_name(&self, address: &Address, name: String) -> Result<(), Error>; - fn set_meta(&self, address: &Address, meta: String) -> Result<(), Error>; fn local_path(&self) -> String; - fn list_geth_accounts(&self, testnet: bool) -> Vec
<Address>; - fn import_geth_accounts(&self, desired: Vec<Address>
, testnet: bool) -> Result, Error>; } diff --git a/parity/blockchain.rs b/parity/blockchain.rs index 72215ca59..3dfdac804 100644 --- a/parity/blockchain.rs +++ b/parity/blockchain.rs @@ -26,15 +26,16 @@ use io::{PanicHandler, ForwardPanic}; use util::{ToPretty, Uint}; use rlp::PayloadInfo; use ethcore::service::ClientService; -use ethcore::client::{Mode, DatabaseCompactionProfile, Switch, VMType, BlockImportError, BlockChainClient, BlockID}; +use ethcore::client::{Mode, DatabaseCompactionProfile, VMType, BlockImportError, BlockChainClient, BlockID}; use ethcore::error::ImportError; use ethcore::miner::Miner; use cache::CacheConfig; +use params::{SpecType, Pruning, Switch, tracing_switch_to_bool}; use informant::{Informant, MillisecondDuration}; use io_handler::ImportIoHandler; -use params::{SpecType, Pruning}; use helpers::{to_client_config, execute_upgrades}; use dir::Directories; +use user_defaults::UserDefaults; use fdlimit; #[derive(Debug, PartialEq)] @@ -113,29 +114,44 @@ fn execute_import(cmd: ImportBlockchain) -> Result { // Setup panic handler let panic_handler = PanicHandler::new_in_arc(); + // Setup logging + let _logger = setup_log(&cmd.logger_config); + + // create dirs used by parity + try!(cmd.dirs.create_dirs()); + // load spec file let spec = try!(cmd.spec.spec()); // load genesis hash let genesis_hash = spec.genesis_header().hash(); - // Setup logging - let _logger = setup_log(&cmd.logger_config); + // database paths + let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone()); + + // user defaults path + let user_defaults_path = db_dirs.user_defaults_path(); + + // load user defaults + let mut user_defaults = try!(UserDefaults::load(&user_defaults_path)); + + // check if tracing is on + let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults)); fdlimit::raise_fd_limit(); // select pruning algorithm - let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref()); + let algorithm = cmd.pruning.to_algorithm(&user_defaults); // prepare client and snapshot paths. 
- let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm); - let snapshot_path = cmd.dirs.snapshot_path(genesis_hash, spec.fork_name.as_ref()); + let client_path = db_dirs.client_path(algorithm); + let snapshot_path = db_dirs.snapshot_path(); // execute upgrades - try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile())); + try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile())); // prepare client config - let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), spec.fork_name.as_ref()); + let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), algorithm); // build client let service = try!(ClientService::start( @@ -220,6 +236,12 @@ fn execute_import(cmd: ImportBlockchain) -> Result { } } client.flush_queue(); + + // save user defaults + user_defaults.pruning = algorithm; + user_defaults.tracing = tracing; + try!(user_defaults.save(&user_defaults_path)); + let report = client.report(); let ms = timer.elapsed().as_milliseconds(); @@ -238,6 +260,12 @@ fn execute_export(cmd: ExportBlockchain) -> Result { // Setup panic handler let panic_handler = PanicHandler::new_in_arc(); + // Setup logging + let _logger = setup_log(&cmd.logger_config); + + // create dirs used by parity + try!(cmd.dirs.create_dirs()); + let format = cmd.format.unwrap_or_default(); // load spec file @@ -246,23 +274,32 @@ fn execute_export(cmd: ExportBlockchain) -> Result { // load genesis hash let genesis_hash = spec.genesis_header().hash(); - // Setup logging - let _logger = setup_log(&cmd.logger_config); + // database paths + let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone()); + + // user defaults path + let user_defaults_path = db_dirs.user_defaults_path(); + + // load user defaults + let user_defaults = try!(UserDefaults::load(&user_defaults_path)); + + // check if tracing is on + let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults)); fdlimit::raise_fd_limit(); // select pruning algorithm - let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref()); + let algorithm = cmd.pruning.to_algorithm(&user_defaults); // prepare client and snapshot paths. - let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm); - let snapshot_path = cmd.dirs.snapshot_path(genesis_hash, spec.fork_name.as_ref()); + let client_path = db_dirs.client_path(algorithm); + let snapshot_path = db_dirs.snapshot_path(); // execute upgrades - try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile())); + try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile())); // prepare client config - let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, cmd.wal, VMType::default(), "".into(), spec.fork_name.as_ref()); + let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, cmd.compaction, cmd.wal, VMType::default(), "".into(), algorithm); let service = try!(ClientService::start( client_config, diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index aa7b460db..b8b10ec1d 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -32,6 +32,8 @@ usage! 
{ cmd_snapshot: bool, cmd_restore: bool, cmd_ui: bool, + cmd_tools: bool, + cmd_hash: bool, // Arguments arg_pid_file: String, @@ -441,6 +443,8 @@ mod tests { cmd_snapshot: false, cmd_restore: false, cmd_ui: false, + cmd_tools: false, + cmd_hash: false, // Arguments arg_pid_file: "".into(), diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index 1749cd4b7..a94f55a8d 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -14,6 +14,7 @@ Usage: parity signer new-token [options] parity snapshot [options] parity restore [ ] [options] + parity tools hash Operating Options: --mode MODE Set the operating mode. MODE can be one of: @@ -283,4 +284,3 @@ Miscellaneous Options: --no-color Don't use terminal color codes in output. (default: {flag_no_color}) -v --version Show information about version. -h --help Show this screen. - diff --git a/parity/configuration.rs b/parity/configuration.rs index 6ee89eb35..1aa338c26 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -51,6 +51,7 @@ pub enum Cmd { Blockchain(BlockchainCmd), SignerToken(String), Snapshot(SnapshotCommand), + Hash(Option), } #[derive(Debug, PartialEq)] @@ -94,8 +95,10 @@ impl Configuration { let cmd = if self.args.flag_version { Cmd::Version - } else if self.args.cmd_signer { + } else if self.args.cmd_signer && self.args.cmd_new_token { Cmd::SignerToken(dirs.signer) + } else if self.args.cmd_tools && self.args.cmd_hash { + Cmd::Hash(self.args.arg_file) } else if self.args.cmd_account { let account_cmd = if self.args.cmd_new { let new_acc = NewAccount { diff --git a/parity/dir.rs b/parity/dir.rs index d31e81e2c..158b5b2c5 100644 --- a/parity/dir.rs +++ b/parity/dir.rs @@ -52,32 +52,13 @@ impl Directories { Ok(()) } - /// Get the chain's root path. - pub fn chain_path(&self, genesis_hash: H256, fork_name: Option<&String>) -> PathBuf { - let mut dir = Path::new(&self.db).to_path_buf(); - dir.push(format!("{:?}{}", H64::from(genesis_hash), fork_name.map(|f| format!("-{}", f)).unwrap_or_default())); - dir - } - - /// Get the root path for database - pub fn db_version_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf { - let mut dir = self.chain_path(genesis_hash, fork_name); - dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str())); - dir - } - - /// Get the path for the databases given the genesis_hash and information on the databases. - pub fn client_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf { - let mut dir = self.db_version_path(genesis_hash, fork_name, pruning); - dir.push("db"); - dir - } - - /// Get the path for the snapshot directory given the genesis hash and fork name. - pub fn snapshot_path(&self, genesis_hash: H256, fork_name: Option<&String>) -> PathBuf { - let mut dir = self.chain_path(genesis_hash, fork_name); - dir.push("snapshot"); - dir + /// Database paths. 
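/// Illustrative layout (an editorial sketch derived from the `DatabaseDirectories`
/// helpers below; `$base` stands for the configured db path, fork suffix omitted):
///
///     $base/{H64(genesis_hash)}                                              fork_path()
///     $base/{H64(genesis_hash)}/v{LEGACY_CLIENT_DB_VER_STR}-sec-{pruning}/db  client_path(pruning)
///     $base/{H64(genesis_hash)}/snapshot                                     snapshot_path()
///     $base/{H64(genesis_hash)}/user_defaults                                user_defaults_path()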
+ pub fn database(&self, genesis_hash: H256, fork_name: Option) -> DatabaseDirectories { + DatabaseDirectories { + path: self.db.clone(), + genesis_hash: genesis_hash, + fork_name: fork_name, + } } /// Get the ipc sockets path @@ -88,6 +69,49 @@ impl Directories { } } +#[derive(Debug, PartialEq)] +pub struct DatabaseDirectories { + pub path: String, + pub genesis_hash: H256, + pub fork_name: Option, +} + +impl DatabaseDirectories { + fn fork_path(&self) -> PathBuf { + let mut dir = Path::new(&self.path).to_path_buf(); + dir.push(format!("{:?}{}", H64::from(self.genesis_hash), self.fork_name.as_ref().map(|f| format!("-{}", f)).unwrap_or_default())); + dir + } + + /// Get the root path for database + pub fn version_path(&self, pruning: Algorithm) -> PathBuf { + let mut dir = self.fork_path(); + dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str())); + dir + } + + /// Get the path for the databases given the genesis_hash and information on the databases. + pub fn client_path(&self, pruning: Algorithm) -> PathBuf { + let mut dir = self.version_path(pruning); + dir.push("db"); + dir + } + + /// Get user defaults path + pub fn user_defaults_path(&self) -> PathBuf { + let mut dir = self.fork_path(); + dir.push("user_defaults"); + dir + } + + /// Get the path for the snapshot directory given the genesis hash and fork name. + pub fn snapshot_path(&self) -> PathBuf { + let mut dir = self.fork_path(); + dir.push("snapshot"); + dir + } +} + #[cfg(test)] mod tests { use super::Directories; diff --git a/parity/helpers.rs b/parity/helpers.rs index 778dc1265..0649e7fe9 100644 --- a/parity/helpers.rs +++ b/parity/helpers.rs @@ -19,13 +19,12 @@ use std::io::{Write, Read, BufReader, BufRead}; use std::time::Duration; use std::path::Path; use std::fs::File; -use util::{clean_0x, U256, Uint, Address, path, H256, CompactionProfile}; +use util::{clean_0x, U256, Uint, Address, path, CompactionProfile}; use util::journaldb::Algorithm; -use ethcore::client::{Mode, BlockID, Switch, VMType, DatabaseCompactionProfile, ClientConfig}; +use ethcore::client::{Mode, BlockID, VMType, DatabaseCompactionProfile, ClientConfig}; use ethcore::miner::PendingSet; use cache::CacheConfig; -use dir::Directories; -use params::Pruning; +use dir::DatabaseDirectories; use upgrade::upgrade; use migration::migrate; use ethsync::is_valid_node_url; @@ -190,16 +189,13 @@ pub fn default_network_config() -> ::ethsync::NetworkConfiguration { #[cfg_attr(feature = "dev", allow(too_many_arguments))] pub fn to_client_config( cache_config: &CacheConfig, - dirs: &Directories, - genesis_hash: H256, mode: Mode, - tracing: Switch, - pruning: Pruning, + tracing: bool, compaction: DatabaseCompactionProfile, wal: bool, vm_type: VMType, name: String, - fork_name: Option<&String>, + pruning: Algorithm, ) -> ClientConfig { let mut client_config = ClientConfig::default(); @@ -221,7 +217,7 @@ pub fn to_client_config( client_config.mode = mode; client_config.tracing.enabled = tracing; - client_config.pruning = pruning.to_algorithm(dirs, genesis_hash, fork_name); + client_config.pruning = pruning; client_config.db_compaction = compaction; client_config.db_wal = wal; client_config.vm_type = vm_type; @@ -230,14 +226,12 @@ pub fn to_client_config( } pub fn execute_upgrades( - dirs: &Directories, - genesis_hash: H256, - fork_name: Option<&String>, + dirs: &DatabaseDirectories, pruning: Algorithm, compaction_profile: CompactionProfile ) -> Result<(), String> { - match upgrade(Some(&dirs.db)) { + match upgrade(Some(&dirs.path)) { 
Ok(upgrades_applied) if upgrades_applied > 0 => { debug!("Executed {} upgrade scripts - ok", upgrades_applied); }, @@ -247,7 +241,7 @@ pub fn execute_upgrades( _ => {}, } - let client_path = dirs.db_version_path(genesis_hash, fork_name, pruning); + let client_path = dirs.version_path(pruning); migrate(&client_path, pruning, compaction_profile).map_err(|e| format!("{}", e)) } diff --git a/parity/main.rs b/parity/main.rs index 8cb348958..b74af7b3d 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -39,6 +39,8 @@ extern crate semver; extern crate ethcore_io as io; extern crate ethcore_ipc as ipc; extern crate ethcore_ipc_nano as nanoipc; +extern crate serde; +extern crate serde_json; extern crate rlp; extern crate json_ipc_server as jsonipc; @@ -106,15 +108,29 @@ mod run; mod sync; #[cfg(feature="ipc")] mod boot; +mod user_defaults; #[cfg(feature="stratum")] mod stratum; use std::{process, env}; +use std::io::BufReader; +use std::fs::File; +use util::sha3::sha3; use cli::Args; use configuration::{Cmd, Configuration}; use deprecated::find_deprecated; +fn print_hash_of(maybe_file: Option) -> Result { + if let Some(file) = maybe_file { + let mut f = BufReader::new(try!(File::open(&file).map_err(|_| "Unable to open file".to_owned()))); + let hash = try!(sha3(&mut f).map_err(|_| "Unable to read from file".to_owned())); + Ok(hash.hex()) + } else { + Err("Streaming from standard input not yet supported. Specify a file.".to_owned()) + } +} + fn execute(command: Cmd) -> Result { match command { Cmd::Run(run_cmd) => { @@ -122,6 +138,7 @@ fn execute(command: Cmd) -> Result { Ok("".into()) }, Cmd::Version => Ok(Args::print_version()), + Cmd::Hash(maybe_file) => print_hash_of(maybe_file), Cmd::Account(account_cmd) => account::execute(account_cmd), Cmd::ImportPresaleWallet(presale_cmd) => presale::execute(presale_cmd), Cmd::Blockchain(blockchain_cmd) => blockchain::execute(blockchain_cmd), diff --git a/parity/params.rs b/parity/params.rs index c67520aa1..71f702cfb 100644 --- a/parity/params.rs +++ b/parity/params.rs @@ -14,15 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
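// Editorial usage sketch for the `print_hash_of` helper introduced above (the file
// name is hypothetical): `parity tools hash genesis.json` resolves to
// `Cmd::Hash(Some("genesis.json".into()))`, which calls
//
//     print_hash_of(Some("genesis.json".into()))  // -> Ok(hex-encoded Keccak-256 of the file contents)
//
// and prints the returned hex string, or the error message if the file cannot be
// opened or read.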
-use std::str::FromStr; -use std::fs; +use std::{str, fs}; use std::time::Duration; -use util::{H256, Address, U256, version_data}; +use util::{Address, U256, version_data}; use util::journaldb::Algorithm; use ethcore::spec::Spec; use ethcore::ethereum; use ethcore::miner::{GasPricer, GasPriceCalibratorOptions}; -use dir::Directories; +use user_defaults::UserDefaults; #[derive(Debug, PartialEq)] pub enum SpecType { @@ -39,7 +38,7 @@ impl Default for SpecType { } } -impl FromStr for SpecType { +impl str::FromStr for SpecType { type Err = String; fn from_str(s: &str) -> Result { @@ -81,7 +80,7 @@ impl Default for Pruning { } } -impl FromStr for Pruning { +impl str::FromStr for Pruning { type Err = String; fn from_str(s: &str) -> Result { @@ -93,24 +92,12 @@ impl FromStr for Pruning { } impl Pruning { - pub fn to_algorithm(&self, dirs: &Directories, genesis_hash: H256, fork_name: Option<&String>) -> Algorithm { + pub fn to_algorithm(&self, user_defaults: &UserDefaults) -> Algorithm { match *self { Pruning::Specific(algo) => algo, - Pruning::Auto => Self::find_best_db(dirs, genesis_hash, fork_name), + Pruning::Auto => user_defaults.pruning, } } - - fn find_best_db(dirs: &Directories, genesis_hash: H256, fork_name: Option<&String>) -> Algorithm { - let mut algo_types = Algorithm::all_types(); - // if all dbs have the same modification time, the last element is the default one - algo_types.push(Algorithm::default()); - - algo_types.into_iter().max_by_key(|i| { - let mut client_path = dirs.client_path(genesis_hash, fork_name, *i); - client_path.push("CURRENT"); - fs::metadata(&client_path).and_then(|m| m.modified()).ok() - }).unwrap() - } } #[derive(Debug, PartialEq)] @@ -128,7 +115,7 @@ impl Default for ResealPolicy { } } -impl FromStr for ResealPolicy { +impl str::FromStr for ResealPolicy { type Err = String; fn from_str(s: &str) -> Result { @@ -223,10 +210,50 @@ impl Default for MinerExtras { } } +/// 3-value enum. +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum Switch { + /// True. + On, + /// False. + Off, + /// Auto. 
+ Auto, +} + +impl Default for Switch { + fn default() -> Self { + Switch::Auto + } +} + +impl str::FromStr for Switch { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "on" => Ok(Switch::On), + "off" => Ok(Switch::Off), + "auto" => Ok(Switch::Auto), + other => Err(format!("Invalid switch value: {}", other)) + } + } +} + +pub fn tracing_switch_to_bool(switch: Switch, user_defaults: &UserDefaults) -> Result { + match (user_defaults.is_first_launch, switch, user_defaults.tracing) { + (false, Switch::On, false) => Err("TraceDB resync required".into()), + (_, Switch::On, _) => Ok(true), + (_, Switch::Off, _) => Ok(false), + (_, Switch::Auto, def) => Ok(def), + } +} + #[cfg(test)] mod tests { use util::journaldb::Algorithm; - use super::{SpecType, Pruning, ResealPolicy}; + use user_defaults::UserDefaults; + use super::{SpecType, Pruning, ResealPolicy, Switch, tracing_switch_to_bool}; #[test] fn test_spec_type_parsing() { @@ -274,4 +301,36 @@ mod tests { let all = ResealPolicy { own: true, external: true }; assert_eq!(all, ResealPolicy::default()); } + + #[test] + fn test_switch_parsing() { + assert_eq!(Switch::On, "on".parse().unwrap()); + assert_eq!(Switch::Off, "off".parse().unwrap()); + assert_eq!(Switch::Auto, "auto".parse().unwrap()); + } + + #[test] + fn test_switch_default() { + assert_eq!(Switch::default(), Switch::Auto); + } + + fn user_defaults_with_tracing(first_launch: bool, tracing: bool) -> UserDefaults { + let mut ud = UserDefaults::default(); + ud.is_first_launch = first_launch; + ud.tracing = tracing; + ud + } + + #[test] + fn test_switch_to_bool() { + assert!(!tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(true, true)).unwrap()); + assert!(!tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(true, false)).unwrap()); + assert!(!tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(false, true)).unwrap()); + assert!(!tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(false, false)).unwrap()); + + assert!(tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(true, true)).unwrap()); + assert!(tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(true, false)).unwrap()); + assert!(tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(false, true)).unwrap()); + assert!(tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(false, false)).is_err()); + } } diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index 624c6b3f4..29b33b844 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -25,7 +25,7 @@ use ethcore::client::Client; use ethcore::account_provider::AccountProvider; use ethsync::{ManageNetwork, SyncProvider}; use ethcore_rpc::{Extendable, NetworkSettings}; -pub use ethcore_rpc::ConfirmationsQueue; +pub use ethcore_rpc::SignerService; #[derive(Debug, PartialEq, Clone, Eq, Hash)] @@ -94,7 +94,7 @@ impl FromStr for ApiSet { pub struct Dependencies { pub signer_port: Option, - pub signer_queue: Arc, + pub signer_service: Arc, pub client: Arc, pub sync: Arc, pub net: Arc, @@ -173,7 +173,7 @@ pub fn setup_rpc(server: T, deps: Arc, apis: ApiSet server.add_delegate(filter_client.to_delegate()); if deps.signer_port.is_some() { - server.add_delegate(EthSigningQueueClient::new(&deps.signer_queue, &deps.client, &deps.miner, &deps.secret_store).to_delegate()); + server.add_delegate(EthSigningQueueClient::new(&deps.signer_service, &deps.client, &deps.miner, &deps.secret_store).to_delegate()); } else { server.add_delegate(EthSigningUnsafeClient::new(&deps.client, 
&deps.secret_store, &deps.miner).to_delegate()); } @@ -182,11 +182,11 @@ pub fn setup_rpc(server: T, deps: Arc, apis: ApiSet server.add_delegate(PersonalClient::new(&deps.secret_store, &deps.client, &deps.miner, deps.signer_port, deps.geth_compatibility).to_delegate()); }, Api::Signer => { - server.add_delegate(SignerClient::new(&deps.secret_store, &deps.client, &deps.miner, &deps.signer_queue).to_delegate()); + server.add_delegate(SignerClient::new(&deps.secret_store, &deps.client, &deps.miner, &deps.signer_service).to_delegate()); }, Api::Ethcore => { - let queue = deps.signer_port.map(|_| deps.signer_queue.clone()); - server.add_delegate(EthcoreClient::new(&deps.client, &deps.miner, &deps.sync, &deps.net_service, deps.logger.clone(), deps.settings.clone(), queue).to_delegate()) + let signer = deps.signer_port.map(|_| deps.signer_service.clone()); + server.add_delegate(EthcoreClient::new(&deps.client, &deps.miner, &deps.sync, &deps.net_service, deps.logger.clone(), deps.settings.clone(), signer).to_delegate()) }, Api::EthcoreSet => { server.add_delegate(EthcoreSetClient::new(&deps.client, &deps.miner, &deps.net_service).to_delegate()) diff --git a/parity/run.rs b/parity/run.rs index cefd8bb21..e95b5c9f5 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -23,7 +23,7 @@ use ethcore_rpc::NetworkSettings; use ethsync::NetworkConfiguration; use util::{Colour, version, U256}; use io::{MayPanic, ForwardPanic, PanicHandler}; -use ethcore::client::{Mode, Switch, DatabaseCompactionProfile, VMType, ChainNotify}; +use ethcore::client::{Mode, DatabaseCompactionProfile, VMType, ChainNotify}; use ethcore::service::ClientService; use ethcore::account_provider::AccountProvider; use ethcore::miner::{Miner, MinerService, ExternalMiner, MinerOptions}; @@ -35,10 +35,11 @@ use rpc::{HttpServer, IpcServer, HttpConfiguration, IpcConfiguration}; use signer::SignerServer; use dapps::WebappServer; use io_handler::ClientIoHandler; -use params::{SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras}; +use params::{SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch, tracing_switch_to_bool}; use helpers::{to_client_config, execute_upgrades, passwords_from_files}; use dir::Directories; use cache::CacheConfig; +use user_defaults::UserDefaults; use dapps; use signer; use modules; @@ -87,34 +88,45 @@ pub struct RunCmd { } pub fn execute(cmd: RunCmd) -> Result<(), String> { - // increase max number of open files - raise_fd_limit(); + // set up panic handler + let panic_handler = PanicHandler::new_in_arc(); // set up logger let logger = try!(setup_log(&cmd.logger_config)); - // set up panic handler - let panic_handler = PanicHandler::new_in_arc(); + // increase max number of open files + raise_fd_limit(); // create dirs used by parity try!(cmd.dirs.create_dirs()); // load spec let spec = try!(cmd.spec.spec()); - let fork_name = spec.fork_name.clone(); // load genesis hash let genesis_hash = spec.genesis_header().hash(); + // database paths + let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone()); + + // user defaults path + let user_defaults_path = db_dirs.user_defaults_path(); + + // load user defaults + let mut user_defaults = try!(UserDefaults::load(&user_defaults_path)); + + // check if tracing is on + let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults)); + // select pruning algorithm - let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, fork_name.as_ref()); + let algorithm = cmd.pruning.to_algorithm(&user_defaults); // prepare client and 
snapshot paths. - let client_path = cmd.dirs.client_path(genesis_hash, fork_name.as_ref(), algorithm); - let snapshot_path = cmd.dirs.snapshot_path(genesis_hash, fork_name.as_ref()); + let client_path = db_dirs.client_path(algorithm); + let snapshot_path = db_dirs.snapshot_path(); // execute upgrades - try!(execute_upgrades(&cmd.dirs, genesis_hash, fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile())); + try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile())); // run in daemon mode if let Some(pid_file) = cmd.daemon { @@ -152,16 +164,13 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { // create client config let client_config = to_client_config( &cmd.cache_config, - &cmd.dirs, - genesis_hash, cmd.mode, - cmd.tracing, - cmd.pruning, + tracing, cmd.compaction, cmd.wal, cmd.vm_type, cmd.name, - fork_name.as_ref(), + algorithm, ); // set up bootnodes @@ -206,9 +215,10 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { } // set up dependencies for rpc servers + let signer_path = cmd.signer_conf.signer_path.clone(); let deps_for_rpc_apis = Arc::new(rpc_apis::Dependencies { signer_port: cmd.signer_port, - signer_queue: Arc::new(rpc_apis::ConfirmationsQueue::default()), + signer_service: Arc::new(rpc_apis::SignerService::new(move || signer::new_token(signer_path.clone()))), client: client.clone(), sync: sync_provider.clone(), net: manage_network.clone(), @@ -287,6 +297,11 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { url::open(&format!("http://{}:{}/", cmd.dapps_conf.interface, cmd.dapps_conf.port)); } + // save user defaults + user_defaults.pruning = algorithm; + user_defaults.tracing = tracing; + try!(user_defaults.save(&user_defaults_path)); + // Handle exit wait_for_exit(panic_handler, http_server, ipc_server, dapps_server, signer_server); diff --git a/parity/signer.rs b/parity/signer.rs index e6924dcef..b60bc7211 100644 --- a/parity/signer.rs +++ b/parity/signer.rs @@ -90,7 +90,7 @@ fn do_start(conf: Configuration, deps: Dependencies) -> Result. 
+ +use std::fs::File; +use std::io::Write; +use std::path::Path; +use std::collections::BTreeMap; +use serde::{Serialize, Serializer, Error, Deserialize, Deserializer}; +use serde::de::{Visitor, MapVisitor}; +use serde::de::impls::BTreeMapVisitor; +use serde_json::Value; +use serde_json::de::from_reader; +use serde_json::ser::to_string; +use util::journaldb::Algorithm; + +pub struct UserDefaults { + pub is_first_launch: bool, + pub pruning: Algorithm, + pub tracing: bool, +} + +impl Serialize for UserDefaults { + fn serialize(&self, serializer: &mut S) -> Result<(), S::Error> + where S: Serializer { + let mut map: BTreeMap = BTreeMap::new(); + map.insert("pruning".into(), Value::String(self.pruning.as_str().into())); + map.insert("tracing".into(), Value::Bool(self.tracing)); + map.serialize(serializer) + } +} + +struct UserDefaultsVisitor; + +impl Deserialize for UserDefaults { + fn deserialize(deserializer: &mut D) -> Result + where D: Deserializer { + deserializer.deserialize(UserDefaultsVisitor) + } +} + +impl Visitor for UserDefaultsVisitor { + type Value = UserDefaults; + + fn visit_map(&mut self, visitor: V) -> Result + where V: MapVisitor { + let mut map: BTreeMap = try!(BTreeMapVisitor::new().visit_map(visitor)); + let pruning: Value = try!(map.remove("pruning".into()).ok_or_else(|| Error::custom("missing pruning"))); + let pruning = try!(pruning.as_str().ok_or_else(|| Error::custom("invalid pruning value"))); + let pruning = try!(pruning.parse().map_err(|_| Error::custom("invalid pruning method"))); + let tracing: Value = try!(map.remove("tracing".into()).ok_or_else(|| Error::custom("missing tracing"))); + let tracing = try!(tracing.as_bool().ok_or_else(|| Error::custom("invalid tracing value"))); + + let user_defaults = UserDefaults { + is_first_launch: false, + pruning: pruning, + tracing: tracing, + }; + + Ok(user_defaults) + } +} + +impl Default for UserDefaults { + fn default() -> Self { + UserDefaults { + is_first_launch: true, + pruning: Algorithm::default(), + tracing: false, + } + } +} + +impl UserDefaults { + pub fn load
<P>
(path: P) -> Result<UserDefaults, String> where P: AsRef<Path> { + match File::open(path) { + Ok(file) => from_reader(file).map_err(|e| e.to_string()), + _ => Ok(UserDefaults::default()), + } + } + + pub fn save
<P>
(self, path: P) -> Result<(), String> where P: AsRef { + let mut file: File = try!(File::create(path).map_err(|_| "Cannot create user defaults file".to_owned())); + file.write_all(to_string(&self).unwrap().as_bytes()).map_err(|_| "Failed to save user defaults".to_owned()) + } +} diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 440e41fc9..c3f9cddbd 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -17,6 +17,7 @@ jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc-http-server.gi ethcore-io = { path = "../util/io" } ethcore-util = { path = "../util" } ethcore = { path = "../ethcore" } +ethcrypto = { path = "../ethcrypto" } ethkey = { path = "../ethkey" } ethstore = { path = "../ethstore" } ethash = { path = "../ethash" } diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 17d1837ae..7f2f11400 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -28,6 +28,7 @@ extern crate jsonrpc_http_server; extern crate ethcore_io as io; extern crate ethcore; extern crate ethkey; +extern crate ethcrypto as crypto; extern crate ethstore; extern crate ethsync; extern crate transient_hashmap; @@ -53,7 +54,7 @@ use self::jsonrpc_core::{IoHandler, IoDelegate}; pub use jsonrpc_http_server::{ServerBuilder, Server, RpcServerError}; pub mod v1; -pub use v1::{SigningQueue, ConfirmationsQueue, NetworkSettings}; +pub use v1::{SigningQueue, SignerService, ConfirmationsQueue, NetworkSettings}; /// An object that can be extended with `IoDelegates` pub trait Extendable { diff --git a/rpc/src/v1/helpers/auto_args.rs b/rpc/src/v1/helpers/auto_args.rs new file mode 100644 index 000000000..c7deb0436 --- /dev/null +++ b/rpc/src/v1/helpers/auto_args.rs @@ -0,0 +1,171 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Automatically serialize and deserialize parameters around a strongly-typed function. + +// because we reuse the type names as idents in the macros as a dirty hack to +// work around `concat_idents!` being unstable. +#![allow(non_snake_case)] + +use super::errors; + +use jsonrpc_core::{Error, Params, Value, from_params, to_value}; +use serde::{Serialize, Deserialize}; + +/// Auto-generates an RPC trait from trait definition. +/// +/// This just copies out all the methods, docs, and adds another +/// function `to_delegate` which will automatically wrap each strongly-typed +/// function in a wrapper which handles parameter and output type serialization. +/// +/// Every function must have a `#[name("rpc_nameHere")]` attribute after +/// its documentation, and no other attributes. All function names are +/// allowed except for `to_delegate`, which is auto-generated. +macro_rules! 
build_rpc_trait { + ( + $(#[$t_attr: meta])* + pub trait $name: ident { + $( + $(#[doc=$m_doc: expr])* #[name($rpc_name: expr)] + fn $method: ident (&self $(, $param: ty)*) -> $out: ty; + )* + } + ) => { + $(#[$t_attr])* + pub trait $name: Sized + Send + Sync + 'static { + $( + $(#[doc=$m_doc])* + fn $method(&self $(, $param)*) -> $out; + )* + + /// Transform this into an `IoDelegate`, automatically wrapping + /// the parameters. + fn to_delegate(self) -> ::jsonrpc_core::IoDelegate { + let mut del = ::jsonrpc_core::IoDelegate::new(self.into()); + $( + del.add_method($rpc_name, move |base, params| { + ($name::$method as fn(&_ $(, $param)*) -> $out).wrap_rpc(base, params) + }); + )* + del + } + } + } +} + +/// A wrapper type without an implementation of `Deserialize` +/// which allows a special implementation of `Wrap` for functions +/// that take a trailing default parameter. +pub struct Trailing(pub T); + +/// Wrapper trait for synchronous RPC functions. +pub trait Wrap { + fn wrap_rpc(&self, base: &B, params: Params) -> Result; +} + +// special impl for no parameters. +impl Wrap for fn(&B) -> Result + where B: Send + Sync + 'static, OUT: Serialize +{ + fn wrap_rpc(&self, base: &B, params: Params) -> Result { + ::v1::helpers::params::expect_no_params(params) + .and_then(|()| (self)(base)) + .map(to_value) + } +} + +// creates a wrapper implementation which deserializes the parameters, +// calls the function with concrete type, and serializes the output. +macro_rules! wrap { + ($($x: ident),+) => { + impl < + BASE: Send + Sync + 'static, + OUT: Serialize, + $($x: Deserialize,)+ + > Wrap for fn(&BASE, $($x,)+) -> Result { + fn wrap_rpc(&self, base: &BASE, params: Params) -> Result { + from_params::<($($x,)+)>(params).and_then(|($($x,)+)| { + (self)(base, $($x,)+) + }).map(to_value) + } + } + } +} + +// special impl for no parameters other than block parameter. +impl Wrap for fn(&B, Trailing) -> Result + where B: Send + Sync + 'static, OUT: Serialize, T: Default + Deserialize +{ + fn wrap_rpc(&self, base: &B, params: Params) -> Result { + let len = match params { + Params::Array(ref v) => v.len(), + Params::None => 0, + _ => return Err(errors::invalid_params("not an array", "")), + }; + + let (id,) = match len { + 0 => (T::default(),), + 1 => try!(from_params::<(T,)>(params)), + _ => return Err(Error::invalid_params()), + }; + + (self)(base, Trailing(id)).map(to_value) + } +} + +// similar to `wrap!`, but handles a single default trailing parameter +// accepts an additional argument indicating the number of non-trailing parameters. +macro_rules! 
wrap_with_trailing { + ($num: expr, $($x: ident),+) => { + impl < + BASE: Send + Sync + 'static, + OUT: Serialize, + $($x: Deserialize,)+ + TRAILING: Default + Deserialize, + > Wrap for fn(&BASE, $($x,)+ Trailing) -> Result { + fn wrap_rpc(&self, base: &BASE, params: Params) -> Result { + let len = match params { + Params::Array(ref v) => v.len(), + Params::None => 0, + _ => return Err(errors::invalid_params("not an array", "")), + }; + + let params = match len - $num { + 0 => from_params::<($($x,)+)>(params) + .map(|($($x,)+)| ($($x,)+ TRAILING::default())), + 1 => from_params::<($($x,)+ TRAILING)>(params) + .map(|($($x,)+ id)| ($($x,)+ id)), + _ => Err(Error::invalid_params()), + }; + + let ($($x,)+ id) = try!(params); + (self)(base, $($x,)+ Trailing(id)).map(to_value) + } + } + } +} + +wrap!(A, B, C, D, E); +wrap!(A, B, C, D); +wrap!(A, B, C); +wrap!(A, B); +wrap!(A); + +wrap_with_trailing!(5, A, B, C, D, E); +wrap_with_trailing!(4, A, B, C, D); +wrap_with_trailing!(3, A, B, C); +wrap_with_trailing!(2, A, B); +wrap_with_trailing!(1, A); \ No newline at end of file diff --git a/rpc/src/v1/helpers/dispatch.rs b/rpc/src/v1/helpers/dispatch.rs index 5587673d8..df2d8cbd3 100644 --- a/rpc/src/v1/helpers/dispatch.rs +++ b/rpc/src/v1/helpers/dispatch.rs @@ -41,7 +41,7 @@ fn prepare_transaction(client: &C, miner: &M, request: TransactionRequest) } } -pub fn dispatch_transaction(client: &C, miner: &M, signed_transaction: SignedTransaction) -> Result +pub fn dispatch_transaction(client: &C, miner: &M, signed_transaction: SignedTransaction) -> Result where C: MiningBlockChainClient, M: MinerService { let hash = RpcH256::from(signed_transaction.hash()); @@ -49,7 +49,7 @@ pub fn dispatch_transaction(client: &C, miner: &M, signed_transaction: Sig import .map_err(errors::from_transaction_error) - .map(|_| to_value(&hash)) + .map(|_| hash) } pub fn signature_with_password(accounts: &AccountProvider, address: Address, hash: H256, pass: String) -> Result { @@ -70,7 +70,7 @@ pub fn unlock_sign_and_dispatch(client: &C, miner: &M, request: Transactio }; trace!(target: "miner", "send_transaction: dispatching tx: {}", ::rlp::encode(&signed_transaction).to_vec().pretty()); - dispatch_transaction(&*client, &*miner, signed_transaction) + dispatch_transaction(&*client, &*miner, signed_transaction).map(to_value) } pub fn sign_and_dispatch(client: &C, miner: &M, request: TransactionRequest, account_provider: &AccountProvider, address: Address) -> Result @@ -84,7 +84,7 @@ pub fn sign_and_dispatch(client: &C, miner: &M, request: TransactionReques }; trace!(target: "miner", "send_transaction: dispatching tx: {}", ::rlp::encode(&signed_transaction).to_vec().pretty()); - dispatch_transaction(&*client, &*miner, signed_transaction) + dispatch_transaction(&*client, &*miner, signed_transaction).map(to_value) } pub fn default_gas_price(client: &C, miner: &M) -> U256 where C: MiningBlockChainClient, M: MinerService { diff --git a/rpc/src/v1/helpers/errors.rs b/rpc/src/v1/helpers/errors.rs index 8a11f2466..18e369208 100644 --- a/rpc/src/v1/helpers/errors.rs +++ b/rpc/src/v1/helpers/errors.rs @@ -139,6 +139,13 @@ pub fn no_author() -> Error { } } +pub fn token(e: String) -> Error { + Error { + code: ErrorCode::ServerError(codes::UNKNOWN_ERROR), + message: "There was an error when saving your authorization tokens.".into(), + data: Some(Value::String(e)), + } +} pub fn signer_disabled() -> Error { Error { diff --git a/rpc/src/v1/helpers/mod.rs b/rpc/src/v1/helpers/mod.rs index d71eaac41..e6ada3379 100644 --- 
a/rpc/src/v1/helpers/mod.rs +++ b/rpc/src/v1/helpers/mod.rs @@ -14,18 +14,25 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +#[macro_use] +pub mod auto_args; + #[macro_use] pub mod errors; + pub mod dispatch; pub mod params; + mod poll_manager; mod poll_filter; mod requests; +mod signer; mod signing_queue; mod network_settings; pub use self::poll_manager::PollManager; -pub use self::poll_filter::PollFilter; +pub use self::poll_filter::{PollFilter, limit_logs}; pub use self::requests::{TransactionRequest, FilledTransactionRequest, ConfirmationRequest, ConfirmationPayload, CallRequest}; pub use self::signing_queue::{ConfirmationsQueue, ConfirmationPromise, ConfirmationResult, SigningQueue, QueueEvent}; +pub use self::signer::SignerService; pub use self::network_settings::NetworkSettings; diff --git a/rpc/src/v1/helpers/params.rs b/rpc/src/v1/helpers/params.rs index 4a6abf542..f56c500fc 100644 --- a/rpc/src/v1/helpers/params.rs +++ b/rpc/src/v1/helpers/params.rs @@ -36,14 +36,6 @@ pub fn params_len(params: &Params) -> usize { } } -/// Deserialize request parameters with optional second parameter `BlockNumber` defaulting to `BlockNumber::Latest`. -pub fn from_params_default_second(params: Params) -> Result<(F, BlockNumber, ), Error> where F: serde::de::Deserialize { - match params_len(¶ms) { - 1 => from_params::<(F, )>(params).map(|(f,)| (f, BlockNumber::Latest)), - _ => from_params::<(F, BlockNumber)>(params), - } -} - /// Deserialize request parameters with optional third parameter `BlockNumber` defaulting to `BlockNumber::Latest`. pub fn from_params_default_third(params: Params) -> Result<(F1, F2, BlockNumber, ), Error> where F1: serde::de::Deserialize, F2: serde::de::Deserialize { match params_len(¶ms) { diff --git a/rpc/src/v1/helpers/poll_filter.rs b/rpc/src/v1/helpers/poll_filter.rs index 31bbf47fe..faae75c98 100644 --- a/rpc/src/v1/helpers/poll_filter.rs +++ b/rpc/src/v1/helpers/poll_filter.rs @@ -13,6 +13,15 @@ pub enum PollFilter { Block(BlockNumber), /// Hashes of all transactions which client was notified about. PendingTransaction(Vec), - /// Number of From block number, pending logs and log filter iself. + /// Number of From block number, pending logs and log filter itself. Logs(BlockNumber, HashSet, Filter) } + +/// Returns only last `n` logs +pub fn limit_logs(mut logs: Vec, limit: Option) -> Vec { + let len = logs.len(); + match limit { + Some(limit) if len >= limit => logs.split_off(len - limit), + _ => logs, + } +} diff --git a/rpc/src/v1/helpers/signer.rs b/rpc/src/v1/helpers/signer.rs new file mode 100644 index 000000000..2cebc8261 --- /dev/null +++ b/rpc/src/v1/helpers/signer.rs @@ -0,0 +1,61 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
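The `limit_logs` helper added to poll_filter.rs above keeps only the newest `limit` entries and leaves the vector untouched when no limit is set. A self-contained sketch of the same logic, using integers in place of `Log` values so it runs on its own:

    fn limit_logs(mut logs: Vec<u64>, limit: Option<usize>) -> Vec<u64> {
        let len = logs.len();
        match limit {
            // split_off(len - limit) keeps the older entries in `logs` and returns
            // the tail, i.e. the most recent `limit` items.
            Some(limit) if len >= limit => logs.split_off(len - limit),
            _ => logs,
        }
    }

    fn main() {
        assert_eq!(limit_logs(vec![1, 2, 3, 4], Some(2)), vec![3, 4]);
        assert_eq!(limit_logs(vec![1, 2, 3, 4], None), vec![1, 2, 3, 4]);
        // A limit of 0 drops everything, which is what the eth_getLogs test further down expects.
        assert_eq!(limit_logs(vec![1, 2], Some(0)), Vec::<u64>::new());
    }
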
+ +use std::sync::Arc; +use std::ops::Deref; +use v1::helpers::signing_queue::{ConfirmationsQueue}; + +/// Manages communication with Signer crate +pub struct SignerService { + queue: Arc, + generate_new_token: Box Result + Send + Sync + 'static>, +} + +impl SignerService { + + /// Creates new Signer Service given function to generate new tokens. + pub fn new(new_token: F) -> Self + where F: Fn() -> Result + Send + Sync + 'static { + SignerService { + queue: Arc::new(ConfirmationsQueue::default()), + generate_new_token: Box::new(new_token), + } + } + + /// Generates new token. + pub fn generate_token(&self) -> Result { + (self.generate_new_token)() + } + + /// Returns a reference to `ConfirmationsQueue` + pub fn queue(&self) -> Arc { + self.queue.clone() + } + + #[cfg(test)] + /// Creates new Signer Service for tests. + pub fn new_test() -> Self { + SignerService::new(|| Ok("new_token".into())) + } +} + +impl Deref for SignerService { + type Target = ConfirmationsQueue; + fn deref(&self) -> &Self::Target { + &self.queue + } +} + diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index d3bf68735..755539ebd 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -42,10 +42,14 @@ use ethcore::log_entry::LogEntry; use ethcore::filter::Filter as EthcoreFilter; use self::ethash::SeedHashCompute; use v1::traits::Eth; -use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, CallRequest, Index, Filter, Log, Receipt, H64 as RpcH64, H256 as RpcH256, H160 as RpcH160, U256 as RpcU256}; -use v1::helpers::{CallRequest as CRequest, errors}; +use v1::types::{ + Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, + Transaction, CallRequest, Index, Filter, Log, Receipt, Work, + H64 as RpcH64, H256 as RpcH256, H160 as RpcH160, U256 as RpcU256, +}; +use v1::helpers::{CallRequest as CRequest, errors, limit_logs}; use v1::helpers::dispatch::{default_gas_price, dispatch_transaction}; -use v1::helpers::params::{expect_no_params, params_len, from_params_default_second, from_params_default_third}; +use v1::helpers::auto_args::Trailing; /// Eth RPC options pub struct EthClientOptions { @@ -100,7 +104,7 @@ impl EthClient where } } - fn block(&self, id: BlockID, include_txs: bool) -> Result { + fn block(&self, id: BlockID, include_txs: bool) -> Result, Error> { let client = take_weak!(self.client); match (client.block(id.clone()), client.block_total_difficulty(id)) { (Some(bytes), Some(total_difficulty)) => { @@ -131,28 +135,28 @@ impl EthClient where }, extra_data: Bytes::new(view.extra_data()) }; - Ok(to_value(&block)) + Ok(Some(block)) }, - _ => Ok(Value::Null) + _ => Ok(None) } } - fn transaction(&self, id: TransactionID) -> Result { + fn transaction(&self, id: TransactionID) -> Result, Error> { match take_weak!(self.client).transaction(id) { - Some(t) => Ok(to_value(&Transaction::from(t))), - None => Ok(Value::Null) + Some(t) => Ok(Some(Transaction::from(t))), + None => Ok(None), } } - fn uncle(&self, id: UncleID) -> Result { + fn uncle(&self, id: UncleID) -> Result, Error> { let client = take_weak!(self.client); let uncle: BlockHeader = match client.uncle(id) { Some(rlp) => rlp::decode(&rlp), - None => { return Ok(Value::Null); } + None => { return Ok(None); } }; let parent_difficulty = match client.block_total_difficulty(BlockID::Hash(uncle.parent_hash().clone())) { Some(difficulty) => difficulty, - None => { return Ok(Value::Null); } + None => { return Ok(None); } }; let block = Block { @@ -177,7 +181,7 @@ impl EthClient where 
uncles: vec![], transactions: BlockTransactions::Hashes(vec![]), }; - Ok(to_value(&block)) + Ok(Some(block)) } fn sign_call(&self, request: CRequest) -> Result { @@ -240,20 +244,19 @@ impl Eth for EthClient where M: MinerService + 'static, EM: ExternalMinerService + 'static { - fn protocol_version(&self, params: Params) -> Result { + fn protocol_version(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(Value::String(format!("{}", take_weak!(self.sync).status().protocol_version).to_owned())) + let version = take_weak!(self.sync).status().protocol_version.to_owned(); + Ok(format!("{}", version)) } - fn syncing(&self, params: Params) -> Result { + fn syncing(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); let status = take_weak!(self.sync).status(); - let res = match status.state { - SyncState::Idle => SyncStatus::None, + match status.state { + SyncState::Idle => Ok(SyncStatus::None), SyncState::Waiting | SyncState::Blocks | SyncState::NewBlocks | SyncState::ChainHead | SyncState::SnapshotManifest | SyncState::SnapshotData | SyncState::SnapshotWaiting => { let current_block = U256::from(take_weak!(self.client).chain_info().best_block_number); @@ -265,271 +268,242 @@ impl Eth for EthClient where current_block: current_block.into(), highest_block: highest_block.into(), }; - SyncStatus::Info(info) + Ok(SyncStatus::Info(info)) } else { - SyncStatus::None + Ok(SyncStatus::None) } } - }; - Ok(to_value(&res)) + } } - fn author(&self, params: Params) -> Result { + fn author(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&RpcH160::from(take_weak!(self.miner).author()))) + Ok(RpcH160::from(take_weak!(self.miner).author())) } - fn is_mining(&self, params: Params) -> Result { + fn is_mining(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&(take_weak!(self.miner).is_sealing()))) + Ok(take_weak!(self.miner).is_sealing()) } - fn hashrate(&self, params: Params) -> Result { + fn hashrate(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&RpcU256::from(self.external_miner.hashrate()))) + Ok(RpcU256::from(self.external_miner.hashrate())) } - fn gas_price(&self, params: Params) -> Result { + fn gas_price(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); let (client, miner) = (take_weak!(self.client), take_weak!(self.miner)); - Ok(to_value(&RpcU256::from(default_gas_price(&*client, &*miner)))) + Ok(RpcU256::from(default_gas_price(&*client, &*miner))) } - fn accounts(&self, params: Params) -> Result { + fn accounts(&self) -> Result, Error> { try!(self.active()); - try!(expect_no_params(params)); let store = take_weak!(self.accounts); let accounts = try!(store.accounts().map_err(|e| errors::internal("Could not fetch accounts.", e))); - Ok(to_value(&accounts.into_iter().map(Into::into).collect::>())) + Ok(accounts.into_iter().map(Into::into).collect()) } - fn block_number(&self, params: Params) -> Result { + fn block_number(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); - Ok(to_value(&RpcU256::from(take_weak!(self.client).chain_info().best_block_number))) + Ok(RpcU256::from(take_weak!(self.client).chain_info().best_block_number)) } - fn balance(&self, params: Params) -> Result { + fn balance(&self, address: RpcH160, num: Trailing) -> Result { try!(self.active()); - from_params_default_second(params) - .and_then(|(address, block_number,)| { - let address: Address = RpcH160::into(address); - match 
block_number { - BlockNumber::Pending => Ok(to_value(&RpcU256::from(take_weak!(self.miner).balance(&*take_weak!(self.client), &address)))), - id => match take_weak!(self.client).balance(&address, id.into()) { - Some(balance) => Ok(to_value(&RpcU256::from(balance))), - None => Err(errors::state_pruned()), - } - } - }) + + let address = address.into(); + match num.0 { + BlockNumber::Pending => Ok(take_weak!(self.miner).balance(&*take_weak!(self.client), &address).into()), + id => match take_weak!(self.client).balance(&address, id.into()) { + Some(balance) => Ok(balance.into()), + None => Err(errors::state_pruned()), + } + } } - fn storage_at(&self, params: Params) -> Result { + fn storage_at(&self, address: RpcH160, pos: RpcU256, num: Trailing) -> Result { try!(self.active()); - from_params_default_third::(params) - .and_then(|(address, position, block_number,)| { - let address: Address = RpcH160::into(address); - let position: U256 = RpcU256::into(position); - match block_number { - BlockNumber::Pending => Ok(to_value(&RpcU256::from(take_weak!(self.miner).storage_at(&*take_weak!(self.client), &address, &H256::from(position))))), - id => match take_weak!(self.client).storage_at(&address, &H256::from(position), id.into()) { - Some(s) => Ok(to_value(&RpcH256::from(s))), - None => Err(errors::state_pruned()), - } - } - }) - + let address: Address = RpcH160::into(address); + let position: U256 = RpcU256::into(pos); + match num.0 { + BlockNumber::Pending => Ok(take_weak!(self.miner).storage_at(&*take_weak!(self.client), &address, &H256::from(position)).into()), + id => match take_weak!(self.client).storage_at(&address, &H256::from(position), id.into()) { + Some(s) => Ok(s.into()), + None => Err(errors::state_pruned()), + } + } } - fn transaction_count(&self, params: Params) -> Result { + fn transaction_count(&self, address: RpcH160, num: Trailing) -> Result { try!(self.active()); - from_params_default_second(params) - .and_then(|(address, block_number,)| { - let address: Address = RpcH160::into(address); - match block_number { - BlockNumber::Pending => Ok(to_value(&RpcU256::from(take_weak!(self.miner).nonce(&*take_weak!(self.client), &address)))), - id => match take_weak!(self.client).nonce(&address, id.into()) { - Some(nonce) => Ok(to_value(&RpcU256::from(nonce))), - None => Err(errors::state_pruned()), - } - } - }) + + let address: Address = RpcH160::into(address); + match num.0 { + BlockNumber::Pending => Ok(take_weak!(self.miner).nonce(&*take_weak!(self.client), &address).into()), + id => match take_weak!(self.client).nonce(&address, id.into()) { + Some(nonce) => Ok(nonce.into()), + None => Err(errors::state_pruned()), + } + } } - fn block_transaction_count_by_hash(&self, params: Params) -> Result { + fn block_transaction_count_by_hash(&self, hash: RpcH256) -> Result, Error> { try!(self.active()); - from_params::<(RpcH256,)>(params) - .and_then(|(hash,)| // match - take_weak!(self.client).block(BlockID::Hash(hash.into())) - .map_or(Ok(Value::Null), |bytes| Ok(to_value(&RpcU256::from(BlockView::new(&bytes).transactions_count()))))) + Ok( + take_weak!(self.client).block(BlockID::Hash(hash.into())) + .map(|bytes| BlockView::new(&bytes).transactions_count().into()) + ) } - fn block_transaction_count_by_number(&self, params: Params) -> Result { + fn block_transaction_count_by_number(&self, num: BlockNumber) -> Result, Error> { try!(self.active()); - from_params::<(BlockNumber,)>(params) - .and_then(|(block_number,)| match block_number { - BlockNumber::Pending => Ok(to_value( - 
&RpcU256::from(take_weak!(self.miner).status().transactions_in_pending_block) - )), - _ => take_weak!(self.client).block(block_number.into()) - .map_or(Ok(Value::Null), |bytes| Ok(to_value(&RpcU256::from(BlockView::new(&bytes).transactions_count())))) - }) + + match num { + BlockNumber::Pending => Ok(Some( + take_weak!(self.miner).status().transactions_in_pending_block.into() + )), + _ => Ok( + take_weak!(self.client).block(num.into()) + .map(|bytes| BlockView::new(&bytes).transactions_count().into()) + ) + } } - fn block_uncles_count_by_hash(&self, params: Params) -> Result { + fn block_uncles_count_by_hash(&self, hash: RpcH256) -> Result, Error> { try!(self.active()); - from_params::<(RpcH256,)>(params) - .and_then(|(hash,)| - take_weak!(self.client).block(BlockID::Hash(hash.into())) - .map_or(Ok(Value::Null), |bytes| Ok(to_value(&RpcU256::from(BlockView::new(&bytes).uncles_count()))))) + + Ok( + take_weak!(self.client).block(BlockID::Hash(hash.into())) + .map(|bytes| BlockView::new(&bytes).uncles_count().into()) + ) } - fn block_uncles_count_by_number(&self, params: Params) -> Result { + fn block_uncles_count_by_number(&self, num: BlockNumber) -> Result, Error> { try!(self.active()); - from_params::<(BlockNumber,)>(params) - .and_then(|(block_number,)| match block_number { - BlockNumber::Pending => Ok(to_value(&RpcU256::from(0))), - _ => take_weak!(self.client).block(block_number.into()) - .map_or(Ok(Value::Null), |bytes| Ok(to_value(&RpcU256::from(BlockView::new(&bytes).uncles_count())))) - }) + + match num { + BlockNumber::Pending => Ok(Some(0.into())), + _ => Ok( + take_weak!(self.client).block(num.into()) + .map(|bytes| BlockView::new(&bytes).uncles_count().into()) + ), + } } - fn code_at(&self, params: Params) -> Result { + fn code_at(&self, address: RpcH160, num: Trailing) -> Result { try!(self.active()); - from_params_default_second(params) - .and_then(|(address, block_number,)| { - let address: Address = RpcH160::into(address); - match block_number { - BlockNumber::Pending => Ok(to_value(&take_weak!(self.miner).code(&*take_weak!(self.client), &address).map_or_else(Bytes::default, Bytes::new))), - _ => match take_weak!(self.client).code(&address, block_number.into()) { - Some(code) => Ok(to_value(&code.map_or_else(Bytes::default, Bytes::new))), - None => Err(errors::state_pruned()), - }, - } - }) + + let address: Address = RpcH160::into(address); + match num.0 { + BlockNumber::Pending => Ok(take_weak!(self.miner).code(&*take_weak!(self.client), &address).map_or_else(Bytes::default, Bytes::new)), + _ => match take_weak!(self.client).code(&address, num.0.into()) { + Some(code) => Ok(code.map_or_else(Bytes::default, Bytes::new)), + None => Err(errors::state_pruned()), + }, + } } - fn block_by_hash(&self, params: Params) -> Result { + fn block_by_hash(&self, hash: RpcH256, include_txs: bool) -> Result, Error> { try!(self.active()); - from_params::<(RpcH256, bool)>(params) - .and_then(|(hash, include_txs)| self.block(BlockID::Hash(hash.into()), include_txs)) + + self.block(BlockID::Hash(hash.into()), include_txs) } - fn block_by_number(&self, params: Params) -> Result { + fn block_by_number(&self, num: BlockNumber, include_txs: bool) -> Result, Error> { try!(self.active()); - from_params::<(BlockNumber, bool)>(params) - .and_then(|(number, include_txs)| self.block(number.into(), include_txs)) + + self.block(num.into(), include_txs) } - fn transaction_by_hash(&self, params: Params) -> Result { + fn transaction_by_hash(&self, hash: RpcH256) -> Result, Error> { try!(self.active()); - 
from_params::<(RpcH256,)>(params) - .and_then(|(hash,)| { - let miner = take_weak!(self.miner); - let hash: H256 = hash.into(); - match miner.transaction(&hash) { - Some(pending_tx) => Ok(to_value(&Transaction::from(pending_tx))), - None => self.transaction(TransactionID::Hash(hash)) - } - }) + + let miner = take_weak!(self.miner); + let hash: H256 = hash.into(); + match miner.transaction(&hash) { + Some(pending_tx) => Ok(Some(pending_tx.into())), + None => self.transaction(TransactionID::Hash(hash)) + } } - fn transaction_by_block_hash_and_index(&self, params: Params) -> Result { + fn transaction_by_block_hash_and_index(&self, hash: RpcH256, index: Index) -> Result, Error> { try!(self.active()); - from_params::<(RpcH256, Index)>(params) - .and_then(|(hash, index)| self.transaction(TransactionID::Location(BlockID::Hash(hash.into()), index.value()))) + + self.transaction(TransactionID::Location(BlockID::Hash(hash.into()), index.value())) } - fn transaction_by_block_number_and_index(&self, params: Params) -> Result { + fn transaction_by_block_number_and_index(&self, num: BlockNumber, index: Index) -> Result, Error> { try!(self.active()); - from_params::<(BlockNumber, Index)>(params) - .and_then(|(number, index)| self.transaction(TransactionID::Location(number.into(), index.value()))) + + self.transaction(TransactionID::Location(num.into(), index.value())) } - fn transaction_receipt(&self, params: Params) -> Result { + fn transaction_receipt(&self, hash: RpcH256) -> Result, Error> { try!(self.active()); - from_params::<(RpcH256,)>(params) - .and_then(|(hash,)| { - let miner = take_weak!(self.miner); - let hash: H256 = hash.into(); - match (miner.pending_receipt(&hash), self.options.allow_pending_receipt_query) { - (Some(receipt), true) => Ok(to_value(&Receipt::from(receipt))), - _ => { - let client = take_weak!(self.client); - let receipt = client.transaction_receipt(TransactionID::Hash(hash)); - Ok(to_value(&receipt.map(Receipt::from))) - } - } - }) + + let miner = take_weak!(self.miner); + let hash: H256 = hash.into(); + match (miner.pending_receipt(&hash), self.options.allow_pending_receipt_query) { + (Some(receipt), true) => Ok(Some(receipt.into())), + _ => { + let client = take_weak!(self.client); + let receipt = client.transaction_receipt(TransactionID::Hash(hash)); + Ok(receipt.map(Into::into)) + } + } } - fn uncle_by_block_hash_and_index(&self, params: Params) -> Result { + fn uncle_by_block_hash_and_index(&self, hash: RpcH256, index: Index) -> Result, Error> { try!(self.active()); - from_params::<(RpcH256, Index)>(params) - .and_then(|(hash, index)| self.uncle(UncleID { block: BlockID::Hash(hash.into()), position: index.value() })) + + self.uncle(UncleID { block: BlockID::Hash(hash.into()), position: index.value() }) } - fn uncle_by_block_number_and_index(&self, params: Params) -> Result { + fn uncle_by_block_number_and_index(&self, num: BlockNumber, index: Index) -> Result, Error> { try!(self.active()); - from_params::<(BlockNumber, Index)>(params) - .and_then(|(number, index)| self.uncle(UncleID { block: number.into(), position: index.value() })) + + self.uncle(UncleID { block: num.into(), position: index.value() }) } - fn compilers(&self, params: Params) -> Result { + fn compilers(&self) -> Result, Error> { try!(self.active()); - try!(expect_no_params(params)); let mut compilers = vec![]; if Command::new(SOLC).output().is_ok() { compilers.push("solidity".to_owned()) } - Ok(to_value(&compilers)) + + Ok(compilers) } - fn logs(&self, params: Params) -> Result { - try!(self.active()); 
- let params = match params_len(¶ms) { - 1 => from_params::<(Filter, )>(params).map(|(filter, )| (filter, None)), - _ => from_params::<(Filter, usize)>(params).map(|(filter, val)| (filter, Some(val))), - }; - params.and_then(|(filter, limit)| { - let include_pending = filter.to_block == Some(BlockNumber::Pending); - let filter: EthcoreFilter = filter.into(); - let mut logs = take_weak!(self.client).logs(filter.clone(), limit) - .into_iter() - .map(From::from) - .collect::>(); + fn logs(&self, filter: Filter) -> Result, Error> { + let include_pending = filter.to_block == Some(BlockNumber::Pending); + let filter: EthcoreFilter = filter.into(); + let mut logs = take_weak!(self.client).logs(filter.clone()) + .into_iter() + .map(From::from) + .collect::>(); - if include_pending { - let pending = pending_logs(&*take_weak!(self.miner), &filter); - logs.extend(pending); - } + if include_pending { + let pending = pending_logs(&*take_weak!(self.miner), &filter); + logs.extend(pending); + } - let len = logs.len(); - match limit { - Some(limit) if len >= limit => { - logs = logs.split_off(len - limit); - }, - _ => {}, - } + let logs = limit_logs(logs, filter.limit); - Ok(to_value(&logs)) - }) + Ok(logs) } - fn work(&self, params: Params) -> Result { + fn work(&self, no_new_work_timeout: Trailing) -> Result { try!(self.active()); - let (no_new_work_timeout,) = from_params::<(u64,)>(params).unwrap_or((0,)); + let no_new_work_timeout = no_new_work_timeout.0; let client = take_weak!(self.client); // check if we're still syncing and return empty strings in that case @@ -561,115 +535,118 @@ impl Eth for EthClient where if no_new_work_timeout > 0 && b.block().header().timestamp() + no_new_work_timeout < get_time().sec as u64 { Err(errors::no_new_work()) } else if self.options.send_block_number_in_get_work { - let block_number = RpcU256::from(b.block().header().number()); - Ok(to_value(&(RpcH256::from(pow_hash), RpcH256::from(seed_hash), RpcH256::from(target), block_number))) + let block_number = b.block().header().number(); + Ok(Work { + pow_hash: pow_hash.into(), + seed_hash: seed_hash.into(), + target: target.into(), + number: Some(block_number), + }) } else { - Ok(to_value(&(RpcH256::from(pow_hash), RpcH256::from(seed_hash), RpcH256::from(target)))) + Ok(Work { + pow_hash: pow_hash.into(), + seed_hash: seed_hash.into(), + target: target.into(), + number: None + }) } }).unwrap_or(Err(Error::internal_error())) // no work found. 
} - fn submit_work(&self, params: Params) -> Result { + fn submit_work(&self, nonce: RpcH64, pow_hash: RpcH256, mix_hash: RpcH256) -> Result { try!(self.active()); - from_params::<(RpcH64, RpcH256, RpcH256)>(params).and_then(|(nonce, pow_hash, mix_hash)| { - let nonce: H64 = nonce.into(); - let pow_hash: H256 = pow_hash.into(); - let mix_hash: H256 = mix_hash.into(); - trace!(target: "miner", "submit_work: Decoded: nonce={}, pow_hash={}, mix_hash={}", nonce, pow_hash, mix_hash); - let miner = take_weak!(self.miner); - let client = take_weak!(self.client); - let seal = vec![rlp::encode(&mix_hash).to_vec(), rlp::encode(&nonce).to_vec()]; - let r = miner.submit_seal(&*client, pow_hash, seal); - Ok(to_value(&r.is_ok())) - }) + + let nonce: H64 = nonce.into(); + let pow_hash: H256 = pow_hash.into(); + let mix_hash: H256 = mix_hash.into(); + trace!(target: "miner", "submit_work: Decoded: nonce={}, pow_hash={}, mix_hash={}", nonce, pow_hash, mix_hash); + + let miner = take_weak!(self.miner); + let client = take_weak!(self.client); + let seal = vec![rlp::encode(&mix_hash).to_vec(), rlp::encode(&nonce).to_vec()]; + Ok(miner.submit_seal(&*client, pow_hash, seal).is_ok()) } - fn submit_hashrate(&self, params: Params) -> Result { + fn submit_hashrate(&self, rate: RpcU256, id: RpcH256) -> Result { try!(self.active()); - from_params::<(RpcU256, RpcH256)>(params).and_then(|(rate, id)| { - self.external_miner.submit_hashrate(rate.into(), id.into()); - Ok(to_value(&true)) - }) + self.external_miner.submit_hashrate(rate.into(), id.into()); + Ok(true) } - fn send_raw_transaction(&self, params: Params) -> Result { + fn send_raw_transaction(&self, raw: Bytes) -> Result { try!(self.active()); - from_params::<(Bytes, )>(params) - .and_then(|(raw_transaction, )| { - let raw_transaction = raw_transaction.to_vec(); - match UntrustedRlp::new(&raw_transaction).as_val() { - Ok(signed_transaction) => dispatch_transaction(&*take_weak!(self.client), &*take_weak!(self.miner), signed_transaction), - Err(_) => Ok(to_value(&RpcH256::from(H256::from(0)))), + + let raw_transaction = raw.to_vec(); + match UntrustedRlp::new(&raw_transaction).as_val() { + Ok(signed_transaction) => dispatch_transaction(&*take_weak!(self.client), &*take_weak!(self.miner), signed_transaction), + Err(_) => Ok(RpcH256::from(H256::from(0))), + } + } + + fn call(&self, request: CallRequest, num: Trailing) -> Result { + try!(self.active()); + + let request = CallRequest::into(request); + let signed = try!(self.sign_call(request)); + + let r = match num.0 { + BlockNumber::Pending => take_weak!(self.miner).call(&*take_weak!(self.client), &signed, Default::default()), + num => take_weak!(self.client).call(&signed, num.into(), Default::default()), + }; + + Ok(r.map(|e| Bytes(e.output)).unwrap_or(Bytes::new(vec![]))) + } + + fn estimate_gas(&self, request: CallRequest, num: Trailing) -> Result { + try!(self.active()); + + let request = CallRequest::into(request); + let signed = try!(self.sign_call(request)); + let r = match num.0 { + BlockNumber::Pending => take_weak!(self.miner).call(&*take_weak!(self.client), &signed, Default::default()), + num => take_weak!(self.client).call(&signed, num.into(), Default::default()), + }; + + Ok(RpcU256::from(r.map(|res| res.gas_used + res.refunded).unwrap_or(From::from(0)))) + } + + fn compile_lll(&self, _: String) -> Result { + try!(self.active()); + + rpc_unimplemented!() + } + + fn compile_serpent(&self, _: String) -> Result { + try!(self.active()); + + rpc_unimplemented!() + } + + fn compile_solidity(&self, code: 
String) -> Result { + try!(self.active()); + let maybe_child = Command::new(SOLC) + .arg("--bin") + .arg("--optimize") + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::null()) + .spawn(); + + maybe_child + .map_err(errors::compilation) + .and_then(|mut child| { + try!(child.stdin.as_mut() + .expect("we called child.stdin(Stdio::piped()) before spawn; qed") + .write_all(code.as_bytes()) + .map_err(errors::compilation)); + let output = try!(child.wait_with_output().map_err(errors::compilation)); + + let s = String::from_utf8_lossy(&output.stdout); + if let Some(hex) = s.lines().skip_while(|ref l| !l.contains("Binary")).skip(1).next() { + Ok(Bytes::new(hex.from_hex().unwrap_or(vec![]))) + } else { + Err(errors::compilation("Unexpected output.")) } - }) - } - - fn call(&self, params: Params) -> Result { - try!(self.active()); - from_params_default_second(params) - .and_then(|(request, block_number,)| { - let request = CallRequest::into(request); - let signed = try!(self.sign_call(request)); - let r = match block_number { - BlockNumber::Pending => take_weak!(self.miner).call(&*take_weak!(self.client), &signed, Default::default()), - block_number => take_weak!(self.client).call(&signed, block_number.into(), Default::default()), - }; - Ok(to_value(&r.map(|e| Bytes(e.output)).unwrap_or(Bytes::new(vec![])))) - }) - } - - fn estimate_gas(&self, params: Params) -> Result { - try!(self.active()); - from_params_default_second(params) - .and_then(|(request, block_number,)| { - let request = CallRequest::into(request); - let signed = try!(self.sign_call(request)); - let r = match block_number { - BlockNumber::Pending => take_weak!(self.miner).call(&*take_weak!(self.client), &signed, Default::default()), - block => take_weak!(self.client).call(&signed, block.into(), Default::default()), - }; - Ok(to_value(&RpcU256::from(r.map(|res| res.gas_used + res.refunded).unwrap_or(From::from(0))))) - }) - } - - fn compile_lll(&self, _: Params) -> Result { - try!(self.active()); - rpc_unimplemented!() - } - - fn compile_serpent(&self, _: Params) -> Result { - try!(self.active()); - rpc_unimplemented!() - } - - fn compile_solidity(&self, params: Params) -> Result { - try!(self.active()); - from_params::<(String, )>(params) - .and_then(|(code, )| { - let maybe_child = Command::new(SOLC) - .arg("--bin") - .arg("--optimize") - .stdin(Stdio::piped()) - .stdout(Stdio::piped()) - .stderr(Stdio::null()) - .spawn(); - - maybe_child - .map_err(errors::compilation) - .and_then(|mut child| { - try!(child.stdin.as_mut() - .expect("we called child.stdin(Stdio::piped()) before spawn; qed") - .write_all(code.as_bytes()) - .map_err(errors::compilation)); - let output = try!(child.wait_with_output().map_err(errors::compilation)); - - let s = String::from_utf8_lossy(&output.stdout); - if let Some(hex) = s.lines().skip_while(|ref l| !l.contains("Binary")).skip(1).next() { - Ok(to_value(&Bytes::new(hex.from_hex().unwrap_or(vec![])))) - } else { - Err(errors::compilation("Unexpected output.")) - } - }) }) } } diff --git a/rpc/src/v1/impls/eth_filter.rs b/rpc/src/v1/impls/eth_filter.rs index 38d6822d2..03d9d7215 100644 --- a/rpc/src/v1/impls/eth_filter.rs +++ b/rpc/src/v1/impls/eth_filter.rs @@ -24,9 +24,8 @@ use ethcore::filter::Filter as EthcoreFilter; use ethcore::client::{BlockChainClient, BlockID}; use util::Mutex; use v1::traits::EthFilter; -use v1::types::{BlockNumber, Index, Filter, Log, H256 as RpcH256, U256 as RpcU256}; -use v1::helpers::{PollFilter, PollManager}; -use v1::helpers::params::expect_no_params; 
+use v1::types::{BlockNumber, Index, Filter, FilterChanges, Log, H256 as RpcH256, U256 as RpcU256}; +use v1::helpers::{PollFilter, PollManager, limit_logs}; use v1::impls::eth::pending_logs; /// Eth filter rpc implementation. @@ -59,164 +58,154 @@ impl EthFilterClient where } } -impl EthFilter for EthFilterClient where - C: BlockChainClient + 'static, - M: MinerService + 'static { - - fn new_filter(&self, params: Params) -> Result { +impl EthFilter for EthFilterClient + where C: BlockChainClient + 'static, M: MinerService + 'static +{ + fn new_filter(&self, filter: Filter) -> Result { try!(self.active()); - from_params::<(Filter,)>(params) - .and_then(|(filter,)| { - let mut polls = self.polls.lock(); - let block_number = take_weak!(self.client).chain_info().best_block_number; - let id = polls.create_poll(PollFilter::Logs(block_number, Default::default(), filter)); - Ok(to_value(&RpcU256::from(id))) - }) + let mut polls = self.polls.lock(); + let block_number = take_weak!(self.client).chain_info().best_block_number; + let id = polls.create_poll(PollFilter::Logs(block_number, Default::default(), filter)); + Ok(id.into()) } - fn new_block_filter(&self, params: Params) -> Result { + fn new_block_filter(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); let mut polls = self.polls.lock(); let id = polls.create_poll(PollFilter::Block(take_weak!(self.client).chain_info().best_block_number)); - Ok(to_value(&RpcU256::from(id))) + Ok(id.into()) } - fn new_pending_transaction_filter(&self, params: Params) -> Result { + fn new_pending_transaction_filter(&self) -> Result { try!(self.active()); - try!(expect_no_params(params)); let mut polls = self.polls.lock(); let pending_transactions = take_weak!(self.miner).pending_transactions_hashes(); let id = polls.create_poll(PollFilter::PendingTransaction(pending_transactions)); - - Ok(to_value(&RpcU256::from(id))) + Ok(id.into()) } - fn filter_changes(&self, params: Params) -> Result { + fn filter_changes(&self, index: Index) -> Result { try!(self.active()); let client = take_weak!(self.client); - from_params::<(Index,)>(params) - .and_then(|(index,)| { - let mut polls = self.polls.lock(); - match polls.poll_mut(&index.value()) { - None => Ok(Value::Array(vec![] as Vec)), - Some(filter) => match *filter { - PollFilter::Block(ref mut block_number) => { - // + 1, cause we want to return hashes including current block hash. - let current_number = client.chain_info().best_block_number + 1; - let hashes = (*block_number..current_number).into_iter() - .map(BlockID::Number) - .filter_map(|id| client.block_hash(id)) - .map(Into::into) - .collect::>(); + let mut polls = self.polls.lock(); + match polls.poll_mut(&index.value()) { + None => Ok(FilterChanges::Empty), + Some(filter) => match *filter { + PollFilter::Block(ref mut block_number) => { + // + 1, cause we want to return hashes including current block hash. 
+ let current_number = client.chain_info().best_block_number + 1; + let hashes = (*block_number..current_number).into_iter() + .map(BlockID::Number) + .filter_map(|id| client.block_hash(id)) + .map(Into::into) + .collect::>(); - *block_number = current_number; + *block_number = current_number; - Ok(to_value(&hashes)) - }, - PollFilter::PendingTransaction(ref mut previous_hashes) => { - // get hashes of pending transactions - let current_hashes = take_weak!(self.miner).pending_transactions_hashes(); + Ok(FilterChanges::Hashes(hashes)) + }, + PollFilter::PendingTransaction(ref mut previous_hashes) => { + // get hashes of pending transactions + let current_hashes = take_weak!(self.miner).pending_transactions_hashes(); - let new_hashes = - { - let previous_hashes_set = previous_hashes.iter().collect::>(); + let new_hashes = + { + let previous_hashes_set = previous_hashes.iter().collect::>(); - // find all new hashes - current_hashes - .iter() - .filter(|hash| !previous_hashes_set.contains(hash)) - .cloned() - .map(Into::into) - .collect::>() - }; + // find all new hashes + current_hashes + .iter() + .filter(|hash| !previous_hashes_set.contains(hash)) + .cloned() + .map(Into::into) + .collect::>() + }; - // save all hashes of pending transactions - *previous_hashes = current_hashes; + // save all hashes of pending transactions + *previous_hashes = current_hashes; - // return new hashes - Ok(to_value(&new_hashes)) - }, - PollFilter::Logs(ref mut block_number, ref mut previous_logs, ref filter) => { - // retrive the current block number - let current_number = client.chain_info().best_block_number; + // return new hashes + Ok(FilterChanges::Hashes(new_hashes)) + }, + PollFilter::Logs(ref mut block_number, ref mut previous_logs, ref filter) => { + // retrive the current block number + let current_number = client.chain_info().best_block_number; - // check if we need to check pending hashes - let include_pending = filter.to_block == Some(BlockNumber::Pending); + // check if we need to check pending hashes + let include_pending = filter.to_block == Some(BlockNumber::Pending); - // build appropriate filter - let mut filter: EthcoreFilter = filter.clone().into(); - filter.from_block = BlockID::Number(*block_number); - filter.to_block = BlockID::Latest; + // build appropriate filter + let mut filter: EthcoreFilter = filter.clone().into(); + filter.from_block = BlockID::Number(*block_number); + filter.to_block = BlockID::Latest; - // retrieve logs in range from_block..min(BlockID::Latest..to_block) - let mut logs = client.logs(filter.clone(), None) - .into_iter() - .map(From::from) - .collect::>(); + // retrieve logs in range from_block..min(BlockID::Latest..to_block) + let mut logs = client.logs(filter.clone()) + .into_iter() + .map(From::from) + .collect::>(); - // additionally retrieve pending logs - if include_pending { - let pending_logs = pending_logs(&*take_weak!(self.miner), &filter); + // additionally retrieve pending logs + if include_pending { + let pending_logs = pending_logs(&*take_weak!(self.miner), &filter); - // remove logs about which client was already notified about - let new_pending_logs: Vec<_> = pending_logs.iter() - .filter(|p| !previous_logs.contains(p)) - .cloned() - .collect(); + // remove logs about which client was already notified about + let new_pending_logs: Vec<_> = pending_logs.iter() + .filter(|p| !previous_logs.contains(p)) + .cloned() + .collect(); - // save all logs retrieved by client - *previous_logs = pending_logs.into_iter().collect(); + // save all logs retrieved 
by client + *previous_logs = pending_logs.into_iter().collect(); - // append logs array with new pending logs - logs.extend(new_pending_logs); - } - - // save the number of the next block as a first block from which - // we want to get logs - *block_number = current_number + 1; - - Ok(to_value(&logs)) - } + // append logs array with new pending logs + logs.extend(new_pending_logs); } + + let logs = limit_logs(logs, filter.limit); + + // save the number of the next block as a first block from which + // we want to get logs + *block_number = current_number + 1; + + Ok(FilterChanges::Logs(logs)) } - }) + } + } } - fn filter_logs(&self, params: Params) -> Result { + fn filter_logs(&self, index: Index) -> Result, Error> { try!(self.active()); - from_params::<(Index,)>(params) - .and_then(|(index,)| { - let mut polls = self.polls.lock(); - match polls.poll(&index.value()) { - Some(&PollFilter::Logs(ref _block_number, ref _previous_log, ref filter)) => { - let include_pending = filter.to_block == Some(BlockNumber::Pending); - let filter: EthcoreFilter = filter.clone().into(); - let mut logs = take_weak!(self.client).logs(filter.clone(), None) - .into_iter() - .map(From::from) - .collect::>(); - if include_pending { - logs.extend(pending_logs(&*take_weak!(self.miner), &filter)); - } + let mut polls = self.polls.lock(); + match polls.poll(&index.value()) { + Some(&PollFilter::Logs(ref _block_number, ref _previous_log, ref filter)) => { + let include_pending = filter.to_block == Some(BlockNumber::Pending); + let filter: EthcoreFilter = filter.clone().into(); + let mut logs = take_weak!(self.client).logs(filter.clone()) + .into_iter() + .map(From::from) + .collect::>(); - Ok(to_value(&logs)) - }, - // just empty array - _ => Ok(Value::Array(vec![] as Vec)), + if include_pending { + logs.extend(pending_logs(&*take_weak!(self.miner), &filter)); } - }) + + let logs = limit_logs(logs, filter.limit); + + Ok(logs) + }, + // just empty array + _ => Ok(Vec::new()), + } } - fn uninstall_filter(&self, params: Params) -> Result { + fn uninstall_filter(&self, index: Index) -> Result { try!(self.active()); - from_params::<(Index,)>(params) - .map(|(index,)| { - self.polls.lock().remove_poll(&index.value()); - to_value(&true) - }) + + self.polls.lock().remove_poll(&index.value()); + Ok(true) } } diff --git a/rpc/src/v1/impls/eth_signing.rs b/rpc/src/v1/impls/eth_signing.rs index c19b5819d..9290a9425 100644 --- a/rpc/src/v1/impls/eth_signing.rs +++ b/rpc/src/v1/impls/eth_signing.rs @@ -23,10 +23,10 @@ use ethcore::client::MiningBlockChainClient; use util::{U256, Address, H256, Mutex}; use transient_hashmap::TransientHashMap; use ethcore::account_provider::AccountProvider; -use v1::helpers::{errors, SigningQueue, ConfirmationPromise, ConfirmationResult, ConfirmationsQueue, ConfirmationPayload, TransactionRequest as TRequest, FilledTransactionRequest as FilledRequest}; +use v1::helpers::{errors, SigningQueue, ConfirmationPromise, ConfirmationResult, ConfirmationPayload, TransactionRequest as TRequest, FilledTransactionRequest as FilledRequest, SignerService}; use v1::helpers::dispatch::{default_gas_price, sign_and_dispatch}; use v1::traits::EthSigning; -use v1::types::{TransactionRequest, H160 as RpcH160, H256 as RpcH256, H520 as RpcH520, U256 as RpcU256}; +use v1::types::{TransactionRequest, H160 as RpcH160, H256 as RpcH256, H520 as RpcH520, U256 as RpcU256, Bytes as RpcBytes}; fn fill_optional_fields(request: TRequest, client: &C, miner: &M) -> FilledRequest where C: MiningBlockChainClient, M: MinerService { @@ 
-43,7 +43,7 @@ fn fill_optional_fields(request: TRequest, client: &C, miner: &M) -> Fille /// Implementation of functions that require signing when no trusted signer is used. pub struct EthSigningQueueClient where C: MiningBlockChainClient, M: MinerService { - queue: Weak, + signer: Weak, accounts: Weak, client: Weak, miner: Weak, @@ -60,9 +60,9 @@ pub enum DispatchResult { impl EthSigningQueueClient where C: MiningBlockChainClient, M: MinerService { /// Creates a new signing queue client given shared signing queue. - pub fn new(queue: &Arc, client: &Arc, miner: &Arc, accounts: &Arc) -> Self { + pub fn new(signer: &Arc, client: &Arc, miner: &Arc, accounts: &Arc) -> Self { EthSigningQueueClient { - queue: Arc::downgrade(queue), + signer: Arc::downgrade(signer), accounts: Arc::downgrade(accounts), client: Arc::downgrade(client), miner: Arc::downgrade(miner), @@ -86,8 +86,8 @@ impl EthSigningQueueClient where C: MiningBlockChainClient, M: Miner return Ok(DispatchResult::Value(to_value(&accounts.sign(address, msg).ok().map_or_else(RpcH520::default, Into::into)))) } - let queue = take_weak!(self.queue); - queue.add_request(ConfirmationPayload::Sign(address, msg)) + let signer = take_weak!(self.signer); + signer.add_request(ConfirmationPayload::Sign(address, msg)) .map(DispatchResult::Promise) .map_err(|_| errors::request_rejected_limit()) }) @@ -105,9 +105,9 @@ impl EthSigningQueueClient where C: MiningBlockChainClient, M: Miner return sign_and_dispatch(&*client, &*miner, request, &*accounts, sender).map(DispatchResult::Value); } - let queue = take_weak!(self.queue); + let signer = take_weak!(self.signer); let request = fill_optional_fields(request, &*client, &*miner); - queue.add_request(ConfirmationPayload::Transaction(request)) + signer.add_request(ConfirmationPayload::Transaction(request)) .map(DispatchResult::Promise) .map_err(|_| errors::request_rejected_limit()) }) @@ -168,6 +168,13 @@ impl EthSigning for EthSigningQueueClient }) } + fn decrypt_message(&self, params: Params) -> Result { + try!(self.active()); + from_params::<(RpcH160, RpcBytes)>(params).and_then(|(_account, _ciphertext)| { + Err(errors::unimplemented()) + }) + } + fn check_request(&self, params: Params) -> Result { try!(self.active()); let mut pending = self.pending.lock(); @@ -241,6 +248,14 @@ impl EthSigning for EthSigningUnsafeClient where })) } + fn decrypt_message(&self, params: Params) -> Result { + try!(self.active()); + from_params::<(RpcH160, RpcBytes)>(params).and_then(|(address, ciphertext)| { + let s = try!(take_weak!(self.accounts).decrypt(address.into(), &[0; 0], &ciphertext.0).map_err(|_| Error::internal_error())); + Ok(to_value(RpcBytes::from(s))) + }) + } + fn post_sign(&self, _: Params) -> Result { // We don't support this in non-signer mode. 
Err(errors::signer_disabled()) diff --git a/rpc/src/v1/impls/ethcore.rs b/rpc/src/v1/impls/ethcore.rs index ee352e65a..220ead3dd 100644 --- a/rpc/src/v1/impls/ethcore.rs +++ b/rpc/src/v1/impls/ethcore.rs @@ -21,6 +21,7 @@ use std::collections::{BTreeMap}; use util::{RotatingLogger, Address}; use util::misc::version_data; +use crypto::ecies; use ethkey::{Brain, Generator}; use ethstore::random_phrase; use ethsync::{SyncProvider, ManageNetwork}; @@ -29,8 +30,8 @@ use ethcore::client::{MiningBlockChainClient}; use jsonrpc_core::*; use v1::traits::Ethcore; -use v1::types::{Bytes, U256, H160, Peers}; -use v1::helpers::{errors, SigningQueue, ConfirmationsQueue, NetworkSettings}; +use v1::types::{Bytes, U256, H160, H512, Peers, Transaction}; +use v1::helpers::{errors, SigningQueue, SignerService, NetworkSettings}; use v1::helpers::params::expect_no_params; /// Ethcore implementation. @@ -45,7 +46,7 @@ pub struct EthcoreClient where net: Weak, logger: Arc, settings: Arc, - confirmations_queue: Option>, + signer: Option>, } impl EthcoreClient where C: MiningBlockChainClient, M: MinerService, S: SyncProvider { @@ -57,7 +58,7 @@ impl EthcoreClient where C: MiningBlockChainClient, M: net: &Arc, logger: Arc, settings: Arc, - queue: Option> + signer: Option> ) -> Self { EthcoreClient { client: Arc::downgrade(client), @@ -66,7 +67,7 @@ impl EthcoreClient where C: MiningBlockChainClient, M: net: Arc::downgrade(net), logger: logger, settings: settings, - confirmations_queue: queue, + signer: signer, } } @@ -198,9 +199,9 @@ impl Ethcore for EthcoreClient where M: MinerService + try!(self.active()); try!(expect_no_params(params)); - match self.confirmations_queue { + match self.signer { None => Err(errors::signer_disabled()), - Some(ref queue) => Ok(to_value(&queue.len())), + Some(ref signer) => Ok(to_value(&signer.len())), } } @@ -217,4 +218,19 @@ impl Ethcore for EthcoreClient where M: MinerService + to_value(&H160::from(Brain::new(phrase).generate().unwrap().address())) ) } + + fn encrypt_message(&self, params: Params) -> Result { + try!(self.active()); + from_params::<(H512, Bytes)>(params).and_then(|(key, phrase)| { + let s = try!(ecies::encrypt(&key.into(), &[0; 0], &phrase.0).map_err(|_| Error::internal_error())); + Ok(to_value(&Bytes::from(s))) + }) + } + + fn pending_transactions(&self, params: Params) -> Result { + try!(self.active()); + try!(expect_no_params(params)); + + Ok(to_value(&take_weak!(self.miner).all_transactions().into_iter().map(Into::into).collect::>())) + } } diff --git a/rpc/src/v1/impls/personal_signer.rs b/rpc/src/v1/impls/personal_signer.rs index 5cfda9a65..441ed679b 100644 --- a/rpc/src/v1/impls/personal_signer.rs +++ b/rpc/src/v1/impls/personal_signer.rs @@ -23,13 +23,13 @@ use ethcore::client::MiningBlockChainClient; use ethcore::miner::MinerService; use v1::traits::PersonalSigner; use v1::types::{TransactionModification, ConfirmationRequest, U256}; -use v1::helpers::{errors, SigningQueue, ConfirmationsQueue, ConfirmationPayload}; +use v1::helpers::{errors, SignerService, SigningQueue, ConfirmationPayload}; use v1::helpers::params::expect_no_params; use v1::helpers::dispatch::{unlock_sign_and_dispatch, signature_with_password}; /// Transactions confirmation (personal) rpc implementation. 
pub struct SignerClient where C: MiningBlockChainClient, M: MinerService { - queue: Weak, + signer: Weak, accounts: Weak, client: Weak, miner: Weak, @@ -38,9 +38,14 @@ pub struct SignerClient where C: MiningBlockChainClient, M: MinerService { impl SignerClient where C: MiningBlockChainClient, M: MinerService { /// Create new instance of signer client. - pub fn new(store: &Arc, client: &Arc, miner: &Arc, queue: &Arc) -> Self { + pub fn new( + store: &Arc, + client: &Arc, + miner: &Arc, + signer: &Arc, + ) -> Self { SignerClient { - queue: Arc::downgrade(queue), + signer: Arc::downgrade(signer), accounts: Arc::downgrade(store), client: Arc::downgrade(client), miner: Arc::downgrade(miner), @@ -59,8 +64,8 @@ impl PersonalSigner for SignerClient where C: Mini fn requests_to_confirm(&self, params: Params) -> Result { try!(self.active()); try!(expect_no_params(params)); - let queue = take_weak!(self.queue); - Ok(to_value(&queue.requests().into_iter().map(From::from).collect::>())) + let signer = take_weak!(self.signer); + Ok(to_value(&signer.requests().into_iter().map(From::from).collect::>())) } fn confirm_request(&self, params: Params) -> Result { @@ -71,11 +76,11 @@ impl PersonalSigner for SignerClient where C: Mini |(id, modification, pass)| { let id = id.into(); let accounts = take_weak!(self.accounts); - let queue = take_weak!(self.queue); + let signer = take_weak!(self.signer); let client = take_weak!(self.client); let miner = take_weak!(self.miner); - queue.peek(&id).map(|confirmation| { + signer.peek(&id).map(|confirmation| { let result = match confirmation.payload { ConfirmationPayload::Transaction(mut request) => { // apply modification @@ -90,7 +95,7 @@ impl PersonalSigner for SignerClient where C: Mini } }; if let Ok(ref response) = result { - queue.request_confirmed(id, Ok(response.clone())); + signer.request_confirmed(id, Ok(response.clone())); } result }).unwrap_or_else(|| Err(errors::invalid_params("Unknown RequestID", id))) @@ -102,11 +107,20 @@ impl PersonalSigner for SignerClient where C: Mini try!(self.active()); from_params::<(U256, )>(params).and_then( |(id, )| { - let queue = take_weak!(self.queue); - let res = queue.request_rejected(id.into()); + let signer = take_weak!(self.signer); + let res = signer.request_rejected(id.into()); Ok(to_value(&res.is_some())) } ) } + + fn generate_token(&self, params: Params) -> Result { + try!(self.active()); + try!(expect_no_params(params)); + let signer = take_weak!(self.signer); + signer.generate_token() + .map(|token| to_value(&token)) + .map_err(|e| errors::token(e)) + } } diff --git a/rpc/src/v1/mod.rs b/rpc/src/v1/mod.rs index 897fcf623..889b7840b 100644 --- a/rpc/src/v1/mod.rs +++ b/rpc/src/v1/mod.rs @@ -28,4 +28,4 @@ pub mod types; pub use self::traits::{Web3, Eth, EthFilter, EthSigning, Personal, PersonalSigner, Net, Ethcore, EthcoreSet, Traces, Rpc}; pub use self::impls::*; -pub use self::helpers::{SigningQueue, ConfirmationsQueue, NetworkSettings}; +pub use self::helpers::{SigningQueue, SignerService, ConfirmationsQueue, NetworkSettings}; diff --git a/rpc/src/v1/tests/mocked/eth.rs b/rpc/src/v1/tests/mocked/eth.rs index 0f1693963..eb3fbaf6e 100644 --- a/rpc/src/v1/tests/mocked/eth.rs +++ b/rpc/src/v1/tests/mocked/eth.rs @@ -27,7 +27,7 @@ use ethcore::receipt::LocalizedReceipt; use ethcore::transaction::{Transaction, Action}; use ethcore::miner::{ExternalMiner, MinerService}; use ethsync::SyncState; -use v1::{Eth, EthClient, EthClientOptions, EthSigning, EthSigningUnsafeClient}; +use v1::{Eth, EthClient, EthClientOptions, 
EthFilter, EthFilterClient, EthSigning, EthSigningUnsafeClient}; use v1::tests::helpers::{TestSyncProvider, Config, TestMinerService}; use rustc_serialize::hex::ToHex; use time::get_time; @@ -76,10 +76,12 @@ impl EthTester { let hashrates = Arc::new(Mutex::new(HashMap::new())); let external_miner = Arc::new(ExternalMiner::new(hashrates.clone())); let eth = EthClient::new(&client, &sync, &ap, &miner, &external_miner, options).to_delegate(); + let filter = EthFilterClient::new(&client, &miner).to_delegate(); let sign = EthSigningUnsafeClient::new(&client, &ap, &miner).to_delegate(); let io = IoHandler::new(); io.add_delegate(eth); io.add_delegate(sign); + io.add_delegate(filter); EthTester { client: client, @@ -152,23 +154,88 @@ fn rpc_eth_hashrate() { #[test] fn rpc_eth_logs() { let tester = EthTester::default(); + tester.client.set_logs(vec![LocalizedLogEntry { + block_number: 1, + block_hash: H256::default(), + entry: LogEntry { + address: Address::default(), + topics: vec![], + data: vec![1,2,3], + }, + transaction_index: 0, + transaction_hash: H256::default(), + log_index: 0, + }, LocalizedLogEntry { + block_number: 1, + block_hash: H256::default(), + entry: LogEntry { + address: Address::default(), + topics: vec![], + data: vec![1,2,3], + }, + transaction_index: 0, + transaction_hash: H256::default(), + log_index: 0, + }]); - let request = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{}], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":[],"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + let request1 = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{}], "id": 1}"#; + let request2 = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{"limit":1}], "id": 1}"#; + let request3 = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{"limit":0}], "id": 1}"#; + + let response1 = r#"{"jsonrpc":"2.0","result":[{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","type":"mined"},{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","type":"mined"}],"id":1}"#; + let response2 = r#"{"jsonrpc":"2.0","result":[{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","type":"mined"}],"id":1}"#; + let response3 = r#"{"jsonrpc":"2.0","result":[],"id":1}"#; + + assert_eq!(tester.io.handle_request_sync(request1), Some(response1.to_owned())); + assert_eq!(tester.io.handle_request_sync(request2), Some(response2.to_owned())); + assert_eq!(tester.io.handle_request_sync(request3), Some(response3.to_owned())); } #[test] -fn rpc_eth_logs_with_limit() { +fn rpc_logs_filter() { let tester = EthTester::default(); + // Set some logs + tester.client.set_logs(vec![LocalizedLogEntry { + block_number: 1, + block_hash: H256::default(), + entry: LogEntry { + 
address: Address::default(), + topics: vec![], + data: vec![1,2,3], + }, + transaction_index: 0, + transaction_hash: H256::default(), + log_index: 0, + }, LocalizedLogEntry { + block_number: 1, + block_hash: H256::default(), + entry: LogEntry { + address: Address::default(), + topics: vec![], + data: vec![1,2,3], + }, + transaction_index: 0, + transaction_hash: H256::default(), + log_index: 0, + }]); - let request1 = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{}, 1], "id": 1}"#; - let request2 = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{}, 0], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":[],"id":1}"#; + // Register filters first + let request_default = r#"{"jsonrpc": "2.0", "method": "eth_newFilter", "params": [{}], "id": 1}"#; + let request_limit = r#"{"jsonrpc": "2.0", "method": "eth_newFilter", "params": [{"limit":1}], "id": 1}"#; + let response1 = r#"{"jsonrpc":"2.0","result":"0x0","id":1}"#; + let response2 = r#"{"jsonrpc":"2.0","result":"0x1","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request1), Some(response.to_owned())); - assert_eq!(tester.io.handle_request_sync(request2), Some(response.to_owned())); + assert_eq!(tester.io.handle_request_sync(request_default), Some(response1.to_owned())); + assert_eq!(tester.io.handle_request_sync(request_limit), Some(response2.to_owned())); + + let request_changes1 = r#"{"jsonrpc": "2.0", "method": "eth_getFilterChanges", "params": ["0x0"], "id": 1}"#; + let request_changes2 = r#"{"jsonrpc": "2.0", "method": "eth_getFilterChanges", "params": ["0x1"], "id": 1}"#; + let response1 = r#"{"jsonrpc":"2.0","result":[{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","type":"mined"},{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","type":"mined"}],"id":1}"#; + let response2 = r#"{"jsonrpc":"2.0","result":[{"address":"0x0000000000000000000000000000000000000000","blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x1","data":"0x010203","logIndex":"0x0","topics":[],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000000","transactionIndex":"0x0","type":"mined"}],"id":1}"#; + + assert_eq!(tester.io.handle_request_sync(request_changes1), Some(response1.to_owned())); + assert_eq!(tester.io.handle_request_sync(request_changes2), Some(response2.to_owned())); } #[test] @@ -390,7 +457,7 @@ fn rpc_eth_pending_transaction_by_hash() { tester.miner.pending_transactions.lock().insert(H256::zero(), tx); } - let response = 
r#"{"jsonrpc":"2.0","result":{"blockHash":null,"blockNumber":null,"creates":null,"from":"0x0f65fe9276bc9a24ae7083ae28e2660ef72df99e","gas":"0x5208","gasPrice":"0x1","hash":"0x41df922fd0d4766fcc02e161f8295ec28522f329ae487f14d811e4b64c8d6e31","input":"0x","nonce":"0x0","raw":"0xf85f800182520894095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a0efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804","to":"0x095e7baea6a6c7c4c2dfeb977efac326af552d87","transactionIndex":null,"value":"0xa"},"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"blockHash":null,"blockNumber":null,"creates":null,"from":"0x0f65fe9276bc9a24ae7083ae28e2660ef72df99e","gas":"0x5208","gasPrice":"0x1","hash":"0x41df922fd0d4766fcc02e161f8295ec28522f329ae487f14d811e4b64c8d6e31","input":"0x","nonce":"0x0","publicKey":"0x7ae46da747962c2ee46825839c1ef9298e3bd2e70ca2938495c3693a485ec3eaa8f196327881090ff64cf4fbb0a48485d4f83098e189ed3b7a87d5941b59f789","raw":"0xf85f800182520894095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a0efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804","to":"0x095e7baea6a6c7c4c2dfeb977efac326af552d87","transactionIndex":null,"value":"0xa"},"id":1}"#; let request = r#"{ "jsonrpc": "2.0", "method": "eth_getTransactionByHash", diff --git a/rpc/src/v1/tests/mocked/eth_signing.rs b/rpc/src/v1/tests/mocked/eth_signing.rs index f06d4027a..1bf901e5f 100644 --- a/rpc/src/v1/tests/mocked/eth_signing.rs +++ b/rpc/src/v1/tests/mocked/eth_signing.rs @@ -19,7 +19,7 @@ use std::sync::Arc; use jsonrpc_core::{IoHandler, to_value}; use v1::impls::EthSigningQueueClient; use v1::traits::EthSigning; -use v1::helpers::{ConfirmationsQueue, SigningQueue}; +use v1::helpers::{SignerService, SigningQueue}; use v1::types::{H256 as RpcH256, H520 as RpcH520}; use v1::tests::helpers::TestMinerService; use util::{Address, FixedHash, Uint, U256, H256, H520}; @@ -28,7 +28,7 @@ use ethcore::client::TestBlockChainClient; use ethcore::transaction::{Transaction, Action}; struct EthSigningTester { - pub queue: Arc, + pub signer: Arc, pub client: Arc, pub miner: Arc, pub accounts: Arc, @@ -37,15 +37,15 @@ struct EthSigningTester { impl Default for EthSigningTester { fn default() -> Self { - let queue = Arc::new(ConfirmationsQueue::default()); + let signer = Arc::new(SignerService::new_test()); let client = Arc::new(TestBlockChainClient::default()); let miner = Arc::new(TestMinerService::default()); let accounts = Arc::new(AccountProvider::transient_provider()); let io = IoHandler::new(); - io.add_delegate(EthSigningQueueClient::new(&queue, &client, &miner, &accounts).to_delegate()); + io.add_delegate(EthSigningQueueClient::new(&signer, &client, &miner, &accounts).to_delegate()); EthSigningTester { - queue: queue, + signer: signer, client: client, miner: miner, accounts: accounts, @@ -63,7 +63,7 @@ fn should_add_sign_to_queue() { // given let tester = eth_signing(); let address = Address::random(); - assert_eq!(tester.queue.requests().len(), 0); + assert_eq!(tester.signer.requests().len(), 0); // when let request = r#"{ @@ -79,9 +79,9 @@ fn should_add_sign_to_queue() { // then let async_result = tester.io.handle_request(&request).unwrap(); - assert_eq!(tester.queue.requests().len(), 1); + assert_eq!(tester.signer.requests().len(), 1); // respond - tester.queue.request_confirmed(U256::from(1), Ok(to_value(&RpcH520::from(H520::default())))); + tester.signer.request_confirmed(U256::from(1), 
Ok(to_value(&RpcH520::from(H520::default())))); assert!(async_result.on_result(move |res| { assert_eq!(res, response.to_owned()); })); @@ -92,7 +92,7 @@ fn should_post_sign_to_queue() { // given let tester = eth_signing(); let address = Address::random(); - assert_eq!(tester.queue.requests().len(), 0); + assert_eq!(tester.signer.requests().len(), 0); // when let request = r#"{ @@ -108,7 +108,7 @@ fn should_post_sign_to_queue() { // then assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.queue.requests().len(), 1); + assert_eq!(tester.signer.requests().len(), 1); } #[test] @@ -155,7 +155,7 @@ fn should_check_status_of_request_when_its_resolved() { "id": 1 }"#; tester.io.handle_request_sync(&request).expect("Sent"); - tester.queue.request_confirmed(U256::from(1), Ok(to_value(&"Hello World!"))); + tester.signer.request_confirmed(U256::from(1), Ok(to_value(&"Hello World!"))); // when let request = r#"{ @@ -192,7 +192,7 @@ fn should_sign_if_account_is_unlocked() { }"#; let response = r#"{"jsonrpc":"2.0","result":""#.to_owned() + format!("0x{}", signature).as_ref() + r#"","id":1}"#; assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.queue.requests().len(), 0); + assert_eq!(tester.signer.requests().len(), 0); } #[test] @@ -200,7 +200,7 @@ fn should_add_transaction_to_queue() { // given let tester = eth_signing(); let address = Address::random(); - assert_eq!(tester.queue.requests().len(), 0); + assert_eq!(tester.signer.requests().len(), 0); // when let request = r#"{ @@ -219,9 +219,9 @@ fn should_add_transaction_to_queue() { // then let async_result = tester.io.handle_request(&request).unwrap(); - assert_eq!(tester.queue.requests().len(), 1); + assert_eq!(tester.signer.requests().len(), 1); // respond - tester.queue.request_confirmed(U256::from(1), Ok(to_value(&RpcH256::from(H256::default())))); + tester.signer.request_confirmed(U256::from(1), Ok(to_value(&RpcH256::from(H256::default())))); assert!(async_result.on_result(move |res| { assert_eq!(res, response.to_owned()); })); diff --git a/rpc/src/v1/tests/mocked/ethcore.rs b/rpc/src/v1/tests/mocked/ethcore.rs index d8121b6d6..811ccced4 100644 --- a/rpc/src/v1/tests/mocked/ethcore.rs +++ b/rpc/src/v1/tests/mocked/ethcore.rs @@ -22,7 +22,7 @@ use ethcore::client::{TestBlockChainClient}; use jsonrpc_core::IoHandler; use v1::{Ethcore, EthcoreClient}; -use v1::helpers::{ConfirmationsQueue, NetworkSettings}; +use v1::helpers::{SignerService, NetworkSettings}; use v1::tests::helpers::{TestSyncProvider, Config, TestMinerService}; use super::manage_network::TestManageNetwork; @@ -262,8 +262,8 @@ fn rpc_ethcore_unsigned_transactions_count() { let sync = sync_provider(); let net = network_service(); let io = IoHandler::new(); - let queue = Arc::new(ConfirmationsQueue::default()); - let ethcore = EthcoreClient::new(&client, &miner, &sync, &net, logger(), settings(), Some(queue)).to_delegate(); + let signer = Arc::new(SignerService::new_test()); + let ethcore = EthcoreClient::new(&client, &miner, &sync, &net, logger(), settings(), Some(signer)).to_delegate(); io.add_delegate(ethcore); let request = r#"{"jsonrpc": "2.0", "method": "ethcore_unsignedTransactionsCount", "params":[], "id": 1}"#; @@ -286,3 +286,18 @@ fn rpc_ethcore_unsigned_transactions_count_when_signer_disabled() { assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } + +#[test] +fn rpc_ethcore_pending_transactions() { + let miner = miner_service(); + let client = 
client_service(); + let sync = sync_provider(); + let net = network_service(); + let io = IoHandler::new(); + io.add_delegate(ethcore_client(&client, &miner, &sync, &net).to_delegate()); + + let request = r#"{"jsonrpc": "2.0", "method": "ethcore_pendingTransactions", "params":[], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":[],"id":1}"#; + + assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); +} diff --git a/rpc/src/v1/tests/mocked/personal_signer.rs b/rpc/src/v1/tests/mocked/personal_signer.rs index 976b232cc..04ae829ee 100644 --- a/rpc/src/v1/tests/mocked/personal_signer.rs +++ b/rpc/src/v1/tests/mocked/personal_signer.rs @@ -23,10 +23,10 @@ use ethcore::client::TestBlockChainClient; use ethcore::transaction::{Transaction, Action}; use v1::{SignerClient, PersonalSigner}; use v1::tests::helpers::TestMinerService; -use v1::helpers::{SigningQueue, ConfirmationsQueue, FilledTransactionRequest, ConfirmationPayload}; +use v1::helpers::{SigningQueue, SignerService, FilledTransactionRequest, ConfirmationPayload}; struct PersonalSignerTester { - queue: Arc, + signer: Arc, accounts: Arc, io: IoHandler, miner: Arc, @@ -49,16 +49,16 @@ fn miner_service() -> Arc { } fn signer_tester() -> PersonalSignerTester { - let queue = Arc::new(ConfirmationsQueue::default()); + let signer = Arc::new(SignerService::new_test()); let accounts = accounts_provider(); let client = blockchain_client(); let miner = miner_service(); let io = IoHandler::new(); - io.add_delegate(SignerClient::new(&accounts, &client, &miner, &queue).to_delegate()); + io.add_delegate(SignerClient::new(&accounts, &client, &miner, &signer).to_delegate()); PersonalSignerTester { - queue: queue, + signer: signer, accounts: accounts, io: io, miner: miner, @@ -71,7 +71,7 @@ fn signer_tester() -> PersonalSignerTester { fn should_return_list_of_items_to_confirm() { // given let tester = signer_tester(); - tester.queue.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest { + tester.signer.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest { from: Address::from(1), to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), gas_price: U256::from(10_000), @@ -80,7 +80,7 @@ fn should_return_list_of_items_to_confirm() { data: vec![], nonce: None, })).unwrap(); - tester.queue.add_request(ConfirmationPayload::Sign(1.into(), 5.into())).unwrap(); + tester.signer.add_request(ConfirmationPayload::Sign(1.into(), 5.into())).unwrap(); // when let request = r#"{"jsonrpc":"2.0","method":"personal_requestsToConfirm","params":[],"id":1}"#; @@ -100,7 +100,7 @@ fn should_return_list_of_items_to_confirm() { fn should_reject_transaction_from_queue_without_dispatching() { // given let tester = signer_tester(); - tester.queue.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest { + tester.signer.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest { from: Address::from(1), to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), gas_price: U256::from(10_000), @@ -109,7 +109,7 @@ fn should_reject_transaction_from_queue_without_dispatching() { data: vec![], nonce: None, })).unwrap(); - assert_eq!(tester.queue.requests().len(), 1); + assert_eq!(tester.signer.requests().len(), 1); // when let request = r#"{"jsonrpc":"2.0","method":"personal_rejectRequest","params":["0x1"],"id":1}"#; @@ -117,7 +117,7 @@ fn should_reject_transaction_from_queue_without_dispatching() { // then assert_eq!(tester.io.handle_request_sync(&request), 
Some(response.to_owned())); - assert_eq!(tester.queue.requests().len(), 0); + assert_eq!(tester.signer.requests().len(), 0); assert_eq!(tester.miner.imported_transactions.lock().len(), 0); } @@ -125,7 +125,7 @@ fn should_reject_transaction_from_queue_without_dispatching() { fn should_not_remove_transaction_if_password_is_invalid() { // given let tester = signer_tester(); - tester.queue.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest { + tester.signer.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest { from: Address::from(1), to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()), gas_price: U256::from(10_000), @@ -134,7 +134,7 @@ fn should_not_remove_transaction_if_password_is_invalid() { data: vec![], nonce: None, })).unwrap(); - assert_eq!(tester.queue.requests().len(), 1); + assert_eq!(tester.signer.requests().len(), 1); // when let request = r#"{"jsonrpc":"2.0","method":"personal_confirmRequest","params":["0x1",{},"xxx"],"id":1}"#; @@ -142,15 +142,15 @@ fn should_not_remove_transaction_if_password_is_invalid() { // then assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.queue.requests().len(), 1); + assert_eq!(tester.signer.requests().len(), 1); } #[test] fn should_not_remove_sign_if_password_is_invalid() { // given let tester = signer_tester(); - tester.queue.add_request(ConfirmationPayload::Sign(0.into(), 5.into())).unwrap(); - assert_eq!(tester.queue.requests().len(), 1); + tester.signer.add_request(ConfirmationPayload::Sign(0.into(), 5.into())).unwrap(); + assert_eq!(tester.signer.requests().len(), 1); // when let request = r#"{"jsonrpc":"2.0","method":"personal_confirmRequest","params":["0x1",{},"xxx"],"id":1}"#; @@ -158,7 +158,7 @@ fn should_not_remove_sign_if_password_is_invalid() { // then assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.queue.requests().len(), 1); + assert_eq!(tester.signer.requests().len(), 1); } #[test] @@ -167,7 +167,7 @@ fn should_confirm_transaction_and_dispatch() { let tester = signer_tester(); let address = tester.accounts.new_account("test").unwrap(); let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(); - tester.queue.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest { + tester.signer.add_request(ConfirmationPayload::Transaction(FilledTransactionRequest { from: address, to: Some(recipient), gas_price: U256::from(10_000), @@ -189,7 +189,7 @@ fn should_confirm_transaction_and_dispatch() { let signature = tester.accounts.sign(address, t.hash()).unwrap(); let t = t.with_signature(signature); - assert_eq!(tester.queue.requests().len(), 1); + assert_eq!(tester.signer.requests().len(), 1); // when let request = r#"{ @@ -202,7 +202,24 @@ fn should_confirm_transaction_and_dispatch() { // then assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); - assert_eq!(tester.queue.requests().len(), 0); + assert_eq!(tester.signer.requests().len(), 0); assert_eq!(tester.miner.imported_transactions.lock().len(), 1); } +#[test] +fn should_generate_new_token() { + // given + let tester = signer_tester(); + + // when + let request = r#"{ + "jsonrpc":"2.0", + "method":"personal_generateAuthorizationToken", + "params":[], + "id":1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"new_token","id":1}"#; + + // then + assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned())); +} diff --git 
a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index 610591f1f..80789fd0e 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -18,186 +18,185 @@ use std::sync::Arc; use jsonrpc_core::*; -/// Eth rpc interface. -pub trait Eth: Sized + Send + Sync + 'static { - /// Returns protocol version. - fn protocol_version(&self, _: Params) -> Result; +use v1::types::{Block, BlockNumber, Bytes, CallRequest, Filter, FilterChanges, Index}; +use v1::types::{Log, Receipt, SyncStatus, Transaction, Work}; +use v1::types::{H64, H160, H256, U256}; - /// Returns an object with data about the sync status or false. (wtf?) - fn syncing(&self, _: Params) -> Result; +use v1::helpers::auto_args::{Trailing, Wrap}; - /// Returns the number of hashes per second that the node is mining with. - fn hashrate(&self, _: Params) -> Result; +build_rpc_trait! { + /// Eth rpc interface. + pub trait Eth { + /// Returns protocol version encoded as a string (quotes are necessary). + #[name("eth_protocolVersion")] + fn protocol_version(&self) -> Result; - /// Returns block author. - fn author(&self, _: Params) -> Result; + /// Returns an object with data about the sync status or false. (wtf?) + #[name("eth_syncing")] + fn syncing(&self) -> Result; - /// Returns true if client is actively mining new blocks. - fn is_mining(&self, _: Params) -> Result; + /// Returns the number of hashes per second that the node is mining with. + #[name("eth_hashrate")] + fn hashrate(&self) -> Result; - /// Returns current gas_price. - fn gas_price(&self, _: Params) -> Result; + /// Returns block author. + #[name("eth_coinbase")] + fn author(&self) -> Result; - /// Returns accounts list. - fn accounts(&self, _: Params) -> Result; + /// Returns true if client is actively mining new blocks. + #[name("eth_mining")] + fn is_mining(&self) -> Result; - /// Returns highest block number. - fn block_number(&self, _: Params) -> Result; + /// Returns current gas_price. + #[name("eth_gasPrice")] + fn gas_price(&self) -> Result; - /// Returns balance of the given account. - fn balance(&self, _: Params) -> Result; + /// Returns accounts list. + #[name("eth_accounts")] + fn accounts(&self) -> Result, Error>; - /// Returns content of the storage at given address. - fn storage_at(&self, _: Params) -> Result; + /// Returns highest block number. + #[name("eth_blockNumber")] + fn block_number(&self) -> Result; - /// Returns block with given hash. - fn block_by_hash(&self, _: Params) -> Result; + /// Returns balance of the given account. + #[name("eth_getBalance")] + fn balance(&self, H160, Trailing) -> Result; - /// Returns block with given number. - fn block_by_number(&self, _: Params) -> Result; + /// Returns content of the storage at given address. + #[name("eth_getStorageAt")] + fn storage_at(&self, H160, U256, Trailing) -> Result; - /// Returns the number of transactions sent from given address at given time (block number). - fn transaction_count(&self, _: Params) -> Result; + /// Returns block with given hash. + #[name("eth_getBlockByHash")] + fn block_by_hash(&self, H256, bool) -> Result, Error>; - /// Returns the number of transactions in a block with given hash. - fn block_transaction_count_by_hash(&self, _: Params) -> Result; + /// Returns block with given number. + #[name("eth_getBlockByNumber")] + fn block_by_number(&self, BlockNumber, bool) -> Result, Error>; - /// Returns the number of transactions in a block with given block number. 
- fn block_transaction_count_by_number(&self, _: Params) -> Result; + /// Returns the number of transactions sent from given address at given time (block number). + #[name("eth_getTransactionCount")] + fn transaction_count(&self, H160, Trailing) -> Result; - /// Returns the number of uncles in a block with given hash. - fn block_uncles_count_by_hash(&self, _: Params) -> Result; + /// Returns the number of transactions in a block with given hash. + #[name("eth_getBlockTransactionCountByHash")] + fn block_transaction_count_by_hash(&self, H256) -> Result, Error>; - /// Returns the number of uncles in a block with given block number. - fn block_uncles_count_by_number(&self, _: Params) -> Result; + /// Returns the number of transactions in a block with given block number. + #[name("eth_getBlockTransactionCountByNumber")] + fn block_transaction_count_by_number(&self, BlockNumber) -> Result, Error>; - /// Returns the code at given address at given time (block number). - fn code_at(&self, _: Params) -> Result; + /// Returns the number of uncles in a block with given hash. + #[name("eth_getUncleCountByBlockHash")] + fn block_uncles_count_by_hash(&self, H256) -> Result, Error>; - /// Sends signed transaction. - fn send_raw_transaction(&self, _: Params) -> Result; + /// Returns the number of uncles in a block with given block number. + #[name("eth_getUncleCountByBlockNumber")] + fn block_uncles_count_by_number(&self, BlockNumber) -> Result, Error>; - /// Call contract. - fn call(&self, _: Params) -> Result; + /// Returns the code at given address at given time (block number). + #[name("eth_getCode")] + fn code_at(&self, H160, Trailing) -> Result; - /// Estimate gas needed for execution of given contract. - fn estimate_gas(&self, _: Params) -> Result; + /// Sends signed transaction, returning its hash. + #[name("eth_sendRawTransaction")] + fn send_raw_transaction(&self, Bytes) -> Result; - /// Get transaction by its hash. - fn transaction_by_hash(&self, _: Params) -> Result; + /// Call contract, returning the output data. + #[name("eth_call")] + fn call(&self, CallRequest, Trailing) -> Result; - /// Returns transaction at given block hash and index. - fn transaction_by_block_hash_and_index(&self, _: Params) -> Result; + /// Estimate gas needed for execution of given contract. + #[name("eth_estimateGas")] + fn estimate_gas(&self, CallRequest, Trailing) -> Result; - /// Returns transaction by given block number and index. - fn transaction_by_block_number_and_index(&self, _: Params) -> Result; + /// Get transaction by its hash. + #[name("eth_getTransactionByHash")] + fn transaction_by_hash(&self, H256) -> Result, Error>; - /// Returns transaction receipt. - fn transaction_receipt(&self, _: Params) -> Result; + /// Returns transaction at given block hash and index. + #[name("eth_getTransactionByBlockHashAndIndex")] + fn transaction_by_block_hash_and_index(&self, H256, Index) -> Result, Error>; - /// Returns an uncles at given block and index. - fn uncle_by_block_hash_and_index(&self, _: Params) -> Result; + /// Returns transaction by given block number and index. + #[name("eth_getTransactionByBlockNumberAndIndex")] + fn transaction_by_block_number_and_index(&self, BlockNumber, Index) -> Result, Error>; - /// Returns an uncles at given block and index. - fn uncle_by_block_number_and_index(&self, _: Params) -> Result; + /// Returns transaction receipt. + #[name("eth_getTransactionReceipt")] + fn transaction_receipt(&self, H256) -> Result, Error>; - /// Returns available compilers. 
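// A small illustration of what the build_rpc_trait! signatures above mean for callers:
// parameters are positional and typed, and a `Trailing<...>` argument may be left out
// entirely. Both requests below are assumed to be accepted for
// eth_getBalance(H160, Trailing<BlockNumber>); the second presumably falls back to
// `BlockNumber::Latest`, which appears to be why a `Default` impl for `BlockNumber` is
// added later in this diff. Address and id values are illustrative.
const BALANCE_WITH_BLOCK: &'static str =
	r#"{"jsonrpc": "2.0", "method": "eth_getBalance", "params": ["0x0000000000000000000000000000000000000000", "latest"], "id": 1}"#;
const BALANCE_DEFAULT_BLOCK: &'static str =
	r#"{"jsonrpc": "2.0", "method": "eth_getBalance", "params": ["0x0000000000000000000000000000000000000000"], "id": 1}"#;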
- fn compilers(&self, _: Params) -> Result; + /// Returns an uncles at given block and index. + #[name("eth_getUncleByBlockHashAndIndex")] + fn uncle_by_block_hash_and_index(&self, H256, Index) -> Result, Error>; - /// Compiles lll code. - fn compile_lll(&self, _: Params) -> Result; + /// Returns an uncles at given block and index. + #[name("eth_getUncleByBlockNumberAndIndex")] + fn uncle_by_block_number_and_index(&self, BlockNumber, Index) -> Result, Error>; - /// Compiles solidity. - fn compile_solidity(&self, _: Params) -> Result; + /// Returns available compilers. + #[name("eth_getCompilers")] + fn compilers(&self) -> Result, Error>; - /// Compiles serpent. - fn compile_serpent(&self, _: Params) -> Result; + /// Compiles lll code. + #[name("eth_compileLLL")] + fn compile_lll(&self, String) -> Result; - /// Returns logs matching given filter object. - fn logs(&self, _: Params) -> Result; + /// Compiles solidity. + #[name("eth_compileSolidity")] + fn compile_solidity(&self, String) -> Result; - /// Returns the hash of the current block, the seedHash, and the boundary condition to be met. - fn work(&self, _: Params) -> Result; + /// Compiles serpent. + #[name("eth_compileSerpent")] + fn compile_serpent(&self, String) -> Result; - /// Used for submitting a proof-of-work solution. - fn submit_work(&self, _: Params) -> Result; + /// Returns logs matching given filter object. + #[name("eth_getLogs")] + fn logs(&self, Filter) -> Result, Error>; - /// Used for submitting mining hashrate. - fn submit_hashrate(&self, _: Params) -> Result; + /// Returns the hash of the current block, the seedHash, and the boundary condition to be met. + #[name("eth_getWork")] + fn work(&self, Trailing) -> Result; - /// Should be used to convert object to io delegate. - fn to_delegate(self) -> IoDelegate { - let mut delegate = IoDelegate::new(Arc::new(self)); - delegate.add_method("eth_protocolVersion", Eth::protocol_version); - delegate.add_method("eth_syncing", Eth::syncing); - delegate.add_method("eth_hashrate", Eth::hashrate); - delegate.add_method("eth_coinbase", Eth::author); - delegate.add_method("eth_mining", Eth::is_mining); - delegate.add_method("eth_gasPrice", Eth::gas_price); - delegate.add_method("eth_accounts", Eth::accounts); - delegate.add_method("eth_blockNumber", Eth::block_number); - delegate.add_method("eth_getBalance", Eth::balance); - delegate.add_method("eth_getStorageAt", Eth::storage_at); - delegate.add_method("eth_getTransactionCount", Eth::transaction_count); - delegate.add_method("eth_getBlockTransactionCountByHash", Eth::block_transaction_count_by_hash); - delegate.add_method("eth_getBlockTransactionCountByNumber", Eth::block_transaction_count_by_number); - delegate.add_method("eth_getUncleCountByBlockHash", Eth::block_uncles_count_by_hash); - delegate.add_method("eth_getUncleCountByBlockNumber", Eth::block_uncles_count_by_number); - delegate.add_method("eth_getCode", Eth::code_at); - delegate.add_method("eth_sendRawTransaction", Eth::send_raw_transaction); - delegate.add_method("eth_call", Eth::call); - delegate.add_method("eth_estimateGas", Eth::estimate_gas); - delegate.add_method("eth_getBlockByHash", Eth::block_by_hash); - delegate.add_method("eth_getBlockByNumber", Eth::block_by_number); - delegate.add_method("eth_getTransactionByHash", Eth::transaction_by_hash); - delegate.add_method("eth_getTransactionByBlockHashAndIndex", Eth::transaction_by_block_hash_and_index); - delegate.add_method("eth_getTransactionByBlockNumberAndIndex", Eth::transaction_by_block_number_and_index); - 
delegate.add_method("eth_getTransactionReceipt", Eth::transaction_receipt); - delegate.add_method("eth_getUncleByBlockHashAndIndex", Eth::uncle_by_block_hash_and_index); - delegate.add_method("eth_getUncleByBlockNumberAndIndex", Eth::uncle_by_block_number_and_index); - delegate.add_method("eth_getCompilers", Eth::compilers); - delegate.add_method("eth_compileLLL", Eth::compile_lll); - delegate.add_method("eth_compileSolidity", Eth::compile_solidity); - delegate.add_method("eth_compileSerpent", Eth::compile_serpent); - delegate.add_method("eth_getLogs", Eth::logs); - delegate.add_method("eth_getWork", Eth::work); - delegate.add_method("eth_submitWork", Eth::submit_work); - delegate.add_method("eth_submitHashrate", Eth::submit_hashrate); - delegate + /// Used for submitting a proof-of-work solution. + #[name("eth_submitWork")] + fn submit_work(&self, H64, H256, H256) -> Result; + + /// Used for submitting mining hashrate. + #[name("eth_submitHashrate")] + fn submit_hashrate(&self, U256, H256) -> Result; } } -/// Eth filters rpc api (polling). -// TODO: do filters api properly -pub trait EthFilter: Sized + Send + Sync + 'static { - /// Returns id of new filter. - fn new_filter(&self, _: Params) -> Result; +build_rpc_trait! { - /// Returns id of new block filter. - fn new_block_filter(&self, _: Params) -> Result; + /// Eth filters rpc api (polling). + // TODO: do filters api properly + pub trait EthFilter { + /// Returns id of new filter. + #[name("eth_newFilter")] + fn new_filter(&self, Filter) -> Result; - /// Returns id of new block filter. - fn new_pending_transaction_filter(&self, _: Params) -> Result; + /// Returns id of new block filter. + #[name("eth_newBlockFilter")] + fn new_block_filter(&self) -> Result; - /// Returns filter changes since last poll. - fn filter_changes(&self, _: Params) -> Result; + /// Returns id of new block filter. + #[name("eth_newPendingTransactionFilter")] + fn new_pending_transaction_filter(&self) -> Result; - /// Returns all logs matching given filter (in a range 'from' - 'to'). - fn filter_logs(&self, _: Params) -> Result; + /// Returns filter changes since last poll. + #[name("eth_getFilterChanges")] + fn filter_changes(&self, Index) -> Result; - /// Uninstalls filter. - fn uninstall_filter(&self, _: Params) -> Result; + /// Returns all logs matching given filter (in a range 'from' - 'to'). + #[name("eth_getFilterLogs")] + fn filter_logs(&self, Index) -> Result, Error>; - /// Should be used to convert object to io delegate. - fn to_delegate(self) -> IoDelegate { - let mut delegate = IoDelegate::new(Arc::new(self)); - delegate.add_method("eth_newFilter", EthFilter::new_filter); - delegate.add_method("eth_newBlockFilter", EthFilter::new_block_filter); - delegate.add_method("eth_newPendingTransactionFilter", EthFilter::new_pending_transaction_filter); - delegate.add_method("eth_getFilterChanges", EthFilter::filter_changes); - delegate.add_method("eth_getFilterLogs", EthFilter::filter_logs); - delegate.add_method("eth_uninstallFilter", EthFilter::uninstall_filter); - delegate + /// Uninstalls filter. + #[name("eth_uninstallFilter")] + fn uninstall_filter(&self, Index) -> Result; } } @@ -227,6 +226,10 @@ pub trait EthSigning: Sized + Send + Sync + 'static { /// or an error. fn check_request(&self, _: Params) -> Result; + /// Decrypt some ECIES-encrypted message. + /// First parameter is the address with which it is encrypted, second is the ciphertext. + fn decrypt_message(&self, _: Params) -> Result; + /// Should be used to convert object to io delegate. 
fn to_delegate(self) -> IoDelegate { let mut delegate = IoDelegate::new(Arc::new(self)); @@ -235,6 +238,7 @@ pub trait EthSigning: Sized + Send + Sync + 'static { delegate.add_method("eth_postSign", EthSigning::post_sign); delegate.add_method("eth_postTransaction", EthSigning::post_transaction); delegate.add_method("eth_checkRequest", EthSigning::check_request); + delegate.add_method("ethcore_decryptMessage", EthSigning::decrypt_message); delegate } } diff --git a/rpc/src/v1/traits/ethcore.rs b/rpc/src/v1/traits/ethcore.rs index efd838297..56c27534a 100644 --- a/rpc/src/v1/traits/ethcore.rs +++ b/rpc/src/v1/traits/ethcore.rs @@ -76,6 +76,13 @@ pub trait Ethcore: Sized + Send + Sync + 'static { /// Returns the value of the registrar for this network. fn registry_address(&self, _: Params) -> Result; + /// Encrypt some data with a public key under ECIES. + /// First parameter is the 512-byte destination public key, second is the message. + fn encrypt_message(&self, _: Params) -> Result; + + /// Returns all pending (current) transactions from transaction queue. + fn pending_transactions(&self, _: Params) -> Result; + /// Should be used to convert object to io delegate. fn to_delegate(self) -> IoDelegate { let mut delegate = IoDelegate::new(Arc::new(self)); @@ -98,7 +105,8 @@ pub trait Ethcore: Sized + Send + Sync + 'static { delegate.add_method("ethcore_generateSecretPhrase", Ethcore::generate_secret_phrase); delegate.add_method("ethcore_phraseToAddress", Ethcore::phrase_to_address); delegate.add_method("ethcore_registryAddress", Ethcore::registry_address); - + delegate.add_method("ethcore_encryptMessage", Ethcore::encrypt_message); + delegate.add_method("ethcore_pendingTransactions", Ethcore::pending_transactions); delegate } } diff --git a/rpc/src/v1/traits/personal.rs b/rpc/src/v1/traits/personal.rs index 89d63c863..988091958 100644 --- a/rpc/src/v1/traits/personal.rs +++ b/rpc/src/v1/traits/personal.rs @@ -92,12 +92,16 @@ pub trait PersonalSigner: Sized + Send + Sync + 'static { /// Reject the confirmation request. fn reject_request(&self, _: Params) -> Result; + /// Generates new authorization token. + fn generate_token(&self, _: Params) -> Result; + /// Should be used to convert object to io delegate. 
fn to_delegate(self) -> IoDelegate { let mut delegate = IoDelegate::new(Arc::new(self)); delegate.add_method("personal_requestsToConfirm", PersonalSigner::requests_to_confirm); delegate.add_method("personal_confirmRequest", PersonalSigner::confirm_request); delegate.add_method("personal_rejectRequest", PersonalSigner::reject_request); + delegate.add_method("personal_generateAuthorizationToken", PersonalSigner::generate_token); delegate } } diff --git a/rpc/src/v1/types/block.rs b/rpc/src/v1/types/block.rs index 21459d026..70f39ba73 100644 --- a/rpc/src/v1/types/block.rs +++ b/rpc/src/v1/types/block.rs @@ -103,7 +103,7 @@ mod tests { fn test_serialize_block_transactions() { let t = BlockTransactions::Full(vec![Transaction::default()]); let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"[{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0","blockHash":null,"blockNumber":null,"transactionIndex":null,"from":"0x0000000000000000000000000000000000000000","to":null,"value":"0x0","gasPrice":"0x0","gas":"0x0","input":"0x","creates":null,"raw":"0x"}]"#); + assert_eq!(serialized, r#"[{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0","blockHash":null,"blockNumber":null,"transactionIndex":null,"from":"0x0000000000000000000000000000000000000000","to":null,"value":"0x0","gasPrice":"0x0","gas":"0x0","input":"0x","creates":null,"raw":"0x","publicKey":null}]"#); let t = BlockTransactions::Hashes(vec![H256::default().into()]); let serialized = serde_json::to_string(&t).unwrap(); diff --git a/rpc/src/v1/types/block_number.rs b/rpc/src/v1/types/block_number.rs index 302d099d5..01625f8ed 100644 --- a/rpc/src/v1/types/block_number.rs +++ b/rpc/src/v1/types/block_number.rs @@ -31,6 +31,12 @@ pub enum BlockNumber { Pending, } +impl Default for BlockNumber { + fn default() -> Self { + BlockNumber::Latest + } +} + impl Deserialize for BlockNumber { fn deserialize(deserializer: &mut D) -> Result where D: Deserializer { diff --git a/rpc/src/v1/types/bytes.rs b/rpc/src/v1/types/bytes.rs index 09c899057..57ff9f22e 100644 --- a/rpc/src/v1/types/bytes.rs +++ b/rpc/src/v1/types/bytes.rs @@ -70,10 +70,16 @@ impl Visitor for BytesVisitor { type Value = Bytes; fn visit_str(&mut self, value: &str) -> Result where E: Error { - if value.len() >= 2 && &value[0..2] == "0x" { - Ok(Bytes::new(FromHex::from_hex(&value[2..]).unwrap_or_else(|_| vec![]))) + if value.is_empty() { + warn!( + target: "deprecated", + "Deserializing empty string as empty bytes. This is a non-standard behaviour that will be removed in future versions. Please update your code to send `0x` instead!" 
+ ); + Ok(Bytes::new(Vec::new())) + } else if value.len() >= 2 && &value[0..2] == "0x" && value.len() & 1 == 0 { + Ok(Bytes::new(try!(FromHex::from_hex(&value[2..]).map_err(|_| Error::custom("invalid hex"))))) } else { - Err(Error::custom("invalid hex")) + Err(Error::custom("invalid format")) } } @@ -95,5 +101,31 @@ mod tests { let serialized = serde_json::to_string(&bytes).unwrap(); assert_eq!(serialized, r#""0x0123456789abcdef""#); } + + #[test] + fn test_bytes_deserialize() { + // TODO [ToDr] Uncomment when Mist starts sending correct data + // let bytes1: Result = serde_json::from_str(r#""""#); + let bytes2: Result = serde_json::from_str(r#""0x123""#); + let bytes3: Result = serde_json::from_str(r#""0xgg""#); + + let bytes4: Bytes = serde_json::from_str(r#""0x""#).unwrap(); + let bytes5: Bytes = serde_json::from_str(r#""0x12""#).unwrap(); + let bytes6: Bytes = serde_json::from_str(r#""0x0123""#).unwrap(); + + // assert!(bytes1.is_err()); + assert!(bytes2.is_err()); + assert!(bytes3.is_err()); + assert_eq!(bytes4, Bytes(vec![])); + assert_eq!(bytes5, Bytes(vec![0x12])); + assert_eq!(bytes6, Bytes(vec![0x1, 0x23])); + } + + // TODO [ToDr] Remove when Mist starts sending correct data + #[test] + fn test_bytes_lenient_against_the_spec_deserialize_for_empty_string_for_mist_compatibility() { + let deserialized: Bytes = serde_json::from_str(r#""""#).unwrap(); + assert_eq!(deserialized, Bytes(Vec::new())); + } } diff --git a/rpc/src/v1/types/filter.rs b/rpc/src/v1/types/filter.rs index e07845211..b4a45272b 100644 --- a/rpc/src/v1/types/filter.rs +++ b/rpc/src/v1/types/filter.rs @@ -14,12 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use serde::{Deserialize, Deserializer, Error}; +use serde::{Deserialize, Deserializer, Serialize, Serializer, Error}; use serde_json::value; use jsonrpc_core::Value; use ethcore::filter::Filter as EthFilter; use ethcore::client::BlockID; -use v1::types::{BlockNumber, H160, H256}; +use v1::types::{BlockNumber, H160, H256, Log}; /// Variadic value #[derive(Debug, PartialEq, Clone)] @@ -66,6 +66,8 @@ pub struct Filter { pub address: Option, /// Topics pub topics: Option>, + /// Limit + pub limit: Option, } impl Into for Filter { @@ -85,7 +87,29 @@ impl Into for Filter { VariadicValue::Multiple(t) => Some(t.into_iter().map(Into::into).collect()) }).filter_map(|m| m).collect()).into_iter(); vec![iter.next(), iter.next(), iter.next(), iter.next()] - } + }, + limit: self.limit, + } + } +} + +/// Results of the filter_changes RPC. +#[derive(Debug, PartialEq)] +pub enum FilterChanges { + /// New logs. 
+ Logs(Vec), + /// New hashes (block or transactions) + Hashes(Vec), + /// Empty result, + Empty, +} + +impl Serialize for FilterChanges { + fn serialize(&self, s: &mut S) -> Result<(), S::Error> where S: Serializer { + match *self { + FilterChanges::Logs(ref logs) => logs.serialize(s), + FilterChanges::Hashes(ref hashes) => hashes.serialize(s), + FilterChanges::Empty => (&[] as &[Value]).serialize(s), } } } @@ -120,7 +144,8 @@ mod tests { from_block: Some(BlockNumber::Earliest), to_block: Some(BlockNumber::Latest), address: None, - topics: None + topics: None, + limit: None, }); } } diff --git a/rpc/src/v1/types/hash.rs b/rpc/src/v1/types/hash.rs index 47c529235..3080aa031 100644 --- a/rpc/src/v1/types/hash.rs +++ b/rpc/src/v1/types/hash.rs @@ -20,7 +20,7 @@ use std::cmp::Ordering; use std::hash::{Hash, Hasher}; use serde; use rustc_serialize::hex::{ToHex, FromHex}; -use util::{H64 as Eth64, H256 as EthH256, H520 as EthH520, H2048 as Eth2048, H160 as Eth160}; +use util::{H64 as Eth64, H160 as Eth160, H256 as Eth256, H520 as Eth520, H512 as Eth512, H2048 as Eth2048}; macro_rules! impl_hash { ($name: ident, $other: ident, $size: expr) => { @@ -144,6 +144,7 @@ macro_rules! impl_hash { impl_hash!(H64, Eth64, 8); impl_hash!(H160, Eth160, 20); -impl_hash!(H256, EthH256, 32); -impl_hash!(H520, EthH520, 65); +impl_hash!(H256, Eth256, 32); +impl_hash!(H512, Eth512, 64); +impl_hash!(H520, Eth520, 65); impl_hash!(H2048, Eth2048, 256); diff --git a/rpc/src/v1/types/mod.rs.in b/rpc/src/v1/types/mod.rs.in index 312e93818..1369037ed 100644 --- a/rpc/src/v1/types/mod.rs.in +++ b/rpc/src/v1/types/mod.rs.in @@ -30,14 +30,15 @@ mod receipt; mod trace; mod trace_filter; mod uint; +mod work; pub use self::bytes::Bytes; pub use self::block::{Block, BlockTransactions}; pub use self::block_number::BlockNumber; pub use self::call_request::CallRequest; pub use self::confirmations::{ConfirmationPayload, ConfirmationRequest, TransactionModification}; -pub use self::filter::Filter; -pub use self::hash::{H64, H160, H256, H520, H2048}; +pub use self::filter::{Filter, FilterChanges}; +pub use self::hash::{H64, H160, H256, H512, H520, H2048}; pub use self::index::Index; pub use self::log::Log; pub use self::sync::{SyncStatus, SyncInfo, Peers}; @@ -47,3 +48,4 @@ pub use self::receipt::Receipt; pub use self::trace::{LocalizedTrace, TraceResults}; pub use self::trace_filter::TraceFilter; pub use self::uint::U256; +pub use self::work::Work; diff --git a/rpc/src/v1/types/transaction.rs b/rpc/src/v1/types/transaction.rs index d4697aff2..6aa9ee899 100644 --- a/rpc/src/v1/types/transaction.rs +++ b/rpc/src/v1/types/transaction.rs @@ -16,7 +16,7 @@ use ethcore::contract_address; use ethcore::transaction::{LocalizedTransaction, Action, SignedTransaction}; -use v1::types::{Bytes, H160, H256, U256}; +use v1::types::{Bytes, H160, H256, U256, H512}; /// Transaction #[derive(Debug, Default, Serialize)] @@ -51,6 +51,9 @@ pub struct Transaction { pub creates: Option, /// Raw transaction data pub raw: Bytes, + /// Public key of the signer. 
+ #[serde(rename="publicKey")] + pub public_key: Option, } impl From for Transaction { @@ -75,6 +78,7 @@ impl From for Transaction { Action::Call(_) => None, }, raw: ::rlp::encode(&t.signed).to_vec().into(), + public_key: t.public_key().ok().map(Into::into), } } } @@ -101,6 +105,7 @@ impl From for Transaction { Action::Call(_) => None, }, raw: ::rlp::encode(&t).to_vec().into(), + public_key: t.public_key().ok().map(Into::into), } } } @@ -114,7 +119,7 @@ mod tests { fn test_transaction_serialize() { let t = Transaction::default(); let serialized = serde_json::to_string(&t).unwrap(); - assert_eq!(serialized, r#"{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0","blockHash":null,"blockNumber":null,"transactionIndex":null,"from":"0x0000000000000000000000000000000000000000","to":null,"value":"0x0","gasPrice":"0x0","gas":"0x0","input":"0x","creates":null,"raw":"0x"}"#); + assert_eq!(serialized, r#"{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0","blockHash":null,"blockNumber":null,"transactionIndex":null,"from":"0x0000000000000000000000000000000000000000","to":null,"value":"0x0","gasPrice":"0x0","gas":"0x0","input":"0x","creates":null,"raw":"0x","publicKey":null}"#); } } diff --git a/rpc/src/v1/types/uint.rs b/rpc/src/v1/types/uint.rs index bcd874a18..9be7b1170 100644 --- a/rpc/src/v1/types/uint.rs +++ b/rpc/src/v1/types/uint.rs @@ -77,6 +77,10 @@ macro_rules! impl_uint { return Err(serde::Error::custom("Invalid length.")); } + if &value[0..2] != "0x" { + return Err(serde::Error::custom("Use hex encoded numbers with 0x prefix.")) + } + $other::from_str(&value[2..]).map($name).map_err(|_| serde::Error::custom("Invalid hex value.")) } @@ -100,6 +104,8 @@ mod tests { use super::U256; use serde_json; + type Res = Result; + #[test] fn should_serialize_u256() { let serialized1 = serde_json::to_string(&U256(0.into())).unwrap(); @@ -113,6 +119,21 @@ mod tests { assert_eq!(serialized4, r#""0x100""#); } + #[test] + fn should_fail_to_deserialize_decimals() { + let deserialized1: Res = serde_json::from_str(r#""""#); + let deserialized2: Res = serde_json::from_str(r#""0""#); + let deserialized3: Res = serde_json::from_str(r#""10""#); + let deserialized4: Res = serde_json::from_str(r#""1000000""#); + let deserialized5: Res = serde_json::from_str(r#""1000000000000000000""#); + + assert!(deserialized1.is_err()); + assert!(deserialized2.is_err()); + assert!(deserialized3.is_err()); + assert!(deserialized4.is_err()); + assert!(deserialized5.is_err()); + } + #[test] fn should_deserialize_u256() { let deserialized1: U256 = serde_json::from_str(r#""0x""#).unwrap(); diff --git a/rpc/src/v1/types/work.rs b/rpc/src/v1/types/work.rs new file mode 100644 index 000000000..0817eb24a --- /dev/null +++ b/rpc/src/v1/types/work.rs @@ -0,0 +1,43 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
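// The `Work` type introduced in this new file (defined just below) serializes as a bare
// positional JSON array rather than an object, so an eth_getWork response is expected to
// look roughly like one of these (hashes shortened for illustration):
//   ["0x<pow hash>", "0x<seed hash>", "0x<target>", "0x1"]   // when the block number is included
//   ["0x<pow hash>", "0x<seed hash>", "0x<target>"]          // when it is not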
+ +use super::{H256, U256}; + +use serde::{Serialize, Serializer}; + +/// The result of an `eth_getWork` call: it differs based on an option +/// whether to send the block number. +#[derive(Debug, PartialEq, Eq)] +pub struct Work { + /// The proof-of-work hash. + pub pow_hash: H256, + /// The seed hash. + pub seed_hash: H256, + /// The target. + pub target: H256, + /// The block number: this isn't always stored. + pub number: Option, +} + +impl Serialize for Work { + fn serialize(&self, s: &mut S) -> Result<(), S::Error> where S: Serializer { + match self.number.as_ref() { + Some(num) => (&self.pow_hash, &self.seed_hash, &self.target, U256::from(*num)).serialize(s), + None => (&self.pow_hash, &self.seed_hash, &self.target).serialize(s), + } + } +} + diff --git a/signer/src/authcode_store.rs b/signer/src/authcode_store.rs index 7b9ff1d6b..d8068fc88 100644 --- a/signer/src/authcode_store.rs +++ b/signer/src/authcode_store.rs @@ -48,6 +48,7 @@ impl TimeProvider for DefaultTimeProvider { /// No of seconds the hash is valid const TIME_THRESHOLD: u64 = 7; const TOKEN_LENGTH: usize = 16; +const INITIAL_TOKEN: &'static str = "initial"; /// Manages authorization codes for `SignerUIs` pub struct AuthCodes { @@ -98,7 +99,7 @@ impl AuthCodes { } /// Checks if given hash is correct identifier of `SignerUI` - pub fn is_valid(&self, hash: &H256, time: u64) -> bool { + pub fn is_valid(&mut self, hash: &H256, time: u64) -> bool { let now = self.now.now(); // check time if time >= now + TIME_THRESHOLD || time <= now - TIME_THRESHOLD { @@ -106,9 +107,21 @@ impl AuthCodes { return false; } + let as_token = |code| format!("{}:{}", code, time).sha3(); + + // Check if it's the initial token. + if self.is_empty() { + let initial = &as_token(INITIAL_TOKEN) == hash; + // Initial token can be used only once. 
+ if initial { + let _ = self.generate_new(); + } + return initial; + } + // look for code self.codes.iter() - .any(|code| &format!("{}:{}", code, time).sha3() == hash) + .any(|code| &as_token(code) == hash) } /// Generates and returns a new code that can be used by `SignerUIs` @@ -124,6 +137,11 @@ impl AuthCodes { self.codes.push(code); Ok(readable_code) } + + /// Returns true if there are no tokens in this store + pub fn is_empty(&self) -> bool { + self.codes.is_empty() + } } @@ -137,12 +155,28 @@ mod tests { format!("{}:{}", val, time).sha3() } + #[test] + fn should_return_true_if_code_is_initial_and_store_is_empty() { + // given + let code = "initial"; + let time = 99; + let mut codes = AuthCodes::new(vec![], || 100); + + // when + let res1 = codes.is_valid(&generate_hash(code, time), time); + let res2 = codes.is_valid(&generate_hash(code, time), time); + + // then + assert_eq!(res1, true); + assert_eq!(res2, false); + } + #[test] fn should_return_true_if_hash_is_valid() { // given let code = "23521352asdfasdfadf"; let time = 99; - let codes = AuthCodes::new(vec![code.into()], || 100); + let mut codes = AuthCodes::new(vec![code.into()], || 100); // when let res = codes.is_valid(&generate_hash(code, time), time); @@ -156,7 +190,7 @@ mod tests { // given let code = "23521352asdfasdfadf"; let time = 99; - let codes = AuthCodes::new(vec!["1".into()], || 100); + let mut codes = AuthCodes::new(vec!["1".into()], || 100); // when let res = codes.is_valid(&generate_hash(code, time), time); @@ -171,7 +205,7 @@ mod tests { let code = "23521352asdfasdfadf"; let time = 107; let time2 = 93; - let codes = AuthCodes::new(vec![code.into()], || 100); + let mut codes = AuthCodes::new(vec![code.into()], || 100); // when let res1 = codes.is_valid(&generate_hash(code, time), time); diff --git a/signer/src/tests/mod.rs b/signer/src/tests/mod.rs index eaed49de8..61b2ff1d3 100644 --- a/signer/src/tests/mod.rs +++ b/signer/src/tests/mod.rs @@ -14,24 +14,48 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
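// A minimal sketch of how a Signer UI is assumed to derive the value checked by
// `AuthCodes::is_valid` above and sent in the WebSocket handshake tested below: the token
// (or the one-shot "initial" token) is joined with the current unix timestamp, hashed with
// sha3 exactly as the tests' generate_hash helper does, and transmitted as
// "<hash>_<timestamp>" in the Sec-WebSocket-Protocol header.
use util::Hashable;

fn ws_protocol_value(code: &str, timestamp: u64) -> String {
	// same construction as AuthCodes::is_valid rebuilds internally
	let hash = format!("{}:{}", code, timestamp).sha3();
	format!("{:?}_{}", hash, timestamp)
}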
-use std::env; +use std::ops::{Deref, DerefMut}; use std::thread; -use std::time::Duration; +use std::time::{self, Duration}; use std::sync::Arc; -use devtools::http_client; +use devtools::{http_client, RandomTempPath}; use rpc::ConfirmationsQueue; +use util::Hashable; use rand; use ServerBuilder; use Server; +use AuthCodes; -pub fn serve() -> Server { +pub struct GuardedAuthCodes { + authcodes: AuthCodes, + path: RandomTempPath, +} +impl Deref for GuardedAuthCodes { + type Target = AuthCodes; + fn deref(&self) -> &Self::Target { + &self.authcodes + } +} +impl DerefMut for GuardedAuthCodes { + fn deref_mut(&mut self) -> &mut AuthCodes { + &mut self.authcodes + } +} + +pub fn serve() -> (Server, usize, GuardedAuthCodes) { + let mut path = RandomTempPath::new(); + path.panic_on_drop_failure = false; let queue = Arc::new(ConfirmationsQueue::default()); - let builder = ServerBuilder::new(queue, env::temp_dir()); + let builder = ServerBuilder::new(queue, path.to_path_buf()); let port = 35000 + rand::random::() % 10000; let res = builder.start(format!("127.0.0.1:{}", port).parse().unwrap()).unwrap(); thread::sleep(Duration::from_millis(25)); - res + + (res, port, GuardedAuthCodes { + authcodes: AuthCodes::from_file(&path).unwrap(), + path: path, + }) } pub fn request(server: Server, request: &str) -> http_client::Response { @@ -41,7 +65,7 @@ pub fn request(server: Server, request: &str) -> http_client::Response { #[test] fn should_reject_invalid_host() { // given - let server = serve(); + let server = serve().0; // when let response = request(server, @@ -62,7 +86,7 @@ fn should_reject_invalid_host() { #[test] fn should_serve_styles_even_on_disallowed_domain() { // given - let server = serve(); + let server = serve().0; // when let response = request(server, @@ -79,3 +103,103 @@ fn should_serve_styles_even_on_disallowed_domain() { assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); } +#[test] +fn should_block_if_authorization_is_incorrect() { + // given + let (server, port, _) = serve(); + + // when + let response = request(server, + &format!("\ + GET / HTTP/1.1\r\n\ + Host: 127.0.0.1:{}\r\n\ + Connection: Upgrade\r\n\ + Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n\ + Sec-WebSocket-Protocol: wrong\r\n\ + Sec-WebSocket-Version: 13\r\n\ + \r\n\ + {{}} + ", port) + ); + + // then + assert_eq!(response.status, "HTTP/1.1 403 FORBIDDEN".to_owned()); +} + +#[test] +fn should_allow_if_authorization_is_correct() { + // given + let (server, port, mut authcodes) = serve(); + let code = authcodes.generate_new().unwrap().replace("-", ""); + authcodes.to_file(&authcodes.path).unwrap(); + let timestamp = time::UNIX_EPOCH.elapsed().unwrap().as_secs(); + + // when + let response = request(server, + &format!("\ + GET / HTTP/1.1\r\n\ + Host: 127.0.0.1:{}\r\n\ + Connection: Close\r\n\ + Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n\ + Sec-WebSocket-Protocol: {:?}_{}\r\n\ + Sec-WebSocket-Version: 13\r\n\ + \r\n\ + {{}} + ", + port, + format!("{}:{}", code, timestamp).sha3(), + timestamp, + ) + ); + + // then + assert_eq!(response.status, "HTTP/1.1 101 Switching Protocols".to_owned()); +} + +#[test] +fn should_allow_initial_connection_but_only_once() { + // given + let (server, port, authcodes) = serve(); + let code = "initial"; + let timestamp = time::UNIX_EPOCH.elapsed().unwrap().as_secs(); + assert!(authcodes.is_empty()); + + // when + let response1 = http_client::request(server.addr(), + &format!("\ + GET / HTTP/1.1\r\n\ + Host: 127.0.0.1:{}\r\n\ + Connection: Close\r\n\ + Sec-WebSocket-Key: 
x3JJHMbDL1EzLkh9GBhXDw==\r\n\ + Sec-WebSocket-Protocol:{:?}_{}\r\n\ + Sec-WebSocket-Version: 13\r\n\ + \r\n\ + {{}} + ", + port, + format!("{}:{}", code, timestamp).sha3(), + timestamp, + ) + ); + let response2 = http_client::request(server.addr(), + &format!("\ + GET / HTTP/1.1\r\n\ + Host: 127.0.0.1:{}\r\n\ + Connection: Close\r\n\ + Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n\ + Sec-WebSocket-Protocol:{:?}_{}\r\n\ + Sec-WebSocket-Version: 13\r\n\ + \r\n\ + {{}} + ", + port, + format!("{}:{}", code, timestamp).sha3(), + timestamp, + ) + ); + + + // then + assert_eq!(response1.status, "HTTP/1.1 101 Switching Protocols".to_owned()); + assert_eq!(response2.status, "HTTP/1.1 403 FORBIDDEN".to_owned()); +} diff --git a/signer/src/ws_server/mod.rs b/signer/src/ws_server/mod.rs index 57223ccd9..697fbd4c7 100644 --- a/signer/src/ws_server/mod.rs +++ b/signer/src/ws_server/mod.rs @@ -180,7 +180,6 @@ impl Drop for Server { self.queue.finish(); self.broadcaster_handle.take().unwrap().join().unwrap(); self.handle.take().unwrap().join().unwrap(); - } } diff --git a/signer/src/ws_server/session.rs b/signer/src/ws_server/session.rs index cd3e2eee3..afc6606d7 100644 --- a/signer/src/ws_server/session.rs +++ b/signer/src/ws_server/session.rs @@ -59,7 +59,7 @@ fn origin_is_allowed(self_origin: &str, header: Option<&[u8]>) -> bool { } } -fn auth_is_valid(codes: &Path, protocols: ws::Result>) -> bool { +fn auth_is_valid(codes_path: &Path, protocols: ws::Result>) -> bool { match protocols { Ok(ref protocols) if protocols.len() == 1 => { protocols.iter().any(|protocol| { @@ -69,8 +69,15 @@ fn auth_is_valid(codes: &Path, protocols: ws::Result>) -> bool { if let (Some(auth), Some(time)) = (auth, time) { // Check if the code is valid - AuthCodes::from_file(codes) - .map(|codes| codes.is_valid(&auth, time)) + AuthCodes::from_file(codes_path) + .map(|mut codes| { + let res = codes.is_valid(&auth, time); + // make sure to save back authcodes - it might have been modified + if let Err(_) = codes.to_file(codes_path) { + warn!(target: "signer", "Couldn't save authorization codes to file."); + } + res + }) .unwrap_or(false) } else { false diff --git a/sync/src/blocks.rs b/sync/src/blocks.rs index 753ba7111..ad842ced6 100644 --- a/sync/src/blocks.rs +++ b/sync/src/blocks.rs @@ -233,7 +233,7 @@ impl BlockCollection { fn insert_body(&mut self, b: Bytes) -> Result<(), NetworkError> { let body = UntrustedRlp::new(&b); let tx = try!(body.at(0)); - let tx_root = ordered_trie_root(tx.iter().map(|r| r.as_raw().to_vec()).collect()); //TODO: get rid of vectors here + let tx_root = ordered_trie_root(tx.iter().map(|r| r.as_raw().to_vec())); //TODO: get rid of vectors here let uncles = try!(body.at(1)).as_raw().sha3(); let header_id = HeaderId { transactions_root: tx_root, diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 2e47b5617..565c53827 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -108,7 +108,6 @@ known_heap_size!(0, PeerInfo); type PacketDecodeError = DecoderError; -const PROTOCOL_VERSION: u8 = 64u8; const MAX_BODIES_TO_SEND: usize = 256; const MAX_HEADERS_TO_SEND: usize = 512; const MAX_NODE_DATA_TO_SEND: usize = 1024; @@ -1274,7 +1273,7 @@ impl ChainSync { let pv64 = io.eth_protocol_version(peer) >= 64; let mut packet = RlpStream::new_list(if pv64 { 7 } else { 5 }); let chain = io.chain().chain_info(); - packet.append(&(PROTOCOL_VERSION as u32)); + packet.append(&(io.eth_protocol_version(peer) as u32)); packet.append(&self.network_id); packet.append(&chain.total_difficulty); 
diff --git a/util/rlp/src/bytes.rs b/util/rlp/src/bytes.rs
index 5940d21d2..07ac108d6 100644
--- a/util/rlp/src/bytes.rs
+++ b/util/rlp/src/bytes.rs
@@ -174,6 +174,8 @@ pub enum FromBytesError {
 	DataIsTooLong,
 	/// Integer-representation is non-canonically prefixed with zero byte(s).
 	ZeroPrefixedInt,
+	/// String representation is not utf-8
+	InvalidUtf8,
 }
 
 impl StdError for FromBytesError {
@@ -199,7 +201,7 @@ pub trait FromBytes: Sized {
 
 impl FromBytes for String {
 	fn from_bytes(bytes: &[u8]) -> FromBytesResult<String> {
-		Ok(::std::str::from_utf8(bytes).unwrap().to_owned())
+		::std::str::from_utf8(bytes).map(|s| s.to_owned()).map_err(|_| FromBytesError::InvalidUtf8)
 	}
 }
 
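
Reviewer note (not part of the patch): with the hunk above, decoding a string no longer unwinds on malformed input; the failure is reported through the existing error enum. A rough sketch of the new behaviour, assuming `FromBytes` and `FromBytesError` from this rlp crate are in scope:

// 0xff can never appear in well-formed UTF-8, so this must hit the new variant
let bad = [0xffu8, 0xfe];
let is_invalid_utf8 = match String::from_bytes(&bad) {
	Err(FromBytesError::InvalidUtf8) => true,
	_ => false,
};
assert!(is_invalid_utf8);
assert_eq!(String::from_bytes(b"parity").unwrap(), "parity".to_owned());
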
diff --git a/util/src/bytes.rs b/util/src/bytes.rs
index 7c5e929f4..80b44c0e7 100644
--- a/util/src/bytes.rs
+++ b/util/src/bytes.rs
@@ -20,6 +20,7 @@
 //! as
 
 use std::fmt;
+use std::cmp::min;
 use std::ops::{Deref, DerefMut};
 
 /// Slice pretty print helper
@@ -71,6 +72,32 @@ pub enum BytesRef<'a> {
 	Fixed(&'a mut [u8])
 }
 
+impl<'a> BytesRef<'a> {
+	/// Writes given `input` to this `BytesRef` starting at `offset`.
+	/// Returns number of bytes written to the ref.
+	/// NOTE can return number greater than `input.len()` in case flexible vector had to be extended.
+	pub fn write(&mut self, offset: usize, input: &[u8]) -> usize {
+		match *self {
+			BytesRef::Flexible(ref mut data) => {
+				let data_len = data.len();
+				let wrote = input.len() + if data_len > offset { 0 } else { offset - data_len };
+
+				data.resize(offset, 0);
+				data.extend_from_slice(input);
+				wrote
+			},
+			BytesRef::Fixed(ref mut data) if offset < data.len() => {
+				let max = min(data.len() - offset, input.len());
+				for i in 0..max {
+					data[offset + i] = input[i];
+				}
+				max
+			},
+			_ => 0
+		}
+	}
+}
+
 impl<'a> Deref for BytesRef<'a> {
 	type Target = [u8];
 
@@ -92,4 +119,61 @@ impl <'a> DerefMut for BytesRef<'a> {
 }
 
 /// Vector of bytes.
-pub type Bytes = Vec<u8>;
\ No newline at end of file
+pub type Bytes = Vec<u8>;
+
+#[cfg(test)]
+mod tests {
+	use super::BytesRef;
+
+	#[test]
+	fn should_write_bytes_to_fixed_bytesref() {
+		// given
+		let mut data1 = vec![0, 0, 0];
+		let mut data2 = vec![0, 0, 0];
+		let (res1, res2) = {
+			let mut bytes1 = BytesRef::Fixed(&mut data1[..]);
+			let mut bytes2 = BytesRef::Fixed(&mut data2[1..2]);
+
+			// when
+			let res1 = bytes1.write(1, &[1, 1, 1]);
+			let res2 = bytes2.write(3, &[1, 1, 1]);
+			(res1, res2)
+		};
+
+		// then
+		assert_eq!(&data1, &[0, 1, 1]);
+		assert_eq!(res1, 2);
+
+		assert_eq!(&data2, &[0, 0, 0]);
+		assert_eq!(res2, 0);
+	}
+
+	#[test]
+	fn should_write_bytes_to_flexible_bytesref() {
+		// given
+		let mut data1 = vec![0, 0, 0];
+		let mut data2 = vec![0, 0, 0];
+		let mut data3 = vec![0, 0, 0];
+		let (res1, res2, res3) = {
+			let mut bytes1 = BytesRef::Flexible(&mut data1);
+			let mut bytes2 = BytesRef::Flexible(&mut data2);
+			let mut bytes3 = BytesRef::Flexible(&mut data3);
+
+			// when
+			let res1 = bytes1.write(1, &[1, 1, 1]);
+			let res2 = bytes2.write(3, &[1, 1, 1]);
+			let res3 = bytes3.write(5, &[1, 1, 1]);
+			(res1, res2, res3)
+		};
+
+		// then
+		assert_eq!(&data1, &[0, 1, 1, 1]);
+		assert_eq!(res1, 3);
+
+		assert_eq!(&data2, &[0, 0, 0, 1, 1, 1]);
+		assert_eq!(res2, 3);
+
+		assert_eq!(&data3, &[0, 0, 0, 0, 0, 1, 1, 1]);
+		assert_eq!(res3, 5);
+	}
+}
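
Reviewer note (not part of the patch): a quick sketch of why the return-value convention above is convenient for callers handed a BytesRef of either flavour. `copy_payload` is a hypothetical helper and the `util::bytes::BytesRef` path is assumed from this crate's layout:

use util::bytes::BytesRef;

fn copy_payload(dest: &mut BytesRef, offset: usize, payload: &[u8]) -> usize {
	// Fixed refs cap the count at the space left after `offset`; Flexible refs can
	// report more than payload.len() when the vector had to be padded up to `offset`.
	dest.write(offset, payload)
}
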
diff --git a/util/src/trie/fatdb.rs b/util/src/trie/fatdb.rs
index bb35bd467..f4c65a84b 100644
--- a/util/src/trie/fatdb.rs
+++ b/util/src/trie/fatdb.rs
@@ -46,8 +46,8 @@ impl<'db> FatDB<'db> {
 }
 
 impl<'db> Trie for FatDB<'db> {
-	fn iter<'a>(&'a self) -> Box<Iterator<Item = TrieItem> + 'a> {
-		Box::new(FatDBIterator::new(&self.raw))
+	fn iter<'a>(&'a self) -> super::Result<Box<Iterator<Item = TrieItem> + 'a>> {
+		FatDBIterator::new(&self.raw).map(|iter| Box::new(iter) as Box<_>)
 	}
 
 	fn root(&self) -> &H256 {
@@ -73,22 +73,24 @@ pub struct FatDBIterator<'db> {
 
 impl<'db> FatDBIterator<'db> {
 	/// Creates new iterator.
-	pub fn new(trie: &'db TrieDB) -> Self {
-		FatDBIterator {
-			trie_iterator: TrieDBIterator::new(trie),
+	pub fn new(trie: &'db TrieDB) -> super::Result<Self> {
+		Ok(FatDBIterator {
+			trie_iterator: try!(TrieDBIterator::new(trie)),
 			trie: trie,
-		}
+		})
 	}
 }
 
 impl<'db> Iterator for FatDBIterator<'db> {
-	type Item = (Vec<u8>, &'db [u8]);
+	type Item = TrieItem<'db>;
 
 	fn next(&mut self) -> Option<Self::Item> {
 		self.trie_iterator.next()
-			.map(|(hash, value)| {
-				(self.trie.db().get_aux(&hash).expect("Missing fatdb hash"), value)
-			})
+			.map(|res|
+				res.map(|(hash, value)| {
+					(self.trie.db().get_aux(&hash).expect("Missing fatdb hash"), value)
+				})
+			)
 	}
 }
 
@@ -105,5 +107,5 @@ fn fatdb_to_trie() {
 	}
 	let t = FatDB::new(&memdb, &root).unwrap();
 	assert_eq!(t.get(&[0x01u8, 0x23]).unwrap().unwrap(), &[0x01u8, 0x23]);
-	assert_eq!(t.iter().collect::<Vec<_>>(), vec![(vec![0x01u8, 0x23], &[0x01u8, 0x23] as &[u8])]);
+	assert_eq!(t.iter().unwrap().map(Result::unwrap).collect::<Vec<_>>(), vec![(vec![0x01u8, 0x23], &[0x01u8, 0x23] as &[u8])]);
 }
diff --git a/util/src/trie/mod.rs b/util/src/trie/mod.rs
index b71f6a5e2..6eebd8f5d 100644
--- a/util/src/trie/mod.rs
+++ b/util/src/trie/mod.rs
@@ -72,12 +72,12 @@ impl fmt::Display for TrieError {
 	}
 }
 
-/// Trie-Item type.
-pub type TrieItem<'a> = (Vec<u8>, &'a [u8]);
-
 /// Trie result type. Boxed to avoid copying around extra space for `H256`s on successful queries.
 pub type Result<T> = ::std::result::Result<T, Box<TrieError>>;
 
+/// Trie-Item type.
+pub type TrieItem<'a> = Result<(Vec<u8>, &'a [u8])>;
+
 /// A key-value datastore implemented as a database-backed modified Merkle tree.
 pub trait Trie {
 	/// Return the root of the trie.
@@ -102,7 +102,7 @@ pub trait Trie {
 		where 'a: 'b, R: Recorder;
 
 	/// Returns an iterator over elements of trie.
-	fn iter<'a>(&'a self) -> Box<Iterator<Item = TrieItem> + 'a>;
+	fn iter<'a>(&'a self) -> Result<Box<Iterator<Item = TrieItem> + 'a>>;
 }
 
 /// A key-value datastore implemented as a database-backed modified Merkle tree.
@@ -193,7 +193,7 @@ impl<'db> Trie for TrieKinds<'db> {
 		wrapper!(self, get_recorded, key, r)
 	}
 
-	fn iter<'a>(&'a self) -> Box<Iterator<Item = TrieItem> + 'a> {
+	fn iter<'a>(&'a self) -> Result<Box<Iterator<Item = TrieItem> + 'a>> {
 		wrapper!(self, iter,)
 	}
 }
diff --git a/util/src/trie/sectriedb.rs b/util/src/trie/sectriedb.rs
index 9e807884c..d7108dc3e 100644
--- a/util/src/trie/sectriedb.rs
+++ b/util/src/trie/sectriedb.rs
@@ -49,8 +49,8 @@ impl<'db> SecTrieDB<'db> {
 }
 
 impl<'db> Trie for SecTrieDB<'db> {
-	fn iter<'a>(&'a self) -> Box<Iterator<Item = TrieItem> + 'a> {
-		Box::new(TrieDB::iter(&self.raw))
+	fn iter<'a>(&'a self) -> super::Result<Box<Iterator<Item = TrieItem> + 'a>> {
+		TrieDB::iter(&self.raw)
 	}
 
 	fn root(&self) -> &H256 { self.raw.root() }
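
Reviewer note (not part of the patch): with TrieItem now carrying a Result and iter() itself being fallible, consuming code gets two places to handle errors instead of two places that could panic. A minimal sketch of the new calling convention, assuming it lives inside util's trie module so `Trie` and the local `Result` alias are in scope; `dump` is a hypothetical helper:

fn dump<T: Trie>(t: &T) -> Result<()> {
	// building the iterator can fail (e.g. missing root node)...
	for item in try!(t.iter()) {
		// ...and so can each individual step over the trie
		let (key, value) = try!(item);
		println!("{:?} => {:?}", key, value);
	}
	Ok(())
}
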
diff --git a/util/src/trie/triedb.rs b/util/src/trie/triedb.rs
index 99ef1f118..f5de26f8e 100644
--- a/util/src/trie/triedb.rs
+++ b/util/src/trie/triedb.rs
@@ -279,30 +279,38 @@ pub struct TrieDBIterator<'a> {
 
 impl<'a> TrieDBIterator<'a> {
 	/// Create a new iterator.
-	pub fn new(db: &'a TrieDB) -> TrieDBIterator<'a> {
+	pub fn new(db: &'a TrieDB) -> super::Result<TrieDBIterator<'a>> {
 		let mut r = TrieDBIterator {
 			db: db,
 			trail: vec![],
 			key_nibbles: Vec::new(),
 		};
-		r.descend(db.root_data(&mut NoOp).unwrap());
-		r
+
+		try!(db.root_data(&mut NoOp).and_then(|root| r.descend(root)));
+		Ok(r)
 	}
 
 	/// Descend into a payload.
-	fn descend(&mut self, d: &'a [u8]) {
+	fn descend(&mut self, d: &'a [u8]) -> super::Result<()> {
 		self.trail.push(Crumb {
 			status: Status::Entering,
-			node: self.db.get_node(d, &mut NoOp, 0).unwrap(),
+			node: try!(self.db.get_node(d, &mut NoOp, 0)),
 		});
 		match self.trail.last().unwrap().node {
 			Node::Leaf(n, _) | Node::Extension(n, _) => { self.key_nibbles.extend(n.iter()); },
 			_ => {}
 		}
+
+		Ok(())
 	}
 
 	/// Descend into a payload and get the next item.
-	fn descend_next(&mut self, d: &'a [u8]) -> Option<(Bytes, &'a [u8])> { self.descend(d); self.next() }
+	fn descend_next(&mut self, d: &'a [u8]) -> Option<TrieItem<'a>> {
+		match self.descend(d) {
+			Ok(()) => self.next(),
+			Err(e) => Some(Err(e)),
+		}
+	}
 
 	/// The present key.
 	fn key(&self) -> Bytes {
@@ -312,12 +320,12 @@ impl<'a> TrieDBIterator<'a> {
 }
 
 impl<'a> Iterator for TrieDBIterator<'a> {
-	type Item = (Bytes, &'a [u8]);
+	type Item = TrieItem<'a>;
 
 	fn next(&mut self) -> Option<Self::Item> {
 		let b = match self.trail.last_mut() {
 			Some(mut b) => { b.increment(); b.clone() },
-			None => return None
+			None => return None,
 		};
 		match (b.status, b.node) {
 			(Status::Exiting, n) => {
@@ -332,7 +340,7 @@ impl<'a> Iterator for TrieDBIterator<'a> {
 				self.trail.pop();
 				self.next()
 			},
-			(Status::At, Node::Leaf(_, v)) | (Status::At, Node::Branch(_, Some(v))) => Some((self.key(), v)),
+			(Status::At, Node::Leaf(_, v)) | (Status::At, Node::Branch(_, Some(v))) => Some(Ok((self.key(), v))),
 			(Status::At, Node::Extension(_, d)) => self.descend_next(d),
 			(Status::At, Node::Branch(_, _)) => self.next(),
 			(Status::AtChild(i), Node::Branch(children, _)) if children[i].len() > 0 => {
@@ -352,8 +360,8 @@
 }
 
 impl<'db> Trie for TrieDB<'db> {
-	fn iter<'a>(&'a self) -> Box<Iterator<Item = TrieItem> + 'a> {
-		Box::new(TrieDBIterator::new(self))
+	fn iter<'a>(&'a self) -> Result<Box<Iterator<Item = TrieItem> + 'a>> {
+		TrieDBIterator::new(self).map(|iter| Box::new(iter) as Box<_>)
 	}
 
 	fn root(&self) -> &H256 { self.root }
@@ -392,6 +400,6 @@ fn iterator() {
 	}
 
 	let t = TrieDB::new(&memdb, &root).unwrap();
-	assert_eq!(d.iter().map(|i|i.to_vec()).collect::<Vec<_>>(), t.iter().map(|x|x.0).collect::<Vec<_>>());
-	assert_eq!(d, t.iter().map(|x|x.1).collect::<Vec<_>>());
+	assert_eq!(d.iter().map(|i|i.to_vec()).collect::<Vec<_>>(), t.iter().unwrap().map(|x| x.unwrap().0).collect::<Vec<_>>());
+	assert_eq!(d, t.iter().unwrap().map(|x| x.unwrap().1).collect::<Vec<_>>());
 }
diff --git a/util/src/triehash.rs b/util/src/triehash.rs
index f49b588d4..c8ab5bb08 100644
--- a/util/src/triehash.rs
+++ b/util/src/triehash.rs
@@ -40,7 +40,9 @@ use vector::SharedPrefix;
 /// assert_eq!(ordered_trie_root(v), H256::from_str(root).unwrap());
 /// }
 /// ```
-pub fn ordered_trie_root(input: Vec<Vec<u8>>) -> H256 {
+pub fn ordered_trie_root<I>(input: I) -> H256
+	where I: IntoIterator<Item = Vec<u8>>
+{
 	let gen_input = input
 		// first put elements into btree to sort them by nibbles
 		// optimize it later