From c46fe330dc4e3b513eff7560868ef6be99936e03 Mon Sep 17 00:00:00 2001
From: Adria Massanet
Date: Wed, 13 Jan 2021 17:03:12 +0000
Subject: [PATCH] Big folder refactor

---
 .gitmodules | 8 +-
 Cargo.lock | 70 -
 Cargo.toml | 79 +-
 {chainspec => bin/chainspec}/Cargo.toml | 2 +-
 {chainspec => bin/chainspec}/src/main.rs | 0
 .../ethkey/cli => bin/ethkey}/Cargo.toml | 4 +-
 .../ethkey/cli => bin/ethkey}/src/main.rs | 0
 .../ethstore/cli => bin/ethstore}/Cargo.toml | 6 +-
 .../cli => bin/ethstore}/src/crack.rs | 0
 .../ethstore/cli => bin/ethstore}/src/main.rs | 0
 {evmbin => bin/evmbin}/Cargo.toml | 12 +-
 {evmbin => bin/evmbin}/README.md | 0
 {evmbin => bin/evmbin}/benches/mod.rs | 0
 {evmbin => bin/evmbin}/res/testchain.json | 0
 {evmbin => bin/evmbin}/src/display/config.rs | 0
 {evmbin => bin/evmbin}/src/display/json.rs | 0
 {evmbin => bin/evmbin}/src/display/mod.rs | 0
 {evmbin => bin/evmbin}/src/display/simple.rs | 0
 .../evmbin}/src/display/std_json.rs | 0
 {evmbin => bin/evmbin}/src/info.rs | 0
 {evmbin => bin/evmbin}/src/main.rs | 0
 {parity => bin/oe}/account.rs | 0
 {parity => bin/oe}/account_utils.rs | 0
 {parity => bin/oe}/blockchain.rs | 0
 {parity => bin/oe}/cache.rs | 0
 {parity => bin/oe}/cli/mod.rs | 0
 .../oe}/cli/presets/config.dev-insecure.toml | 0
 .../oe}/cli/presets/config.dev.toml | 0
 .../oe}/cli/presets/config.insecure.toml | 0
 .../oe}/cli/presets/config.mining.toml | 0
 .../presets/config.non-standard-ports.toml | 0
 {parity => bin/oe}/cli/presets/mod.rs | 0
 {parity => bin/oe}/cli/tests/config.full.toml | 0
 .../oe}/cli/tests/config.invalid1.toml | 0
 .../oe}/cli/tests/config.invalid2.toml | 0
 .../oe}/cli/tests/config.invalid3.toml | 0
 .../oe}/cli/tests/config.invalid4.toml | 0
 {parity => bin/oe}/cli/tests/config.toml | 0
 {parity => bin/oe}/cli/usage.rs | 0
 {parity => bin/oe}/cli/usage_header.txt | 0
 {parity => bin/oe}/cli/version.txt | 0
 {parity => bin/oe}/configuration.rs | 0
 {parity => bin/oe}/db/mod.rs | 0
 {parity => bin/oe}/db/rocksdb/blooms.rs | 0
 {parity => bin/oe}/db/rocksdb/helpers.rs | 0
 {parity => bin/oe}/db/rocksdb/migration.rs | 0
 {parity => bin/oe}/db/rocksdb/mod.rs | 0
 {parity => bin/oe}/helpers.rs | 0
 {parity => bin/oe}/informant.rs | 0
 {parity => bin/oe}/lib.rs | 1 -
 {parity => bin/oe}/logger/Cargo.toml | 0
 {parity => bin/oe}/logger/src/lib.rs | 0
 {parity => bin/oe}/logger/src/rotating.rs | 0
 {parity => bin/oe}/main.rs | 0
 {parity => bin/oe}/metrics.rs | 0
 {parity => bin/oe}/modules.rs | 0
 {parity => bin/oe}/params.rs | 0
 {parity => bin/oe}/presale.rs | 0
 {parity => bin/oe}/rpc.rs | 0
 {parity => bin/oe}/rpc_apis.rs | 0
 {parity => bin/oe}/run.rs | 0
 {parity => bin/oe}/secretstore.rs | 0
 {parity => bin/oe}/signer.rs | 0
 {parity => bin/oe}/snapshot.rs | 0
 {parity => bin/oe}/stratum.rs | 0
 {parity => bin/oe}/upgrade.rs | 0
 {parity => bin/oe}/user_defaults.rs | 0
 {accounts => crates/accounts}/Cargo.toml | 0
 .../accounts}/ethkey/.gitignore | 0
 .../accounts}/ethkey/.travis.yml | 0
 .../accounts}/ethkey/Cargo.toml | 2 +-
 .../accounts}/ethkey/README.md | 0
 .../accounts}/ethkey/src/brain.rs | 0
 .../accounts}/ethkey/src/brain_prefix.rs | 0
 .../accounts}/ethkey/src/brain_recover.rs | 0
 .../accounts}/ethkey/src/crypto.rs | 0
 .../accounts}/ethkey/src/error.rs | 0
 .../accounts}/ethkey/src/extended.rs | 0
 .../accounts}/ethkey/src/keccak.rs | 0
 .../accounts}/ethkey/src/keypair.rs | 0
 .../accounts}/ethkey/src/lib.rs | 0
 .../accounts}/ethkey/src/math.rs | 0
 .../accounts}/ethkey/src/password.rs | 0
 .../accounts}/ethkey/src/prefix.rs | 0
 .../accounts}/ethkey/src/random.rs | 0
.../accounts}/ethkey/src/secret.rs | 0 .../accounts}/ethkey/src/signature.rs | 0 .../accounts}/ethstore/.editorconfig | 0 .../accounts}/ethstore/.gitignore | 0 .../accounts}/ethstore/.travis.yml | 0 .../accounts}/ethstore/Cargo.toml | 0 .../accounts}/ethstore/README.md | 0 .../accounts}/ethstore/src/account/cipher.rs | 0 .../accounts}/ethstore/src/account/crypto.rs | 0 .../accounts}/ethstore/src/account/kdf.rs | 0 .../accounts}/ethstore/src/account/mod.rs | 0 .../ethstore/src/account/safe_account.rs | 0 .../accounts}/ethstore/src/account/version.rs | 0 .../ethstore/src/accounts_dir/disk.rs | 0 .../ethstore/src/accounts_dir/memory.rs | 0 .../ethstore/src/accounts_dir/mod.rs | 0 .../ethstore/src/accounts_dir/vault.rs | 0 .../accounts}/ethstore/src/error.rs | 0 .../accounts}/ethstore/src/ethkey.rs | 0 .../accounts}/ethstore/src/ethstore.rs | 0 .../accounts}/ethstore/src/import.rs | 0 .../accounts}/ethstore/src/json/bytes.rs | 0 .../accounts}/ethstore/src/json/cipher.rs | 0 .../accounts}/ethstore/src/json/crypto.rs | 0 .../accounts}/ethstore/src/json/error.rs | 0 .../accounts}/ethstore/src/json/hash.rs | 0 .../accounts}/ethstore/src/json/id.rs | 0 .../accounts}/ethstore/src/json/kdf.rs | 0 .../accounts}/ethstore/src/json/key_file.rs | 0 .../accounts}/ethstore/src/json/mod.rs | 0 .../accounts}/ethstore/src/json/presale.rs | 0 .../accounts}/ethstore/src/json/vault_file.rs | 0 .../ethstore/src/json/vault_key_file.rs | 0 .../accounts}/ethstore/src/json/version.rs | 0 .../accounts}/ethstore/src/lib.rs | 0 .../accounts}/ethstore/src/presale.rs | 0 .../accounts}/ethstore/src/random.rs | 0 .../accounts}/ethstore/src/secret_store.rs | 0 .../accounts}/ethstore/tests/api.rs | 0 .../ethstore/tests/res/ciphertext/30.json | 0 .../ethstore/tests/res/ciphertext/31.json | 0 ...--3f49624084b67849c7b4e805c5988c21a430f9d9 | 0 ...--5ba4dcf897e97c2bdf8315b9ef26c13c085988cf | 0 ...--63121b431a52f8043c16fcf0d1df9cb7b5f66649 | 0 .../accounts}/ethstore/tests/res/pat/p1.json | 0 .../accounts}/ethstore/tests/res/pat/p2.json | 0 .../accounts}/ethstore/tests/util/mod.rs | 0 .../ethstore/tests/util/transient_dir.rs | 0 .../accounts}/src/account_data.rs | 0 {accounts => crates/accounts}/src/error.rs | 0 {accounts => crates/accounts}/src/lib.rs | 0 {accounts => crates/accounts}/src/stores.rs | 0 .../concensus/ethash}/Cargo.toml | 0 .../concensus/ethash}/benches/basic.rs | 0 .../concensus/ethash}/benches/progpow.rs | 0 .../ethash}/res/progpow_testvectors.json | 0 .../concensus/ethash}/src/cache.rs | 0 .../concensus/ethash}/src/compute.rs | 0 .../concensus/ethash}/src/keccak.rs | 0 .../concensus/ethash}/src/lib.rs | 0 .../concensus/ethash}/src/progpow.rs | 0 .../concensus/ethash}/src/seed_compute.rs | 0 .../concensus/ethash}/src/shared.rs | 0 {miner => crates/concensus/miner}/Cargo.toml | 12 +- .../concensus/miner}/local-store/Cargo.toml | 6 +- .../concensus/miner}/local-store/src/lib.rs | 0 .../concensus/miner}/price-info/Cargo.toml | 6 +- .../concensus/miner}/price-info/src/lib.rs | 0 .../res/contracts/service_transaction.json | 0 .../concensus/miner}/src/external.rs | 0 .../miner}/src/gas_price_calibrator.rs | 0 .../concensus/miner}/src/gas_pricer.rs | 0 {miner => crates/concensus/miner}/src/lib.rs | 0 .../concensus/miner}/src/local_accounts.rs | 0 .../concensus/miner}/src/pool/client.rs | 0 .../concensus/miner}/src/pool/listener.rs | 0 .../miner}/src/pool/local_transactions.rs | 0 .../concensus/miner}/src/pool/mod.rs | 0 .../concensus/miner}/src/pool/queue.rs | 0 .../concensus/miner}/src/pool/ready.rs | 0 
.../concensus/miner}/src/pool/replace.rs | 0 .../miner}/src/pool/res/big_transaction.data | 0 .../concensus/miner}/src/pool/scoring.rs | 0 .../concensus/miner}/src/pool/tests/client.rs | 0 .../concensus/miner}/src/pool/tests/mod.rs | 0 .../concensus/miner}/src/pool/tests/tx.rs | 0 .../concensus/miner}/src/pool/verifier.rs | 0 .../miner}/src/service_transaction_checker.rs | 0 .../concensus/miner}/src/work_notify.rs | 0 .../concensus/miner}/stratum/Cargo.toml | 0 .../concensus/miner}/stratum/src/lib.rs | 0 .../concensus/miner}/stratum/src/traits.rs | 0 .../concensus/miner}/using-queue/Cargo.toml | 0 .../concensus/miner}/using-queue/src/lib.rs | 0 {util => crates/db}/bloom/Cargo.toml | 0 {util => crates/db}/bloom/src/lib.rs | 0 {util => crates/db}/blooms-db/Cargo.toml | 0 .../db}/blooms-db/benches/blooms.rs | 0 {util => crates/db}/blooms-db/src/db.rs | 0 {util => crates/db}/blooms-db/src/file.rs | 0 {util => crates/db}/blooms-db/src/lib.rs | 0 {ethcore => crates/db}/db/Cargo.toml | 2 +- .../db}/db/src/cache_manager.rs | 0 {ethcore => crates/db}/db/src/db.rs | 0 {ethcore => crates/db}/db/src/keys.rs | 0 {ethcore => crates/db}/db/src/lib.rs | 0 {util => crates/db}/journaldb/Cargo.toml | 4 +- .../db}/journaldb/src/archivedb.rs | 0 .../db}/journaldb/src/as_hash_db_impls.rs | 0 .../db}/journaldb/src/earlymergedb.rs | 0 {util => crates/db}/journaldb/src/lib.rs | 0 .../db}/journaldb/src/overlaydb.rs | 0 .../db}/journaldb/src/overlayrecentdb.rs | 0 .../db}/journaldb/src/refcounteddb.rs | 0 {util => crates/db}/journaldb/src/traits.rs | 0 {util => crates/db}/journaldb/src/util.rs | 0 .../db}/memory-db/.cargo_vcs_info.json | 0 {util => crates/db}/memory-db/Cargo.toml | 0 {util => crates/db}/memory-db/Cargo.toml.orig | 0 {util => crates/db}/memory-db/README.md | 0 .../db}/memory-db/benches/bench.rs | 0 {util => crates/db}/memory-db/src/lib.rs | 0 .../db}/migration-rocksdb/Cargo.toml | 2 +- .../db}/migration-rocksdb/src/lib.rs | 0 .../db}/migration-rocksdb/tests/tests.rs | 0 .../db}/patricia-trie-ethereum/Cargo.toml | 2 +- .../db}/patricia-trie-ethereum/src/lib.rs | 0 .../src/rlp_node_codec.rs | 0 {ethcore => crates/ethcore}/Cargo.toml | 40 +- .../ethcore}/benches/builtin.rs | 0 .../ethcore}/blockchain/Cargo.toml | 4 +- .../ethcore}/blockchain/src/best_block.rs | 0 .../ethcore}/blockchain/src/block_info.rs | 0 .../ethcore}/blockchain/src/blockchain.rs | 0 .../ethcore}/blockchain/src/cache.rs | 0 .../ethcore}/blockchain/src/config.rs | 0 .../ethcore}/blockchain/src/generator.rs | 0 .../ethcore}/blockchain/src/import_route.rs | 0 .../ethcore}/blockchain/src/lib.rs | 0 .../ethcore}/blockchain/src/update.rs | 0 .../ethcore/res/chainspec}/callisto.json | 0 .../ethcore/res/chainspec}/ellaism.json | 0 .../ethcore/res/chainspec}/ewc.json | 0 .../ethcore/res/chainspec}/foundation.json | 0 .../ethcore/res/chainspec}/goerli.json | 0 .../ethcore/res/chainspec}/instant_seal.json | 0 .../ethcore/res/chainspec}/kovan.json | 0 .../ethcore/res/chainspec}/mix.json | 0 .../ethcore/res/chainspec}/morden.json | 0 .../ethcore/res/chainspec}/musicoin.json | 0 .../ethcore/res/chainspec}/poacore.json | 0 .../ethcore/res/chainspec}/poasokol.json | 0 .../ethcore/res/chainspec}/rinkeby.json | 0 .../ethcore/res/chainspec}/ropsten.json | 0 .../res/chainspec/test}/authority_round.json | 0 ...authority_round_block_reward_contract.json | 0 .../test}/authority_round_empty_steps.json | 0 .../res/chainspec/test}/basic_authority.json | 0 .../res/chainspec/test}/berlin_test.json | 0 .../chainspec/test}/builtin_multi_bench.json | 0 
.../test}/builtin_one_activation_bench.json | 0 .../res/chainspec/test}/byzantium_test.json | 0 ...yzantium_to_constantinoplefixat5_test.json | 0 .../chainspec/test}/constantinople_test.json | 0 .../res/chainspec/test}/constructor.json | 0 .../test}/contract_ver_2_genesis.json | 0 .../test}/deprecated_contract_genesis.json | 0 .../res/chainspec/test}/eip150_test.json | 0 .../res/chainspec/test}/eip161_test.json | 0 .../res/chainspec/test}/eip210_test.json | 0 .../chainspec/test}/frontier_like_test.json | 0 .../res/chainspec/test}/frontier_test.json | 0 .../res/chainspec/test}/homestead_test.json | 0 .../res/chainspec/test}/istanbul_test.json | 0 .../res/chainspec/test}/kovan_wasm_test.json | 0 .../res/chainspec/test}/mcip3_test.json | 0 .../ethcore/res/chainspec/test}/null.json | 0 .../res/chainspec/test}/null_morden.json | 0 .../test}/null_morden_with_finality.json | 0 .../test}/null_morden_with_reward.json | 0 .../test}/spec_backward_compability.json | 0 .../res/chainspec/test}/st_peters_test.json | 0 .../res/chainspec/test}/transition_test.json | 0 .../chainspec/test}/validator_contract.json | 0 .../res/chainspec/test}/validator_multi.json | 0 .../test}/validator_safe_contract.json | 0 .../res/chainspec/test}/yolo3_test.json | 0 .../ethcore/res/chainspec}/volta.json | 0 .../ethcore/res/chainspec}/xdai.json | 0 .../ethcore}/res/contracts/block_reward.json | 0 .../ethcore}/res/contracts/registrar.json | 2 +- .../res/contracts/test_validator_set.json | 0 .../ethcore}/res/contracts/tx_acl.json | 0 .../res/contracts/tx_acl_deprecated.json | 0 .../res/contracts/validator_report.json | 0 .../ethcore}/res/contracts/validator_set.json | 0 .../tests => crates/ethcore/res/json_tests | 0 crates/ethcore/res/json_tests.json | 60 + .../ethcore}/service/Cargo.toml | 4 +- .../ethcore}/service/src/error.rs | 0 .../ethcore}/service/src/lib.rs | 0 .../ethcore}/service/src/service.rs | 0 .../ethcore}/service/src/stop_guard.rs | 0 {ethcore => crates/ethcore}/src/account_db.rs | 0 {ethcore => crates/ethcore}/src/block.rs | 0 .../ethcore}/src/client/ancient_import.rs | 0 .../ethcore}/src/client/bad_blocks.rs | 0 .../ethcore}/src/client/chain_notify.rs | 0 .../ethcore}/src/client/client.rs | 0 .../ethcore}/src/client/config.rs | 0 .../ethcore}/src/client/evm_test_client.rs | 0 .../ethcore}/src/client/io_message.rs | 0 {ethcore => crates/ethcore}/src/client/mod.rs | 0 .../ethcore}/src/client/test_client.rs | 0 .../ethcore}/src/client/trace.rs | 0 .../ethcore}/src/client/traits.rs | 0 .../src/engines/authority_round/finality.rs | 0 .../src/engines/authority_round/mod.rs | 0 .../ethcore}/src/engines/basic_authority.rs | 2 +- .../ethcore}/src/engines/block_reward.rs | 0 .../src/engines/clique/block_state.rs | 0 .../ethcore}/src/engines/clique/mod.rs | 0 .../ethcore}/src/engines/clique/params.rs | 0 .../ethcore}/src/engines/clique/tests.rs | 0 .../ethcore}/src/engines/clique/util.rs | 0 .../ethcore}/src/engines/instant_seal.rs | 0 .../ethcore}/src/engines/mod.rs | 0 .../ethcore}/src/engines/null_engine.rs | 0 .../ethcore}/src/engines/signer.rs | 0 .../src/engines/validator_set/contract.rs | 0 .../ethcore}/src/engines/validator_set/mod.rs | 0 .../src/engines/validator_set/multi.rs | 0 .../engines/validator_set/safe_contract.rs | 0 .../src/engines/validator_set/simple_list.rs | 0 .../src/engines/validator_set/test.rs | 0 {ethcore => crates/ethcore}/src/error.rs | 0 .../ethcore}/src/ethereum/denominations.rs | 0 .../ethcore}/src/ethereum/ethash.rs | 0 .../ethcore}/src/ethereum/mod.rs | 78 +- {ethcore => 
crates/ethcore}/src/executed.rs | 0 {ethcore => crates/ethcore}/src/executive.rs | 0 .../ethcore}/src/externalities.rs | 0 {ethcore => crates/ethcore}/src/factory.rs | 0 .../ethcore}/src/json_tests/chain.rs | 0 .../ethcore}/src/json_tests/difficulty.rs | 0 .../ethcore}/src/json_tests/executive.rs | 0 .../ethcore}/src/json_tests/mod.rs | 0 .../ethcore}/src/json_tests/runner.rs | 2 +- .../ethcore}/src/json_tests/skip.rs | 0 .../ethcore}/src/json_tests/state.rs | 0 .../ethcore}/src/json_tests/test_common.rs | 0 .../ethcore}/src/json_tests/transaction.rs | 0 .../ethcore}/src/json_tests/trie.rs | 0 {ethcore => crates/ethcore}/src/lib.rs | 0 .../ethcore}/src/machine/impls.rs | 0 .../ethcore}/src/machine/mod.rs | 0 .../ethcore}/src/machine/traits.rs | 0 .../ethcore}/src/miner/miner.rs | 0 {ethcore => crates/ethcore}/src/miner/mod.rs | 0 .../ethcore}/src/miner/pool_client.rs | 0 .../ethcore}/src/miner/stratum.rs | 0 .../ethcore}/src/pod_account.rs | 0 {ethcore => crates/ethcore}/src/pod_state.rs | 0 .../ethcore}/src/snapshot/account.rs | 0 .../ethcore}/src/snapshot/block.rs | 0 .../src/snapshot/consensus/authority.rs | 0 .../ethcore}/src/snapshot/consensus/mod.rs | 0 .../ethcore}/src/snapshot/consensus/work.rs | 0 .../ethcore}/src/snapshot/error.rs | 0 .../ethcore}/src/snapshot/io.rs | 0 .../ethcore}/src/snapshot/mod.rs | 0 .../ethcore}/src/snapshot/service.rs | 0 .../ethcore}/src/snapshot/tests/helpers.rs | 0 .../ethcore}/src/snapshot/tests/mod.rs | 0 .../src/snapshot/tests/proof_of_authority.rs | 0 .../src/snapshot/tests/proof_of_work.rs | 0 .../ethcore}/src/snapshot/tests/service.rs | 0 .../ethcore}/src/snapshot/tests/state.rs | 0 .../tests/test_validator_contract.json | 0 .../ethcore}/src/snapshot/traits.rs | 0 .../ethcore}/src/snapshot/watcher.rs | 0 .../ethcore}/src/spec/genesis.rs | 0 {ethcore => crates/ethcore}/src/spec/mod.rs | 0 {ethcore => crates/ethcore}/src/spec/seal.rs | 0 {ethcore => crates/ethcore}/src/spec/spec.rs | 28 +- .../ethcore}/src/state/account.rs | 0 .../ethcore}/src/state/backend.rs | 0 {ethcore => crates/ethcore}/src/state/mod.rs | 0 .../ethcore}/src/state/substate.rs | 0 {ethcore => crates/ethcore}/src/state_db.rs | 0 .../ethcore}/src/test_helpers.rs | 0 .../ethcore}/src/tests/blockchain.rs | 0 .../ethcore}/src/tests/client.rs | 0 {ethcore => crates/ethcore}/src/tests/evm.rs | 0 {ethcore => crates/ethcore}/src/tests/mod.rs | 0 .../ethcore}/src/tests/trace.rs | 0 .../ethcore}/src/trace/config.rs | 0 {ethcore => crates/ethcore}/src/trace/db.rs | 0 .../ethcore}/src/trace/executive_tracer.rs | 0 .../ethcore}/src/trace/import.rs | 0 {ethcore => crates/ethcore}/src/trace/mod.rs | 0 .../ethcore}/src/trace/noop_tracer.rs | 0 .../ethcore}/src/trace/types/error.rs | 0 .../ethcore}/src/trace/types/filter.rs | 0 .../ethcore}/src/trace/types/flat.rs | 0 .../ethcore}/src/trace/types/localized.rs | 0 .../ethcore}/src/trace/types/mod.rs | 0 .../ethcore}/src/trace/types/trace.rs | 0 .../ethcore}/src/transaction_ext.rs | 0 {ethcore => crates/ethcore}/src/tx_filter.rs | 4 +- .../src/verification/canon_verifier.rs | 0 .../ethcore}/src/verification/mod.rs | 0 .../src/verification/noop_verifier.rs | 0 .../ethcore}/src/verification/queue/kind.rs | 0 .../ethcore}/src/verification/queue/mod.rs | 0 .../ethcore}/src/verification/verification.rs | 0 .../ethcore}/src/verification/verifier.rs | 0 {ethcore => crates/ethcore}/sync/Cargo.toml | 8 +- {ethcore => crates/ethcore}/sync/src/api.rs | 0 .../ethcore}/sync/src/block_sync.rs | 0 .../ethcore}/sync/src/blocks.rs | 0 
.../ethcore}/sync/src/chain/fork_filter.rs | 0 .../ethcore}/sync/src/chain/handler.rs | 0 .../ethcore}/sync/src/chain/mod.rs | 0 .../ethcore}/sync/src/chain/propagator.rs | 0 .../ethcore}/sync/src/chain/requester.rs | 0 .../ethcore}/sync/src/chain/supplier.rs | 0 .../ethcore}/sync/src/chain/sync_packet.rs | 0 {ethcore => crates/ethcore}/sync/src/lib.rs | 0 .../ethcore}/sync/src/res/private_spec.json | 0 .../ethcore}/sync/src/snapshot.rs | 0 .../ethcore}/sync/src/sync_io.rs | 0 .../ethcore}/sync/src/tests/chain.rs | 0 .../ethcore}/sync/src/tests/consensus.rs | 0 .../ethcore}/sync/src/tests/helpers.rs | 0 .../ethcore}/sync/src/tests/mod.rs | 0 .../ethcore}/sync/src/tests/rpc.rs | 0 .../ethcore}/sync/src/tests/snapshot.rs | 0 .../ethcore}/sync/src/transactions_stats.rs | 0 {ethcore => crates/ethcore}/types/Cargo.toml | 2 +- .../ethcore}/types/src/account_diff.rs | 0 .../ethcore}/types/src/ancestry_action.rs | 0 .../ethcore}/types/src/basic_account.rs | 0 .../ethcore}/types/src/block.rs | 0 .../ethcore}/types/src/block_status.rs | 0 .../ethcore}/types/src/blockchain_info.rs | 0 .../ethcore}/types/src/call_analytics.rs | 0 .../ethcore}/types/src/creation_status.rs | 0 .../ethcore}/types/src/data_format.rs | 0 .../ethcore}/types/src/encoded.rs | 0 .../ethcore}/types/src/engines/epoch.rs | 0 .../ethcore}/types/src/engines/mod.rs | 0 .../ethcore}/types/src/filter.rs | 0 .../ethcore}/types/src/header.rs | 0 {ethcore => crates/ethcore}/types/src/ids.rs | 0 {ethcore => crates/ethcore}/types/src/lib.rs | 0 .../ethcore}/types/src/log_entry.rs | 0 .../ethcore}/types/src/pruning_info.rs | 0 .../ethcore}/types/src/receipt.rs | 0 .../ethcore}/types/src/restoration_status.rs | 0 .../ethcore}/types/src/security_level.rs | 0 .../ethcore}/types/src/snapshot_manifest.rs | 0 .../ethcore}/types/src/state_diff.rs | 0 .../ethcore}/types/src/trace_filter.rs | 0 .../ethcore}/types/src/transaction/error.rs | 0 .../ethcore}/types/src/transaction/mod.rs | 0 .../types/src/transaction/transaction.rs | 0 .../types/src/transaction/transaction_id.rs | 0 .../ethcore}/types/src/tree_route.rs | 0 .../types/src/verification_queue_info.rs | 0 .../ethcore}/types/src/views/block.rs | 0 .../ethcore}/types/src/views/body.rs | 0 .../ethcore}/types/src/views/header.rs | 0 .../ethcore}/types/src/views/mod.rs | 0 .../ethcore}/types/src/views/transaction.rs | 0 .../ethcore}/types/src/views/view_rlp.rs | 0 {json => crates/ethjson}/Cargo.toml | 0 .../ethjson}/src/blockchain/account.rs | 0 .../ethjson}/src/blockchain/block.rs | 0 .../ethjson}/src/blockchain/blockchain.rs | 0 .../ethjson}/src/blockchain/header.rs | 0 .../ethjson}/src/blockchain/mod.rs | 0 .../ethjson}/src/blockchain/state.rs | 1 - .../ethjson}/src/blockchain/test.rs | 0 .../ethjson}/src/blockchain/transaction.rs | 0 {json => crates/ethjson}/src/bytes.rs | 0 {json => crates/ethjson}/src/hash.rs | 0 {json => crates/ethjson}/src/lib.rs | 0 {json => crates/ethjson}/src/maybe.rs | 0 {json => crates/ethjson}/src/spec/account.rs | 0 .../ethjson}/src/spec/authority_round.rs | 0 .../ethjson}/src/spec/basic_authority.rs | 0 {json => crates/ethjson}/src/spec/builtin.rs | 0 {json => crates/ethjson}/src/spec/clique.rs | 0 {json => crates/ethjson}/src/spec/engine.rs | 0 {json => crates/ethjson}/src/spec/ethash.rs | 0 {json => crates/ethjson}/src/spec/genesis.rs | 0 .../ethjson}/src/spec/instant_seal.rs | 0 {json => crates/ethjson}/src/spec/mod.rs | 0 .../ethjson}/src/spec/null_engine.rs | 0 {json => crates/ethjson}/src/spec/params.rs | 0 {json => crates/ethjson}/src/spec/seal.rs | 0 {json 
=> crates/ethjson}/src/spec/spec.rs | 0 {json => crates/ethjson}/src/spec/state.rs | 0 .../ethjson}/src/spec/validator_set.rs | 0 {json => crates/ethjson}/src/state/log.rs | 0 {json => crates/ethjson}/src/state/mod.rs | 0 {json => crates/ethjson}/src/state/state.rs | 0 {json => crates/ethjson}/src/state/test.rs | 0 .../ethjson}/src/state/transaction.rs | 0 {json => crates/ethjson}/src/test/mod.rs | 0 .../ethjson}/src/transaction/mod.rs | 0 .../ethjson}/src/transaction/test.rs | 0 .../ethjson}/src/transaction/transaction.rs | 0 .../ethjson}/src/transaction/txtest.rs | 0 {json => crates/ethjson}/src/trie/input.rs | 0 {json => crates/ethjson}/src/trie/mod.rs | 0 {json => crates/ethjson}/src/trie/test.rs | 0 {json => crates/ethjson}/src/trie/trie.rs | 0 {json => crates/ethjson}/src/uint.rs | 0 {json => crates/ethjson}/src/vm/call.rs | 0 {json => crates/ethjson}/src/vm/env.rs | 0 {json => crates/ethjson}/src/vm/mod.rs | 0 {json => crates/ethjson}/src/vm/test.rs | 0 .../ethjson}/src/vm/transaction.rs | 0 {json => crates/ethjson}/src/vm/vm.rs | 0 {util => crates/net}/fake-fetch/Cargo.toml | 0 {util => crates/net}/fake-fetch/src/lib.rs | 0 {util => crates/net}/fetch/Cargo.toml | 0 {util => crates/net}/fetch/src/client.rs | 0 {util => crates/net}/fetch/src/lib.rs | 0 .../net}/network-devp2p/Cargo.toml | 4 +- .../net}/network-devp2p/src/connection.rs | 0 .../net}/network-devp2p/src/discovery.rs | 0 .../net}/network-devp2p/src/handshake.rs | 0 .../net}/network-devp2p/src/host.rs | 0 .../net}/network-devp2p/src/ip_utils.rs | 0 .../net}/network-devp2p/src/lib.rs | 0 .../net}/network-devp2p/src/node_table.rs | 0 .../net}/network-devp2p/src/service.rs | 0 .../net}/network-devp2p/src/session.rs | 0 .../net}/network-devp2p/tests/tests.rs | 0 {util => crates/net}/network/Cargo.toml | 4 +- .../net}/network/src/client_version.rs | 0 .../net}/network/src/connection_filter.rs | 0 {util => crates/net}/network/src/error.rs | 0 {util => crates/net}/network/src/lib.rs | 0 .../net}/node-filter/Cargo.toml | 12 +- .../net}/node-filter/res/node_filter.json | 0 .../net}/node-filter/res/peer_set.json | 0 .../net}/node-filter/src/lib.rs | 0 {rpc => crates/rpc}/Cargo.toml | 22 +- {rpc => crates/rpc}/src/authcodes.rs | 0 {rpc => crates/rpc}/src/http_common.rs | 0 {rpc => crates/rpc}/src/lib.rs | 0 {rpc => crates/rpc}/src/tests/helpers.rs | 0 {rpc => crates/rpc}/src/tests/http_client.rs | 0 {rpc => crates/rpc}/src/tests/mod.rs | 0 {rpc => crates/rpc}/src/tests/rpc.rs | 0 {rpc => crates/rpc}/src/tests/ws.rs | 0 {rpc => crates/rpc}/src/v1/extractors.rs | 0 .../rpc}/src/v1/helpers/block_import.rs | 0 .../rpc}/src/v1/helpers/deprecated.rs | 0 .../rpc}/src/v1/helpers/dispatch/full.rs | 0 .../rpc}/src/v1/helpers/dispatch/mod.rs | 0 .../v1/helpers/dispatch/prospective_signer.rs | 0 .../rpc}/src/v1/helpers/dispatch/signing.rs | 0 {rpc => crates/rpc}/src/v1/helpers/eip191.rs | 0 .../rpc}/src/v1/helpers/engine_signer.rs | 0 {rpc => crates/rpc}/src/v1/helpers/errors.rs | 0 .../src/v1/helpers/external_signer/mod.rs | 0 .../src/v1/helpers/external_signer/oneshot.rs | 0 .../helpers/external_signer/signing_queue.rs | 0 .../rpc}/src/v1/helpers/fake_sign.rs | 0 {rpc => crates/rpc}/src/v1/helpers/mod.rs | 0 .../rpc}/src/v1/helpers/network_settings.rs | 0 {rpc => crates/rpc}/src/v1/helpers/nonce.rs | 0 .../rpc}/src/v1/helpers/poll_filter.rs | 0 .../rpc}/src/v1/helpers/poll_manager.rs | 0 .../rpc}/src/v1/helpers/requests.rs | 0 .../rpc}/src/v1/helpers/secretstore.rs | 0 .../rpc}/src/v1/helpers/signature.rs | 0 
.../rpc}/src/v1/helpers/subscribers.rs | 0 .../src/v1/helpers/subscription_manager.rs | 0 {rpc => crates/rpc}/src/v1/helpers/work.rs | 0 {rpc => crates/rpc}/src/v1/impls/debug.rs | 0 {rpc => crates/rpc}/src/v1/impls/eth.rs | 0 .../rpc}/src/v1/impls/eth_filter.rs | 0 .../rpc}/src/v1/impls/eth_pubsub.rs | 0 {rpc => crates/rpc}/src/v1/impls/mod.rs | 0 {rpc => crates/rpc}/src/v1/impls/net.rs | 0 {rpc => crates/rpc}/src/v1/impls/parity.rs | 0 .../rpc}/src/v1/impls/parity_accounts.rs | 0 .../rpc}/src/v1/impls/parity_set.rs | 0 {rpc => crates/rpc}/src/v1/impls/personal.rs | 0 {rpc => crates/rpc}/src/v1/impls/pubsub.rs | 0 .../rpc}/src/v1/impls/secretstore.rs | 0 {rpc => crates/rpc}/src/v1/impls/signer.rs | 0 {rpc => crates/rpc}/src/v1/impls/signing.rs | 0 .../rpc}/src/v1/impls/signing_unsafe.rs | 0 {rpc => crates/rpc}/src/v1/impls/traces.rs | 0 {rpc => crates/rpc}/src/v1/impls/web3.rs | 0 {rpc => crates/rpc}/src/v1/informant.rs | 0 {rpc => crates/rpc}/src/v1/metadata.rs | 0 {rpc => crates/rpc}/src/v1/mod.rs | 0 {rpc => crates/rpc}/src/v1/tests/eth.rs | 0 .../src/v1/tests/helpers/miner_service.rs | 0 .../rpc}/src/v1/tests/helpers/mod.rs | 0 .../src/v1/tests/helpers/snapshot_service.rs | 0 .../src/v1/tests/helpers/sync_provider.rs | 0 .../rpc}/src/v1/tests/mocked/debug.rs | 0 .../rpc}/src/v1/tests/mocked/eth.rs | 0 .../rpc}/src/v1/tests/mocked/eth_pubsub.rs | 0 .../src/v1/tests/mocked/manage_network.rs | 0 .../rpc}/src/v1/tests/mocked/mod.rs | 0 .../rpc}/src/v1/tests/mocked/net.rs | 0 .../rpc}/src/v1/tests/mocked/parity.rs | 0 .../src/v1/tests/mocked/parity_accounts.rs | 0 .../rpc}/src/v1/tests/mocked/parity_set.rs | 0 .../rpc}/src/v1/tests/mocked/personal.rs | 0 .../rpc}/src/v1/tests/mocked/pubsub.rs | 0 .../rpc}/src/v1/tests/mocked/secretstore.rs | 0 .../rpc}/src/v1/tests/mocked/signer.rs | 0 .../rpc}/src/v1/tests/mocked/signing.rs | 0 .../src/v1/tests/mocked/signing_unsafe.rs | 0 .../rpc}/src/v1/tests/mocked/traces.rs | 0 .../rpc}/src/v1/tests/mocked/web3.rs | 0 {rpc => crates/rpc}/src/v1/tests/mod.rs | 2 +- {rpc => crates/rpc}/src/v1/traits/debug.rs | 0 {rpc => crates/rpc}/src/v1/traits/eth.rs | 0 .../rpc}/src/v1/traits/eth_pubsub.rs | 0 .../rpc}/src/v1/traits/eth_signing.rs | 0 {rpc => crates/rpc}/src/v1/traits/mod.rs | 0 {rpc => crates/rpc}/src/v1/traits/net.rs | 0 {rpc => crates/rpc}/src/v1/traits/parity.rs | 0 .../rpc}/src/v1/traits/parity_accounts.rs | 0 .../rpc}/src/v1/traits/parity_set.rs | 0 .../rpc}/src/v1/traits/parity_signing.rs | 0 {rpc => crates/rpc}/src/v1/traits/personal.rs | 0 {rpc => crates/rpc}/src/v1/traits/pubsub.rs | 0 .../rpc}/src/v1/traits/secretstore.rs | 0 {rpc => crates/rpc}/src/v1/traits/signer.rs | 0 {rpc => crates/rpc}/src/v1/traits/traces.rs | 0 {rpc => crates/rpc}/src/v1/traits/web3.rs | 0 .../rpc}/src/v1/types/account_info.rs | 0 {rpc => crates/rpc}/src/v1/types/block.rs | 0 .../rpc}/src/v1/types/block_number.rs | 0 {rpc => crates/rpc}/src/v1/types/bytes.rs | 0 .../rpc}/src/v1/types/call_request.rs | 0 .../rpc}/src/v1/types/confirmations.rs | 0 .../rpc}/src/v1/types/derivation.rs | 0 {rpc => crates/rpc}/src/v1/types/eip191.rs | 0 {rpc => crates/rpc}/src/v1/types/eth_types.rs | 0 {rpc => crates/rpc}/src/v1/types/filter.rs | 0 {rpc => crates/rpc}/src/v1/types/histogram.rs | 0 {rpc => crates/rpc}/src/v1/types/index.rs | 0 {rpc => crates/rpc}/src/v1/types/log.rs | 0 {rpc => crates/rpc}/src/v1/types/mod.rs | 0 {rpc => crates/rpc}/src/v1/types/node_kind.rs | 0 .../rpc}/src/v1/types/provenance.rs | 0 {rpc => crates/rpc}/src/v1/types/pubsub.rs | 0 {rpc => 
crates/rpc}/src/v1/types/receipt.rs | 0 .../rpc}/src/v1/types/rpc_settings.rs | 0 .../rpc}/src/v1/types/secretstore.rs | 0 {rpc => crates/rpc}/src/v1/types/sync.rs | 0 {rpc => crates/rpc}/src/v1/types/trace.rs | 0 .../rpc}/src/v1/types/trace_filter.rs | 0 .../rpc}/src/v1/types/transaction.rs | 0 .../src/v1/types/transaction_condition.rs | 0 .../rpc}/src/v1/types/transaction_request.rs | 0 {rpc => crates/rpc}/src/v1/types/work.rs | 0 {util => crates/runtime}/io/Cargo.toml | 0 {util => crates/runtime}/io/src/lib.rs | 0 .../runtime}/io/src/service_mio.rs | 0 .../runtime}/io/src/service_non_mio.rs | 0 {util => crates/runtime}/io/src/worker.rs | 0 {util => crates/runtime}/runtime/Cargo.toml | 0 {util => crates/runtime}/runtime/src/lib.rs | 0 {util => crates/util}/EIP-152/Cargo.toml | 0 {util => crates/util}/EIP-152/src/lib.rs | 0 {util => crates/util}/EIP-712/Cargo.toml | 0 {util => crates/util}/EIP-712/README.md | 0 {util => crates/util}/EIP-712/src/eip712.rs | 0 {util => crates/util}/EIP-712/src/encode.rs | 0 {util => crates/util}/EIP-712/src/error.rs | 0 {util => crates/util}/EIP-712/src/lib.rs | 0 {util => crates/util}/EIP-712/src/parser.rs | 0 .../util/cli-signer}/Cargo.toml | 2 +- .../util/cli-signer}/rpc-client/Cargo.toml | 2 +- .../util/cli-signer}/rpc-client/src/client.rs | 0 .../util/cli-signer}/rpc-client/src/lib.rs | 0 .../rpc-client/src/signer_client.rs | 0 .../util/cli-signer}/src/lib.rs | 0 {util => crates/util}/dir/Cargo.toml | 2 +- {util => crates/util}/dir/src/helpers.rs | 0 {util => crates/util}/dir/src/lib.rs | 0 {util => crates/util}/fastmap/Cargo.toml | 0 {util => crates/util}/fastmap/src/lib.rs | 0 .../util}/keccak-hasher/Cargo.toml | 0 .../util}/keccak-hasher/src/lib.rs | 0 .../util}/len-caching-lock/Cargo.toml | 0 .../util}/len-caching-lock/src/lib.rs | 0 .../util}/len-caching-lock/src/mutex.rs | 0 .../util}/len-caching-lock/src/rwlock.rs | 0 {util => crates/util}/macros/Cargo.toml | 0 {util => crates/util}/macros/src/lib.rs | 0 {util => crates/util}/memory-cache/Cargo.toml | 0 {util => crates/util}/memory-cache/src/lib.rs | 0 {util => crates/util}/memzero/Cargo.toml | 0 {util => crates/util}/memzero/src/lib.rs | 0 {util => crates/util}/panic-hook/Cargo.toml | 0 {util => crates/util}/panic-hook/src/lib.rs | 0 {util => crates/util}/rlp-compress/Cargo.toml | 0 .../util}/rlp-compress/src/common.rs | 0 {util => crates/util}/rlp-compress/src/lib.rs | 0 .../util}/rlp-compress/tests/compress.rs | 0 {util => crates/util}/rlp-derive/Cargo.toml | 0 {util => crates/util}/rlp-derive/src/de.rs | 0 {util => crates/util}/rlp-derive/src/en.rs | 0 {util => crates/util}/rlp-derive/src/lib.rs | 0 {util => crates/util}/rlp-derive/tests/rlp.rs | 0 {util => crates/util}/stats/Cargo.toml | 0 {util => crates/util}/stats/src/lib.rs | 0 {util => crates/util}/time-utils/Cargo.toml | 0 {util => crates/util}/time-utils/src/lib.rs | 0 .../util}/triehash-ethereum/Cargo.toml | 0 .../util}/triehash-ethereum/src/lib.rs | 0 {util => crates/util}/unexpected/Cargo.toml | 0 {util => crates/util}/unexpected/src/lib.rs | 0 {util => crates/util}/version/Cargo.toml | 0 {util => crates/util}/version/build.rs | 0 {util => crates/util}/version/src/lib.rs | 0 {ethcore => crates/vm}/builtin/Cargo.toml | 2 +- {ethcore => crates/vm}/builtin/src/lib.rs | 0 .../vm}/call-contract/Cargo.toml | 2 +- .../vm}/call-contract/src/call_contract.rs | 0 .../vm}/call-contract/src/lib.rs | 0 {ethcore => crates/vm}/evm/Cargo.toml | 0 {ethcore => crates/vm}/evm/benches/basic.rs | 0 {ethcore => crates/vm}/evm/src/evm.rs | 0 {ethcore => 
crates/vm}/evm/src/factory.rs | 0 .../vm}/evm/src/instructions.rs | 0 .../vm}/evm/src/interpreter/gasometer.rs | 0 .../vm}/evm/src/interpreter/informant.rs | 0 .../vm}/evm/src/interpreter/memory.rs | 0 .../vm}/evm/src/interpreter/mod.rs | 0 .../vm}/evm/src/interpreter/shared_cache.rs | 0 .../vm}/evm/src/interpreter/stack.rs | 0 {ethcore => crates/vm}/evm/src/lib.rs | 0 {ethcore => crates/vm}/evm/src/tests.rs | 0 {ethcore => crates/vm}/evm/src/vmtype.rs | 0 {ethcore => crates/vm}/vm/Cargo.toml | 4 +- {ethcore => crates/vm}/vm/src/access_list.rs | 0 .../vm}/vm/src/action_params.rs | 0 {ethcore => crates/vm}/vm/src/call_type.rs | 0 {ethcore => crates/vm}/vm/src/env_info.rs | 0 {ethcore => crates/vm}/vm/src/error.rs | 0 {ethcore => crates/vm}/vm/src/ext.rs | 0 {ethcore => crates/vm}/vm/src/lib.rs | 0 {ethcore => crates/vm}/vm/src/return_data.rs | 0 {ethcore => crates/vm}/vm/src/schedule.rs | 0 {ethcore => crates/vm}/vm/src/tests.rs | 0 {ethcore => crates/vm}/wasm/Cargo.toml | 0 {ethcore => crates/vm}/wasm/src/env.rs | 0 {ethcore => crates/vm}/wasm/src/lib.rs | 3 - .../vm}/wasm/src/panic_payload.rs | 0 {ethcore => crates/vm}/wasm/src/parser.rs | 0 {ethcore => crates/vm}/wasm/src/runtime.rs | 0 ethcore/res/ethereum/runner/full.json | 60 - .../res/ethereum/tests-issues/currents.json | 4 - ethcore/res/wasm-tests | 1 - ethcore/wasm/run/Cargo.toml | 20 - ethcore/wasm/run/res/sample-fixture.json | 45 - ethcore/wasm/run/res/sample1.wasm | Bin 125 -> 0 bytes ethcore/wasm/run/res/sample2.wasm | Bin 15410 -> 0 bytes ethcore/wasm/run/res/sample3.wasm | Bin 9736 -> 0 bytes ethcore/wasm/run/src/fixture.rs | 88 - ethcore/wasm/run/src/main.rs | 71 - ethcore/wasm/run/src/runner.rs | 345 --- ethcore/wasm/src/tests.rs | 1181 --------- scripts/add_license.sh | 20 - scripts/doc.sh | 5 - scripts/evm_jsontests_bench.sh | 24 +- scripts/hook.sh | 12 - scripts/remove_duplicate_empty_lines.sh | 6 - secret-store/Cargo.toml | 49 - secret-store/res/acl_storage.json | 3 - secret-store/res/key_server_set.json | 24 - secret-store/res/service.json | 33 - secret-store/src/acl_storage.rs | 172 -- secret-store/src/helpers.rs | 30 - secret-store/src/key_server.rs | 809 ------ .../key_version_negotiation_session.rs | 1258 ---------- .../key_server_cluster/admin_sessions/mod.rs | 55 - .../servers_set_change_session.rs | 1832 -------------- .../admin_sessions/sessions_queue.rs | 58 - .../admin_sessions/share_add_session.rs | 1463 ----------- .../admin_sessions/share_change_session.rs | 431 ---- .../client_sessions/decryption_session.rs | 2211 ----------------- .../client_sessions/encryption_session.rs | 415 ---- .../client_sessions/generation_session.rs | 1571 ------------ .../key_server_cluster/client_sessions/mod.rs | 21 - .../client_sessions/signing_session_ecdsa.rs | 1626 ------------ .../signing_session_schnorr.rs | 1478 ----------- .../src/key_server_cluster/cluster.rs | 1805 -------------- .../key_server_cluster/cluster_connections.rs | 180 -- .../cluster_connections_net.rs | 588 ----- .../cluster_message_processor.rs | 429 ---- .../key_server_cluster/cluster_sessions.rs | 937 ------- .../cluster_sessions_creator.rs | 678 ----- .../key_server_cluster/connection_trigger.rs | 519 ---- .../connection_trigger_with_migration.rs | 1123 --------- .../src/key_server_cluster/io/deadline.rs | 91 - .../src/key_server_cluster/io/handshake.rs | 533 ---- .../src/key_server_cluster/io/message.rs | 673 ----- secret-store/src/key_server_cluster/io/mod.rs | 38 - .../src/key_server_cluster/io/read_header.rs | 55 - 
.../src/key_server_cluster/io/read_message.rs | 98 - .../src/key_server_cluster/io/read_payload.rs | 77 - .../io/shared_tcp_stream.rs | 71 - .../key_server_cluster/io/write_message.rs | 85 - .../jobs/consensus_session.rs | 1395 ----------- .../key_server_cluster/jobs/decryption_job.rs | 265 -- .../src/key_server_cluster/jobs/dummy_job.rs | 68 - .../key_server_cluster/jobs/job_session.rs | 1043 -------- .../key_server_cluster/jobs/key_access_job.rs | 130 - .../src/key_server_cluster/jobs/mod.rs | 25 - .../jobs/servers_set_change_access_job.rs | 206 -- .../jobs/signing_job_ecdsa.rs | 193 -- .../jobs/signing_job_schnorr.rs | 194 -- .../jobs/unknown_sessions_job.rs | 108 - secret-store/src/key_server_cluster/math.rs | 1574 ------------ .../src/key_server_cluster/message.rs | 1675 ------------- secret-store/src/key_server_cluster/mod.rs | 92 - .../net/accept_connection.rs | 69 - .../src/key_server_cluster/net/connect.rs | 95 - .../src/key_server_cluster/net/connection.rs | 31 - .../src/key_server_cluster/net/mod.rs | 25 - secret-store/src/key_server_set.rs | 982 -------- secret-store/src/key_storage.rs | 738 ------ secret-store/src/lib.rs | 250 -- secret-store/src/listener/http_listener.rs | 634 ----- secret-store/src/listener/mod.rs | 172 -- secret-store/src/listener/service_contract.rs | 1365 ---------- .../listener/service_contract_aggregate.rs | 161 -- .../src/listener/service_contract_listener.rs | 1971 --------------- secret-store/src/listener/tasks_queue.rs | 82 - secret-store/src/node_key_pair.rs | 124 - secret-store/src/serialization.rs | 195 -- secret-store/src/traits.rs | 150 -- secret-store/src/trusted_client.rs | 126 - secret-store/src/types/all.rs | 156 -- secret-store/src/types/error.rs | 213 -- secret-store/src/types/mod.rs | 22 - .../key_server_cluster/cluster_connections.rs | 176 -- .../cluster_connections_net.rs | 539 ---- .../cluster_message_processor.rs | 357 --- util/registrar/Cargo.toml | 13 - util/registrar/res/registrar.json | 21 - util/registrar/src/lib.rs | 27 - util/registrar/src/registrar.rs | 78 - 846 files changed, 255 insertions(+), 39400 deletions(-) rename {chainspec => bin/chainspec}/Cargo.toml (80%) rename {chainspec => bin/chainspec}/src/main.rs (100%) rename {accounts/ethkey/cli => bin/ethkey}/Cargo.toml (76%) rename {accounts/ethkey/cli => bin/ethkey}/src/main.rs (100%) rename {accounts/ethstore/cli => bin/ethstore}/Cargo.toml (72%) rename {accounts/ethstore/cli => bin/ethstore}/src/crack.rs (100%) rename {accounts/ethstore/cli => bin/ethstore}/src/main.rs (100%) rename {evmbin => bin/evmbin}/Cargo.toml (57%) rename {evmbin => bin/evmbin}/README.md (100%) rename {evmbin => bin/evmbin}/benches/mod.rs (100%) rename {evmbin => bin/evmbin}/res/testchain.json (100%) rename {evmbin => bin/evmbin}/src/display/config.rs (100%) rename {evmbin => bin/evmbin}/src/display/json.rs (100%) rename {evmbin => bin/evmbin}/src/display/mod.rs (100%) rename {evmbin => bin/evmbin}/src/display/simple.rs (100%) rename {evmbin => bin/evmbin}/src/display/std_json.rs (100%) rename {evmbin => bin/evmbin}/src/info.rs (100%) rename {evmbin => bin/evmbin}/src/main.rs (100%) rename {parity => bin/oe}/account.rs (100%) rename {parity => bin/oe}/account_utils.rs (100%) rename {parity => bin/oe}/blockchain.rs (100%) rename {parity => bin/oe}/cache.rs (100%) rename {parity => bin/oe}/cli/mod.rs (100%) rename {parity => bin/oe}/cli/presets/config.dev-insecure.toml (100%) rename {parity => bin/oe}/cli/presets/config.dev.toml (100%) rename {parity => bin/oe}/cli/presets/config.insecure.toml 
(100%) rename {parity => bin/oe}/cli/presets/config.mining.toml (100%) rename {parity => bin/oe}/cli/presets/config.non-standard-ports.toml (100%) rename {parity => bin/oe}/cli/presets/mod.rs (100%) rename {parity => bin/oe}/cli/tests/config.full.toml (100%) rename {parity => bin/oe}/cli/tests/config.invalid1.toml (100%) rename {parity => bin/oe}/cli/tests/config.invalid2.toml (100%) rename {parity => bin/oe}/cli/tests/config.invalid3.toml (100%) rename {parity => bin/oe}/cli/tests/config.invalid4.toml (100%) rename {parity => bin/oe}/cli/tests/config.toml (100%) rename {parity => bin/oe}/cli/usage.rs (100%) rename {parity => bin/oe}/cli/usage_header.txt (100%) rename {parity => bin/oe}/cli/version.txt (100%) rename {parity => bin/oe}/configuration.rs (100%) rename {parity => bin/oe}/db/mod.rs (100%) rename {parity => bin/oe}/db/rocksdb/blooms.rs (100%) rename {parity => bin/oe}/db/rocksdb/helpers.rs (100%) rename {parity => bin/oe}/db/rocksdb/migration.rs (100%) rename {parity => bin/oe}/db/rocksdb/mod.rs (100%) rename {parity => bin/oe}/helpers.rs (100%) rename {parity => bin/oe}/informant.rs (100%) rename {parity => bin/oe}/lib.rs (99%) rename {parity => bin/oe}/logger/Cargo.toml (100%) rename {parity => bin/oe}/logger/src/lib.rs (100%) rename {parity => bin/oe}/logger/src/rotating.rs (100%) rename {parity => bin/oe}/main.rs (100%) rename {parity => bin/oe}/metrics.rs (100%) rename {parity => bin/oe}/modules.rs (100%) rename {parity => bin/oe}/params.rs (100%) rename {parity => bin/oe}/presale.rs (100%) rename {parity => bin/oe}/rpc.rs (100%) rename {parity => bin/oe}/rpc_apis.rs (100%) rename {parity => bin/oe}/run.rs (100%) rename {parity => bin/oe}/secretstore.rs (100%) rename {parity => bin/oe}/signer.rs (100%) rename {parity => bin/oe}/snapshot.rs (100%) rename {parity => bin/oe}/stratum.rs (100%) rename {parity => bin/oe}/upgrade.rs (100%) rename {parity => bin/oe}/user_defaults.rs (100%) rename {accounts => crates/accounts}/Cargo.toml (100%) rename {accounts => crates/accounts}/ethkey/.gitignore (100%) rename {accounts => crates/accounts}/ethkey/.travis.yml (100%) rename {accounts => crates/accounts}/ethkey/Cargo.toml (90%) rename {accounts => crates/accounts}/ethkey/README.md (100%) rename {accounts => crates/accounts}/ethkey/src/brain.rs (100%) rename {accounts => crates/accounts}/ethkey/src/brain_prefix.rs (100%) rename {accounts => crates/accounts}/ethkey/src/brain_recover.rs (100%) rename {accounts => crates/accounts}/ethkey/src/crypto.rs (100%) rename {accounts => crates/accounts}/ethkey/src/error.rs (100%) rename {accounts => crates/accounts}/ethkey/src/extended.rs (100%) rename {accounts => crates/accounts}/ethkey/src/keccak.rs (100%) rename {accounts => crates/accounts}/ethkey/src/keypair.rs (100%) rename {accounts => crates/accounts}/ethkey/src/lib.rs (100%) rename {accounts => crates/accounts}/ethkey/src/math.rs (100%) rename {accounts => crates/accounts}/ethkey/src/password.rs (100%) rename {accounts => crates/accounts}/ethkey/src/prefix.rs (100%) rename {accounts => crates/accounts}/ethkey/src/random.rs (100%) rename {accounts => crates/accounts}/ethkey/src/secret.rs (100%) rename {accounts => crates/accounts}/ethkey/src/signature.rs (100%) rename {accounts => crates/accounts}/ethstore/.editorconfig (100%) rename {accounts => crates/accounts}/ethstore/.gitignore (100%) rename {accounts => crates/accounts}/ethstore/.travis.yml (100%) rename {accounts => crates/accounts}/ethstore/Cargo.toml (100%) rename {accounts => crates/accounts}/ethstore/README.md (100%) rename 
{accounts => crates/accounts}/ethstore/src/account/cipher.rs (100%) rename {accounts => crates/accounts}/ethstore/src/account/crypto.rs (100%) rename {accounts => crates/accounts}/ethstore/src/account/kdf.rs (100%) rename {accounts => crates/accounts}/ethstore/src/account/mod.rs (100%) rename {accounts => crates/accounts}/ethstore/src/account/safe_account.rs (100%) rename {accounts => crates/accounts}/ethstore/src/account/version.rs (100%) rename {accounts => crates/accounts}/ethstore/src/accounts_dir/disk.rs (100%) rename {accounts => crates/accounts}/ethstore/src/accounts_dir/memory.rs (100%) rename {accounts => crates/accounts}/ethstore/src/accounts_dir/mod.rs (100%) rename {accounts => crates/accounts}/ethstore/src/accounts_dir/vault.rs (100%) rename {accounts => crates/accounts}/ethstore/src/error.rs (100%) rename {accounts => crates/accounts}/ethstore/src/ethkey.rs (100%) rename {accounts => crates/accounts}/ethstore/src/ethstore.rs (100%) rename {accounts => crates/accounts}/ethstore/src/import.rs (100%) rename {accounts => crates/accounts}/ethstore/src/json/bytes.rs (100%) rename {accounts => crates/accounts}/ethstore/src/json/cipher.rs (100%) rename {accounts => crates/accounts}/ethstore/src/json/crypto.rs (100%) rename {accounts => crates/accounts}/ethstore/src/json/error.rs (100%) rename {accounts => crates/accounts}/ethstore/src/json/hash.rs (100%) rename {accounts => crates/accounts}/ethstore/src/json/id.rs (100%) rename {accounts => crates/accounts}/ethstore/src/json/kdf.rs (100%) rename {accounts => crates/accounts}/ethstore/src/json/key_file.rs (100%) rename {accounts => crates/accounts}/ethstore/src/json/mod.rs (100%) rename {accounts => crates/accounts}/ethstore/src/json/presale.rs (100%) rename {accounts => crates/accounts}/ethstore/src/json/vault_file.rs (100%) rename {accounts => crates/accounts}/ethstore/src/json/vault_key_file.rs (100%) rename {accounts => crates/accounts}/ethstore/src/json/version.rs (100%) rename {accounts => crates/accounts}/ethstore/src/lib.rs (100%) rename {accounts => crates/accounts}/ethstore/src/presale.rs (100%) rename {accounts => crates/accounts}/ethstore/src/random.rs (100%) rename {accounts => crates/accounts}/ethstore/src/secret_store.rs (100%) rename {accounts => crates/accounts}/ethstore/tests/api.rs (100%) rename {accounts => crates/accounts}/ethstore/tests/res/ciphertext/30.json (100%) rename {accounts => crates/accounts}/ethstore/tests/res/ciphertext/31.json (100%) rename {accounts => crates/accounts}/ethstore/tests/res/geth_keystore/UTC--2016-02-17T09-20-45.721400158Z--3f49624084b67849c7b4e805c5988c21a430f9d9 (100%) rename {accounts => crates/accounts}/ethstore/tests/res/geth_keystore/UTC--2016-02-20T09-33-03.984382741Z--5ba4dcf897e97c2bdf8315b9ef26c13c085988cf (100%) rename {accounts => crates/accounts}/ethstore/tests/res/geth_keystore/UTC--2016-04-03T08-58-49.834202900Z--63121b431a52f8043c16fcf0d1df9cb7b5f66649 (100%) rename {accounts => crates/accounts}/ethstore/tests/res/pat/p1.json (100%) rename {accounts => crates/accounts}/ethstore/tests/res/pat/p2.json (100%) rename {accounts => crates/accounts}/ethstore/tests/util/mod.rs (100%) rename {accounts => crates/accounts}/ethstore/tests/util/transient_dir.rs (100%) rename {accounts => crates/accounts}/src/account_data.rs (100%) rename {accounts => crates/accounts}/src/error.rs (100%) rename {accounts => crates/accounts}/src/lib.rs (100%) rename {accounts => crates/accounts}/src/stores.rs (100%) rename {ethash => crates/concensus/ethash}/Cargo.toml (100%) rename {ethash => 
crates/concensus/ethash}/benches/basic.rs (100%) rename {ethash => crates/concensus/ethash}/benches/progpow.rs (100%) rename {ethash => crates/concensus/ethash}/res/progpow_testvectors.json (100%) rename {ethash => crates/concensus/ethash}/src/cache.rs (100%) rename {ethash => crates/concensus/ethash}/src/compute.rs (100%) rename {ethash => crates/concensus/ethash}/src/keccak.rs (100%) rename {ethash => crates/concensus/ethash}/src/lib.rs (100%) rename {ethash => crates/concensus/ethash}/src/progpow.rs (100%) rename {ethash => crates/concensus/ethash}/src/seed_compute.rs (100%) rename {ethash => crates/concensus/ethash}/src/shared.rs (100%) rename {miner => crates/concensus/miner}/Cargo.toml (73%) rename {miner => crates/concensus/miner}/local-store/Cargo.toml (70%) rename {miner => crates/concensus/miner}/local-store/src/lib.rs (100%) rename {miner => crates/concensus/miner}/price-info/Cargo.toml (68%) rename {miner => crates/concensus/miner}/price-info/src/lib.rs (100%) rename {miner => crates/concensus/miner}/res/contracts/service_transaction.json (100%) rename {miner => crates/concensus/miner}/src/external.rs (100%) rename {miner => crates/concensus/miner}/src/gas_price_calibrator.rs (100%) rename {miner => crates/concensus/miner}/src/gas_pricer.rs (100%) rename {miner => crates/concensus/miner}/src/lib.rs (100%) rename {miner => crates/concensus/miner}/src/local_accounts.rs (100%) rename {miner => crates/concensus/miner}/src/pool/client.rs (100%) rename {miner => crates/concensus/miner}/src/pool/listener.rs (100%) rename {miner => crates/concensus/miner}/src/pool/local_transactions.rs (100%) rename {miner => crates/concensus/miner}/src/pool/mod.rs (100%) rename {miner => crates/concensus/miner}/src/pool/queue.rs (100%) rename {miner => crates/concensus/miner}/src/pool/ready.rs (100%) rename {miner => crates/concensus/miner}/src/pool/replace.rs (100%) rename {miner => crates/concensus/miner}/src/pool/res/big_transaction.data (100%) rename {miner => crates/concensus/miner}/src/pool/scoring.rs (100%) rename {miner => crates/concensus/miner}/src/pool/tests/client.rs (100%) rename {miner => crates/concensus/miner}/src/pool/tests/mod.rs (100%) rename {miner => crates/concensus/miner}/src/pool/tests/tx.rs (100%) rename {miner => crates/concensus/miner}/src/pool/verifier.rs (100%) rename {miner => crates/concensus/miner}/src/service_transaction_checker.rs (100%) rename {miner => crates/concensus/miner}/src/work_notify.rs (100%) rename {miner => crates/concensus/miner}/stratum/Cargo.toml (100%) rename {miner => crates/concensus/miner}/stratum/src/lib.rs (100%) rename {miner => crates/concensus/miner}/stratum/src/traits.rs (100%) rename {miner => crates/concensus/miner}/using-queue/Cargo.toml (100%) rename {miner => crates/concensus/miner}/using-queue/src/lib.rs (100%) rename {util => crates/db}/bloom/Cargo.toml (100%) rename {util => crates/db}/bloom/src/lib.rs (100%) rename {util => crates/db}/blooms-db/Cargo.toml (100%) rename {util => crates/db}/blooms-db/benches/blooms.rs (100%) rename {util => crates/db}/blooms-db/src/db.rs (100%) rename {util => crates/db}/blooms-db/src/file.rs (100%) rename {util => crates/db}/blooms-db/src/lib.rs (100%) rename {ethcore => crates/db}/db/Cargo.toml (90%) rename {ethcore => crates/db}/db/src/cache_manager.rs (100%) rename {ethcore => crates/db}/db/src/db.rs (100%) rename {ethcore => crates/db}/db/src/keys.rs (100%) rename {ethcore => crates/db}/db/src/lib.rs (100%) rename {util => crates/db}/journaldb/Cargo.toml (85%) rename {util => 
crates/db}/journaldb/src/archivedb.rs (100%) rename {util => crates/db}/journaldb/src/as_hash_db_impls.rs (100%) rename {util => crates/db}/journaldb/src/earlymergedb.rs (100%) rename {util => crates/db}/journaldb/src/lib.rs (100%) rename {util => crates/db}/journaldb/src/overlaydb.rs (100%) rename {util => crates/db}/journaldb/src/overlayrecentdb.rs (100%) rename {util => crates/db}/journaldb/src/refcounteddb.rs (100%) rename {util => crates/db}/journaldb/src/traits.rs (100%) rename {util => crates/db}/journaldb/src/util.rs (100%) rename {util => crates/db}/memory-db/.cargo_vcs_info.json (100%) rename {util => crates/db}/memory-db/Cargo.toml (100%) rename {util => crates/db}/memory-db/Cargo.toml.orig (100%) rename {util => crates/db}/memory-db/README.md (100%) rename {util => crates/db}/memory-db/benches/bench.rs (100%) rename {util => crates/db}/memory-db/src/lib.rs (100%) rename {util => crates/db}/migration-rocksdb/Cargo.toml (83%) rename {util => crates/db}/migration-rocksdb/src/lib.rs (100%) rename {util => crates/db}/migration-rocksdb/tests/tests.rs (100%) rename {util => crates/db}/patricia-trie-ethereum/Cargo.toml (86%) rename {util => crates/db}/patricia-trie-ethereum/src/lib.rs (100%) rename {util => crates/db}/patricia-trie-ethereum/src/rlp_node_codec.rs (100%) rename {ethcore => crates/ethcore}/Cargo.toml (78%) rename {ethcore => crates/ethcore}/benches/builtin.rs (100%) rename {ethcore => crates/ethcore}/blockchain/Cargo.toml (92%) rename {ethcore => crates/ethcore}/blockchain/src/best_block.rs (100%) rename {ethcore => crates/ethcore}/blockchain/src/block_info.rs (100%) rename {ethcore => crates/ethcore}/blockchain/src/blockchain.rs (100%) rename {ethcore => crates/ethcore}/blockchain/src/cache.rs (100%) rename {ethcore => crates/ethcore}/blockchain/src/config.rs (100%) rename {ethcore => crates/ethcore}/blockchain/src/generator.rs (100%) rename {ethcore => crates/ethcore}/blockchain/src/import_route.rs (100%) rename {ethcore => crates/ethcore}/blockchain/src/lib.rs (100%) rename {ethcore => crates/ethcore}/blockchain/src/update.rs (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec}/callisto.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec}/ellaism.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec}/ewc.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec}/foundation.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec}/goerli.json (100%) rename {ethcore/res => crates/ethcore/res/chainspec}/instant_seal.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec}/kovan.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec}/mix.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec}/morden.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec}/musicoin.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec}/poacore.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec}/poasokol.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec}/rinkeby.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec}/ropsten.json (100%) rename {ethcore/res => crates/ethcore/res/chainspec/test}/authority_round.json (100%) rename {ethcore/res => crates/ethcore/res/chainspec/test}/authority_round_block_reward_contract.json (100%) rename {ethcore/res => crates/ethcore/res/chainspec/test}/authority_round_empty_steps.json (100%) rename {ethcore/res => 
crates/ethcore/res/chainspec/test}/basic_authority.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec/test}/berlin_test.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec/test}/builtin_multi_bench.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec/test}/builtin_one_activation_bench.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec/test}/byzantium_test.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec/test}/byzantium_to_constantinoplefixat5_test.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec/test}/constantinople_test.json (100%) rename {ethcore/res => crates/ethcore/res/chainspec/test}/constructor.json (100%) rename {ethcore/res/tx_permission_tests => crates/ethcore/res/chainspec/test}/contract_ver_2_genesis.json (100%) rename {ethcore/res/tx_permission_tests => crates/ethcore/res/chainspec/test}/deprecated_contract_genesis.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec/test}/eip150_test.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec/test}/eip161_test.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec/test}/eip210_test.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec/test}/frontier_like_test.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec/test}/frontier_test.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec/test}/homestead_test.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec/test}/istanbul_test.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec/test}/kovan_wasm_test.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec/test}/mcip3_test.json (100%) rename {ethcore/res => crates/ethcore/res/chainspec/test}/null.json (100%) rename {ethcore/res => crates/ethcore/res/chainspec/test}/null_morden.json (100%) rename {ethcore/res => crates/ethcore/res/chainspec/test}/null_morden_with_finality.json (100%) rename {ethcore/res => crates/ethcore/res/chainspec/test}/null_morden_with_reward.json (100%) rename {ethcore/res => crates/ethcore/res/chainspec/test}/spec_backward_compability.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec/test}/st_peters_test.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec/test}/transition_test.json (100%) rename {ethcore/res => crates/ethcore/res/chainspec/test}/validator_contract.json (100%) rename {ethcore/res => crates/ethcore/res/chainspec/test}/validator_multi.json (100%) rename {ethcore/res => crates/ethcore/res/chainspec/test}/validator_safe_contract.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec/test}/yolo3_test.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec}/volta.json (100%) rename {ethcore/res/ethereum => crates/ethcore/res/chainspec}/xdai.json (100%) rename {ethcore => crates/ethcore}/res/contracts/block_reward.json (100%) rename {ethcore => crates/ethcore}/res/contracts/registrar.json (99%) rename {ethcore => crates/ethcore}/res/contracts/test_validator_set.json (100%) rename {ethcore => crates/ethcore}/res/contracts/tx_acl.json (100%) rename {ethcore => crates/ethcore}/res/contracts/tx_acl_deprecated.json (100%) rename {ethcore => crates/ethcore}/res/contracts/validator_report.json (100%) rename {ethcore => crates/ethcore}/res/contracts/validator_set.json (100%) rename ethcore/res/ethereum/tests => 
crates/ethcore/res/json_tests (100%) create mode 100644 crates/ethcore/res/json_tests.json rename {ethcore => crates/ethcore}/service/Cargo.toml (88%) rename {ethcore => crates/ethcore}/service/src/error.rs (100%) rename {ethcore => crates/ethcore}/service/src/lib.rs (100%) rename {ethcore => crates/ethcore}/service/src/service.rs (100%) rename {ethcore => crates/ethcore}/service/src/stop_guard.rs (100%) rename {ethcore => crates/ethcore}/src/account_db.rs (100%) rename {ethcore => crates/ethcore}/src/block.rs (100%) rename {ethcore => crates/ethcore}/src/client/ancient_import.rs (100%) rename {ethcore => crates/ethcore}/src/client/bad_blocks.rs (100%) rename {ethcore => crates/ethcore}/src/client/chain_notify.rs (100%) rename {ethcore => crates/ethcore}/src/client/client.rs (100%) rename {ethcore => crates/ethcore}/src/client/config.rs (100%) rename {ethcore => crates/ethcore}/src/client/evm_test_client.rs (100%) rename {ethcore => crates/ethcore}/src/client/io_message.rs (100%) rename {ethcore => crates/ethcore}/src/client/mod.rs (100%) rename {ethcore => crates/ethcore}/src/client/test_client.rs (100%) rename {ethcore => crates/ethcore}/src/client/trace.rs (100%) rename {ethcore => crates/ethcore}/src/client/traits.rs (100%) rename {ethcore => crates/ethcore}/src/engines/authority_round/finality.rs (100%) rename {ethcore => crates/ethcore}/src/engines/authority_round/mod.rs (100%) rename {ethcore => crates/ethcore}/src/engines/basic_authority.rs (99%) rename {ethcore => crates/ethcore}/src/engines/block_reward.rs (100%) rename {ethcore => crates/ethcore}/src/engines/clique/block_state.rs (100%) rename {ethcore => crates/ethcore}/src/engines/clique/mod.rs (100%) rename {ethcore => crates/ethcore}/src/engines/clique/params.rs (100%) rename {ethcore => crates/ethcore}/src/engines/clique/tests.rs (100%) rename {ethcore => crates/ethcore}/src/engines/clique/util.rs (100%) rename {ethcore => crates/ethcore}/src/engines/instant_seal.rs (100%) rename {ethcore => crates/ethcore}/src/engines/mod.rs (100%) rename {ethcore => crates/ethcore}/src/engines/null_engine.rs (100%) rename {ethcore => crates/ethcore}/src/engines/signer.rs (100%) rename {ethcore => crates/ethcore}/src/engines/validator_set/contract.rs (100%) rename {ethcore => crates/ethcore}/src/engines/validator_set/mod.rs (100%) rename {ethcore => crates/ethcore}/src/engines/validator_set/multi.rs (100%) rename {ethcore => crates/ethcore}/src/engines/validator_set/safe_contract.rs (100%) rename {ethcore => crates/ethcore}/src/engines/validator_set/simple_list.rs (100%) rename {ethcore => crates/ethcore}/src/engines/validator_set/test.rs (100%) rename {ethcore => crates/ethcore}/src/error.rs (100%) rename {ethcore => crates/ethcore}/src/ethereum/denominations.rs (100%) rename {ethcore => crates/ethcore}/src/ethereum/ethash.rs (100%) rename {ethcore => crates/ethcore}/src/ethereum/mod.rs (78%) rename {ethcore => crates/ethcore}/src/executed.rs (100%) rename {ethcore => crates/ethcore}/src/executive.rs (100%) rename {ethcore => crates/ethcore}/src/externalities.rs (100%) rename {ethcore => crates/ethcore}/src/factory.rs (100%) rename {ethcore => crates/ethcore}/src/json_tests/chain.rs (100%) rename {ethcore => crates/ethcore}/src/json_tests/difficulty.rs (100%) rename {ethcore => crates/ethcore}/src/json_tests/executive.rs (100%) rename {ethcore => crates/ethcore}/src/json_tests/mod.rs (100%) rename {ethcore => crates/ethcore}/src/json_tests/runner.rs (99%) rename {ethcore => crates/ethcore}/src/json_tests/skip.rs (100%) rename {ethcore => 
crates/ethcore}/src/json_tests/state.rs (100%) rename {ethcore => crates/ethcore}/src/json_tests/test_common.rs (100%) rename {ethcore => crates/ethcore}/src/json_tests/transaction.rs (100%) rename {ethcore => crates/ethcore}/src/json_tests/trie.rs (100%) rename {ethcore => crates/ethcore}/src/lib.rs (100%) rename {ethcore => crates/ethcore}/src/machine/impls.rs (100%) rename {ethcore => crates/ethcore}/src/machine/mod.rs (100%) rename {ethcore => crates/ethcore}/src/machine/traits.rs (100%) rename {ethcore => crates/ethcore}/src/miner/miner.rs (100%) rename {ethcore => crates/ethcore}/src/miner/mod.rs (100%) rename {ethcore => crates/ethcore}/src/miner/pool_client.rs (100%) rename {ethcore => crates/ethcore}/src/miner/stratum.rs (100%) rename {ethcore => crates/ethcore}/src/pod_account.rs (100%) rename {ethcore => crates/ethcore}/src/pod_state.rs (100%) rename {ethcore => crates/ethcore}/src/snapshot/account.rs (100%) rename {ethcore => crates/ethcore}/src/snapshot/block.rs (100%) rename {ethcore => crates/ethcore}/src/snapshot/consensus/authority.rs (100%) rename {ethcore => crates/ethcore}/src/snapshot/consensus/mod.rs (100%) rename {ethcore => crates/ethcore}/src/snapshot/consensus/work.rs (100%) rename {ethcore => crates/ethcore}/src/snapshot/error.rs (100%) rename {ethcore => crates/ethcore}/src/snapshot/io.rs (100%) rename {ethcore => crates/ethcore}/src/snapshot/mod.rs (100%) rename {ethcore => crates/ethcore}/src/snapshot/service.rs (100%) rename {ethcore => crates/ethcore}/src/snapshot/tests/helpers.rs (100%) rename {ethcore => crates/ethcore}/src/snapshot/tests/mod.rs (100%) rename {ethcore => crates/ethcore}/src/snapshot/tests/proof_of_authority.rs (100%) rename {ethcore => crates/ethcore}/src/snapshot/tests/proof_of_work.rs (100%) rename {ethcore => crates/ethcore}/src/snapshot/tests/service.rs (100%) rename {ethcore => crates/ethcore}/src/snapshot/tests/state.rs (100%) rename {ethcore => crates/ethcore}/src/snapshot/tests/test_validator_contract.json (100%) rename {ethcore => crates/ethcore}/src/snapshot/traits.rs (100%) rename {ethcore => crates/ethcore}/src/snapshot/watcher.rs (100%) rename {ethcore => crates/ethcore}/src/spec/genesis.rs (100%) rename {ethcore => crates/ethcore}/src/spec/mod.rs (100%) rename {ethcore => crates/ethcore}/src/spec/seal.rs (100%) rename {ethcore => crates/ethcore}/src/spec/spec.rs (98%) rename {ethcore => crates/ethcore}/src/state/account.rs (100%) rename {ethcore => crates/ethcore}/src/state/backend.rs (100%) rename {ethcore => crates/ethcore}/src/state/mod.rs (100%) rename {ethcore => crates/ethcore}/src/state/substate.rs (100%) rename {ethcore => crates/ethcore}/src/state_db.rs (100%) rename {ethcore => crates/ethcore}/src/test_helpers.rs (100%) rename {ethcore => crates/ethcore}/src/tests/blockchain.rs (100%) rename {ethcore => crates/ethcore}/src/tests/client.rs (100%) rename {ethcore => crates/ethcore}/src/tests/evm.rs (100%) rename {ethcore => crates/ethcore}/src/tests/mod.rs (100%) rename {ethcore => crates/ethcore}/src/tests/trace.rs (100%) rename {ethcore => crates/ethcore}/src/trace/config.rs (100%) rename {ethcore => crates/ethcore}/src/trace/db.rs (100%) rename {ethcore => crates/ethcore}/src/trace/executive_tracer.rs (100%) rename {ethcore => crates/ethcore}/src/trace/import.rs (100%) rename {ethcore => crates/ethcore}/src/trace/mod.rs (100%) rename {ethcore => crates/ethcore}/src/trace/noop_tracer.rs (100%) rename {ethcore => crates/ethcore}/src/trace/types/error.rs (100%) rename {ethcore => 
crates/ethcore}/src/trace/types/filter.rs (100%) rename {ethcore => crates/ethcore}/src/trace/types/flat.rs (100%) rename {ethcore => crates/ethcore}/src/trace/types/localized.rs (100%) rename {ethcore => crates/ethcore}/src/trace/types/mod.rs (100%) rename {ethcore => crates/ethcore}/src/trace/types/trace.rs (100%) rename {ethcore => crates/ethcore}/src/transaction_ext.rs (100%) rename {ethcore => crates/ethcore}/src/tx_filter.rs (98%) rename {ethcore => crates/ethcore}/src/verification/canon_verifier.rs (100%) rename {ethcore => crates/ethcore}/src/verification/mod.rs (100%) rename {ethcore => crates/ethcore}/src/verification/noop_verifier.rs (100%) rename {ethcore => crates/ethcore}/src/verification/queue/kind.rs (100%) rename {ethcore => crates/ethcore}/src/verification/queue/mod.rs (100%) rename {ethcore => crates/ethcore}/src/verification/verification.rs (100%) rename {ethcore => crates/ethcore}/src/verification/verifier.rs (100%) rename {ethcore => crates/ethcore}/sync/Cargo.toml (88%) rename {ethcore => crates/ethcore}/sync/src/api.rs (100%) rename {ethcore => crates/ethcore}/sync/src/block_sync.rs (100%) rename {ethcore => crates/ethcore}/sync/src/blocks.rs (100%) rename {ethcore => crates/ethcore}/sync/src/chain/fork_filter.rs (100%) rename {ethcore => crates/ethcore}/sync/src/chain/handler.rs (100%) rename {ethcore => crates/ethcore}/sync/src/chain/mod.rs (100%) rename {ethcore => crates/ethcore}/sync/src/chain/propagator.rs (100%) rename {ethcore => crates/ethcore}/sync/src/chain/requester.rs (100%) rename {ethcore => crates/ethcore}/sync/src/chain/supplier.rs (100%) rename {ethcore => crates/ethcore}/sync/src/chain/sync_packet.rs (100%) rename {ethcore => crates/ethcore}/sync/src/lib.rs (100%) rename {ethcore => crates/ethcore}/sync/src/res/private_spec.json (100%) rename {ethcore => crates/ethcore}/sync/src/snapshot.rs (100%) rename {ethcore => crates/ethcore}/sync/src/sync_io.rs (100%) rename {ethcore => crates/ethcore}/sync/src/tests/chain.rs (100%) rename {ethcore => crates/ethcore}/sync/src/tests/consensus.rs (100%) rename {ethcore => crates/ethcore}/sync/src/tests/helpers.rs (100%) rename {ethcore => crates/ethcore}/sync/src/tests/mod.rs (100%) rename {ethcore => crates/ethcore}/sync/src/tests/rpc.rs (100%) rename {ethcore => crates/ethcore}/sync/src/tests/snapshot.rs (100%) rename {ethcore => crates/ethcore}/sync/src/transactions_stats.rs (100%) rename {ethcore => crates/ethcore}/types/Cargo.toml (93%) rename {ethcore => crates/ethcore}/types/src/account_diff.rs (100%) rename {ethcore => crates/ethcore}/types/src/ancestry_action.rs (100%) rename {ethcore => crates/ethcore}/types/src/basic_account.rs (100%) rename {ethcore => crates/ethcore}/types/src/block.rs (100%) rename {ethcore => crates/ethcore}/types/src/block_status.rs (100%) rename {ethcore => crates/ethcore}/types/src/blockchain_info.rs (100%) rename {ethcore => crates/ethcore}/types/src/call_analytics.rs (100%) rename {ethcore => crates/ethcore}/types/src/creation_status.rs (100%) rename {ethcore => crates/ethcore}/types/src/data_format.rs (100%) rename {ethcore => crates/ethcore}/types/src/encoded.rs (100%) rename {ethcore => crates/ethcore}/types/src/engines/epoch.rs (100%) rename {ethcore => crates/ethcore}/types/src/engines/mod.rs (100%) rename {ethcore => crates/ethcore}/types/src/filter.rs (100%) rename {ethcore => crates/ethcore}/types/src/header.rs (100%) rename {ethcore => crates/ethcore}/types/src/ids.rs (100%) rename {ethcore => crates/ethcore}/types/src/lib.rs (100%) rename {ethcore => 
crates/ethcore}/types/src/log_entry.rs (100%) rename {ethcore => crates/ethcore}/types/src/pruning_info.rs (100%) rename {ethcore => crates/ethcore}/types/src/receipt.rs (100%) rename {ethcore => crates/ethcore}/types/src/restoration_status.rs (100%) rename {ethcore => crates/ethcore}/types/src/security_level.rs (100%) rename {ethcore => crates/ethcore}/types/src/snapshot_manifest.rs (100%) rename {ethcore => crates/ethcore}/types/src/state_diff.rs (100%) rename {ethcore => crates/ethcore}/types/src/trace_filter.rs (100%) rename {ethcore => crates/ethcore}/types/src/transaction/error.rs (100%) rename {ethcore => crates/ethcore}/types/src/transaction/mod.rs (100%) rename {ethcore => crates/ethcore}/types/src/transaction/transaction.rs (100%) rename {ethcore => crates/ethcore}/types/src/transaction/transaction_id.rs (100%) rename {ethcore => crates/ethcore}/types/src/tree_route.rs (100%) rename {ethcore => crates/ethcore}/types/src/verification_queue_info.rs (100%) rename {ethcore => crates/ethcore}/types/src/views/block.rs (100%) rename {ethcore => crates/ethcore}/types/src/views/body.rs (100%) rename {ethcore => crates/ethcore}/types/src/views/header.rs (100%) rename {ethcore => crates/ethcore}/types/src/views/mod.rs (100%) rename {ethcore => crates/ethcore}/types/src/views/transaction.rs (100%) rename {ethcore => crates/ethcore}/types/src/views/view_rlp.rs (100%) rename {json => crates/ethjson}/Cargo.toml (100%) rename {json => crates/ethjson}/src/blockchain/account.rs (100%) rename {json => crates/ethjson}/src/blockchain/block.rs (100%) rename {json => crates/ethjson}/src/blockchain/blockchain.rs (100%) rename {json => crates/ethjson}/src/blockchain/header.rs (100%) rename {json => crates/ethjson}/src/blockchain/mod.rs (100%) rename {json => crates/ethjson}/src/blockchain/state.rs (99%) rename {json => crates/ethjson}/src/blockchain/test.rs (100%) rename {json => crates/ethjson}/src/blockchain/transaction.rs (100%) rename {json => crates/ethjson}/src/bytes.rs (100%) rename {json => crates/ethjson}/src/hash.rs (100%) rename {json => crates/ethjson}/src/lib.rs (100%) rename {json => crates/ethjson}/src/maybe.rs (100%) rename {json => crates/ethjson}/src/spec/account.rs (100%) rename {json => crates/ethjson}/src/spec/authority_round.rs (100%) rename {json => crates/ethjson}/src/spec/basic_authority.rs (100%) rename {json => crates/ethjson}/src/spec/builtin.rs (100%) rename {json => crates/ethjson}/src/spec/clique.rs (100%) rename {json => crates/ethjson}/src/spec/engine.rs (100%) rename {json => crates/ethjson}/src/spec/ethash.rs (100%) rename {json => crates/ethjson}/src/spec/genesis.rs (100%) rename {json => crates/ethjson}/src/spec/instant_seal.rs (100%) rename {json => crates/ethjson}/src/spec/mod.rs (100%) rename {json => crates/ethjson}/src/spec/null_engine.rs (100%) rename {json => crates/ethjson}/src/spec/params.rs (100%) rename {json => crates/ethjson}/src/spec/seal.rs (100%) rename {json => crates/ethjson}/src/spec/spec.rs (100%) rename {json => crates/ethjson}/src/spec/state.rs (100%) rename {json => crates/ethjson}/src/spec/validator_set.rs (100%) rename {json => crates/ethjson}/src/state/log.rs (100%) rename {json => crates/ethjson}/src/state/mod.rs (100%) rename {json => crates/ethjson}/src/state/state.rs (100%) rename {json => crates/ethjson}/src/state/test.rs (100%) rename {json => crates/ethjson}/src/state/transaction.rs (100%) rename {json => crates/ethjson}/src/test/mod.rs (100%) rename {json => crates/ethjson}/src/transaction/mod.rs (100%) rename {json => 
crates/ethjson}/src/transaction/test.rs (100%) rename {json => crates/ethjson}/src/transaction/transaction.rs (100%) rename {json => crates/ethjson}/src/transaction/txtest.rs (100%) rename {json => crates/ethjson}/src/trie/input.rs (100%) rename {json => crates/ethjson}/src/trie/mod.rs (100%) rename {json => crates/ethjson}/src/trie/test.rs (100%) rename {json => crates/ethjson}/src/trie/trie.rs (100%) rename {json => crates/ethjson}/src/uint.rs (100%) rename {json => crates/ethjson}/src/vm/call.rs (100%) rename {json => crates/ethjson}/src/vm/env.rs (100%) rename {json => crates/ethjson}/src/vm/mod.rs (100%) rename {json => crates/ethjson}/src/vm/test.rs (100%) rename {json => crates/ethjson}/src/vm/transaction.rs (100%) rename {json => crates/ethjson}/src/vm/vm.rs (100%) rename {util => crates/net}/fake-fetch/Cargo.toml (100%) rename {util => crates/net}/fake-fetch/src/lib.rs (100%) rename {util => crates/net}/fetch/Cargo.toml (100%) rename {util => crates/net}/fetch/src/client.rs (100%) rename {util => crates/net}/fetch/src/lib.rs (100%) rename {util => crates/net}/network-devp2p/Cargo.toml (89%) rename {util => crates/net}/network-devp2p/src/connection.rs (100%) rename {util => crates/net}/network-devp2p/src/discovery.rs (100%) rename {util => crates/net}/network-devp2p/src/handshake.rs (100%) rename {util => crates/net}/network-devp2p/src/host.rs (100%) rename {util => crates/net}/network-devp2p/src/ip_utils.rs (100%) rename {util => crates/net}/network-devp2p/src/lib.rs (100%) rename {util => crates/net}/network-devp2p/src/node_table.rs (100%) rename {util => crates/net}/network-devp2p/src/service.rs (100%) rename {util => crates/net}/network-devp2p/src/session.rs (100%) rename {util => crates/net}/network-devp2p/tests/tests.rs (100%) rename {util => crates/net}/network/Cargo.toml (86%) rename {util => crates/net}/network/src/client_version.rs (100%) rename {util => crates/net}/network/src/connection_filter.rs (100%) rename {util => crates/net}/network/src/error.rs (100%) rename {util => crates/net}/network/src/lib.rs (100%) rename {ethcore => crates/net}/node-filter/Cargo.toml (64%) rename {ethcore => crates/net}/node-filter/res/node_filter.json (100%) rename {ethcore => crates/net}/node-filter/res/peer_set.json (100%) rename {ethcore => crates/net}/node-filter/src/lib.rs (100%) rename {rpc => crates/rpc}/Cargo.toml (75%) rename {rpc => crates/rpc}/src/authcodes.rs (100%) rename {rpc => crates/rpc}/src/http_common.rs (100%) rename {rpc => crates/rpc}/src/lib.rs (100%) rename {rpc => crates/rpc}/src/tests/helpers.rs (100%) rename {rpc => crates/rpc}/src/tests/http_client.rs (100%) rename {rpc => crates/rpc}/src/tests/mod.rs (100%) rename {rpc => crates/rpc}/src/tests/rpc.rs (100%) rename {rpc => crates/rpc}/src/tests/ws.rs (100%) rename {rpc => crates/rpc}/src/v1/extractors.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/block_import.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/deprecated.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/dispatch/full.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/dispatch/mod.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/dispatch/prospective_signer.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/dispatch/signing.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/eip191.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/engine_signer.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/errors.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/external_signer/mod.rs (100%) rename {rpc => 
crates/rpc}/src/v1/helpers/external_signer/oneshot.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/external_signer/signing_queue.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/fake_sign.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/mod.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/network_settings.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/nonce.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/poll_filter.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/poll_manager.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/requests.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/secretstore.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/signature.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/subscribers.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/subscription_manager.rs (100%) rename {rpc => crates/rpc}/src/v1/helpers/work.rs (100%) rename {rpc => crates/rpc}/src/v1/impls/debug.rs (100%) rename {rpc => crates/rpc}/src/v1/impls/eth.rs (100%) rename {rpc => crates/rpc}/src/v1/impls/eth_filter.rs (100%) rename {rpc => crates/rpc}/src/v1/impls/eth_pubsub.rs (100%) rename {rpc => crates/rpc}/src/v1/impls/mod.rs (100%) rename {rpc => crates/rpc}/src/v1/impls/net.rs (100%) rename {rpc => crates/rpc}/src/v1/impls/parity.rs (100%) rename {rpc => crates/rpc}/src/v1/impls/parity_accounts.rs (100%) rename {rpc => crates/rpc}/src/v1/impls/parity_set.rs (100%) rename {rpc => crates/rpc}/src/v1/impls/personal.rs (100%) rename {rpc => crates/rpc}/src/v1/impls/pubsub.rs (100%) rename {rpc => crates/rpc}/src/v1/impls/secretstore.rs (100%) rename {rpc => crates/rpc}/src/v1/impls/signer.rs (100%) rename {rpc => crates/rpc}/src/v1/impls/signing.rs (100%) rename {rpc => crates/rpc}/src/v1/impls/signing_unsafe.rs (100%) rename {rpc => crates/rpc}/src/v1/impls/traces.rs (100%) rename {rpc => crates/rpc}/src/v1/impls/web3.rs (100%) rename {rpc => crates/rpc}/src/v1/informant.rs (100%) rename {rpc => crates/rpc}/src/v1/metadata.rs (100%) rename {rpc => crates/rpc}/src/v1/mod.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/eth.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/helpers/miner_service.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/helpers/mod.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/helpers/snapshot_service.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/helpers/sync_provider.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/mocked/debug.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/mocked/eth.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/mocked/eth_pubsub.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/mocked/manage_network.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/mocked/mod.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/mocked/net.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/mocked/parity.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/mocked/parity_accounts.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/mocked/parity_set.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/mocked/personal.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/mocked/pubsub.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/mocked/secretstore.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/mocked/signer.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/mocked/signing.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/mocked/signing_unsafe.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/mocked/traces.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/mocked/web3.rs (100%) rename {rpc => crates/rpc}/src/v1/tests/mod.rs (93%) 
rename {rpc => crates/rpc}/src/v1/traits/debug.rs (100%) rename {rpc => crates/rpc}/src/v1/traits/eth.rs (100%) rename {rpc => crates/rpc}/src/v1/traits/eth_pubsub.rs (100%) rename {rpc => crates/rpc}/src/v1/traits/eth_signing.rs (100%) rename {rpc => crates/rpc}/src/v1/traits/mod.rs (100%) rename {rpc => crates/rpc}/src/v1/traits/net.rs (100%) rename {rpc => crates/rpc}/src/v1/traits/parity.rs (100%) rename {rpc => crates/rpc}/src/v1/traits/parity_accounts.rs (100%) rename {rpc => crates/rpc}/src/v1/traits/parity_set.rs (100%) rename {rpc => crates/rpc}/src/v1/traits/parity_signing.rs (100%) rename {rpc => crates/rpc}/src/v1/traits/personal.rs (100%) rename {rpc => crates/rpc}/src/v1/traits/pubsub.rs (100%) rename {rpc => crates/rpc}/src/v1/traits/secretstore.rs (100%) rename {rpc => crates/rpc}/src/v1/traits/signer.rs (100%) rename {rpc => crates/rpc}/src/v1/traits/traces.rs (100%) rename {rpc => crates/rpc}/src/v1/traits/web3.rs (100%) rename {rpc => crates/rpc}/src/v1/types/account_info.rs (100%) rename {rpc => crates/rpc}/src/v1/types/block.rs (100%) rename {rpc => crates/rpc}/src/v1/types/block_number.rs (100%) rename {rpc => crates/rpc}/src/v1/types/bytes.rs (100%) rename {rpc => crates/rpc}/src/v1/types/call_request.rs (100%) rename {rpc => crates/rpc}/src/v1/types/confirmations.rs (100%) rename {rpc => crates/rpc}/src/v1/types/derivation.rs (100%) rename {rpc => crates/rpc}/src/v1/types/eip191.rs (100%) rename {rpc => crates/rpc}/src/v1/types/eth_types.rs (100%) rename {rpc => crates/rpc}/src/v1/types/filter.rs (100%) rename {rpc => crates/rpc}/src/v1/types/histogram.rs (100%) rename {rpc => crates/rpc}/src/v1/types/index.rs (100%) rename {rpc => crates/rpc}/src/v1/types/log.rs (100%) rename {rpc => crates/rpc}/src/v1/types/mod.rs (100%) rename {rpc => crates/rpc}/src/v1/types/node_kind.rs (100%) rename {rpc => crates/rpc}/src/v1/types/provenance.rs (100%) rename {rpc => crates/rpc}/src/v1/types/pubsub.rs (100%) rename {rpc => crates/rpc}/src/v1/types/receipt.rs (100%) rename {rpc => crates/rpc}/src/v1/types/rpc_settings.rs (100%) rename {rpc => crates/rpc}/src/v1/types/secretstore.rs (100%) rename {rpc => crates/rpc}/src/v1/types/sync.rs (100%) rename {rpc => crates/rpc}/src/v1/types/trace.rs (100%) rename {rpc => crates/rpc}/src/v1/types/trace_filter.rs (100%) rename {rpc => crates/rpc}/src/v1/types/transaction.rs (100%) rename {rpc => crates/rpc}/src/v1/types/transaction_condition.rs (100%) rename {rpc => crates/rpc}/src/v1/types/transaction_request.rs (100%) rename {rpc => crates/rpc}/src/v1/types/work.rs (100%) rename {util => crates/runtime}/io/Cargo.toml (100%) rename {util => crates/runtime}/io/src/lib.rs (100%) rename {util => crates/runtime}/io/src/service_mio.rs (100%) rename {util => crates/runtime}/io/src/service_non_mio.rs (100%) rename {util => crates/runtime}/io/src/worker.rs (100%) rename {util => crates/runtime}/runtime/Cargo.toml (100%) rename {util => crates/runtime}/runtime/src/lib.rs (100%) rename {util => crates/util}/EIP-152/Cargo.toml (100%) rename {util => crates/util}/EIP-152/src/lib.rs (100%) rename {util => crates/util}/EIP-712/Cargo.toml (100%) rename {util => crates/util}/EIP-712/README.md (100%) rename {util => crates/util}/EIP-712/src/eip712.rs (100%) rename {util => crates/util}/EIP-712/src/encode.rs (100%) rename {util => crates/util}/EIP-712/src/error.rs (100%) rename {util => crates/util}/EIP-712/src/lib.rs (100%) rename {util => crates/util}/EIP-712/src/parser.rs (100%) rename {cli-signer => crates/util/cli-signer}/Cargo.toml (90%) rename 
{cli-signer => crates/util/cli-signer}/rpc-client/Cargo.toml (91%) rename {cli-signer => crates/util/cli-signer}/rpc-client/src/client.rs (100%) rename {cli-signer => crates/util/cli-signer}/rpc-client/src/lib.rs (100%) rename {cli-signer => crates/util/cli-signer}/rpc-client/src/signer_client.rs (100%) rename {cli-signer => crates/util/cli-signer}/src/lib.rs (100%) rename {util => crates/util}/dir/Cargo.toml (83%) rename {util => crates/util}/dir/src/helpers.rs (100%) rename {util => crates/util}/dir/src/lib.rs (100%) rename {util => crates/util}/fastmap/Cargo.toml (100%) rename {util => crates/util}/fastmap/src/lib.rs (100%) rename {util => crates/util}/keccak-hasher/Cargo.toml (100%) rename {util => crates/util}/keccak-hasher/src/lib.rs (100%) rename {util => crates/util}/len-caching-lock/Cargo.toml (100%) rename {util => crates/util}/len-caching-lock/src/lib.rs (100%) rename {util => crates/util}/len-caching-lock/src/mutex.rs (100%) rename {util => crates/util}/len-caching-lock/src/rwlock.rs (100%) rename {util => crates/util}/macros/Cargo.toml (100%) rename {util => crates/util}/macros/src/lib.rs (100%) rename {util => crates/util}/memory-cache/Cargo.toml (100%) rename {util => crates/util}/memory-cache/src/lib.rs (100%) rename {util => crates/util}/memzero/Cargo.toml (100%) rename {util => crates/util}/memzero/src/lib.rs (100%) rename {util => crates/util}/panic-hook/Cargo.toml (100%) rename {util => crates/util}/panic-hook/src/lib.rs (100%) rename {util => crates/util}/rlp-compress/Cargo.toml (100%) rename {util => crates/util}/rlp-compress/src/common.rs (100%) rename {util => crates/util}/rlp-compress/src/lib.rs (100%) rename {util => crates/util}/rlp-compress/tests/compress.rs (100%) rename {util => crates/util}/rlp-derive/Cargo.toml (100%) rename {util => crates/util}/rlp-derive/src/de.rs (100%) rename {util => crates/util}/rlp-derive/src/en.rs (100%) rename {util => crates/util}/rlp-derive/src/lib.rs (100%) rename {util => crates/util}/rlp-derive/tests/rlp.rs (100%) rename {util => crates/util}/stats/Cargo.toml (100%) rename {util => crates/util}/stats/src/lib.rs (100%) rename {util => crates/util}/time-utils/Cargo.toml (100%) rename {util => crates/util}/time-utils/src/lib.rs (100%) rename {util => crates/util}/triehash-ethereum/Cargo.toml (100%) rename {util => crates/util}/triehash-ethereum/src/lib.rs (100%) rename {util => crates/util}/unexpected/Cargo.toml (100%) rename {util => crates/util}/unexpected/src/lib.rs (100%) rename {util => crates/util}/version/Cargo.toml (100%) rename {util => crates/util}/version/build.rs (100%) rename {util => crates/util}/version/src/lib.rs (100%) rename {ethcore => crates/vm}/builtin/Cargo.toml (95%) rename {ethcore => crates/vm}/builtin/src/lib.rs (100%) rename {ethcore => crates/vm}/call-contract/Cargo.toml (83%) rename {ethcore => crates/vm}/call-contract/src/call_contract.rs (100%) rename {ethcore => crates/vm}/call-contract/src/lib.rs (100%) rename {ethcore => crates/vm}/evm/Cargo.toml (100%) rename {ethcore => crates/vm}/evm/benches/basic.rs (100%) rename {ethcore => crates/vm}/evm/src/evm.rs (100%) rename {ethcore => crates/vm}/evm/src/factory.rs (100%) rename {ethcore => crates/vm}/evm/src/instructions.rs (100%) rename {ethcore => crates/vm}/evm/src/interpreter/gasometer.rs (100%) rename {ethcore => crates/vm}/evm/src/interpreter/informant.rs (100%) rename {ethcore => crates/vm}/evm/src/interpreter/memory.rs (100%) rename {ethcore => crates/vm}/evm/src/interpreter/mod.rs (100%) rename {ethcore => 
crates/vm}/evm/src/interpreter/shared_cache.rs (100%) rename {ethcore => crates/vm}/evm/src/interpreter/stack.rs (100%) rename {ethcore => crates/vm}/evm/src/lib.rs (100%) rename {ethcore => crates/vm}/evm/src/tests.rs (100%) rename {ethcore => crates/vm}/evm/src/vmtype.rs (100%) rename {ethcore => crates/vm}/vm/Cargo.toml (72%) rename {ethcore => crates/vm}/vm/src/access_list.rs (100%) rename {ethcore => crates/vm}/vm/src/action_params.rs (100%) rename {ethcore => crates/vm}/vm/src/call_type.rs (100%) rename {ethcore => crates/vm}/vm/src/env_info.rs (100%) rename {ethcore => crates/vm}/vm/src/error.rs (100%) rename {ethcore => crates/vm}/vm/src/ext.rs (100%) rename {ethcore => crates/vm}/vm/src/lib.rs (100%) rename {ethcore => crates/vm}/vm/src/return_data.rs (100%) rename {ethcore => crates/vm}/vm/src/schedule.rs (100%) rename {ethcore => crates/vm}/vm/src/tests.rs (100%) rename {ethcore => crates/vm}/wasm/Cargo.toml (100%) rename {ethcore => crates/vm}/wasm/src/env.rs (100%) rename {ethcore => crates/vm}/wasm/src/lib.rs (99%) rename {ethcore => crates/vm}/wasm/src/panic_payload.rs (100%) rename {ethcore => crates/vm}/wasm/src/parser.rs (100%) rename {ethcore => crates/vm}/wasm/src/runtime.rs (100%) delete mode 100644 ethcore/res/ethereum/runner/full.json delete mode 100644 ethcore/res/ethereum/tests-issues/currents.json delete mode 160000 ethcore/res/wasm-tests delete mode 100644 ethcore/wasm/run/Cargo.toml delete mode 100644 ethcore/wasm/run/res/sample-fixture.json delete mode 100644 ethcore/wasm/run/res/sample1.wasm delete mode 100644 ethcore/wasm/run/res/sample2.wasm delete mode 100644 ethcore/wasm/run/res/sample3.wasm delete mode 100644 ethcore/wasm/run/src/fixture.rs delete mode 100644 ethcore/wasm/run/src/main.rs delete mode 100644 ethcore/wasm/run/src/runner.rs delete mode 100644 ethcore/wasm/src/tests.rs delete mode 100755 scripts/add_license.sh delete mode 100755 scripts/doc.sh delete mode 100755 scripts/hook.sh delete mode 100755 scripts/remove_duplicate_empty_lines.sh delete mode 100644 secret-store/Cargo.toml delete mode 100644 secret-store/res/acl_storage.json delete mode 100644 secret-store/res/key_server_set.json delete mode 100644 secret-store/res/service.json delete mode 100644 secret-store/src/acl_storage.rs delete mode 100644 secret-store/src/helpers.rs delete mode 100644 secret-store/src/key_server.rs delete mode 100644 secret-store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs delete mode 100644 secret-store/src/key_server_cluster/admin_sessions/mod.rs delete mode 100644 secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs delete mode 100644 secret-store/src/key_server_cluster/admin_sessions/sessions_queue.rs delete mode 100644 secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs delete mode 100644 secret-store/src/key_server_cluster/admin_sessions/share_change_session.rs delete mode 100644 secret-store/src/key_server_cluster/client_sessions/decryption_session.rs delete mode 100644 secret-store/src/key_server_cluster/client_sessions/encryption_session.rs delete mode 100644 secret-store/src/key_server_cluster/client_sessions/generation_session.rs delete mode 100644 secret-store/src/key_server_cluster/client_sessions/mod.rs delete mode 100644 secret-store/src/key_server_cluster/client_sessions/signing_session_ecdsa.rs delete mode 100644 secret-store/src/key_server_cluster/client_sessions/signing_session_schnorr.rs delete mode 100644 secret-store/src/key_server_cluster/cluster.rs delete mode 
100644 secret-store/src/key_server_cluster/cluster_connections.rs delete mode 100644 secret-store/src/key_server_cluster/cluster_connections_net.rs delete mode 100644 secret-store/src/key_server_cluster/cluster_message_processor.rs delete mode 100644 secret-store/src/key_server_cluster/cluster_sessions.rs delete mode 100644 secret-store/src/key_server_cluster/cluster_sessions_creator.rs delete mode 100644 secret-store/src/key_server_cluster/connection_trigger.rs delete mode 100644 secret-store/src/key_server_cluster/connection_trigger_with_migration.rs delete mode 100644 secret-store/src/key_server_cluster/io/deadline.rs delete mode 100644 secret-store/src/key_server_cluster/io/handshake.rs delete mode 100644 secret-store/src/key_server_cluster/io/message.rs delete mode 100644 secret-store/src/key_server_cluster/io/mod.rs delete mode 100644 secret-store/src/key_server_cluster/io/read_header.rs delete mode 100644 secret-store/src/key_server_cluster/io/read_message.rs delete mode 100644 secret-store/src/key_server_cluster/io/read_payload.rs delete mode 100644 secret-store/src/key_server_cluster/io/shared_tcp_stream.rs delete mode 100644 secret-store/src/key_server_cluster/io/write_message.rs delete mode 100644 secret-store/src/key_server_cluster/jobs/consensus_session.rs delete mode 100644 secret-store/src/key_server_cluster/jobs/decryption_job.rs delete mode 100644 secret-store/src/key_server_cluster/jobs/dummy_job.rs delete mode 100644 secret-store/src/key_server_cluster/jobs/job_session.rs delete mode 100644 secret-store/src/key_server_cluster/jobs/key_access_job.rs delete mode 100644 secret-store/src/key_server_cluster/jobs/mod.rs delete mode 100644 secret-store/src/key_server_cluster/jobs/servers_set_change_access_job.rs delete mode 100644 secret-store/src/key_server_cluster/jobs/signing_job_ecdsa.rs delete mode 100644 secret-store/src/key_server_cluster/jobs/signing_job_schnorr.rs delete mode 100644 secret-store/src/key_server_cluster/jobs/unknown_sessions_job.rs delete mode 100644 secret-store/src/key_server_cluster/math.rs delete mode 100644 secret-store/src/key_server_cluster/message.rs delete mode 100644 secret-store/src/key_server_cluster/mod.rs delete mode 100644 secret-store/src/key_server_cluster/net/accept_connection.rs delete mode 100644 secret-store/src/key_server_cluster/net/connect.rs delete mode 100644 secret-store/src/key_server_cluster/net/connection.rs delete mode 100644 secret-store/src/key_server_cluster/net/mod.rs delete mode 100644 secret-store/src/key_server_set.rs delete mode 100644 secret-store/src/key_storage.rs delete mode 100644 secret-store/src/lib.rs delete mode 100644 secret-store/src/listener/http_listener.rs delete mode 100644 secret-store/src/listener/mod.rs delete mode 100644 secret-store/src/listener/service_contract.rs delete mode 100644 secret-store/src/listener/service_contract_aggregate.rs delete mode 100644 secret-store/src/listener/service_contract_listener.rs delete mode 100644 secret-store/src/listener/tasks_queue.rs delete mode 100644 secret-store/src/node_key_pair.rs delete mode 100644 secret-store/src/serialization.rs delete mode 100644 secret-store/src/traits.rs delete mode 100644 secret-store/src/trusted_client.rs delete mode 100644 secret-store/src/types/all.rs delete mode 100644 secret-store/src/types/error.rs delete mode 100644 secret-store/src/types/mod.rs delete mode 100644 secret_store/src/key_server_cluster/cluster_connections.rs delete mode 100644 secret_store/src/key_server_cluster/cluster_connections_net.rs delete mode 100644 
secret_store/src/key_server_cluster/cluster_message_processor.rs
 delete mode 100644 util/registrar/Cargo.toml
 delete mode 100644 util/registrar/res/registrar.json
 delete mode 100644 util/registrar/src/lib.rs
 delete mode 100644 util/registrar/src/registrar.rs

diff --git a/.gitmodules b/.gitmodules
index b49256b4d..2789e41fb 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,7 +1,3 @@
-[submodule "ethcore/res/ethereum/tests"]
- path = ethcore/res/ethereum/tests
+[submodule "crates/ethcore/res/json_tests"]
+ path = crates/ethcore/res/json_tests
  url = https://github.com/ethereum/tests.git
- branch = develop
-[submodule "ethcore/res/wasm-tests"]
- path = ethcore/res/wasm-tests
- url = https://github.com/paritytech/wasm-tests
diff --git a/Cargo.lock b/Cargo.lock
index 4a52e1294..01f122fe1 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1249,47 +1249,6 @@ dependencies = [
  "tiny-keccak 1.5.0",
 ]

-[[package]]
-name = "ethcore-secretstore"
-version = "1.0.0"
-dependencies = [
- "byteorder",
- "common-types",
- "env_logger",
- "ethabi",
- "ethabi-contract",
- "ethabi-derive",
- "ethcore",
- "ethcore-accounts",
- "ethcore-call-contract",
- "ethcore-sync",
- "ethereum-types 0.4.2",
- "ethkey",
- "futures",
- "hyper 0.12.35",
- "jsonrpc-server-utils",
- "keccak-hash",
- "kvdb",
- "kvdb-rocksdb",
- "lazy_static",
- "log",
- "parity-bytes",
- "parity-crypto 0.3.1",
- "parity-runtime",
- "parking_lot 0.7.1",
- "percent-encoding 2.1.0",
- "rustc-hex 1.0.0",
- "serde",
- "serde_derive",
- "serde_json",
- "tempdir",
- "tiny-keccak 1.5.0",
- "tokio",
- "tokio-io",
- "tokio-service",
- "url 2.1.0",
-]
-
 [[package]]
 name = "ethcore-service"
 version = "0.1.0"
@@ -2885,7 +2844,6 @@ dependencies = [
  "ethcore-logger",
  "ethcore-miner",
  "ethcore-network",
- "ethcore-secretstore",
  "ethcore-service",
  "ethcore-sync",
  "ethereum-types 0.4.2",
@@ -2920,7 +2878,6 @@ dependencies = [
  "pretty_assertions",
  "prometheus",
  "regex 1.3.9",
- "registrar",
  "rlp 0.3.0",
  "rpassword",
  "rustc-hex 1.0.0",
@@ -3604,22 +3561,6 @@ dependencies = [
  "getopts",
 ]

-[[package]]
-name = "pwasm-run-test"
-version = "0.1.0"
-dependencies = [
- "clap",
- "env_logger",
- "ethereum-types 0.4.2",
- "ethjson",
- "rustc-hex 1.0.0",
- "serde",
- "serde_derive",
- "serde_json",
- "vm",
- "wasm",
-]
-
 [[package]]
 name = "pwasm-utils"
 version = "0.6.2"
@@ -3961,17 +3902,6 @@ version = "0.6.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8"

-[[package]]
-name = "registrar"
-version = "0.0.1"
-dependencies = [
- "ethabi",
- "ethabi-contract",
- "ethabi-derive",
- "futures",
- "keccak-hash",
-]
-
 [[package]]
 name = "relay"
 version = "0.1.1"
diff --git a/Cargo.toml b/Cargo.toml
index c2eccb979..aed85d039 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -10,7 +10,7 @@ authors = [
 ]

 [dependencies]
-blooms-db = { path = "util/blooms-db" }
+blooms-db = { path = "crates/db/blooms-db" }
 log = "0.4"
 rustc-hex = "1.0"
 docopt = "1.0"
@@ -35,44 +35,42 @@ fdlimit = "0.1"
 ctrlc = { git = "https://github.com/paritytech/rust-ctrlc.git" }
 jsonrpc-core = "15.0.0"
 parity-bytes = "0.1"
-common-types = { path = "ethcore/types" }
-ethcore = { path = "ethcore", features = ["parity"] }
-ethcore-accounts = { path = "accounts", optional = true }
-ethcore-blockchain = { path = "ethcore/blockchain" }
-ethcore-call-contract = { path = "ethcore/call-contract"}
-ethcore-db = { path = "ethcore/db" }
-ethcore-io = { path = "util/io" }
-ethcore-logger = { path = "parity/logger" }
-ethcore-miner = { path = "miner" }
-ethcore-network = { path = "util/network" }
-ethcore-service = { path = "ethcore/service" }
-ethcore-sync = { path = "ethcore/sync" }
+common-types = { path = "crates/ethcore/types" }
+ethcore = { path = "crates/ethcore", features = ["parity"] }
+ethcore-accounts = { path = "crates/accounts", optional = true }
+ethcore-blockchain = { path = "crates/ethcore/blockchain" }
+ethcore-call-contract = { path = "crates/vm/call-contract"}
+ethcore-db = { path = "crates/db/db" }
+ethcore-io = { path = "crates/runtime/io" }
+ethcore-logger = { path = "bin/oe/logger" }
+ethcore-miner = { path = "crates/concensus/miner" }
+ethcore-network = { path = "crates/net/network" }
+ethcore-service = { path = "crates/ethcore/service" }
+ethcore-sync = { path = "crates/ethcore/sync" }
 ethereum-types = "0.4"
-ethkey = { path = "accounts/ethkey" }
-ethstore = { path = "accounts/ethstore" }
-fetch = { path = "util/fetch" }
-node-filter = { path = "ethcore/node-filter" }
+ethkey = { path = "crates/accounts/ethkey" }
+ethstore = { path = "crates/accounts/ethstore" }
+fetch = { path = "crates/net/fetch" }
+node-filter = { path = "crates/net/node-filter" }
 rlp = { version = "0.3.0", features = ["ethereum"] }
-cli-signer= { path = "cli-signer" }
+cli-signer= { path = "crates/util/cli-signer" }
 parity-daemonize = "0.3"
-parity-local-store = { path = "miner/local-store" }
-parity-runtime = { path = "util/runtime" }
-parity-rpc = { path = "rpc" }
-parity-version = { path = "util/version" }
+parity-local-store = { path = "crates/concensus/miner/local-store" }
+parity-runtime = { path = "crates/runtime/runtime" }
+parity-rpc = { path = "crates/rpc" }
+parity-version = { path = "crates/util/version" }
 parity-path = "0.1"
-dir = { path = "util/dir" }
-panic_hook = { path = "util/panic-hook" }
+dir = { path = "crates/util/dir" }
+panic_hook = { path = "crates/util/panic-hook" }
 keccak-hash = "0.1"
-migration-rocksdb = { path = "util/migration-rocksdb" }
+migration-rocksdb = { path = "crates/db/migration-rocksdb" }
 kvdb = "0.1"
 kvdb-rocksdb = "0.1.3"
-journaldb = { path = "util/journaldb" }
-stats = { path = "util/stats" }
+journaldb = { path = "crates/db/journaldb" }
+stats = { path = "crates/util/stats" }
 prometheus = "0.9.0"
-ethcore-secretstore = { path = "secret-store", optional = true }
-
-registrar = { path = "util/registrar" }
+# ethcore-secretstore = { path = "crates/util/secret-store", optional = true }

 [build-dependencies]
 rustc_version = "0.2"
@@ -81,7 +79,7 @@ rustc_version = "0.2"
 pretty_assertions = "0.1"
 ipnetwork = "0.12.6"
 tempdir = "0.3"
-fake-fetch = { path = "util/fake-fetch" }
+fake-fetch = { path = "crates/net/fake-fetch" }
 lazy_static = "1.2.0"

 [target.'cfg(windows)'.dependencies]
@@ -97,7 +95,6 @@ test-heavy = ["ethcore/test-heavy"]
 evm-debug = ["ethcore/evm-debug"]
 evm-debug-tests = ["ethcore/evm-debug-tests"]
 slow-blocks = ["ethcore/slow-blocks"]
-secretstore = ["ethcore-secretstore", "ethcore-secretstore/accounts"]
 final = ["parity-version/final"]
 deadlock_detection = ["parking_lot/deadlock_detection"]
 # to create a memory profile (requires nightly rust), use e.g.
@@ -109,10 +106,10 @@ deadlock_detection = ["parking_lot/deadlock_detection"]
 memory_profiling = []

 [lib]
-path = "parity/lib.rs"
+path = "bin/oe/lib.rs"

 [[bin]]
-path = "parity/main.rs"
+path = "bin/oe/main.rs"
 name = "openethereum"

 [profile.test]
@@ -128,16 +125,10 @@ lto = true
 # in the dependency tree in any other way
 # (i.e. pretty much only standalone CLI tools)
 members = [
- "accounts/ethkey/cli",
- "accounts/ethstore/cli",
- "chainspec",
- "ethcore/wasm/run",
- "evmbin",
- "util/triehash-ethereum",
- "util/keccak-hasher",
- "util/patricia-trie-ethereum",
- "util/fastmap",
- "util/time-utils"
+ "bin/ethkey",
+ "bin/ethstore",
+ "bin/evmbin",
+ "bin/chainspec"
 ]

 [patch.crates-io]
diff --git a/chainspec/Cargo.toml b/bin/chainspec/Cargo.toml
similarity index 80%
rename from chainspec/Cargo.toml
rename to bin/chainspec/Cargo.toml
index c0308edd3..f990a81dd 100644
--- a/chainspec/Cargo.toml
+++ b/bin/chainspec/Cargo.toml
@@ -5,5 +5,5 @@ version = "0.1.0"
 authors = ["Marek Kotewicz "]

 [dependencies]
-ethjson = { path = "../json" }
+ethjson = { path = "../../crates/ethjson" }
 serde_json = "1.0"
diff --git a/chainspec/src/main.rs b/bin/chainspec/src/main.rs
similarity index 100%
rename from chainspec/src/main.rs
rename to bin/chainspec/src/main.rs
diff --git a/accounts/ethkey/cli/Cargo.toml b/bin/ethkey/Cargo.toml
similarity index 76%
rename from accounts/ethkey/cli/Cargo.toml
rename to bin/ethkey/Cargo.toml
index c1d44897c..e649202e0 100644
--- a/accounts/ethkey/cli/Cargo.toml
+++ b/bin/ethkey/Cargo.toml
@@ -7,8 +7,8 @@ authors = ["Parity Technologies "]
 [dependencies]
 docopt = "1.0"
 env_logger = "0.5"
-ethkey = { path = "../" }
-panic_hook = { path = "../../../util/panic-hook" }
+ethkey = { path = "../../crates/accounts/ethkey" }
+panic_hook = { path = "../../crates/util/panic-hook" }
 parity-wordlist="1.3"
 rustc-hex = "1.0"
 serde = "1.0"
diff --git a/accounts/ethkey/cli/src/main.rs b/bin/ethkey/src/main.rs
similarity index 100%
rename from accounts/ethkey/cli/src/main.rs
rename to bin/ethkey/src/main.rs
diff --git a/accounts/ethstore/cli/Cargo.toml b/bin/ethstore/Cargo.toml
similarity index 72%
rename from accounts/ethstore/cli/Cargo.toml
rename to bin/ethstore/Cargo.toml
index 9578a7537..6ad76cb0a 100644
--- a/accounts/ethstore/cli/Cargo.toml
+++ b/bin/ethstore/Cargo.toml
@@ -12,9 +12,9 @@ rustc-hex = "1.0"
 serde = "1.0"
 serde_derive = "1.0"
 parking_lot = "0.7"
-ethstore = { path = "../" }
-dir = { path = '../../../util/dir' }
-panic_hook = { path = "../../../util/panic-hook" }
+ethstore = { path = "../../crates/accounts/ethstore" }
+dir = { path = '../../crates/util/dir' }
+panic_hook = { path = "../../crates/util/panic-hook" }

 [[bin]]
 name = "ethstore"
diff --git a/accounts/ethstore/cli/src/crack.rs b/bin/ethstore/src/crack.rs
similarity index 100%
rename from accounts/ethstore/cli/src/crack.rs
rename to bin/ethstore/src/crack.rs
diff --git a/accounts/ethstore/cli/src/main.rs b/bin/ethstore/src/main.rs
similarity index 100%
rename from accounts/ethstore/cli/src/main.rs
rename to bin/ethstore/src/main.rs
diff --git a/evmbin/Cargo.toml b/bin/evmbin/Cargo.toml
similarity index 57%
rename from evmbin/Cargo.toml
rename to bin/evmbin/Cargo.toml
index 18d0b3e8e..1e0ac33ad 100644
--- a/evmbin/Cargo.toml
+++ b/bin/evmbin/Cargo.toml
@@ -9,20 +9,20 @@ name = "openethereum-evm"
 path = "./src/main.rs"

 [dependencies]
-common-types = { path = "../ethcore/types", features = ["test-helpers"] }
+common-types = { path = "../../crates/ethcore/types", features = ["test-helpers"] }
 docopt = "1.0"
 env_logger = "0.5"
-ethcore = { path = "../ethcore", features = ["test-helpers", "json-tests", "to-pod-full"] }
+ethcore = { path = "../../crates/ethcore", features = ["test-helpers", "json-tests", "to-pod-full"] }
 ethereum-types = "0.4"
-ethjson = { path = "../json" }
-evm = { path = "../ethcore/evm" }
-panic_hook = { path = 
"../util/panic-hook" } +ethjson = { path = "../../crates/ethjson" } +evm = { path = "../../crates/vm/evm" } +panic_hook = { path = "../../crates/util/panic-hook" } parity-bytes = "0.1" rustc-hex = "1.0" serde = "1.0" serde_derive = "1.0" serde_json = "1.0" -vm = { path = "../ethcore/vm" } +vm = { path = "../../crates/vm/vm" } [dev-dependencies] criterion = "0.3.0" diff --git a/evmbin/README.md b/bin/evmbin/README.md similarity index 100% rename from evmbin/README.md rename to bin/evmbin/README.md diff --git a/evmbin/benches/mod.rs b/bin/evmbin/benches/mod.rs similarity index 100% rename from evmbin/benches/mod.rs rename to bin/evmbin/benches/mod.rs diff --git a/evmbin/res/testchain.json b/bin/evmbin/res/testchain.json similarity index 100% rename from evmbin/res/testchain.json rename to bin/evmbin/res/testchain.json diff --git a/evmbin/src/display/config.rs b/bin/evmbin/src/display/config.rs similarity index 100% rename from evmbin/src/display/config.rs rename to bin/evmbin/src/display/config.rs diff --git a/evmbin/src/display/json.rs b/bin/evmbin/src/display/json.rs similarity index 100% rename from evmbin/src/display/json.rs rename to bin/evmbin/src/display/json.rs diff --git a/evmbin/src/display/mod.rs b/bin/evmbin/src/display/mod.rs similarity index 100% rename from evmbin/src/display/mod.rs rename to bin/evmbin/src/display/mod.rs diff --git a/evmbin/src/display/simple.rs b/bin/evmbin/src/display/simple.rs similarity index 100% rename from evmbin/src/display/simple.rs rename to bin/evmbin/src/display/simple.rs diff --git a/evmbin/src/display/std_json.rs b/bin/evmbin/src/display/std_json.rs similarity index 100% rename from evmbin/src/display/std_json.rs rename to bin/evmbin/src/display/std_json.rs diff --git a/evmbin/src/info.rs b/bin/evmbin/src/info.rs similarity index 100% rename from evmbin/src/info.rs rename to bin/evmbin/src/info.rs diff --git a/evmbin/src/main.rs b/bin/evmbin/src/main.rs similarity index 100% rename from evmbin/src/main.rs rename to bin/evmbin/src/main.rs diff --git a/parity/account.rs b/bin/oe/account.rs similarity index 100% rename from parity/account.rs rename to bin/oe/account.rs diff --git a/parity/account_utils.rs b/bin/oe/account_utils.rs similarity index 100% rename from parity/account_utils.rs rename to bin/oe/account_utils.rs diff --git a/parity/blockchain.rs b/bin/oe/blockchain.rs similarity index 100% rename from parity/blockchain.rs rename to bin/oe/blockchain.rs diff --git a/parity/cache.rs b/bin/oe/cache.rs similarity index 100% rename from parity/cache.rs rename to bin/oe/cache.rs diff --git a/parity/cli/mod.rs b/bin/oe/cli/mod.rs similarity index 100% rename from parity/cli/mod.rs rename to bin/oe/cli/mod.rs diff --git a/parity/cli/presets/config.dev-insecure.toml b/bin/oe/cli/presets/config.dev-insecure.toml similarity index 100% rename from parity/cli/presets/config.dev-insecure.toml rename to bin/oe/cli/presets/config.dev-insecure.toml diff --git a/parity/cli/presets/config.dev.toml b/bin/oe/cli/presets/config.dev.toml similarity index 100% rename from parity/cli/presets/config.dev.toml rename to bin/oe/cli/presets/config.dev.toml diff --git a/parity/cli/presets/config.insecure.toml b/bin/oe/cli/presets/config.insecure.toml similarity index 100% rename from parity/cli/presets/config.insecure.toml rename to bin/oe/cli/presets/config.insecure.toml diff --git a/parity/cli/presets/config.mining.toml b/bin/oe/cli/presets/config.mining.toml similarity index 100% rename from parity/cli/presets/config.mining.toml rename to 
bin/oe/cli/presets/config.mining.toml diff --git a/parity/cli/presets/config.non-standard-ports.toml b/bin/oe/cli/presets/config.non-standard-ports.toml similarity index 100% rename from parity/cli/presets/config.non-standard-ports.toml rename to bin/oe/cli/presets/config.non-standard-ports.toml diff --git a/parity/cli/presets/mod.rs b/bin/oe/cli/presets/mod.rs similarity index 100% rename from parity/cli/presets/mod.rs rename to bin/oe/cli/presets/mod.rs diff --git a/parity/cli/tests/config.full.toml b/bin/oe/cli/tests/config.full.toml similarity index 100% rename from parity/cli/tests/config.full.toml rename to bin/oe/cli/tests/config.full.toml diff --git a/parity/cli/tests/config.invalid1.toml b/bin/oe/cli/tests/config.invalid1.toml similarity index 100% rename from parity/cli/tests/config.invalid1.toml rename to bin/oe/cli/tests/config.invalid1.toml diff --git a/parity/cli/tests/config.invalid2.toml b/bin/oe/cli/tests/config.invalid2.toml similarity index 100% rename from parity/cli/tests/config.invalid2.toml rename to bin/oe/cli/tests/config.invalid2.toml diff --git a/parity/cli/tests/config.invalid3.toml b/bin/oe/cli/tests/config.invalid3.toml similarity index 100% rename from parity/cli/tests/config.invalid3.toml rename to bin/oe/cli/tests/config.invalid3.toml diff --git a/parity/cli/tests/config.invalid4.toml b/bin/oe/cli/tests/config.invalid4.toml similarity index 100% rename from parity/cli/tests/config.invalid4.toml rename to bin/oe/cli/tests/config.invalid4.toml diff --git a/parity/cli/tests/config.toml b/bin/oe/cli/tests/config.toml similarity index 100% rename from parity/cli/tests/config.toml rename to bin/oe/cli/tests/config.toml diff --git a/parity/cli/usage.rs b/bin/oe/cli/usage.rs similarity index 100% rename from parity/cli/usage.rs rename to bin/oe/cli/usage.rs diff --git a/parity/cli/usage_header.txt b/bin/oe/cli/usage_header.txt similarity index 100% rename from parity/cli/usage_header.txt rename to bin/oe/cli/usage_header.txt diff --git a/parity/cli/version.txt b/bin/oe/cli/version.txt similarity index 100% rename from parity/cli/version.txt rename to bin/oe/cli/version.txt diff --git a/parity/configuration.rs b/bin/oe/configuration.rs similarity index 100% rename from parity/configuration.rs rename to bin/oe/configuration.rs diff --git a/parity/db/mod.rs b/bin/oe/db/mod.rs similarity index 100% rename from parity/db/mod.rs rename to bin/oe/db/mod.rs diff --git a/parity/db/rocksdb/blooms.rs b/bin/oe/db/rocksdb/blooms.rs similarity index 100% rename from parity/db/rocksdb/blooms.rs rename to bin/oe/db/rocksdb/blooms.rs diff --git a/parity/db/rocksdb/helpers.rs b/bin/oe/db/rocksdb/helpers.rs similarity index 100% rename from parity/db/rocksdb/helpers.rs rename to bin/oe/db/rocksdb/helpers.rs diff --git a/parity/db/rocksdb/migration.rs b/bin/oe/db/rocksdb/migration.rs similarity index 100% rename from parity/db/rocksdb/migration.rs rename to bin/oe/db/rocksdb/migration.rs diff --git a/parity/db/rocksdb/mod.rs b/bin/oe/db/rocksdb/mod.rs similarity index 100% rename from parity/db/rocksdb/mod.rs rename to bin/oe/db/rocksdb/mod.rs diff --git a/parity/helpers.rs b/bin/oe/helpers.rs similarity index 100% rename from parity/helpers.rs rename to bin/oe/helpers.rs diff --git a/parity/informant.rs b/bin/oe/informant.rs similarity index 100% rename from parity/informant.rs rename to bin/oe/informant.rs diff --git a/parity/lib.rs b/bin/oe/lib.rs similarity index 99% rename from parity/lib.rs rename to bin/oe/lib.rs index 3f811826d..09d8ae5d0 100644 --- a/parity/lib.rs +++ 
b/bin/oe/lib.rs @@ -67,7 +67,6 @@ extern crate parity_rpc; extern crate parity_runtime; extern crate parity_version; extern crate prometheus; -extern crate registrar; extern crate stats; #[macro_use] diff --git a/parity/logger/Cargo.toml b/bin/oe/logger/Cargo.toml similarity index 100% rename from parity/logger/Cargo.toml rename to bin/oe/logger/Cargo.toml diff --git a/parity/logger/src/lib.rs b/bin/oe/logger/src/lib.rs similarity index 100% rename from parity/logger/src/lib.rs rename to bin/oe/logger/src/lib.rs diff --git a/parity/logger/src/rotating.rs b/bin/oe/logger/src/rotating.rs similarity index 100% rename from parity/logger/src/rotating.rs rename to bin/oe/logger/src/rotating.rs diff --git a/parity/main.rs b/bin/oe/main.rs similarity index 100% rename from parity/main.rs rename to bin/oe/main.rs diff --git a/parity/metrics.rs b/bin/oe/metrics.rs similarity index 100% rename from parity/metrics.rs rename to bin/oe/metrics.rs diff --git a/parity/modules.rs b/bin/oe/modules.rs similarity index 100% rename from parity/modules.rs rename to bin/oe/modules.rs diff --git a/parity/params.rs b/bin/oe/params.rs similarity index 100% rename from parity/params.rs rename to bin/oe/params.rs diff --git a/parity/presale.rs b/bin/oe/presale.rs similarity index 100% rename from parity/presale.rs rename to bin/oe/presale.rs diff --git a/parity/rpc.rs b/bin/oe/rpc.rs similarity index 100% rename from parity/rpc.rs rename to bin/oe/rpc.rs diff --git a/parity/rpc_apis.rs b/bin/oe/rpc_apis.rs similarity index 100% rename from parity/rpc_apis.rs rename to bin/oe/rpc_apis.rs diff --git a/parity/run.rs b/bin/oe/run.rs similarity index 100% rename from parity/run.rs rename to bin/oe/run.rs diff --git a/parity/secretstore.rs b/bin/oe/secretstore.rs similarity index 100% rename from parity/secretstore.rs rename to bin/oe/secretstore.rs diff --git a/parity/signer.rs b/bin/oe/signer.rs similarity index 100% rename from parity/signer.rs rename to bin/oe/signer.rs diff --git a/parity/snapshot.rs b/bin/oe/snapshot.rs similarity index 100% rename from parity/snapshot.rs rename to bin/oe/snapshot.rs diff --git a/parity/stratum.rs b/bin/oe/stratum.rs similarity index 100% rename from parity/stratum.rs rename to bin/oe/stratum.rs diff --git a/parity/upgrade.rs b/bin/oe/upgrade.rs similarity index 100% rename from parity/upgrade.rs rename to bin/oe/upgrade.rs diff --git a/parity/user_defaults.rs b/bin/oe/user_defaults.rs similarity index 100% rename from parity/user_defaults.rs rename to bin/oe/user_defaults.rs diff --git a/accounts/Cargo.toml b/crates/accounts/Cargo.toml similarity index 100% rename from accounts/Cargo.toml rename to crates/accounts/Cargo.toml diff --git a/accounts/ethkey/.gitignore b/crates/accounts/ethkey/.gitignore similarity index 100% rename from accounts/ethkey/.gitignore rename to crates/accounts/ethkey/.gitignore diff --git a/accounts/ethkey/.travis.yml b/crates/accounts/ethkey/.travis.yml similarity index 100% rename from accounts/ethkey/.travis.yml rename to crates/accounts/ethkey/.travis.yml diff --git a/accounts/ethkey/Cargo.toml b/crates/accounts/ethkey/Cargo.toml similarity index 90% rename from accounts/ethkey/Cargo.toml rename to crates/accounts/ethkey/Cargo.toml index e432a1182..6348230d3 100644 --- a/accounts/ethkey/Cargo.toml +++ b/crates/accounts/ethkey/Cargo.toml @@ -11,7 +11,7 @@ eth-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1", rev = "c ethereum-types = "0.4" lazy_static = "1.0" log = "0.4" -memzero = { path = "../../util/memzero" } +memzero = { path = 
"../../../crates/util/memzero" } parity-wordlist = "1.3" quick-error = "1.2.2" rand = "0.4" diff --git a/accounts/ethkey/README.md b/crates/accounts/ethkey/README.md similarity index 100% rename from accounts/ethkey/README.md rename to crates/accounts/ethkey/README.md diff --git a/accounts/ethkey/src/brain.rs b/crates/accounts/ethkey/src/brain.rs similarity index 100% rename from accounts/ethkey/src/brain.rs rename to crates/accounts/ethkey/src/brain.rs diff --git a/accounts/ethkey/src/brain_prefix.rs b/crates/accounts/ethkey/src/brain_prefix.rs similarity index 100% rename from accounts/ethkey/src/brain_prefix.rs rename to crates/accounts/ethkey/src/brain_prefix.rs diff --git a/accounts/ethkey/src/brain_recover.rs b/crates/accounts/ethkey/src/brain_recover.rs similarity index 100% rename from accounts/ethkey/src/brain_recover.rs rename to crates/accounts/ethkey/src/brain_recover.rs diff --git a/accounts/ethkey/src/crypto.rs b/crates/accounts/ethkey/src/crypto.rs similarity index 100% rename from accounts/ethkey/src/crypto.rs rename to crates/accounts/ethkey/src/crypto.rs diff --git a/accounts/ethkey/src/error.rs b/crates/accounts/ethkey/src/error.rs similarity index 100% rename from accounts/ethkey/src/error.rs rename to crates/accounts/ethkey/src/error.rs diff --git a/accounts/ethkey/src/extended.rs b/crates/accounts/ethkey/src/extended.rs similarity index 100% rename from accounts/ethkey/src/extended.rs rename to crates/accounts/ethkey/src/extended.rs diff --git a/accounts/ethkey/src/keccak.rs b/crates/accounts/ethkey/src/keccak.rs similarity index 100% rename from accounts/ethkey/src/keccak.rs rename to crates/accounts/ethkey/src/keccak.rs diff --git a/accounts/ethkey/src/keypair.rs b/crates/accounts/ethkey/src/keypair.rs similarity index 100% rename from accounts/ethkey/src/keypair.rs rename to crates/accounts/ethkey/src/keypair.rs diff --git a/accounts/ethkey/src/lib.rs b/crates/accounts/ethkey/src/lib.rs similarity index 100% rename from accounts/ethkey/src/lib.rs rename to crates/accounts/ethkey/src/lib.rs diff --git a/accounts/ethkey/src/math.rs b/crates/accounts/ethkey/src/math.rs similarity index 100% rename from accounts/ethkey/src/math.rs rename to crates/accounts/ethkey/src/math.rs diff --git a/accounts/ethkey/src/password.rs b/crates/accounts/ethkey/src/password.rs similarity index 100% rename from accounts/ethkey/src/password.rs rename to crates/accounts/ethkey/src/password.rs diff --git a/accounts/ethkey/src/prefix.rs b/crates/accounts/ethkey/src/prefix.rs similarity index 100% rename from accounts/ethkey/src/prefix.rs rename to crates/accounts/ethkey/src/prefix.rs diff --git a/accounts/ethkey/src/random.rs b/crates/accounts/ethkey/src/random.rs similarity index 100% rename from accounts/ethkey/src/random.rs rename to crates/accounts/ethkey/src/random.rs diff --git a/accounts/ethkey/src/secret.rs b/crates/accounts/ethkey/src/secret.rs similarity index 100% rename from accounts/ethkey/src/secret.rs rename to crates/accounts/ethkey/src/secret.rs diff --git a/accounts/ethkey/src/signature.rs b/crates/accounts/ethkey/src/signature.rs similarity index 100% rename from accounts/ethkey/src/signature.rs rename to crates/accounts/ethkey/src/signature.rs diff --git a/accounts/ethstore/.editorconfig b/crates/accounts/ethstore/.editorconfig similarity index 100% rename from accounts/ethstore/.editorconfig rename to crates/accounts/ethstore/.editorconfig diff --git a/accounts/ethstore/.gitignore b/crates/accounts/ethstore/.gitignore similarity index 100% rename from 
accounts/ethstore/.gitignore rename to crates/accounts/ethstore/.gitignore diff --git a/accounts/ethstore/.travis.yml b/crates/accounts/ethstore/.travis.yml similarity index 100% rename from accounts/ethstore/.travis.yml rename to crates/accounts/ethstore/.travis.yml diff --git a/accounts/ethstore/Cargo.toml b/crates/accounts/ethstore/Cargo.toml similarity index 100% rename from accounts/ethstore/Cargo.toml rename to crates/accounts/ethstore/Cargo.toml diff --git a/accounts/ethstore/README.md b/crates/accounts/ethstore/README.md similarity index 100% rename from accounts/ethstore/README.md rename to crates/accounts/ethstore/README.md diff --git a/accounts/ethstore/src/account/cipher.rs b/crates/accounts/ethstore/src/account/cipher.rs similarity index 100% rename from accounts/ethstore/src/account/cipher.rs rename to crates/accounts/ethstore/src/account/cipher.rs diff --git a/accounts/ethstore/src/account/crypto.rs b/crates/accounts/ethstore/src/account/crypto.rs similarity index 100% rename from accounts/ethstore/src/account/crypto.rs rename to crates/accounts/ethstore/src/account/crypto.rs diff --git a/accounts/ethstore/src/account/kdf.rs b/crates/accounts/ethstore/src/account/kdf.rs similarity index 100% rename from accounts/ethstore/src/account/kdf.rs rename to crates/accounts/ethstore/src/account/kdf.rs diff --git a/accounts/ethstore/src/account/mod.rs b/crates/accounts/ethstore/src/account/mod.rs similarity index 100% rename from accounts/ethstore/src/account/mod.rs rename to crates/accounts/ethstore/src/account/mod.rs diff --git a/accounts/ethstore/src/account/safe_account.rs b/crates/accounts/ethstore/src/account/safe_account.rs similarity index 100% rename from accounts/ethstore/src/account/safe_account.rs rename to crates/accounts/ethstore/src/account/safe_account.rs diff --git a/accounts/ethstore/src/account/version.rs b/crates/accounts/ethstore/src/account/version.rs similarity index 100% rename from accounts/ethstore/src/account/version.rs rename to crates/accounts/ethstore/src/account/version.rs diff --git a/accounts/ethstore/src/accounts_dir/disk.rs b/crates/accounts/ethstore/src/accounts_dir/disk.rs similarity index 100% rename from accounts/ethstore/src/accounts_dir/disk.rs rename to crates/accounts/ethstore/src/accounts_dir/disk.rs diff --git a/accounts/ethstore/src/accounts_dir/memory.rs b/crates/accounts/ethstore/src/accounts_dir/memory.rs similarity index 100% rename from accounts/ethstore/src/accounts_dir/memory.rs rename to crates/accounts/ethstore/src/accounts_dir/memory.rs diff --git a/accounts/ethstore/src/accounts_dir/mod.rs b/crates/accounts/ethstore/src/accounts_dir/mod.rs similarity index 100% rename from accounts/ethstore/src/accounts_dir/mod.rs rename to crates/accounts/ethstore/src/accounts_dir/mod.rs diff --git a/accounts/ethstore/src/accounts_dir/vault.rs b/crates/accounts/ethstore/src/accounts_dir/vault.rs similarity index 100% rename from accounts/ethstore/src/accounts_dir/vault.rs rename to crates/accounts/ethstore/src/accounts_dir/vault.rs diff --git a/accounts/ethstore/src/error.rs b/crates/accounts/ethstore/src/error.rs similarity index 100% rename from accounts/ethstore/src/error.rs rename to crates/accounts/ethstore/src/error.rs diff --git a/accounts/ethstore/src/ethkey.rs b/crates/accounts/ethstore/src/ethkey.rs similarity index 100% rename from accounts/ethstore/src/ethkey.rs rename to crates/accounts/ethstore/src/ethkey.rs diff --git a/accounts/ethstore/src/ethstore.rs b/crates/accounts/ethstore/src/ethstore.rs similarity index 100% rename from 
accounts/ethstore/src/ethstore.rs rename to crates/accounts/ethstore/src/ethstore.rs diff --git a/accounts/ethstore/src/import.rs b/crates/accounts/ethstore/src/import.rs similarity index 100% rename from accounts/ethstore/src/import.rs rename to crates/accounts/ethstore/src/import.rs diff --git a/accounts/ethstore/src/json/bytes.rs b/crates/accounts/ethstore/src/json/bytes.rs similarity index 100% rename from accounts/ethstore/src/json/bytes.rs rename to crates/accounts/ethstore/src/json/bytes.rs diff --git a/accounts/ethstore/src/json/cipher.rs b/crates/accounts/ethstore/src/json/cipher.rs similarity index 100% rename from accounts/ethstore/src/json/cipher.rs rename to crates/accounts/ethstore/src/json/cipher.rs diff --git a/accounts/ethstore/src/json/crypto.rs b/crates/accounts/ethstore/src/json/crypto.rs similarity index 100% rename from accounts/ethstore/src/json/crypto.rs rename to crates/accounts/ethstore/src/json/crypto.rs diff --git a/accounts/ethstore/src/json/error.rs b/crates/accounts/ethstore/src/json/error.rs similarity index 100% rename from accounts/ethstore/src/json/error.rs rename to crates/accounts/ethstore/src/json/error.rs diff --git a/accounts/ethstore/src/json/hash.rs b/crates/accounts/ethstore/src/json/hash.rs similarity index 100% rename from accounts/ethstore/src/json/hash.rs rename to crates/accounts/ethstore/src/json/hash.rs diff --git a/accounts/ethstore/src/json/id.rs b/crates/accounts/ethstore/src/json/id.rs similarity index 100% rename from accounts/ethstore/src/json/id.rs rename to crates/accounts/ethstore/src/json/id.rs diff --git a/accounts/ethstore/src/json/kdf.rs b/crates/accounts/ethstore/src/json/kdf.rs similarity index 100% rename from accounts/ethstore/src/json/kdf.rs rename to crates/accounts/ethstore/src/json/kdf.rs diff --git a/accounts/ethstore/src/json/key_file.rs b/crates/accounts/ethstore/src/json/key_file.rs similarity index 100% rename from accounts/ethstore/src/json/key_file.rs rename to crates/accounts/ethstore/src/json/key_file.rs diff --git a/accounts/ethstore/src/json/mod.rs b/crates/accounts/ethstore/src/json/mod.rs similarity index 100% rename from accounts/ethstore/src/json/mod.rs rename to crates/accounts/ethstore/src/json/mod.rs diff --git a/accounts/ethstore/src/json/presale.rs b/crates/accounts/ethstore/src/json/presale.rs similarity index 100% rename from accounts/ethstore/src/json/presale.rs rename to crates/accounts/ethstore/src/json/presale.rs diff --git a/accounts/ethstore/src/json/vault_file.rs b/crates/accounts/ethstore/src/json/vault_file.rs similarity index 100% rename from accounts/ethstore/src/json/vault_file.rs rename to crates/accounts/ethstore/src/json/vault_file.rs diff --git a/accounts/ethstore/src/json/vault_key_file.rs b/crates/accounts/ethstore/src/json/vault_key_file.rs similarity index 100% rename from accounts/ethstore/src/json/vault_key_file.rs rename to crates/accounts/ethstore/src/json/vault_key_file.rs diff --git a/accounts/ethstore/src/json/version.rs b/crates/accounts/ethstore/src/json/version.rs similarity index 100% rename from accounts/ethstore/src/json/version.rs rename to crates/accounts/ethstore/src/json/version.rs diff --git a/accounts/ethstore/src/lib.rs b/crates/accounts/ethstore/src/lib.rs similarity index 100% rename from accounts/ethstore/src/lib.rs rename to crates/accounts/ethstore/src/lib.rs diff --git a/accounts/ethstore/src/presale.rs b/crates/accounts/ethstore/src/presale.rs similarity index 100% rename from accounts/ethstore/src/presale.rs rename to 
crates/accounts/ethstore/src/presale.rs diff --git a/accounts/ethstore/src/random.rs b/crates/accounts/ethstore/src/random.rs similarity index 100% rename from accounts/ethstore/src/random.rs rename to crates/accounts/ethstore/src/random.rs diff --git a/accounts/ethstore/src/secret_store.rs b/crates/accounts/ethstore/src/secret_store.rs similarity index 100% rename from accounts/ethstore/src/secret_store.rs rename to crates/accounts/ethstore/src/secret_store.rs diff --git a/accounts/ethstore/tests/api.rs b/crates/accounts/ethstore/tests/api.rs similarity index 100% rename from accounts/ethstore/tests/api.rs rename to crates/accounts/ethstore/tests/api.rs diff --git a/accounts/ethstore/tests/res/ciphertext/30.json b/crates/accounts/ethstore/tests/res/ciphertext/30.json similarity index 100% rename from accounts/ethstore/tests/res/ciphertext/30.json rename to crates/accounts/ethstore/tests/res/ciphertext/30.json diff --git a/accounts/ethstore/tests/res/ciphertext/31.json b/crates/accounts/ethstore/tests/res/ciphertext/31.json similarity index 100% rename from accounts/ethstore/tests/res/ciphertext/31.json rename to crates/accounts/ethstore/tests/res/ciphertext/31.json diff --git a/accounts/ethstore/tests/res/geth_keystore/UTC--2016-02-17T09-20-45.721400158Z--3f49624084b67849c7b4e805c5988c21a430f9d9 b/crates/accounts/ethstore/tests/res/geth_keystore/UTC--2016-02-17T09-20-45.721400158Z--3f49624084b67849c7b4e805c5988c21a430f9d9 similarity index 100% rename from accounts/ethstore/tests/res/geth_keystore/UTC--2016-02-17T09-20-45.721400158Z--3f49624084b67849c7b4e805c5988c21a430f9d9 rename to crates/accounts/ethstore/tests/res/geth_keystore/UTC--2016-02-17T09-20-45.721400158Z--3f49624084b67849c7b4e805c5988c21a430f9d9 diff --git a/accounts/ethstore/tests/res/geth_keystore/UTC--2016-02-20T09-33-03.984382741Z--5ba4dcf897e97c2bdf8315b9ef26c13c085988cf b/crates/accounts/ethstore/tests/res/geth_keystore/UTC--2016-02-20T09-33-03.984382741Z--5ba4dcf897e97c2bdf8315b9ef26c13c085988cf similarity index 100% rename from accounts/ethstore/tests/res/geth_keystore/UTC--2016-02-20T09-33-03.984382741Z--5ba4dcf897e97c2bdf8315b9ef26c13c085988cf rename to crates/accounts/ethstore/tests/res/geth_keystore/UTC--2016-02-20T09-33-03.984382741Z--5ba4dcf897e97c2bdf8315b9ef26c13c085988cf diff --git a/accounts/ethstore/tests/res/geth_keystore/UTC--2016-04-03T08-58-49.834202900Z--63121b431a52f8043c16fcf0d1df9cb7b5f66649 b/crates/accounts/ethstore/tests/res/geth_keystore/UTC--2016-04-03T08-58-49.834202900Z--63121b431a52f8043c16fcf0d1df9cb7b5f66649 similarity index 100% rename from accounts/ethstore/tests/res/geth_keystore/UTC--2016-04-03T08-58-49.834202900Z--63121b431a52f8043c16fcf0d1df9cb7b5f66649 rename to crates/accounts/ethstore/tests/res/geth_keystore/UTC--2016-04-03T08-58-49.834202900Z--63121b431a52f8043c16fcf0d1df9cb7b5f66649 diff --git a/accounts/ethstore/tests/res/pat/p1.json b/crates/accounts/ethstore/tests/res/pat/p1.json similarity index 100% rename from accounts/ethstore/tests/res/pat/p1.json rename to crates/accounts/ethstore/tests/res/pat/p1.json diff --git a/accounts/ethstore/tests/res/pat/p2.json b/crates/accounts/ethstore/tests/res/pat/p2.json similarity index 100% rename from accounts/ethstore/tests/res/pat/p2.json rename to crates/accounts/ethstore/tests/res/pat/p2.json diff --git a/accounts/ethstore/tests/util/mod.rs b/crates/accounts/ethstore/tests/util/mod.rs similarity index 100% rename from accounts/ethstore/tests/util/mod.rs rename to crates/accounts/ethstore/tests/util/mod.rs diff --git 
a/accounts/ethstore/tests/util/transient_dir.rs b/crates/accounts/ethstore/tests/util/transient_dir.rs
similarity index 100%
rename from accounts/ethstore/tests/util/transient_dir.rs
rename to crates/accounts/ethstore/tests/util/transient_dir.rs
diff --git a/accounts/src/account_data.rs b/crates/accounts/src/account_data.rs
similarity index 100%
rename from accounts/src/account_data.rs
rename to crates/accounts/src/account_data.rs
diff --git a/accounts/src/error.rs b/crates/accounts/src/error.rs
similarity index 100%
rename from accounts/src/error.rs
rename to crates/accounts/src/error.rs
diff --git a/accounts/src/lib.rs b/crates/accounts/src/lib.rs
similarity index 100%
rename from accounts/src/lib.rs
rename to crates/accounts/src/lib.rs
diff --git a/accounts/src/stores.rs b/crates/accounts/src/stores.rs
similarity index 100%
rename from accounts/src/stores.rs
rename to crates/accounts/src/stores.rs
diff --git a/ethash/Cargo.toml b/crates/concensus/ethash/Cargo.toml
similarity index 100%
rename from ethash/Cargo.toml
rename to crates/concensus/ethash/Cargo.toml
diff --git a/ethash/benches/basic.rs b/crates/concensus/ethash/benches/basic.rs
similarity index 100%
rename from ethash/benches/basic.rs
rename to crates/concensus/ethash/benches/basic.rs
diff --git a/ethash/benches/progpow.rs b/crates/concensus/ethash/benches/progpow.rs
similarity index 100%
rename from ethash/benches/progpow.rs
rename to crates/concensus/ethash/benches/progpow.rs
diff --git a/ethash/res/progpow_testvectors.json b/crates/concensus/ethash/res/progpow_testvectors.json
similarity index 100%
rename from ethash/res/progpow_testvectors.json
rename to crates/concensus/ethash/res/progpow_testvectors.json
diff --git a/ethash/src/cache.rs b/crates/concensus/ethash/src/cache.rs
similarity index 100%
rename from ethash/src/cache.rs
rename to crates/concensus/ethash/src/cache.rs
diff --git a/ethash/src/compute.rs b/crates/concensus/ethash/src/compute.rs
similarity index 100%
rename from ethash/src/compute.rs
rename to crates/concensus/ethash/src/compute.rs
diff --git a/ethash/src/keccak.rs b/crates/concensus/ethash/src/keccak.rs
similarity index 100%
rename from ethash/src/keccak.rs
rename to crates/concensus/ethash/src/keccak.rs
diff --git a/ethash/src/lib.rs b/crates/concensus/ethash/src/lib.rs
similarity index 100%
rename from ethash/src/lib.rs
rename to crates/concensus/ethash/src/lib.rs
diff --git a/ethash/src/progpow.rs b/crates/concensus/ethash/src/progpow.rs
similarity index 100%
rename from ethash/src/progpow.rs
rename to crates/concensus/ethash/src/progpow.rs
diff --git a/ethash/src/seed_compute.rs b/crates/concensus/ethash/src/seed_compute.rs
similarity index 100%
rename from ethash/src/seed_compute.rs
rename to crates/concensus/ethash/src/seed_compute.rs
diff --git a/ethash/src/shared.rs b/crates/concensus/ethash/src/shared.rs
similarity index 100%
rename from ethash/src/shared.rs
rename to crates/concensus/ethash/src/shared.rs
diff --git a/miner/Cargo.toml b/crates/concensus/miner/Cargo.toml
similarity index 73%
rename from miner/Cargo.toml
rename to crates/concensus/miner/Cargo.toml
index 22e2e7dd8..d7bb02b56 100644
--- a/miner/Cargo.toml
+++ b/crates/concensus/miner/Cargo.toml
@@ -8,26 +8,26 @@ authors = ["Parity Technologies "]
 
 [dependencies]
 # Only work_notify, consider a separate crate
-ethash = { path = "../ethash", optional = true }
-fetch = { path = "../util/fetch", optional = true }
+ethash = { path = "../../concensus/ethash", optional = true }
+fetch = { path = "../../net/fetch", optional = true }
 hyper = { version = "0.12", optional = true }
 url = { version = "2", optional = true }
 
 # Miner
 ansi_term = "0.10"
-common-types = { path = "../ethcore/types" }
+common-types = { path = "../../ethcore/types" }
 error-chain = "0.12"
 ethabi = "6.0"
 ethabi-derive = "6.0"
 ethabi-contract = "6.0"
-ethcore-call-contract = { path = "../ethcore/call-contract" }
+ethcore-call-contract = { path = "../../vm/call-contract" }
 ethereum-types = "0.4"
 futures = "0.1"
 heapsize = "0.4"
 keccak-hash = "0.1"
 linked-hash-map = "0.5"
 log = "0.4"
-parity-runtime = { path = "../util/runtime" }
+parity-runtime = { path = "../../runtime/runtime" }
 parking_lot = "0.7"
 price-info = { path = "./price-info", optional = true }
 rlp = { version = "0.3.0", features = ["ethereum"] }
@@ -36,7 +36,7 @@ transaction-pool = "2.0.1"
 
 [dev-dependencies]
 env_logger = "0.5"
-ethkey = { path = "../accounts/ethkey" }
+ethkey = { path = "../../accounts/ethkey" }
 rustc-hex = "1.0"
 
 [features]
diff --git a/miner/local-store/Cargo.toml b/crates/concensus/miner/local-store/Cargo.toml
similarity index 70%
rename from miner/local-store/Cargo.toml
rename to crates/concensus/miner/local-store/Cargo.toml
index d16c64407..db34cd7c7 100644
--- a/miner/local-store/Cargo.toml
+++ b/crates/concensus/miner/local-store/Cargo.toml
@@ -5,8 +5,8 @@ version = "0.1.0"
 authors = ["Parity Technologies "]
 
 [dependencies]
-common-types = { path = "../../ethcore/types" }
-ethcore-io = { path = "../../util/io" }
+common-types = { path = "../../../ethcore/types" }
+ethcore-io = { path = "../../../runtime/io" }
 kvdb = "0.1"
 log = "0.4"
 rlp = { version = "0.3.0", features = ["ethereum"] }
@@ -15,5 +15,5 @@ serde_derive = "1.0"
 serde_json = "1.0"
 
 [dev-dependencies]
-ethkey = { path = "../../accounts/ethkey" }
+ethkey = { path = "../../../accounts/ethkey" }
 kvdb-memorydb = "0.1"
diff --git a/miner/local-store/src/lib.rs b/crates/concensus/miner/local-store/src/lib.rs
similarity index 100%
rename from miner/local-store/src/lib.rs
rename to crates/concensus/miner/local-store/src/lib.rs
diff --git a/miner/price-info/Cargo.toml b/crates/concensus/miner/price-info/Cargo.toml
similarity index 68%
rename from miner/price-info/Cargo.toml
rename to crates/concensus/miner/price-info/Cargo.toml
index 0426b01b4..3a884b24d 100644
--- a/miner/price-info/Cargo.toml
+++ b/crates/concensus/miner/price-info/Cargo.toml
@@ -7,12 +7,12 @@ version = "1.12.0"
 authors = ["Parity Technologies "]
 
 [dependencies]
-fetch = { path = "../../util/fetch" }
+fetch = { path = "../../../net/fetch" }
 futures = "0.1"
-parity-runtime = { path = "../../util/runtime" }
+parity-runtime = { path = "../../../runtime/runtime" }
 log = "0.4"
 serde_json = "1.0"
 
 [dev-dependencies]
 parking_lot = "0.7"
-fake-fetch = { path = "../../util/fake-fetch" }
+fake-fetch = { path = "../../../net/fake-fetch" }
diff --git a/miner/price-info/src/lib.rs b/crates/concensus/miner/price-info/src/lib.rs
similarity index 100%
rename from miner/price-info/src/lib.rs
rename to crates/concensus/miner/price-info/src/lib.rs
diff --git a/miner/res/contracts/service_transaction.json b/crates/concensus/miner/res/contracts/service_transaction.json
similarity index 100%
rename from miner/res/contracts/service_transaction.json
rename to crates/concensus/miner/res/contracts/service_transaction.json
diff --git a/miner/src/external.rs b/crates/concensus/miner/src/external.rs
similarity index 100%
rename from miner/src/external.rs
rename to crates/concensus/miner/src/external.rs
diff --git a/miner/src/gas_price_calibrator.rs
b/crates/concensus/miner/src/gas_price_calibrator.rs similarity index 100% rename from miner/src/gas_price_calibrator.rs rename to crates/concensus/miner/src/gas_price_calibrator.rs diff --git a/miner/src/gas_pricer.rs b/crates/concensus/miner/src/gas_pricer.rs similarity index 100% rename from miner/src/gas_pricer.rs rename to crates/concensus/miner/src/gas_pricer.rs diff --git a/miner/src/lib.rs b/crates/concensus/miner/src/lib.rs similarity index 100% rename from miner/src/lib.rs rename to crates/concensus/miner/src/lib.rs diff --git a/miner/src/local_accounts.rs b/crates/concensus/miner/src/local_accounts.rs similarity index 100% rename from miner/src/local_accounts.rs rename to crates/concensus/miner/src/local_accounts.rs diff --git a/miner/src/pool/client.rs b/crates/concensus/miner/src/pool/client.rs similarity index 100% rename from miner/src/pool/client.rs rename to crates/concensus/miner/src/pool/client.rs diff --git a/miner/src/pool/listener.rs b/crates/concensus/miner/src/pool/listener.rs similarity index 100% rename from miner/src/pool/listener.rs rename to crates/concensus/miner/src/pool/listener.rs diff --git a/miner/src/pool/local_transactions.rs b/crates/concensus/miner/src/pool/local_transactions.rs similarity index 100% rename from miner/src/pool/local_transactions.rs rename to crates/concensus/miner/src/pool/local_transactions.rs diff --git a/miner/src/pool/mod.rs b/crates/concensus/miner/src/pool/mod.rs similarity index 100% rename from miner/src/pool/mod.rs rename to crates/concensus/miner/src/pool/mod.rs diff --git a/miner/src/pool/queue.rs b/crates/concensus/miner/src/pool/queue.rs similarity index 100% rename from miner/src/pool/queue.rs rename to crates/concensus/miner/src/pool/queue.rs diff --git a/miner/src/pool/ready.rs b/crates/concensus/miner/src/pool/ready.rs similarity index 100% rename from miner/src/pool/ready.rs rename to crates/concensus/miner/src/pool/ready.rs diff --git a/miner/src/pool/replace.rs b/crates/concensus/miner/src/pool/replace.rs similarity index 100% rename from miner/src/pool/replace.rs rename to crates/concensus/miner/src/pool/replace.rs diff --git a/miner/src/pool/res/big_transaction.data b/crates/concensus/miner/src/pool/res/big_transaction.data similarity index 100% rename from miner/src/pool/res/big_transaction.data rename to crates/concensus/miner/src/pool/res/big_transaction.data diff --git a/miner/src/pool/scoring.rs b/crates/concensus/miner/src/pool/scoring.rs similarity index 100% rename from miner/src/pool/scoring.rs rename to crates/concensus/miner/src/pool/scoring.rs diff --git a/miner/src/pool/tests/client.rs b/crates/concensus/miner/src/pool/tests/client.rs similarity index 100% rename from miner/src/pool/tests/client.rs rename to crates/concensus/miner/src/pool/tests/client.rs diff --git a/miner/src/pool/tests/mod.rs b/crates/concensus/miner/src/pool/tests/mod.rs similarity index 100% rename from miner/src/pool/tests/mod.rs rename to crates/concensus/miner/src/pool/tests/mod.rs diff --git a/miner/src/pool/tests/tx.rs b/crates/concensus/miner/src/pool/tests/tx.rs similarity index 100% rename from miner/src/pool/tests/tx.rs rename to crates/concensus/miner/src/pool/tests/tx.rs diff --git a/miner/src/pool/verifier.rs b/crates/concensus/miner/src/pool/verifier.rs similarity index 100% rename from miner/src/pool/verifier.rs rename to crates/concensus/miner/src/pool/verifier.rs diff --git a/miner/src/service_transaction_checker.rs b/crates/concensus/miner/src/service_transaction_checker.rs similarity index 100% rename from 
miner/src/service_transaction_checker.rs rename to crates/concensus/miner/src/service_transaction_checker.rs diff --git a/miner/src/work_notify.rs b/crates/concensus/miner/src/work_notify.rs similarity index 100% rename from miner/src/work_notify.rs rename to crates/concensus/miner/src/work_notify.rs diff --git a/miner/stratum/Cargo.toml b/crates/concensus/miner/stratum/Cargo.toml similarity index 100% rename from miner/stratum/Cargo.toml rename to crates/concensus/miner/stratum/Cargo.toml diff --git a/miner/stratum/src/lib.rs b/crates/concensus/miner/stratum/src/lib.rs similarity index 100% rename from miner/stratum/src/lib.rs rename to crates/concensus/miner/stratum/src/lib.rs diff --git a/miner/stratum/src/traits.rs b/crates/concensus/miner/stratum/src/traits.rs similarity index 100% rename from miner/stratum/src/traits.rs rename to crates/concensus/miner/stratum/src/traits.rs diff --git a/miner/using-queue/Cargo.toml b/crates/concensus/miner/using-queue/Cargo.toml similarity index 100% rename from miner/using-queue/Cargo.toml rename to crates/concensus/miner/using-queue/Cargo.toml diff --git a/miner/using-queue/src/lib.rs b/crates/concensus/miner/using-queue/src/lib.rs similarity index 100% rename from miner/using-queue/src/lib.rs rename to crates/concensus/miner/using-queue/src/lib.rs diff --git a/util/bloom/Cargo.toml b/crates/db/bloom/Cargo.toml similarity index 100% rename from util/bloom/Cargo.toml rename to crates/db/bloom/Cargo.toml diff --git a/util/bloom/src/lib.rs b/crates/db/bloom/src/lib.rs similarity index 100% rename from util/bloom/src/lib.rs rename to crates/db/bloom/src/lib.rs diff --git a/util/blooms-db/Cargo.toml b/crates/db/blooms-db/Cargo.toml similarity index 100% rename from util/blooms-db/Cargo.toml rename to crates/db/blooms-db/Cargo.toml diff --git a/util/blooms-db/benches/blooms.rs b/crates/db/blooms-db/benches/blooms.rs similarity index 100% rename from util/blooms-db/benches/blooms.rs rename to crates/db/blooms-db/benches/blooms.rs diff --git a/util/blooms-db/src/db.rs b/crates/db/blooms-db/src/db.rs similarity index 100% rename from util/blooms-db/src/db.rs rename to crates/db/blooms-db/src/db.rs diff --git a/util/blooms-db/src/file.rs b/crates/db/blooms-db/src/file.rs similarity index 100% rename from util/blooms-db/src/file.rs rename to crates/db/blooms-db/src/file.rs diff --git a/util/blooms-db/src/lib.rs b/crates/db/blooms-db/src/lib.rs similarity index 100% rename from util/blooms-db/src/lib.rs rename to crates/db/blooms-db/src/lib.rs diff --git a/ethcore/db/Cargo.toml b/crates/db/db/Cargo.toml similarity index 90% rename from ethcore/db/Cargo.toml rename to crates/db/db/Cargo.toml index 2c730a758..64f175c24 100644 --- a/ethcore/db/Cargo.toml +++ b/crates/db/db/Cargo.toml @@ -8,7 +8,7 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -common-types = { path = "../types" } +common-types = { path = "../../ethcore/types" } ethereum-types = "0.4" heapsize = "0.4" kvdb = "0.1" diff --git a/ethcore/db/src/cache_manager.rs b/crates/db/db/src/cache_manager.rs similarity index 100% rename from ethcore/db/src/cache_manager.rs rename to crates/db/db/src/cache_manager.rs diff --git a/ethcore/db/src/db.rs b/crates/db/db/src/db.rs similarity index 100% rename from ethcore/db/src/db.rs rename to crates/db/db/src/db.rs diff --git a/ethcore/db/src/keys.rs b/crates/db/db/src/keys.rs similarity index 100% rename from ethcore/db/src/keys.rs rename to crates/db/db/src/keys.rs diff --git a/ethcore/db/src/lib.rs b/crates/db/db/src/lib.rs similarity index 
100% rename from ethcore/db/src/lib.rs rename to crates/db/db/src/lib.rs diff --git a/util/journaldb/Cargo.toml b/crates/db/journaldb/Cargo.toml similarity index 85% rename from util/journaldb/Cargo.toml rename to crates/db/journaldb/Cargo.toml index f3750318e..5ca9853ab 100644 --- a/util/journaldb/Cargo.toml +++ b/crates/db/journaldb/Cargo.toml @@ -10,10 +10,10 @@ parity-bytes = "0.1" ethereum-types = "0.4" hash-db = "0.11.0" heapsize = "0.4" -keccak-hasher = { path = "../keccak-hasher" } +keccak-hasher = { path = "../../util/keccak-hasher" } kvdb = "0.1" log = "0.4" -memory-db = { path = "../../util/memory-db" } +memory-db = { path = "../memory-db" } parking_lot = "0.7" fastmap = { path = "../../util/fastmap" } rlp = { version = "0.3.0", features = ["ethereum"] } diff --git a/util/journaldb/src/archivedb.rs b/crates/db/journaldb/src/archivedb.rs similarity index 100% rename from util/journaldb/src/archivedb.rs rename to crates/db/journaldb/src/archivedb.rs diff --git a/util/journaldb/src/as_hash_db_impls.rs b/crates/db/journaldb/src/as_hash_db_impls.rs similarity index 100% rename from util/journaldb/src/as_hash_db_impls.rs rename to crates/db/journaldb/src/as_hash_db_impls.rs diff --git a/util/journaldb/src/earlymergedb.rs b/crates/db/journaldb/src/earlymergedb.rs similarity index 100% rename from util/journaldb/src/earlymergedb.rs rename to crates/db/journaldb/src/earlymergedb.rs diff --git a/util/journaldb/src/lib.rs b/crates/db/journaldb/src/lib.rs similarity index 100% rename from util/journaldb/src/lib.rs rename to crates/db/journaldb/src/lib.rs diff --git a/util/journaldb/src/overlaydb.rs b/crates/db/journaldb/src/overlaydb.rs similarity index 100% rename from util/journaldb/src/overlaydb.rs rename to crates/db/journaldb/src/overlaydb.rs diff --git a/util/journaldb/src/overlayrecentdb.rs b/crates/db/journaldb/src/overlayrecentdb.rs similarity index 100% rename from util/journaldb/src/overlayrecentdb.rs rename to crates/db/journaldb/src/overlayrecentdb.rs diff --git a/util/journaldb/src/refcounteddb.rs b/crates/db/journaldb/src/refcounteddb.rs similarity index 100% rename from util/journaldb/src/refcounteddb.rs rename to crates/db/journaldb/src/refcounteddb.rs diff --git a/util/journaldb/src/traits.rs b/crates/db/journaldb/src/traits.rs similarity index 100% rename from util/journaldb/src/traits.rs rename to crates/db/journaldb/src/traits.rs diff --git a/util/journaldb/src/util.rs b/crates/db/journaldb/src/util.rs similarity index 100% rename from util/journaldb/src/util.rs rename to crates/db/journaldb/src/util.rs diff --git a/util/memory-db/.cargo_vcs_info.json b/crates/db/memory-db/.cargo_vcs_info.json similarity index 100% rename from util/memory-db/.cargo_vcs_info.json rename to crates/db/memory-db/.cargo_vcs_info.json diff --git a/util/memory-db/Cargo.toml b/crates/db/memory-db/Cargo.toml similarity index 100% rename from util/memory-db/Cargo.toml rename to crates/db/memory-db/Cargo.toml diff --git a/util/memory-db/Cargo.toml.orig b/crates/db/memory-db/Cargo.toml.orig similarity index 100% rename from util/memory-db/Cargo.toml.orig rename to crates/db/memory-db/Cargo.toml.orig diff --git a/util/memory-db/README.md b/crates/db/memory-db/README.md similarity index 100% rename from util/memory-db/README.md rename to crates/db/memory-db/README.md diff --git a/util/memory-db/benches/bench.rs b/crates/db/memory-db/benches/bench.rs similarity index 100% rename from util/memory-db/benches/bench.rs rename to crates/db/memory-db/benches/bench.rs diff --git a/util/memory-db/src/lib.rs 
b/crates/db/memory-db/src/lib.rs
similarity index 100%
rename from util/memory-db/src/lib.rs
rename to crates/db/memory-db/src/lib.rs
diff --git a/util/migration-rocksdb/Cargo.toml b/crates/db/migration-rocksdb/Cargo.toml
similarity index 83%
rename from util/migration-rocksdb/Cargo.toml
rename to crates/db/migration-rocksdb/Cargo.toml
index 3cbfce09f..025ed98cc 100644
--- a/util/migration-rocksdb/Cargo.toml
+++ b/crates/db/migration-rocksdb/Cargo.toml
@@ -5,7 +5,7 @@ authors = ["Parity Technologies "]
 
 [dependencies]
 log = "0.4"
-macros = { path = "../macros" }
+macros = { path = "../../util/macros" }
 kvdb = "0.1"
 kvdb-rocksdb = "0.1.3"
 
diff --git a/util/migration-rocksdb/src/lib.rs b/crates/db/migration-rocksdb/src/lib.rs
similarity index 100%
rename from util/migration-rocksdb/src/lib.rs
rename to crates/db/migration-rocksdb/src/lib.rs
diff --git a/util/migration-rocksdb/tests/tests.rs b/crates/db/migration-rocksdb/tests/tests.rs
similarity index 100%
rename from util/migration-rocksdb/tests/tests.rs
rename to crates/db/migration-rocksdb/tests/tests.rs
diff --git a/util/patricia-trie-ethereum/Cargo.toml b/crates/db/patricia-trie-ethereum/Cargo.toml
similarity index 86%
rename from util/patricia-trie-ethereum/Cargo.toml
rename to crates/db/patricia-trie-ethereum/Cargo.toml
index 205201a2d..1a2bdf344 100644
--- a/util/patricia-trie-ethereum/Cargo.toml
+++ b/crates/db/patricia-trie-ethereum/Cargo.toml
@@ -7,7 +7,7 @@ license = "GPL-3.0"
 
 [dependencies]
 trie-db = "0.11.0"
-keccak-hasher = { version = "0.1.1", path = "../keccak-hasher" }
+keccak-hasher = { version = "0.1.1", path = "../../util/keccak-hasher" }
 hash-db = "0.11.0"
 rlp = { version = "0.3.0", features = ["ethereum"] }
 parity-bytes = "0.1"
diff --git a/util/patricia-trie-ethereum/src/lib.rs b/crates/db/patricia-trie-ethereum/src/lib.rs
similarity index 100%
rename from util/patricia-trie-ethereum/src/lib.rs
rename to crates/db/patricia-trie-ethereum/src/lib.rs
diff --git a/util/patricia-trie-ethereum/src/rlp_node_codec.rs b/crates/db/patricia-trie-ethereum/src/rlp_node_codec.rs
similarity index 100%
rename from util/patricia-trie-ethereum/src/rlp_node_codec.rs
rename to crates/db/patricia-trie-ethereum/src/rlp_node_codec.rs
diff --git a/ethcore/Cargo.toml b/crates/ethcore/Cargo.toml
similarity index 78%
rename from ethcore/Cargo.toml
rename to crates/ethcore/Cargo.toml
index 62e82d3ff..57c75a967 100644
--- a/ethcore/Cargo.toml
+++ b/crates/ethcore/Cargo.toml
@@ -8,7 +8,7 @@ authors = ["Parity Technologies "]
 
 [dependencies]
 ansi_term = "0.10"
-blooms-db = { path = "../util/blooms-db", optional = true }
+blooms-db = { path = "../db/blooms-db", optional = true }
 common-types = { path = "types" }
 crossbeam-utils = "0.6"
 eip-152 = { version = "0.1", path = "../util/EIP-152" }
@@ -17,24 +17,24 @@ error-chain = { version = "0.12", default-features = false }
 ethabi = "6.0"
 ethabi-contract = "6.0"
 ethabi-derive = "6.0"
-ethash = { path = "../ethash" }
+ethash = { path = "../concensus/ethash" }
 ethcore-blockchain = { path = "./blockchain" }
-ethcore-bloom-journal = { path = "../util/bloom" }
-ethcore-builtin = { path = "./builtin" }
-ethcore-call-contract = { path = "./call-contract" }
-ethcore-db = { path = "./db" }
-ethcore-io = { path = "../util/io" }
-ethcore-miner = { path = "../miner" }
-ethcore-stratum = { path = "../miner/stratum", optional = true }
+ethcore-bloom-journal = { path = "../db/bloom" }
+ethcore-builtin = { path = "../vm/builtin" }
+ethcore-call-contract = { path = "../vm/call-contract" }
+ethcore-db = { path = "../db/db" }
+ethcore-io = { path = "../runtime/io" }
+ethcore-miner = { path = "../concensus/miner" }
+ethcore-stratum = { path = "../concensus/miner/stratum", optional = true }
 ethereum-types = "0.4"
-ethjson = { path = "../json" }
+ethjson = { path = "../ethjson" }
 ethkey = { path = "../accounts/ethkey" }
-evm = { path = "evm" }
+evm = { path = "../vm/evm" }
 globset = "0.4"
 hash-db = "0.11.0"
 heapsize = "0.4"
 itertools = "0.5"
-journaldb = { path = "../util/journaldb" }
+journaldb = { path = "../db/journaldb" }
 keccak-hash = "0.1"
 keccak-hasher = { path = "../util/keccak-hasher" }
 kvdb = "0.1"
@@ -47,13 +47,13 @@ lru-cache = "0.1"
 macros = { path = "../util/macros" }
 maplit = "1"
 memory-cache = { path = "../util/memory-cache" }
-memory-db = { path = "../util/memory-db" }
+memory-db = { path = "../db/memory-db" }
 num_cpus = "1.2"
 parity-bytes = "0.1"
 parity-snappy = "0.1"
 parking_lot = "0.7"
 trie-db = "0.11.0"
-patricia-trie-ethereum = { path = "../util/patricia-trie-ethereum" }
+patricia-trie-ethereum = { path = "../db/patricia-trie-ethereum" }
 rand = "0.4"
 rayon = "1.1"
 regex = "1.3.9"
@@ -70,20 +70,20 @@ time-utils = { path = "../util/time-utils" }
 trace-time = "0.1"
 triehash-ethereum = { version = "0.2", path = "../util/triehash-ethereum" }
 unexpected = { path = "../util/unexpected" }
-using_queue = { path = "../miner/using-queue" }
-vm = { path = "vm" }
+using_queue = { path = "../concensus/miner/using-queue" }
+vm = { path = "../vm/vm" }
 walkdir = "2.3"
-wasm = { path = "wasm" }
+wasm = { path = "../vm/wasm" }
 
 [dev-dependencies]
-blooms-db = { path = "../util/blooms-db" }
+blooms-db = { path = "../db/blooms-db" }
 criterion = "0.2"
 env_logger = "0.5"
 ethcore-accounts = { path = "../accounts" }
-fetch = { path = "../util/fetch" }
+fetch = { path = "../net/fetch" }
 hex-literal = "0.2.1"
 kvdb-rocksdb = "0.1.3"
-parity-runtime = { path = "../util/runtime" }
+parity-runtime = { path = "../runtime/runtime" }
 rlp_compress = { path = "../util/rlp-compress" }
 tempdir = "0.3"
 trie-standardmap = "0.1"
diff --git a/ethcore/benches/builtin.rs b/crates/ethcore/benches/builtin.rs
similarity index 100%
rename from ethcore/benches/builtin.rs
rename to crates/ethcore/benches/builtin.rs
diff --git a/ethcore/blockchain/Cargo.toml b/crates/ethcore/blockchain/Cargo.toml
similarity index 92%
rename from ethcore/blockchain/Cargo.toml
rename to crates/ethcore/blockchain/Cargo.toml
index d627341de..d20a23889 100644
--- a/ethcore/blockchain/Cargo.toml
+++ b/crates/ethcore/blockchain/Cargo.toml
@@ -9,9 +9,9 @@ edition = "2018"
 
 [dependencies]
 ansi_term = "0.11"
-blooms-db = { path = "../../util/blooms-db" }
+blooms-db = { path = "../../db/blooms-db" }
 common-types = { path = "../types" }
-ethcore-db = { path = "../db" }
+ethcore-db = { path = "../../db/db" }
 ethereum-types = "0.4"
 heapsize = "0.4"
 itertools = "0.5"
diff --git a/ethcore/blockchain/src/best_block.rs b/crates/ethcore/blockchain/src/best_block.rs
similarity index 100%
rename from ethcore/blockchain/src/best_block.rs
rename to crates/ethcore/blockchain/src/best_block.rs
diff --git a/ethcore/blockchain/src/block_info.rs b/crates/ethcore/blockchain/src/block_info.rs
similarity index 100%
rename from ethcore/blockchain/src/block_info.rs
rename to crates/ethcore/blockchain/src/block_info.rs
diff --git a/ethcore/blockchain/src/blockchain.rs b/crates/ethcore/blockchain/src/blockchain.rs
similarity index 100%
rename from ethcore/blockchain/src/blockchain.rs
rename to crates/ethcore/blockchain/src/blockchain.rs
diff --git a/ethcore/blockchain/src/cache.rs
b/crates/ethcore/blockchain/src/cache.rs similarity index 100% rename from ethcore/blockchain/src/cache.rs rename to crates/ethcore/blockchain/src/cache.rs diff --git a/ethcore/blockchain/src/config.rs b/crates/ethcore/blockchain/src/config.rs similarity index 100% rename from ethcore/blockchain/src/config.rs rename to crates/ethcore/blockchain/src/config.rs diff --git a/ethcore/blockchain/src/generator.rs b/crates/ethcore/blockchain/src/generator.rs similarity index 100% rename from ethcore/blockchain/src/generator.rs rename to crates/ethcore/blockchain/src/generator.rs diff --git a/ethcore/blockchain/src/import_route.rs b/crates/ethcore/blockchain/src/import_route.rs similarity index 100% rename from ethcore/blockchain/src/import_route.rs rename to crates/ethcore/blockchain/src/import_route.rs diff --git a/ethcore/blockchain/src/lib.rs b/crates/ethcore/blockchain/src/lib.rs similarity index 100% rename from ethcore/blockchain/src/lib.rs rename to crates/ethcore/blockchain/src/lib.rs diff --git a/ethcore/blockchain/src/update.rs b/crates/ethcore/blockchain/src/update.rs similarity index 100% rename from ethcore/blockchain/src/update.rs rename to crates/ethcore/blockchain/src/update.rs diff --git a/ethcore/res/ethereum/callisto.json b/crates/ethcore/res/chainspec/callisto.json similarity index 100% rename from ethcore/res/ethereum/callisto.json rename to crates/ethcore/res/chainspec/callisto.json diff --git a/ethcore/res/ethereum/ellaism.json b/crates/ethcore/res/chainspec/ellaism.json similarity index 100% rename from ethcore/res/ethereum/ellaism.json rename to crates/ethcore/res/chainspec/ellaism.json diff --git a/ethcore/res/ethereum/ewc.json b/crates/ethcore/res/chainspec/ewc.json similarity index 100% rename from ethcore/res/ethereum/ewc.json rename to crates/ethcore/res/chainspec/ewc.json diff --git a/ethcore/res/ethereum/foundation.json b/crates/ethcore/res/chainspec/foundation.json similarity index 100% rename from ethcore/res/ethereum/foundation.json rename to crates/ethcore/res/chainspec/foundation.json diff --git a/ethcore/res/ethereum/goerli.json b/crates/ethcore/res/chainspec/goerli.json similarity index 100% rename from ethcore/res/ethereum/goerli.json rename to crates/ethcore/res/chainspec/goerli.json diff --git a/ethcore/res/instant_seal.json b/crates/ethcore/res/chainspec/instant_seal.json similarity index 100% rename from ethcore/res/instant_seal.json rename to crates/ethcore/res/chainspec/instant_seal.json diff --git a/ethcore/res/ethereum/kovan.json b/crates/ethcore/res/chainspec/kovan.json similarity index 100% rename from ethcore/res/ethereum/kovan.json rename to crates/ethcore/res/chainspec/kovan.json diff --git a/ethcore/res/ethereum/mix.json b/crates/ethcore/res/chainspec/mix.json similarity index 100% rename from ethcore/res/ethereum/mix.json rename to crates/ethcore/res/chainspec/mix.json diff --git a/ethcore/res/ethereum/morden.json b/crates/ethcore/res/chainspec/morden.json similarity index 100% rename from ethcore/res/ethereum/morden.json rename to crates/ethcore/res/chainspec/morden.json diff --git a/ethcore/res/ethereum/musicoin.json b/crates/ethcore/res/chainspec/musicoin.json similarity index 100% rename from ethcore/res/ethereum/musicoin.json rename to crates/ethcore/res/chainspec/musicoin.json diff --git a/ethcore/res/ethereum/poacore.json b/crates/ethcore/res/chainspec/poacore.json similarity index 100% rename from ethcore/res/ethereum/poacore.json rename to crates/ethcore/res/chainspec/poacore.json diff --git a/ethcore/res/ethereum/poasokol.json 
b/crates/ethcore/res/chainspec/poasokol.json similarity index 100% rename from ethcore/res/ethereum/poasokol.json rename to crates/ethcore/res/chainspec/poasokol.json diff --git a/ethcore/res/ethereum/rinkeby.json b/crates/ethcore/res/chainspec/rinkeby.json similarity index 100% rename from ethcore/res/ethereum/rinkeby.json rename to crates/ethcore/res/chainspec/rinkeby.json diff --git a/ethcore/res/ethereum/ropsten.json b/crates/ethcore/res/chainspec/ropsten.json similarity index 100% rename from ethcore/res/ethereum/ropsten.json rename to crates/ethcore/res/chainspec/ropsten.json diff --git a/ethcore/res/authority_round.json b/crates/ethcore/res/chainspec/test/authority_round.json similarity index 100% rename from ethcore/res/authority_round.json rename to crates/ethcore/res/chainspec/test/authority_round.json diff --git a/ethcore/res/authority_round_block_reward_contract.json b/crates/ethcore/res/chainspec/test/authority_round_block_reward_contract.json similarity index 100% rename from ethcore/res/authority_round_block_reward_contract.json rename to crates/ethcore/res/chainspec/test/authority_round_block_reward_contract.json diff --git a/ethcore/res/authority_round_empty_steps.json b/crates/ethcore/res/chainspec/test/authority_round_empty_steps.json similarity index 100% rename from ethcore/res/authority_round_empty_steps.json rename to crates/ethcore/res/chainspec/test/authority_round_empty_steps.json diff --git a/ethcore/res/basic_authority.json b/crates/ethcore/res/chainspec/test/basic_authority.json similarity index 100% rename from ethcore/res/basic_authority.json rename to crates/ethcore/res/chainspec/test/basic_authority.json diff --git a/ethcore/res/ethereum/berlin_test.json b/crates/ethcore/res/chainspec/test/berlin_test.json similarity index 100% rename from ethcore/res/ethereum/berlin_test.json rename to crates/ethcore/res/chainspec/test/berlin_test.json diff --git a/ethcore/res/ethereum/builtin_multi_bench.json b/crates/ethcore/res/chainspec/test/builtin_multi_bench.json similarity index 100% rename from ethcore/res/ethereum/builtin_multi_bench.json rename to crates/ethcore/res/chainspec/test/builtin_multi_bench.json diff --git a/ethcore/res/ethereum/builtin_one_activation_bench.json b/crates/ethcore/res/chainspec/test/builtin_one_activation_bench.json similarity index 100% rename from ethcore/res/ethereum/builtin_one_activation_bench.json rename to crates/ethcore/res/chainspec/test/builtin_one_activation_bench.json diff --git a/ethcore/res/ethereum/byzantium_test.json b/crates/ethcore/res/chainspec/test/byzantium_test.json similarity index 100% rename from ethcore/res/ethereum/byzantium_test.json rename to crates/ethcore/res/chainspec/test/byzantium_test.json diff --git a/ethcore/res/ethereum/byzantium_to_constantinoplefixat5_test.json b/crates/ethcore/res/chainspec/test/byzantium_to_constantinoplefixat5_test.json similarity index 100% rename from ethcore/res/ethereum/byzantium_to_constantinoplefixat5_test.json rename to crates/ethcore/res/chainspec/test/byzantium_to_constantinoplefixat5_test.json diff --git a/ethcore/res/ethereum/constantinople_test.json b/crates/ethcore/res/chainspec/test/constantinople_test.json similarity index 100% rename from ethcore/res/ethereum/constantinople_test.json rename to crates/ethcore/res/chainspec/test/constantinople_test.json diff --git a/ethcore/res/constructor.json b/crates/ethcore/res/chainspec/test/constructor.json similarity index 100% rename from ethcore/res/constructor.json rename to 
crates/ethcore/res/chainspec/test/constructor.json diff --git a/ethcore/res/tx_permission_tests/contract_ver_2_genesis.json b/crates/ethcore/res/chainspec/test/contract_ver_2_genesis.json similarity index 100% rename from ethcore/res/tx_permission_tests/contract_ver_2_genesis.json rename to crates/ethcore/res/chainspec/test/contract_ver_2_genesis.json diff --git a/ethcore/res/tx_permission_tests/deprecated_contract_genesis.json b/crates/ethcore/res/chainspec/test/deprecated_contract_genesis.json similarity index 100% rename from ethcore/res/tx_permission_tests/deprecated_contract_genesis.json rename to crates/ethcore/res/chainspec/test/deprecated_contract_genesis.json diff --git a/ethcore/res/ethereum/eip150_test.json b/crates/ethcore/res/chainspec/test/eip150_test.json similarity index 100% rename from ethcore/res/ethereum/eip150_test.json rename to crates/ethcore/res/chainspec/test/eip150_test.json diff --git a/ethcore/res/ethereum/eip161_test.json b/crates/ethcore/res/chainspec/test/eip161_test.json similarity index 100% rename from ethcore/res/ethereum/eip161_test.json rename to crates/ethcore/res/chainspec/test/eip161_test.json diff --git a/ethcore/res/ethereum/eip210_test.json b/crates/ethcore/res/chainspec/test/eip210_test.json similarity index 100% rename from ethcore/res/ethereum/eip210_test.json rename to crates/ethcore/res/chainspec/test/eip210_test.json diff --git a/ethcore/res/ethereum/frontier_like_test.json b/crates/ethcore/res/chainspec/test/frontier_like_test.json similarity index 100% rename from ethcore/res/ethereum/frontier_like_test.json rename to crates/ethcore/res/chainspec/test/frontier_like_test.json diff --git a/ethcore/res/ethereum/frontier_test.json b/crates/ethcore/res/chainspec/test/frontier_test.json similarity index 100% rename from ethcore/res/ethereum/frontier_test.json rename to crates/ethcore/res/chainspec/test/frontier_test.json diff --git a/ethcore/res/ethereum/homestead_test.json b/crates/ethcore/res/chainspec/test/homestead_test.json similarity index 100% rename from ethcore/res/ethereum/homestead_test.json rename to crates/ethcore/res/chainspec/test/homestead_test.json diff --git a/ethcore/res/ethereum/istanbul_test.json b/crates/ethcore/res/chainspec/test/istanbul_test.json similarity index 100% rename from ethcore/res/ethereum/istanbul_test.json rename to crates/ethcore/res/chainspec/test/istanbul_test.json diff --git a/ethcore/res/ethereum/kovan_wasm_test.json b/crates/ethcore/res/chainspec/test/kovan_wasm_test.json similarity index 100% rename from ethcore/res/ethereum/kovan_wasm_test.json rename to crates/ethcore/res/chainspec/test/kovan_wasm_test.json diff --git a/ethcore/res/ethereum/mcip3_test.json b/crates/ethcore/res/chainspec/test/mcip3_test.json similarity index 100% rename from ethcore/res/ethereum/mcip3_test.json rename to crates/ethcore/res/chainspec/test/mcip3_test.json diff --git a/ethcore/res/null.json b/crates/ethcore/res/chainspec/test/null.json similarity index 100% rename from ethcore/res/null.json rename to crates/ethcore/res/chainspec/test/null.json diff --git a/ethcore/res/null_morden.json b/crates/ethcore/res/chainspec/test/null_morden.json similarity index 100% rename from ethcore/res/null_morden.json rename to crates/ethcore/res/chainspec/test/null_morden.json diff --git a/ethcore/res/null_morden_with_finality.json b/crates/ethcore/res/chainspec/test/null_morden_with_finality.json similarity index 100% rename from ethcore/res/null_morden_with_finality.json rename to 
crates/ethcore/res/chainspec/test/null_morden_with_finality.json diff --git a/ethcore/res/null_morden_with_reward.json b/crates/ethcore/res/chainspec/test/null_morden_with_reward.json similarity index 100% rename from ethcore/res/null_morden_with_reward.json rename to crates/ethcore/res/chainspec/test/null_morden_with_reward.json diff --git a/ethcore/res/spec_backward_compability.json b/crates/ethcore/res/chainspec/test/spec_backward_compability.json similarity index 100% rename from ethcore/res/spec_backward_compability.json rename to crates/ethcore/res/chainspec/test/spec_backward_compability.json diff --git a/ethcore/res/ethereum/st_peters_test.json b/crates/ethcore/res/chainspec/test/st_peters_test.json similarity index 100% rename from ethcore/res/ethereum/st_peters_test.json rename to crates/ethcore/res/chainspec/test/st_peters_test.json diff --git a/ethcore/res/ethereum/transition_test.json b/crates/ethcore/res/chainspec/test/transition_test.json similarity index 100% rename from ethcore/res/ethereum/transition_test.json rename to crates/ethcore/res/chainspec/test/transition_test.json diff --git a/ethcore/res/validator_contract.json b/crates/ethcore/res/chainspec/test/validator_contract.json similarity index 100% rename from ethcore/res/validator_contract.json rename to crates/ethcore/res/chainspec/test/validator_contract.json diff --git a/ethcore/res/validator_multi.json b/crates/ethcore/res/chainspec/test/validator_multi.json similarity index 100% rename from ethcore/res/validator_multi.json rename to crates/ethcore/res/chainspec/test/validator_multi.json diff --git a/ethcore/res/validator_safe_contract.json b/crates/ethcore/res/chainspec/test/validator_safe_contract.json similarity index 100% rename from ethcore/res/validator_safe_contract.json rename to crates/ethcore/res/chainspec/test/validator_safe_contract.json diff --git a/ethcore/res/ethereum/yolo3_test.json b/crates/ethcore/res/chainspec/test/yolo3_test.json similarity index 100% rename from ethcore/res/ethereum/yolo3_test.json rename to crates/ethcore/res/chainspec/test/yolo3_test.json diff --git a/ethcore/res/ethereum/volta.json b/crates/ethcore/res/chainspec/volta.json similarity index 100% rename from ethcore/res/ethereum/volta.json rename to crates/ethcore/res/chainspec/volta.json diff --git a/ethcore/res/ethereum/xdai.json b/crates/ethcore/res/chainspec/xdai.json similarity index 100% rename from ethcore/res/ethereum/xdai.json rename to crates/ethcore/res/chainspec/xdai.json diff --git a/ethcore/res/contracts/block_reward.json b/crates/ethcore/res/contracts/block_reward.json similarity index 100% rename from ethcore/res/contracts/block_reward.json rename to crates/ethcore/res/contracts/block_reward.json diff --git a/ethcore/res/contracts/registrar.json b/crates/ethcore/res/contracts/registrar.json similarity index 99% rename from ethcore/res/contracts/registrar.json rename to crates/ethcore/res/contracts/registrar.json index 38edcc787..2f4aab1fd 100644 --- a/ethcore/res/contracts/registrar.json +++ b/crates/ethcore/res/contracts/registrar.json @@ -18,4 +18,4 @@ {"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"uint256"}],"name":"setUint","outputs":[{"name":"success","type":"bool"}],"type":"function"}, {"constant":false,"inputs":[],"name":"removeReverse","outputs":[],"type":"function"}, 
 {"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"address"}],"name":"setAddress","outputs":[{"name":"success","type":"bool"}],"type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"amount","type":"uint256"}],"name":"Drained","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"amount","type":"uint256"}],"name":"FeeChanged","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"bytes32"},{"indexed":true,"name":"owner","type":"address"}],"name":"Reserved","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"bytes32"},{"indexed":true,"name":"oldOwner","type":"address"},{"indexed":true,"name":"newOwner","type":"address"}],"name":"Transferred","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"bytes32"},{"indexed":true,"name":"owner","type":"address"}],"name":"Dropped","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"bytes32"},{"indexed":true,"name":"owner","type":"address"},{"indexed":true,"name":"key","type":"string"}],"name":"DataChanged","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"string"},{"indexed":true,"name":"reverse","type":"address"}],"name":"ReverseProposed","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"string"},{"indexed":true,"name":"reverse","type":"address"}],"name":"ReverseConfirmed","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"string"},{"indexed":true,"name":"reverse","type":"address"}],"name":"ReverseRemoved","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"old","type":"address"},{"indexed":true,"name":"current","type":"address"}],"name":"NewOwner","type":"event"}
-]
+]
\ No newline at end of file
diff --git a/ethcore/res/contracts/test_validator_set.json b/crates/ethcore/res/contracts/test_validator_set.json
similarity index 100%
rename from ethcore/res/contracts/test_validator_set.json
rename to crates/ethcore/res/contracts/test_validator_set.json
diff --git a/ethcore/res/contracts/tx_acl.json b/crates/ethcore/res/contracts/tx_acl.json
similarity index 100%
rename from ethcore/res/contracts/tx_acl.json
rename to crates/ethcore/res/contracts/tx_acl.json
diff --git a/ethcore/res/contracts/tx_acl_deprecated.json b/crates/ethcore/res/contracts/tx_acl_deprecated.json
similarity index 100%
rename from ethcore/res/contracts/tx_acl_deprecated.json
rename to crates/ethcore/res/contracts/tx_acl_deprecated.json
diff --git a/ethcore/res/contracts/validator_report.json b/crates/ethcore/res/contracts/validator_report.json
similarity index 100%
rename from ethcore/res/contracts/validator_report.json
rename to crates/ethcore/res/contracts/validator_report.json
diff --git a/ethcore/res/contracts/validator_set.json b/crates/ethcore/res/contracts/validator_set.json
similarity index 100%
rename from ethcore/res/contracts/validator_set.json
rename to crates/ethcore/res/contracts/validator_set.json
diff --git a/ethcore/res/ethereum/tests b/crates/ethcore/res/json_tests
similarity index 100%
rename from ethcore/res/ethereum/tests
rename to crates/ethcore/res/json_tests
diff --git a/crates/ethcore/res/json_tests.json b/crates/ethcore/res/json_tests.json
new file mode 100644
index 000000000..7fa205984
--- /dev/null
+++ b/crates/ethcore/res/json_tests.json
@@ -0,0 +1,60 @@
+{
+    "chain": [
+        {
+            "path": "res/json_tests/BlockchainTests",
+            "skip" : []
+        },
+        {
+            "path": "res/json_tests/LegacyTests/Constantinople/BlockchainTests",
+            "skip" : []
+        }
+    ],
+    "state": [
+        {
+            "path": "res/json_tests/GeneralStateTests",
+            "skip" : []
+
+        },
+        {
+            "path": "res/json_tests/LegacyTests/Constantinople/GeneralStateTests",
+            "skip" : []
+
+        }
+    ],
+    "difficulty": [
+        {
+            "path": [
+                "res/json_tests/BasicTests/difficulty.json",
+                "res/json_tests/BasicTests/difficultyMainNetwork.json"
+            ],
+            "chainspec": "Foundation"
+        }
+    ],
+    "executive": [
+        {
+            "path": "res/json_tests/VMTests"
+        }
+    ],
+    "transaction": [
+        {
+            "path": "res/json_tests/TransactionTests"
+        }
+    ],
+    "trie": [
+        {
+            "path": [
+                "res/json_tests/TrieTests/trietest.json",
+                "res/json_tests/TrieTests/trieanyorder.json"
+            ],
+            "triespec": "Generic"
+        },
+        {
+            "path": [
+                "res/json_tests/TrieTests/hex_encoded_securetrie_test.json",
+                "res/json_tests/TrieTests/trietest_secureTrie.json",
+                "res/json_tests/TrieTests/trieanyorder_secureTrie.json"
+            ],
+            "triespec": "Secure"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/ethcore/service/Cargo.toml b/crates/ethcore/service/Cargo.toml
similarity index 88%
rename from ethcore/service/Cargo.toml
rename to crates/ethcore/service/Cargo.toml
index 68c73b4f9..cd0f9e112 100644
--- a/ethcore/service/Cargo.toml
+++ b/crates/ethcore/service/Cargo.toml
@@ -9,7 +9,7 @@ ansi_term = "0.10"
 error-chain = { version = "0.12", default-features = false }
 ethcore = { path = ".." }
 ethcore-blockchain = { path = "../blockchain" }
-ethcore-io = { path = "../../util/io" }
+ethcore-io = { path = "../../runtime/io" }
 ethcore-sync = { path = "../sync" }
 ethereum-types = "0.4"
 kvdb = "0.1"
@@ -17,7 +17,7 @@ log = "0.4"
 trace-time = "0.1"
 
 [dev-dependencies]
-ethcore-db = { path = "../db" }
+ethcore-db = { path = "../../db/db" }
 ethcore = { path = "..", features = ["test-helpers"] }
 tempdir = "0.3"
 kvdb-rocksdb = "0.1.3"
diff --git a/ethcore/service/src/error.rs b/crates/ethcore/service/src/error.rs
similarity index 100%
rename from ethcore/service/src/error.rs
rename to crates/ethcore/service/src/error.rs
diff --git a/ethcore/service/src/lib.rs b/crates/ethcore/service/src/lib.rs
similarity index 100%
rename from ethcore/service/src/lib.rs
rename to crates/ethcore/service/src/lib.rs
diff --git a/ethcore/service/src/service.rs b/crates/ethcore/service/src/service.rs
similarity index 100%
rename from ethcore/service/src/service.rs
rename to crates/ethcore/service/src/service.rs
diff --git a/ethcore/service/src/stop_guard.rs b/crates/ethcore/service/src/stop_guard.rs
similarity index 100%
rename from ethcore/service/src/stop_guard.rs
rename to crates/ethcore/service/src/stop_guard.rs
diff --git a/ethcore/src/account_db.rs b/crates/ethcore/src/account_db.rs
similarity index 100%
rename from ethcore/src/account_db.rs
rename to crates/ethcore/src/account_db.rs
diff --git a/ethcore/src/block.rs b/crates/ethcore/src/block.rs
similarity index 100%
rename from ethcore/src/block.rs
rename to crates/ethcore/src/block.rs
diff --git a/ethcore/src/client/ancient_import.rs b/crates/ethcore/src/client/ancient_import.rs
similarity index 100%
rename from ethcore/src/client/ancient_import.rs
rename to crates/ethcore/src/client/ancient_import.rs
diff --git a/ethcore/src/client/bad_blocks.rs b/crates/ethcore/src/client/bad_blocks.rs
similarity index 100%
rename from ethcore/src/client/bad_blocks.rs
rename to crates/ethcore/src/client/bad_blocks.rs
diff --git a/ethcore/src/client/chain_notify.rs b/crates/ethcore/src/client/chain_notify.rs
similarity index 100%
rename from
ethcore/src/client/chain_notify.rs rename to crates/ethcore/src/client/chain_notify.rs diff --git a/ethcore/src/client/client.rs b/crates/ethcore/src/client/client.rs similarity index 100% rename from ethcore/src/client/client.rs rename to crates/ethcore/src/client/client.rs diff --git a/ethcore/src/client/config.rs b/crates/ethcore/src/client/config.rs similarity index 100% rename from ethcore/src/client/config.rs rename to crates/ethcore/src/client/config.rs diff --git a/ethcore/src/client/evm_test_client.rs b/crates/ethcore/src/client/evm_test_client.rs similarity index 100% rename from ethcore/src/client/evm_test_client.rs rename to crates/ethcore/src/client/evm_test_client.rs diff --git a/ethcore/src/client/io_message.rs b/crates/ethcore/src/client/io_message.rs similarity index 100% rename from ethcore/src/client/io_message.rs rename to crates/ethcore/src/client/io_message.rs diff --git a/ethcore/src/client/mod.rs b/crates/ethcore/src/client/mod.rs similarity index 100% rename from ethcore/src/client/mod.rs rename to crates/ethcore/src/client/mod.rs diff --git a/ethcore/src/client/test_client.rs b/crates/ethcore/src/client/test_client.rs similarity index 100% rename from ethcore/src/client/test_client.rs rename to crates/ethcore/src/client/test_client.rs diff --git a/ethcore/src/client/trace.rs b/crates/ethcore/src/client/trace.rs similarity index 100% rename from ethcore/src/client/trace.rs rename to crates/ethcore/src/client/trace.rs diff --git a/ethcore/src/client/traits.rs b/crates/ethcore/src/client/traits.rs similarity index 100% rename from ethcore/src/client/traits.rs rename to crates/ethcore/src/client/traits.rs diff --git a/ethcore/src/engines/authority_round/finality.rs b/crates/ethcore/src/engines/authority_round/finality.rs similarity index 100% rename from ethcore/src/engines/authority_round/finality.rs rename to crates/ethcore/src/engines/authority_round/finality.rs diff --git a/ethcore/src/engines/authority_round/mod.rs b/crates/ethcore/src/engines/authority_round/mod.rs similarity index 100% rename from ethcore/src/engines/authority_round/mod.rs rename to crates/ethcore/src/engines/authority_round/mod.rs diff --git a/ethcore/src/engines/basic_authority.rs b/crates/ethcore/src/engines/basic_authority.rs similarity index 99% rename from ethcore/src/engines/basic_authority.rs rename to crates/ethcore/src/engines/basic_authority.rs index 51f1a2bbe..8164e65a6 100644 --- a/ethcore/src/engines/basic_authority.rs +++ b/crates/ethcore/src/engines/basic_authority.rs @@ -233,7 +233,7 @@ mod tests { /// Create a new test chain spec with `BasicAuthority` consensus engine. 
fn new_test_authority() -> Spec { - let bytes: &[u8] = include_bytes!("../../res/basic_authority.json"); + let bytes: &[u8] = include_bytes!("../../res/chainspec/test/basic_authority.json"); let tempdir = TempDir::new("").unwrap(); Spec::load(&tempdir.path(), bytes).expect("invalid chain spec") } diff --git a/ethcore/src/engines/block_reward.rs b/crates/ethcore/src/engines/block_reward.rs similarity index 100% rename from ethcore/src/engines/block_reward.rs rename to crates/ethcore/src/engines/block_reward.rs diff --git a/ethcore/src/engines/clique/block_state.rs b/crates/ethcore/src/engines/clique/block_state.rs similarity index 100% rename from ethcore/src/engines/clique/block_state.rs rename to crates/ethcore/src/engines/clique/block_state.rs diff --git a/ethcore/src/engines/clique/mod.rs b/crates/ethcore/src/engines/clique/mod.rs similarity index 100% rename from ethcore/src/engines/clique/mod.rs rename to crates/ethcore/src/engines/clique/mod.rs diff --git a/ethcore/src/engines/clique/params.rs b/crates/ethcore/src/engines/clique/params.rs similarity index 100% rename from ethcore/src/engines/clique/params.rs rename to crates/ethcore/src/engines/clique/params.rs diff --git a/ethcore/src/engines/clique/tests.rs b/crates/ethcore/src/engines/clique/tests.rs similarity index 100% rename from ethcore/src/engines/clique/tests.rs rename to crates/ethcore/src/engines/clique/tests.rs diff --git a/ethcore/src/engines/clique/util.rs b/crates/ethcore/src/engines/clique/util.rs similarity index 100% rename from ethcore/src/engines/clique/util.rs rename to crates/ethcore/src/engines/clique/util.rs diff --git a/ethcore/src/engines/instant_seal.rs b/crates/ethcore/src/engines/instant_seal.rs similarity index 100% rename from ethcore/src/engines/instant_seal.rs rename to crates/ethcore/src/engines/instant_seal.rs diff --git a/ethcore/src/engines/mod.rs b/crates/ethcore/src/engines/mod.rs similarity index 100% rename from ethcore/src/engines/mod.rs rename to crates/ethcore/src/engines/mod.rs diff --git a/ethcore/src/engines/null_engine.rs b/crates/ethcore/src/engines/null_engine.rs similarity index 100% rename from ethcore/src/engines/null_engine.rs rename to crates/ethcore/src/engines/null_engine.rs diff --git a/ethcore/src/engines/signer.rs b/crates/ethcore/src/engines/signer.rs similarity index 100% rename from ethcore/src/engines/signer.rs rename to crates/ethcore/src/engines/signer.rs diff --git a/ethcore/src/engines/validator_set/contract.rs b/crates/ethcore/src/engines/validator_set/contract.rs similarity index 100% rename from ethcore/src/engines/validator_set/contract.rs rename to crates/ethcore/src/engines/validator_set/contract.rs diff --git a/ethcore/src/engines/validator_set/mod.rs b/crates/ethcore/src/engines/validator_set/mod.rs similarity index 100% rename from ethcore/src/engines/validator_set/mod.rs rename to crates/ethcore/src/engines/validator_set/mod.rs diff --git a/ethcore/src/engines/validator_set/multi.rs b/crates/ethcore/src/engines/validator_set/multi.rs similarity index 100% rename from ethcore/src/engines/validator_set/multi.rs rename to crates/ethcore/src/engines/validator_set/multi.rs diff --git a/ethcore/src/engines/validator_set/safe_contract.rs b/crates/ethcore/src/engines/validator_set/safe_contract.rs similarity index 100% rename from ethcore/src/engines/validator_set/safe_contract.rs rename to crates/ethcore/src/engines/validator_set/safe_contract.rs diff --git a/ethcore/src/engines/validator_set/simple_list.rs 
b/crates/ethcore/src/engines/validator_set/simple_list.rs similarity index 100% rename from ethcore/src/engines/validator_set/simple_list.rs rename to crates/ethcore/src/engines/validator_set/simple_list.rs diff --git a/ethcore/src/engines/validator_set/test.rs b/crates/ethcore/src/engines/validator_set/test.rs similarity index 100% rename from ethcore/src/engines/validator_set/test.rs rename to crates/ethcore/src/engines/validator_set/test.rs diff --git a/ethcore/src/error.rs b/crates/ethcore/src/error.rs similarity index 100% rename from ethcore/src/error.rs rename to crates/ethcore/src/error.rs diff --git a/ethcore/src/ethereum/denominations.rs b/crates/ethcore/src/ethereum/denominations.rs similarity index 100% rename from ethcore/src/ethereum/denominations.rs rename to crates/ethcore/src/ethereum/denominations.rs diff --git a/ethcore/src/ethereum/ethash.rs b/crates/ethcore/src/ethereum/ethash.rs similarity index 100% rename from ethcore/src/ethereum/ethash.rs rename to crates/ethcore/src/ethereum/ethash.rs diff --git a/ethcore/src/ethereum/mod.rs b/crates/ethcore/src/ethereum/mod.rs similarity index 78% rename from ethcore/src/ethereum/mod.rs rename to crates/ethcore/src/ethereum/mod.rs index 736d0d5ae..70ce87455 100644 --- a/ethcore/src/ethereum/mod.rs +++ b/crates/ethcore/src/ethereum/mod.rs @@ -46,7 +46,7 @@ fn load_machine(b: &[u8]) -> EthereumMachine { pub fn new_foundation<'a, T: Into>>(params: T) -> Spec { load( params.into(), - include_bytes!("../../res/ethereum/foundation.json"), + include_bytes!("../../res/chainspec/foundation.json"), ) } @@ -54,7 +54,7 @@ pub fn new_foundation<'a, T: Into>>(params: T) -> Spec { pub fn new_poanet<'a, T: Into>>(params: T) -> Spec { load( params.into(), - include_bytes!("../../res/ethereum/poacore.json"), + include_bytes!("../../res/chainspec/poacore.json"), ) } @@ -62,7 +62,7 @@ pub fn new_poanet<'a, T: Into>>(params: T) -> Spec { pub fn new_xdai<'a, T: Into>>(params: T) -> Spec { load( params.into(), - include_bytes!("../../res/ethereum/xdai.json"), + include_bytes!("../../res/chainspec/xdai.json"), ) } @@ -70,13 +70,13 @@ pub fn new_xdai<'a, T: Into>>(params: T) -> Spec { pub fn new_volta<'a, T: Into>>(params: T) -> Spec { load( params.into(), - include_bytes!("../../res/ethereum/volta.json"), + include_bytes!("../../res/chainspec/volta.json"), ) } /// Create a new EWC mainnet chain spec. pub fn new_ewc<'a, T: Into>>(params: T) -> Spec { - load(params.into(), include_bytes!("../../res/ethereum/ewc.json")) + load(params.into(), include_bytes!("../../res/chainspec/ewc.json")) } /// Create a new Musicoin mainnet chain spec. @@ -85,7 +85,7 @@ pub fn new_musicoin<'a, T: Into>>(params: T) -> Spec { // https://gist.github.com/andresilva/6f2afaf9486732a0797f4bdeae018ee9 load( params.into(), - include_bytes!("../../res/ethereum/musicoin.json"), + include_bytes!("../../res/chainspec/musicoin.json"), ) } @@ -93,20 +93,20 @@ pub fn new_musicoin<'a, T: Into>>(params: T) -> Spec { pub fn new_ellaism<'a, T: Into>>(params: T) -> Spec { load( params.into(), - include_bytes!("../../res/ethereum/ellaism.json"), + include_bytes!("../../res/chainspec/ellaism.json"), ) } /// Create a new MIX mainnet chain spec. 
pub fn new_mix<'a, T: Into>>(params: T) -> Spec { - load(params.into(), include_bytes!("../../res/ethereum/mix.json")) + load(params.into(), include_bytes!("../../res/chainspec/mix.json")) } /// Create a new Callisto chain spec pub fn new_callisto<'a, T: Into>>(params: T) -> Spec { load( params.into(), - include_bytes!("../../res/ethereum/callisto.json"), + include_bytes!("../../res/chainspec/callisto.json"), ) } @@ -114,7 +114,7 @@ pub fn new_callisto<'a, T: Into>>(params: T) -> Spec { pub fn new_morden<'a, T: Into>>(params: T) -> Spec { load( params.into(), - include_bytes!("../../res/ethereum/morden.json"), + include_bytes!("../../res/chainspec/morden.json"), ) } @@ -122,7 +122,7 @@ pub fn new_morden<'a, T: Into>>(params: T) -> Spec { pub fn new_ropsten<'a, T: Into>>(params: T) -> Spec { load( params.into(), - include_bytes!("../../res/ethereum/ropsten.json"), + include_bytes!("../../res/chainspec/ropsten.json"), ) } @@ -130,7 +130,7 @@ pub fn new_ropsten<'a, T: Into>>(params: T) -> Spec { pub fn new_kovan<'a, T: Into>>(params: T) -> Spec { load( params.into(), - include_bytes!("../../res/ethereum/kovan.json"), + include_bytes!("../../res/chainspec/kovan.json"), ) } @@ -138,7 +138,7 @@ pub fn new_kovan<'a, T: Into>>(params: T) -> Spec { pub fn new_rinkeby<'a, T: Into>>(params: T) -> Spec { load( params.into(), - include_bytes!("../../res/ethereum/rinkeby.json"), + include_bytes!("../../res/chainspec/rinkeby.json"), ) } @@ -146,7 +146,7 @@ pub fn new_rinkeby<'a, T: Into>>(params: T) -> Spec { pub fn new_goerli<'a, T: Into>>(params: T) -> Spec { load( params.into(), - include_bytes!("../../res/ethereum/goerli.json"), + include_bytes!("../../res/chainspec/goerli.json"), ) } @@ -154,7 +154,7 @@ pub fn new_goerli<'a, T: Into>>(params: T) -> Spec { pub fn new_sokol<'a, T: Into>>(params: T) -> Spec { load( params.into(), - include_bytes!("../../res/ethereum/poasokol.json"), + include_bytes!("../../res/chainspec/poasokol.json"), ) } @@ -164,38 +164,38 @@ pub fn new_sokol<'a, T: Into>>(params: T) -> Spec { pub fn new_frontier_test() -> Spec { load( None, - include_bytes!("../../res/ethereum/frontier_test.json"), + include_bytes!("../../res/chainspec/test/frontier_test.json"), ) } /// Create a new Ropsten chain spec. pub fn new_ropsten_test() -> Spec { - load(None, include_bytes!("../../res/ethereum/ropsten.json")) + load(None, include_bytes!("../../res/chainspec/ropsten.json")) } /// Create a new Foundation Homestead-era chain spec as though it never changed from Frontier. pub fn new_homestead_test() -> Spec { load( None, - include_bytes!("../../res/ethereum/homestead_test.json"), + include_bytes!("../../res/chainspec/test/homestead_test.json"), ) } /// Create a new Foundation Homestead-EIP150-era chain spec as though it never changed from Homestead/Frontier. pub fn new_eip150_test() -> Spec { - load(None, include_bytes!("../../res/ethereum/eip150_test.json")) + load(None, include_bytes!("../../res/chainspec/test/eip150_test.json")) } /// Create a new Foundation Homestead-EIP161-era chain spec as though it never changed from Homestead/Frontier. pub fn new_eip161_test() -> Spec { - load(None, include_bytes!("../../res/ethereum/eip161_test.json")) + load(None, include_bytes!("../../res/chainspec/test/eip161_test.json")) } /// Create a new Foundation Frontier/Homestead/DAO chain spec with transition points at #5 and #8. 
pub fn new_transition_test() -> Spec { load( None, - include_bytes!("../../res/ethereum/transition_test.json"), + include_bytes!("../../res/chainspec/test/transition_test.json"), ) } @@ -203,7 +203,7 @@ pub fn new_transition_test() -> Spec { pub fn new_mainnet_like() -> Spec { load( None, - include_bytes!("../../res/ethereum/frontier_like_test.json"), + include_bytes!("../../res/chainspec/test/frontier_like_test.json"), ) } @@ -211,7 +211,7 @@ pub fn new_mainnet_like() -> Spec { pub fn new_byzantium_test() -> Spec { load( None, - include_bytes!("../../res/ethereum/byzantium_test.json"), + include_bytes!("../../res/chainspec/test/byzantium_test.json"), ) } @@ -219,7 +219,7 @@ pub fn new_byzantium_test() -> Spec { pub fn new_constantinople_test() -> Spec { load( None, - include_bytes!("../../res/ethereum/constantinople_test.json"), + include_bytes!("../../res/chainspec/test/constantinople_test.json"), ) } @@ -227,7 +227,7 @@ pub fn new_constantinople_test() -> Spec { pub fn new_constantinople_fix_test() -> Spec { load( None, - include_bytes!("../../res/ethereum/st_peters_test.json"), + include_bytes!("../../res/chainspec/test/st_peters_test.json"), ) } @@ -235,7 +235,7 @@ pub fn new_constantinople_fix_test() -> Spec { pub fn new_istanbul_test() -> Spec { load( None, - include_bytes!("../../res/ethereum/istanbul_test.json"), + include_bytes!("../../res/chainspec/test/istanbul_test.json"), ) } @@ -243,72 +243,72 @@ pub fn new_istanbul_test() -> Spec { pub fn new_byzantium_to_constantinoplefixat5_test() -> Spec { load( None, - include_bytes!("../../res/ethereum/byzantium_to_constantinoplefixat5_test.json"), + include_bytes!("../../res/chainspec/test/byzantium_to_constantinoplefixat5_test.json"), ) } /// Create a new Foundation Berlin era spec. pub fn new_berlin_test() -> Spec { - load(None, include_bytes!("../../res/ethereum/berlin_test.json")) + load(None, include_bytes!("../../res/chainspec/test/berlin_test.json")) } /// Create a new YOLO spec pub fn new_yolo3_test() -> Spec { - load(None, include_bytes!("../../res/ethereum/yolo3_test.json")) + load(None, include_bytes!("../../res/chainspec/test/yolo3_test.json")) } /// Create a new Musicoin-MCIP3-era spec. pub fn new_mcip3_test() -> Spec { - load(None, include_bytes!("../../res/ethereum/mcip3_test.json")) + load(None, include_bytes!("../../res/chainspec/test/mcip3_test.json")) } // For tests /// Create a new Foundation Frontier-era chain spec as though it never changes to Homestead. pub fn new_frontier_test_machine() -> EthereumMachine { - load_machine(include_bytes!("../../res/ethereum/frontier_test.json")) + load_machine(include_bytes!("../../res/chainspec/test/frontier_test.json")) } /// Create a new Foundation Homestead-era chain spec as though it never changed from Frontier. pub fn new_homestead_test_machine() -> EthereumMachine { - load_machine(include_bytes!("../../res/ethereum/homestead_test.json")) + load_machine(include_bytes!("../../res/chainspec/test/homestead_test.json")) } /// Create a new Foundation Homestead-EIP210-era chain spec as though it never changed from Homestead/Frontier. pub fn new_eip210_test_machine() -> EthereumMachine { - load_machine(include_bytes!("../../res/ethereum/eip210_test.json")) + load_machine(include_bytes!("../../res/chainspec/test/eip210_test.json")) } /// Create a new Foundation Byzantium era spec. 
pub fn new_byzantium_test_machine() -> EthereumMachine { - load_machine(include_bytes!("../../res/ethereum/byzantium_test.json")) + load_machine(include_bytes!("../../res/chainspec/test/byzantium_test.json")) } /// Create a new Foundation Constantinople era spec. pub fn new_constantinople_test_machine() -> EthereumMachine { load_machine(include_bytes!( - "../../res/ethereum/constantinople_test.json" + "../../res/chainspec/test/constantinople_test.json" )) } /// Create a new Foundation St. Peter's (Contantinople Fix) era spec. pub fn new_constantinople_fix_test_machine() -> EthereumMachine { - load_machine(include_bytes!("../../res/ethereum/st_peters_test.json")) + load_machine(include_bytes!("../../res/chainspec/test/st_peters_test.json")) } /// Create a new Foundation Istanbul era spec. pub fn new_istanbul_test_machine() -> EthereumMachine { - load_machine(include_bytes!("../../res/ethereum/istanbul_test.json")) + load_machine(include_bytes!("../../res/chainspec/test/istanbul_test.json")) } /// Create a new Musicoin-MCIP3-era spec. pub fn new_mcip3_test_machine() -> EthereumMachine { - load_machine(include_bytes!("../../res/ethereum/mcip3_test.json")) + load_machine(include_bytes!("../../res/chainspec/test/mcip3_test.json")) } /// Create new Kovan spec with wasm activated at certain block pub fn new_kovan_wasm_test_machine() -> EthereumMachine { - load_machine(include_bytes!("../../res/ethereum/kovan_wasm_test.json")) + load_machine(include_bytes!("../../res/chainspec/test/kovan_wasm_test.json")) } #[cfg(test)] diff --git a/ethcore/src/executed.rs b/crates/ethcore/src/executed.rs similarity index 100% rename from ethcore/src/executed.rs rename to crates/ethcore/src/executed.rs diff --git a/ethcore/src/executive.rs b/crates/ethcore/src/executive.rs similarity index 100% rename from ethcore/src/executive.rs rename to crates/ethcore/src/executive.rs diff --git a/ethcore/src/externalities.rs b/crates/ethcore/src/externalities.rs similarity index 100% rename from ethcore/src/externalities.rs rename to crates/ethcore/src/externalities.rs diff --git a/ethcore/src/factory.rs b/crates/ethcore/src/factory.rs similarity index 100% rename from ethcore/src/factory.rs rename to crates/ethcore/src/factory.rs diff --git a/ethcore/src/json_tests/chain.rs b/crates/ethcore/src/json_tests/chain.rs similarity index 100% rename from ethcore/src/json_tests/chain.rs rename to crates/ethcore/src/json_tests/chain.rs diff --git a/ethcore/src/json_tests/difficulty.rs b/crates/ethcore/src/json_tests/difficulty.rs similarity index 100% rename from ethcore/src/json_tests/difficulty.rs rename to crates/ethcore/src/json_tests/difficulty.rs diff --git a/ethcore/src/json_tests/executive.rs b/crates/ethcore/src/json_tests/executive.rs similarity index 100% rename from ethcore/src/json_tests/executive.rs rename to crates/ethcore/src/json_tests/executive.rs diff --git a/ethcore/src/json_tests/mod.rs b/crates/ethcore/src/json_tests/mod.rs similarity index 100% rename from ethcore/src/json_tests/mod.rs rename to crates/ethcore/src/json_tests/mod.rs diff --git a/ethcore/src/json_tests/runner.rs b/crates/ethcore/src/json_tests/runner.rs similarity index 99% rename from ethcore/src/json_tests/runner.rs rename to crates/ethcore/src/json_tests/runner.rs index 4b22146fa..2b8fa1f82 100644 --- a/ethcore/src/json_tests/runner.rs +++ b/crates/ethcore/src/json_tests/runner.rs @@ -232,7 +232,7 @@ impl TestRunner { #[test] fn ethereum_json_tests() { - let content = std::fs::read("res/ethereum/runner/full.json") + let content = 
std::fs::read("res/json_tests.json") .expect("cannot open ethereum tests spec file"); let runner = TestRunner::load(content.as_slice()).expect("cannot load ethereum tests spec file"); diff --git a/ethcore/src/json_tests/skip.rs b/crates/ethcore/src/json_tests/skip.rs similarity index 100% rename from ethcore/src/json_tests/skip.rs rename to crates/ethcore/src/json_tests/skip.rs diff --git a/ethcore/src/json_tests/state.rs b/crates/ethcore/src/json_tests/state.rs similarity index 100% rename from ethcore/src/json_tests/state.rs rename to crates/ethcore/src/json_tests/state.rs diff --git a/ethcore/src/json_tests/test_common.rs b/crates/ethcore/src/json_tests/test_common.rs similarity index 100% rename from ethcore/src/json_tests/test_common.rs rename to crates/ethcore/src/json_tests/test_common.rs diff --git a/ethcore/src/json_tests/transaction.rs b/crates/ethcore/src/json_tests/transaction.rs similarity index 100% rename from ethcore/src/json_tests/transaction.rs rename to crates/ethcore/src/json_tests/transaction.rs diff --git a/ethcore/src/json_tests/trie.rs b/crates/ethcore/src/json_tests/trie.rs similarity index 100% rename from ethcore/src/json_tests/trie.rs rename to crates/ethcore/src/json_tests/trie.rs diff --git a/ethcore/src/lib.rs b/crates/ethcore/src/lib.rs similarity index 100% rename from ethcore/src/lib.rs rename to crates/ethcore/src/lib.rs diff --git a/ethcore/src/machine/impls.rs b/crates/ethcore/src/machine/impls.rs similarity index 100% rename from ethcore/src/machine/impls.rs rename to crates/ethcore/src/machine/impls.rs diff --git a/ethcore/src/machine/mod.rs b/crates/ethcore/src/machine/mod.rs similarity index 100% rename from ethcore/src/machine/mod.rs rename to crates/ethcore/src/machine/mod.rs diff --git a/ethcore/src/machine/traits.rs b/crates/ethcore/src/machine/traits.rs similarity index 100% rename from ethcore/src/machine/traits.rs rename to crates/ethcore/src/machine/traits.rs diff --git a/ethcore/src/miner/miner.rs b/crates/ethcore/src/miner/miner.rs similarity index 100% rename from ethcore/src/miner/miner.rs rename to crates/ethcore/src/miner/miner.rs diff --git a/ethcore/src/miner/mod.rs b/crates/ethcore/src/miner/mod.rs similarity index 100% rename from ethcore/src/miner/mod.rs rename to crates/ethcore/src/miner/mod.rs diff --git a/ethcore/src/miner/pool_client.rs b/crates/ethcore/src/miner/pool_client.rs similarity index 100% rename from ethcore/src/miner/pool_client.rs rename to crates/ethcore/src/miner/pool_client.rs diff --git a/ethcore/src/miner/stratum.rs b/crates/ethcore/src/miner/stratum.rs similarity index 100% rename from ethcore/src/miner/stratum.rs rename to crates/ethcore/src/miner/stratum.rs diff --git a/ethcore/src/pod_account.rs b/crates/ethcore/src/pod_account.rs similarity index 100% rename from ethcore/src/pod_account.rs rename to crates/ethcore/src/pod_account.rs diff --git a/ethcore/src/pod_state.rs b/crates/ethcore/src/pod_state.rs similarity index 100% rename from ethcore/src/pod_state.rs rename to crates/ethcore/src/pod_state.rs diff --git a/ethcore/src/snapshot/account.rs b/crates/ethcore/src/snapshot/account.rs similarity index 100% rename from ethcore/src/snapshot/account.rs rename to crates/ethcore/src/snapshot/account.rs diff --git a/ethcore/src/snapshot/block.rs b/crates/ethcore/src/snapshot/block.rs similarity index 100% rename from ethcore/src/snapshot/block.rs rename to crates/ethcore/src/snapshot/block.rs diff --git a/ethcore/src/snapshot/consensus/authority.rs b/crates/ethcore/src/snapshot/consensus/authority.rs 
similarity index 100% rename from ethcore/src/snapshot/consensus/authority.rs rename to crates/ethcore/src/snapshot/consensus/authority.rs diff --git a/ethcore/src/snapshot/consensus/mod.rs b/crates/ethcore/src/snapshot/consensus/mod.rs similarity index 100% rename from ethcore/src/snapshot/consensus/mod.rs rename to crates/ethcore/src/snapshot/consensus/mod.rs diff --git a/ethcore/src/snapshot/consensus/work.rs b/crates/ethcore/src/snapshot/consensus/work.rs similarity index 100% rename from ethcore/src/snapshot/consensus/work.rs rename to crates/ethcore/src/snapshot/consensus/work.rs diff --git a/ethcore/src/snapshot/error.rs b/crates/ethcore/src/snapshot/error.rs similarity index 100% rename from ethcore/src/snapshot/error.rs rename to crates/ethcore/src/snapshot/error.rs diff --git a/ethcore/src/snapshot/io.rs b/crates/ethcore/src/snapshot/io.rs similarity index 100% rename from ethcore/src/snapshot/io.rs rename to crates/ethcore/src/snapshot/io.rs diff --git a/ethcore/src/snapshot/mod.rs b/crates/ethcore/src/snapshot/mod.rs similarity index 100% rename from ethcore/src/snapshot/mod.rs rename to crates/ethcore/src/snapshot/mod.rs diff --git a/ethcore/src/snapshot/service.rs b/crates/ethcore/src/snapshot/service.rs similarity index 100% rename from ethcore/src/snapshot/service.rs rename to crates/ethcore/src/snapshot/service.rs diff --git a/ethcore/src/snapshot/tests/helpers.rs b/crates/ethcore/src/snapshot/tests/helpers.rs similarity index 100% rename from ethcore/src/snapshot/tests/helpers.rs rename to crates/ethcore/src/snapshot/tests/helpers.rs diff --git a/ethcore/src/snapshot/tests/mod.rs b/crates/ethcore/src/snapshot/tests/mod.rs similarity index 100% rename from ethcore/src/snapshot/tests/mod.rs rename to crates/ethcore/src/snapshot/tests/mod.rs diff --git a/ethcore/src/snapshot/tests/proof_of_authority.rs b/crates/ethcore/src/snapshot/tests/proof_of_authority.rs similarity index 100% rename from ethcore/src/snapshot/tests/proof_of_authority.rs rename to crates/ethcore/src/snapshot/tests/proof_of_authority.rs diff --git a/ethcore/src/snapshot/tests/proof_of_work.rs b/crates/ethcore/src/snapshot/tests/proof_of_work.rs similarity index 100% rename from ethcore/src/snapshot/tests/proof_of_work.rs rename to crates/ethcore/src/snapshot/tests/proof_of_work.rs diff --git a/ethcore/src/snapshot/tests/service.rs b/crates/ethcore/src/snapshot/tests/service.rs similarity index 100% rename from ethcore/src/snapshot/tests/service.rs rename to crates/ethcore/src/snapshot/tests/service.rs diff --git a/ethcore/src/snapshot/tests/state.rs b/crates/ethcore/src/snapshot/tests/state.rs similarity index 100% rename from ethcore/src/snapshot/tests/state.rs rename to crates/ethcore/src/snapshot/tests/state.rs diff --git a/ethcore/src/snapshot/tests/test_validator_contract.json b/crates/ethcore/src/snapshot/tests/test_validator_contract.json similarity index 100% rename from ethcore/src/snapshot/tests/test_validator_contract.json rename to crates/ethcore/src/snapshot/tests/test_validator_contract.json diff --git a/ethcore/src/snapshot/traits.rs b/crates/ethcore/src/snapshot/traits.rs similarity index 100% rename from ethcore/src/snapshot/traits.rs rename to crates/ethcore/src/snapshot/traits.rs diff --git a/ethcore/src/snapshot/watcher.rs b/crates/ethcore/src/snapshot/watcher.rs similarity index 100% rename from ethcore/src/snapshot/watcher.rs rename to crates/ethcore/src/snapshot/watcher.rs diff --git a/ethcore/src/spec/genesis.rs b/crates/ethcore/src/spec/genesis.rs similarity index 100% rename from 
ethcore/src/spec/genesis.rs rename to crates/ethcore/src/spec/genesis.rs diff --git a/ethcore/src/spec/mod.rs b/crates/ethcore/src/spec/mod.rs similarity index 100% rename from ethcore/src/spec/mod.rs rename to crates/ethcore/src/spec/mod.rs diff --git a/ethcore/src/spec/seal.rs b/crates/ethcore/src/spec/seal.rs similarity index 100% rename from ethcore/src/spec/seal.rs rename to crates/ethcore/src/spec/seal.rs diff --git a/ethcore/src/spec/spec.rs b/crates/ethcore/src/spec/spec.rs similarity index 98% rename from ethcore/src/spec/spec.rs rename to crates/ethcore/src/spec/spec.rs index 615302511..0ae5835b9 100644 --- a/ethcore/src/spec/spec.rs +++ b/crates/ethcore/src/spec/spec.rs @@ -602,7 +602,7 @@ macro_rules! load_bundled { ($e:expr) => { Spec::load( &::std::env::temp_dir(), - include_bytes!(concat!("../../res/", $e, ".json")) as &[u8], + include_bytes!(concat!("../../res/chainspec/", $e, ".json")) as &[u8], ) .expect(concat!("Chain spec ", $e, " is invalid.")) }; @@ -611,7 +611,7 @@ macro_rules! load_bundled { #[cfg(any(test, feature = "test-helpers"))] macro_rules! load_machine_bundled { ($e:expr) => { - Spec::load_machine(include_bytes!(concat!("../../res/", $e, ".json")) as &[u8]) + Spec::load_machine(include_bytes!(concat!("../../res/chainspec/", $e, ".json")) as &[u8]) .expect(concat!("Chain spec ", $e, " is invalid.")) }; } @@ -1027,38 +1027,38 @@ impl Spec { /// NullEngine consensus. #[cfg(any(test, feature = "test-helpers"))] pub fn new_test() -> Spec { - load_bundled!("null_morden") + load_bundled!("test/null_morden") } /// Create the EthereumMachine corresponding to Spec::new_test. #[cfg(any(test, feature = "test-helpers"))] pub fn new_test_machine() -> EthereumMachine { - load_machine_bundled!("null_morden") + load_machine_bundled!("test/null_morden") } /// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a NullEngine consensus with applying reward on block close. #[cfg(any(test, feature = "test-helpers"))] pub fn new_test_with_reward() -> Spec { - load_bundled!("null_morden_with_reward") + load_bundled!("test/null_morden_with_reward") } /// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a NullEngine consensus with finality. #[cfg(any(test, feature = "test-helpers"))] pub fn new_test_with_finality() -> Spec { - load_bundled!("null_morden_with_finality") + load_bundled!("test/null_morden_with_finality") } /// Create a new Spec which is a NullEngine consensus with a premine of address whose /// secret is keccak(''). #[cfg(any(test, feature = "test-helpers"))] pub fn new_null() -> Spec { - load_bundled!("null") + load_bundled!("test/null") } /// Create a new Spec which constructs a contract at address 5 with storage at 0 equal to 1. #[cfg(any(test, feature = "test-helpers"))] pub fn new_test_constructor() -> Spec { - load_bundled!("constructor") + load_bundled!("test/constructor") } /// Create a new Spec with AuthorityRound consensus which does internal sealing (not @@ -1066,7 +1066,7 @@ impl Spec { /// Accounts with secrets keccak("0") and keccak("1") are the validators. #[cfg(any(test, feature = "test-helpers"))] pub fn new_test_round() -> Self { - load_bundled!("authority_round") + load_bundled!("test/authority_round") } /// Create a new Spec with AuthorityRound consensus which does internal sealing (not @@ -1074,7 +1074,7 @@ impl Spec { /// Accounts with secrets keccak("0") and keccak("1") are the validators. 
#[cfg(any(test, feature = "test-helpers"))] pub fn new_test_round_empty_steps() -> Self { - load_bundled!("authority_round_empty_steps") + load_bundled!("test/authority_round_empty_steps") } /// Create a new Spec with AuthorityRound consensus (with empty steps) using a block reward @@ -1082,7 +1082,7 @@ impl Spec { /// https://github.com/openethereum/block-reward/blob/daf7d44383b6cdb11cb6b953b018648e2b027cfb/contracts/ExampleBlockReward.sol #[cfg(any(test, feature = "test-helpers"))] pub fn new_test_round_block_reward_contract() -> Self { - load_bundled!("authority_round_block_reward_contract") + load_bundled!("test/authority_round_block_reward_contract") } /// TestList.sol used in both specs: https://github.com/paritytech/contracts/pull/30/files (link not valid) @@ -1095,7 +1095,7 @@ impl Spec { /// "0x4d238c8e00000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1". #[cfg(any(test, feature = "test-helpers"))] pub fn new_validator_safe_contract() -> Self { - load_bundled!("validator_safe_contract") + load_bundled!("test/validator_safe_contract") } /// The same as the `safeContract`, but allows reporting and uses AuthorityRound. @@ -1103,7 +1103,7 @@ impl Spec { /// Validator can be removed with `reportMalicious`. #[cfg(any(test, feature = "test-helpers"))] pub fn new_validator_contract() -> Self { - load_bundled!("validator_contract") + load_bundled!("test/validator_contract") } /// Create a new Spec with BasicAuthority which uses multiple validator sets changing with @@ -1112,7 +1112,7 @@ impl Spec { /// onwards. #[cfg(any(test, feature = "test-helpers"))] pub fn new_validator_multi() -> Self { - load_bundled!("validator_multi") + load_bundled!("test/validator_multi") } } diff --git a/ethcore/src/state/account.rs b/crates/ethcore/src/state/account.rs similarity index 100% rename from ethcore/src/state/account.rs rename to crates/ethcore/src/state/account.rs diff --git a/ethcore/src/state/backend.rs b/crates/ethcore/src/state/backend.rs similarity index 100% rename from ethcore/src/state/backend.rs rename to crates/ethcore/src/state/backend.rs diff --git a/ethcore/src/state/mod.rs b/crates/ethcore/src/state/mod.rs similarity index 100% rename from ethcore/src/state/mod.rs rename to crates/ethcore/src/state/mod.rs diff --git a/ethcore/src/state/substate.rs b/crates/ethcore/src/state/substate.rs similarity index 100% rename from ethcore/src/state/substate.rs rename to crates/ethcore/src/state/substate.rs diff --git a/ethcore/src/state_db.rs b/crates/ethcore/src/state_db.rs similarity index 100% rename from ethcore/src/state_db.rs rename to crates/ethcore/src/state_db.rs diff --git a/ethcore/src/test_helpers.rs b/crates/ethcore/src/test_helpers.rs similarity index 100% rename from ethcore/src/test_helpers.rs rename to crates/ethcore/src/test_helpers.rs diff --git a/ethcore/src/tests/blockchain.rs b/crates/ethcore/src/tests/blockchain.rs similarity index 100% rename from ethcore/src/tests/blockchain.rs rename to crates/ethcore/src/tests/blockchain.rs diff --git a/ethcore/src/tests/client.rs b/crates/ethcore/src/tests/client.rs similarity index 100% rename from ethcore/src/tests/client.rs rename to crates/ethcore/src/tests/client.rs diff --git a/ethcore/src/tests/evm.rs b/crates/ethcore/src/tests/evm.rs similarity index 100% rename from ethcore/src/tests/evm.rs rename to crates/ethcore/src/tests/evm.rs diff --git a/ethcore/src/tests/mod.rs b/crates/ethcore/src/tests/mod.rs similarity index 100% rename from ethcore/src/tests/mod.rs rename to crates/ethcore/src/tests/mod.rs diff --git 
a/ethcore/src/tests/trace.rs b/crates/ethcore/src/tests/trace.rs similarity index 100% rename from ethcore/src/tests/trace.rs rename to crates/ethcore/src/tests/trace.rs diff --git a/ethcore/src/trace/config.rs b/crates/ethcore/src/trace/config.rs similarity index 100% rename from ethcore/src/trace/config.rs rename to crates/ethcore/src/trace/config.rs diff --git a/ethcore/src/trace/db.rs b/crates/ethcore/src/trace/db.rs similarity index 100% rename from ethcore/src/trace/db.rs rename to crates/ethcore/src/trace/db.rs diff --git a/ethcore/src/trace/executive_tracer.rs b/crates/ethcore/src/trace/executive_tracer.rs similarity index 100% rename from ethcore/src/trace/executive_tracer.rs rename to crates/ethcore/src/trace/executive_tracer.rs diff --git a/ethcore/src/trace/import.rs b/crates/ethcore/src/trace/import.rs similarity index 100% rename from ethcore/src/trace/import.rs rename to crates/ethcore/src/trace/import.rs diff --git a/ethcore/src/trace/mod.rs b/crates/ethcore/src/trace/mod.rs similarity index 100% rename from ethcore/src/trace/mod.rs rename to crates/ethcore/src/trace/mod.rs diff --git a/ethcore/src/trace/noop_tracer.rs b/crates/ethcore/src/trace/noop_tracer.rs similarity index 100% rename from ethcore/src/trace/noop_tracer.rs rename to crates/ethcore/src/trace/noop_tracer.rs diff --git a/ethcore/src/trace/types/error.rs b/crates/ethcore/src/trace/types/error.rs similarity index 100% rename from ethcore/src/trace/types/error.rs rename to crates/ethcore/src/trace/types/error.rs diff --git a/ethcore/src/trace/types/filter.rs b/crates/ethcore/src/trace/types/filter.rs similarity index 100% rename from ethcore/src/trace/types/filter.rs rename to crates/ethcore/src/trace/types/filter.rs diff --git a/ethcore/src/trace/types/flat.rs b/crates/ethcore/src/trace/types/flat.rs similarity index 100% rename from ethcore/src/trace/types/flat.rs rename to crates/ethcore/src/trace/types/flat.rs diff --git a/ethcore/src/trace/types/localized.rs b/crates/ethcore/src/trace/types/localized.rs similarity index 100% rename from ethcore/src/trace/types/localized.rs rename to crates/ethcore/src/trace/types/localized.rs diff --git a/ethcore/src/trace/types/mod.rs b/crates/ethcore/src/trace/types/mod.rs similarity index 100% rename from ethcore/src/trace/types/mod.rs rename to crates/ethcore/src/trace/types/mod.rs diff --git a/ethcore/src/trace/types/trace.rs b/crates/ethcore/src/trace/types/trace.rs similarity index 100% rename from ethcore/src/trace/types/trace.rs rename to crates/ethcore/src/trace/types/trace.rs diff --git a/ethcore/src/transaction_ext.rs b/crates/ethcore/src/transaction_ext.rs similarity index 100% rename from ethcore/src/transaction_ext.rs rename to crates/ethcore/src/transaction_ext.rs diff --git a/ethcore/src/tx_filter.rs b/crates/ethcore/src/tx_filter.rs similarity index 98% rename from ethcore/src/tx_filter.rs rename to crates/ethcore/src/tx_filter.rs index 2a7da20e9..573fbbfbf 100644 --- a/ethcore/src/tx_filter.rs +++ b/crates/ethcore/src/tx_filter.rs @@ -186,7 +186,7 @@ mod test { /// Contract code: https://gist.github.com/VladLupashevskyi/84f18eabb1e4afadf572cf92af3e7e7f #[test] fn transaction_filter() { - let spec_data = include_str!("../res/tx_permission_tests/contract_ver_2_genesis.json"); + let spec_data = include_str!("../res/chainspec/test/contract_ver_2_genesis.json"); let db = test_helpers::new_db(); let tempdir = TempDir::new("").unwrap(); @@ -414,7 +414,7 @@ mod test { /// Contract code: https://gist.github.com/arkpar/38a87cb50165b7e683585eec71acb05a #[test] fn 
transaction_filter_deprecated() { - let spec_data = include_str!("../res/tx_permission_tests/deprecated_contract_genesis.json"); + let spec_data = include_str!("../res/chainspec/test/deprecated_contract_genesis.json"); let db = test_helpers::new_db(); let tempdir = TempDir::new("").unwrap(); diff --git a/ethcore/src/verification/canon_verifier.rs b/crates/ethcore/src/verification/canon_verifier.rs similarity index 100% rename from ethcore/src/verification/canon_verifier.rs rename to crates/ethcore/src/verification/canon_verifier.rs diff --git a/ethcore/src/verification/mod.rs b/crates/ethcore/src/verification/mod.rs similarity index 100% rename from ethcore/src/verification/mod.rs rename to crates/ethcore/src/verification/mod.rs diff --git a/ethcore/src/verification/noop_verifier.rs b/crates/ethcore/src/verification/noop_verifier.rs similarity index 100% rename from ethcore/src/verification/noop_verifier.rs rename to crates/ethcore/src/verification/noop_verifier.rs diff --git a/ethcore/src/verification/queue/kind.rs b/crates/ethcore/src/verification/queue/kind.rs similarity index 100% rename from ethcore/src/verification/queue/kind.rs rename to crates/ethcore/src/verification/queue/kind.rs diff --git a/ethcore/src/verification/queue/mod.rs b/crates/ethcore/src/verification/queue/mod.rs similarity index 100% rename from ethcore/src/verification/queue/mod.rs rename to crates/ethcore/src/verification/queue/mod.rs diff --git a/ethcore/src/verification/verification.rs b/crates/ethcore/src/verification/verification.rs similarity index 100% rename from ethcore/src/verification/verification.rs rename to crates/ethcore/src/verification/verification.rs diff --git a/ethcore/src/verification/verifier.rs b/crates/ethcore/src/verification/verifier.rs similarity index 100% rename from ethcore/src/verification/verifier.rs rename to crates/ethcore/src/verification/verifier.rs diff --git a/ethcore/sync/Cargo.toml b/crates/ethcore/sync/Cargo.toml similarity index 88% rename from ethcore/sync/Cargo.toml rename to crates/ethcore/sync/Cargo.toml index 4ea7ba1ae..590eab782 100644 --- a/ethcore/sync/Cargo.toml +++ b/crates/ethcore/sync/Cargo.toml @@ -12,9 +12,9 @@ common-types = { path = "../types" } enum_primitive = "0.1.1" derive_more = "0.99" ethcore = { path = ".." 
} -ethcore-io = { path = "../../util/io" } -ethcore-network = { path = "../../util/network" } -ethcore-network-devp2p = { path = "../../util/network-devp2p" } +ethcore-io = { path = "../../runtime/io" } +ethcore-network = { path = "../../net/network" } +ethcore-network-devp2p = { path = "../../net/network-devp2p" } ethereum-forkid = "0.2" primitive_types07 = { package = "primitive-types", version = "0.7"} ethereum-types = "0.4" @@ -41,6 +41,6 @@ stats = { path = "../../util/stats" } [dev-dependencies] env_logger = "0.5" ethcore = { path = "..", features = ["test-helpers"] } -ethcore-io = { path = "../../util/io", features = ["mio"] } +ethcore-io = { path = "../../runtime/io", features = ["mio"] } kvdb-memorydb = "0.1" rustc-hex = "1.0" diff --git a/ethcore/sync/src/api.rs b/crates/ethcore/sync/src/api.rs similarity index 100% rename from ethcore/sync/src/api.rs rename to crates/ethcore/sync/src/api.rs diff --git a/ethcore/sync/src/block_sync.rs b/crates/ethcore/sync/src/block_sync.rs similarity index 100% rename from ethcore/sync/src/block_sync.rs rename to crates/ethcore/sync/src/block_sync.rs diff --git a/ethcore/sync/src/blocks.rs b/crates/ethcore/sync/src/blocks.rs similarity index 100% rename from ethcore/sync/src/blocks.rs rename to crates/ethcore/sync/src/blocks.rs diff --git a/ethcore/sync/src/chain/fork_filter.rs b/crates/ethcore/sync/src/chain/fork_filter.rs similarity index 100% rename from ethcore/sync/src/chain/fork_filter.rs rename to crates/ethcore/sync/src/chain/fork_filter.rs diff --git a/ethcore/sync/src/chain/handler.rs b/crates/ethcore/sync/src/chain/handler.rs similarity index 100% rename from ethcore/sync/src/chain/handler.rs rename to crates/ethcore/sync/src/chain/handler.rs diff --git a/ethcore/sync/src/chain/mod.rs b/crates/ethcore/sync/src/chain/mod.rs similarity index 100% rename from ethcore/sync/src/chain/mod.rs rename to crates/ethcore/sync/src/chain/mod.rs diff --git a/ethcore/sync/src/chain/propagator.rs b/crates/ethcore/sync/src/chain/propagator.rs similarity index 100% rename from ethcore/sync/src/chain/propagator.rs rename to crates/ethcore/sync/src/chain/propagator.rs diff --git a/ethcore/sync/src/chain/requester.rs b/crates/ethcore/sync/src/chain/requester.rs similarity index 100% rename from ethcore/sync/src/chain/requester.rs rename to crates/ethcore/sync/src/chain/requester.rs diff --git a/ethcore/sync/src/chain/supplier.rs b/crates/ethcore/sync/src/chain/supplier.rs similarity index 100% rename from ethcore/sync/src/chain/supplier.rs rename to crates/ethcore/sync/src/chain/supplier.rs diff --git a/ethcore/sync/src/chain/sync_packet.rs b/crates/ethcore/sync/src/chain/sync_packet.rs similarity index 100% rename from ethcore/sync/src/chain/sync_packet.rs rename to crates/ethcore/sync/src/chain/sync_packet.rs diff --git a/ethcore/sync/src/lib.rs b/crates/ethcore/sync/src/lib.rs similarity index 100% rename from ethcore/sync/src/lib.rs rename to crates/ethcore/sync/src/lib.rs diff --git a/ethcore/sync/src/res/private_spec.json b/crates/ethcore/sync/src/res/private_spec.json similarity index 100% rename from ethcore/sync/src/res/private_spec.json rename to crates/ethcore/sync/src/res/private_spec.json diff --git a/ethcore/sync/src/snapshot.rs b/crates/ethcore/sync/src/snapshot.rs similarity index 100% rename from ethcore/sync/src/snapshot.rs rename to crates/ethcore/sync/src/snapshot.rs diff --git a/ethcore/sync/src/sync_io.rs b/crates/ethcore/sync/src/sync_io.rs similarity index 100% rename from ethcore/sync/src/sync_io.rs rename to 
crates/ethcore/sync/src/sync_io.rs
diff --git a/ethcore/sync/src/tests/chain.rs b/crates/ethcore/sync/src/tests/chain.rs
similarity index 100%
rename from ethcore/sync/src/tests/chain.rs
rename to crates/ethcore/sync/src/tests/chain.rs
diff --git a/ethcore/sync/src/tests/consensus.rs b/crates/ethcore/sync/src/tests/consensus.rs
similarity index 100%
rename from ethcore/sync/src/tests/consensus.rs
rename to crates/ethcore/sync/src/tests/consensus.rs
diff --git a/ethcore/sync/src/tests/helpers.rs b/crates/ethcore/sync/src/tests/helpers.rs
similarity index 100%
rename from ethcore/sync/src/tests/helpers.rs
rename to crates/ethcore/sync/src/tests/helpers.rs
diff --git a/ethcore/sync/src/tests/mod.rs b/crates/ethcore/sync/src/tests/mod.rs
similarity index 100%
rename from ethcore/sync/src/tests/mod.rs
rename to crates/ethcore/sync/src/tests/mod.rs
diff --git a/ethcore/sync/src/tests/rpc.rs b/crates/ethcore/sync/src/tests/rpc.rs
similarity index 100%
rename from ethcore/sync/src/tests/rpc.rs
rename to crates/ethcore/sync/src/tests/rpc.rs
diff --git a/ethcore/sync/src/tests/snapshot.rs b/crates/ethcore/sync/src/tests/snapshot.rs
similarity index 100%
rename from ethcore/sync/src/tests/snapshot.rs
rename to crates/ethcore/sync/src/tests/snapshot.rs
diff --git a/ethcore/sync/src/transactions_stats.rs b/crates/ethcore/sync/src/transactions_stats.rs
similarity index 100%
rename from ethcore/sync/src/transactions_stats.rs
rename to crates/ethcore/sync/src/transactions_stats.rs
diff --git a/ethcore/types/Cargo.toml b/crates/ethcore/types/Cargo.toml
similarity index 93%
rename from ethcore/types/Cargo.toml
rename to crates/ethcore/types/Cargo.toml
index 5fa288cc9..d6a18bcd1 100644
--- a/ethcore/types/Cargo.toml
+++ b/crates/ethcore/types/Cargo.toml
@@ -6,7 +6,7 @@ authors = ["Parity Technologies "]
 [dependencies]
 ethereum-types = "0.4"
-ethjson = { path = "../../json" }
+ethjson = { path = "../../ethjson" }
 ethkey = { path = "../../accounts/ethkey" }
 heapsize = "0.4"
 keccak-hash = "0.1"
diff --git a/ethcore/types/src/account_diff.rs b/crates/ethcore/types/src/account_diff.rs
similarity index 100%
rename from ethcore/types/src/account_diff.rs
rename to crates/ethcore/types/src/account_diff.rs
diff --git a/ethcore/types/src/ancestry_action.rs b/crates/ethcore/types/src/ancestry_action.rs
similarity index 100%
rename from ethcore/types/src/ancestry_action.rs
rename to crates/ethcore/types/src/ancestry_action.rs
diff --git a/ethcore/types/src/basic_account.rs b/crates/ethcore/types/src/basic_account.rs
similarity index 100%
rename from ethcore/types/src/basic_account.rs
rename to crates/ethcore/types/src/basic_account.rs
diff --git a/ethcore/types/src/block.rs b/crates/ethcore/types/src/block.rs
similarity index 100%
rename from ethcore/types/src/block.rs
rename to crates/ethcore/types/src/block.rs
diff --git a/ethcore/types/src/block_status.rs b/crates/ethcore/types/src/block_status.rs
similarity index 100%
rename from ethcore/types/src/block_status.rs
rename to crates/ethcore/types/src/block_status.rs
diff --git a/ethcore/types/src/blockchain_info.rs b/crates/ethcore/types/src/blockchain_info.rs
similarity index 100%
rename from ethcore/types/src/blockchain_info.rs
rename to crates/ethcore/types/src/blockchain_info.rs
diff --git a/ethcore/types/src/call_analytics.rs b/crates/ethcore/types/src/call_analytics.rs
similarity index 100%
rename from ethcore/types/src/call_analytics.rs
rename to crates/ethcore/types/src/call_analytics.rs
diff --git a/ethcore/types/src/creation_status.rs
b/crates/ethcore/types/src/creation_status.rs similarity index 100% rename from ethcore/types/src/creation_status.rs rename to crates/ethcore/types/src/creation_status.rs diff --git a/ethcore/types/src/data_format.rs b/crates/ethcore/types/src/data_format.rs similarity index 100% rename from ethcore/types/src/data_format.rs rename to crates/ethcore/types/src/data_format.rs diff --git a/ethcore/types/src/encoded.rs b/crates/ethcore/types/src/encoded.rs similarity index 100% rename from ethcore/types/src/encoded.rs rename to crates/ethcore/types/src/encoded.rs diff --git a/ethcore/types/src/engines/epoch.rs b/crates/ethcore/types/src/engines/epoch.rs similarity index 100% rename from ethcore/types/src/engines/epoch.rs rename to crates/ethcore/types/src/engines/epoch.rs diff --git a/ethcore/types/src/engines/mod.rs b/crates/ethcore/types/src/engines/mod.rs similarity index 100% rename from ethcore/types/src/engines/mod.rs rename to crates/ethcore/types/src/engines/mod.rs diff --git a/ethcore/types/src/filter.rs b/crates/ethcore/types/src/filter.rs similarity index 100% rename from ethcore/types/src/filter.rs rename to crates/ethcore/types/src/filter.rs diff --git a/ethcore/types/src/header.rs b/crates/ethcore/types/src/header.rs similarity index 100% rename from ethcore/types/src/header.rs rename to crates/ethcore/types/src/header.rs diff --git a/ethcore/types/src/ids.rs b/crates/ethcore/types/src/ids.rs similarity index 100% rename from ethcore/types/src/ids.rs rename to crates/ethcore/types/src/ids.rs diff --git a/ethcore/types/src/lib.rs b/crates/ethcore/types/src/lib.rs similarity index 100% rename from ethcore/types/src/lib.rs rename to crates/ethcore/types/src/lib.rs diff --git a/ethcore/types/src/log_entry.rs b/crates/ethcore/types/src/log_entry.rs similarity index 100% rename from ethcore/types/src/log_entry.rs rename to crates/ethcore/types/src/log_entry.rs diff --git a/ethcore/types/src/pruning_info.rs b/crates/ethcore/types/src/pruning_info.rs similarity index 100% rename from ethcore/types/src/pruning_info.rs rename to crates/ethcore/types/src/pruning_info.rs diff --git a/ethcore/types/src/receipt.rs b/crates/ethcore/types/src/receipt.rs similarity index 100% rename from ethcore/types/src/receipt.rs rename to crates/ethcore/types/src/receipt.rs diff --git a/ethcore/types/src/restoration_status.rs b/crates/ethcore/types/src/restoration_status.rs similarity index 100% rename from ethcore/types/src/restoration_status.rs rename to crates/ethcore/types/src/restoration_status.rs diff --git a/ethcore/types/src/security_level.rs b/crates/ethcore/types/src/security_level.rs similarity index 100% rename from ethcore/types/src/security_level.rs rename to crates/ethcore/types/src/security_level.rs diff --git a/ethcore/types/src/snapshot_manifest.rs b/crates/ethcore/types/src/snapshot_manifest.rs similarity index 100% rename from ethcore/types/src/snapshot_manifest.rs rename to crates/ethcore/types/src/snapshot_manifest.rs diff --git a/ethcore/types/src/state_diff.rs b/crates/ethcore/types/src/state_diff.rs similarity index 100% rename from ethcore/types/src/state_diff.rs rename to crates/ethcore/types/src/state_diff.rs diff --git a/ethcore/types/src/trace_filter.rs b/crates/ethcore/types/src/trace_filter.rs similarity index 100% rename from ethcore/types/src/trace_filter.rs rename to crates/ethcore/types/src/trace_filter.rs diff --git a/ethcore/types/src/transaction/error.rs b/crates/ethcore/types/src/transaction/error.rs similarity index 100% rename from 
ethcore/types/src/transaction/error.rs rename to crates/ethcore/types/src/transaction/error.rs diff --git a/ethcore/types/src/transaction/mod.rs b/crates/ethcore/types/src/transaction/mod.rs similarity index 100% rename from ethcore/types/src/transaction/mod.rs rename to crates/ethcore/types/src/transaction/mod.rs diff --git a/ethcore/types/src/transaction/transaction.rs b/crates/ethcore/types/src/transaction/transaction.rs similarity index 100% rename from ethcore/types/src/transaction/transaction.rs rename to crates/ethcore/types/src/transaction/transaction.rs diff --git a/ethcore/types/src/transaction/transaction_id.rs b/crates/ethcore/types/src/transaction/transaction_id.rs similarity index 100% rename from ethcore/types/src/transaction/transaction_id.rs rename to crates/ethcore/types/src/transaction/transaction_id.rs diff --git a/ethcore/types/src/tree_route.rs b/crates/ethcore/types/src/tree_route.rs similarity index 100% rename from ethcore/types/src/tree_route.rs rename to crates/ethcore/types/src/tree_route.rs diff --git a/ethcore/types/src/verification_queue_info.rs b/crates/ethcore/types/src/verification_queue_info.rs similarity index 100% rename from ethcore/types/src/verification_queue_info.rs rename to crates/ethcore/types/src/verification_queue_info.rs diff --git a/ethcore/types/src/views/block.rs b/crates/ethcore/types/src/views/block.rs similarity index 100% rename from ethcore/types/src/views/block.rs rename to crates/ethcore/types/src/views/block.rs diff --git a/ethcore/types/src/views/body.rs b/crates/ethcore/types/src/views/body.rs similarity index 100% rename from ethcore/types/src/views/body.rs rename to crates/ethcore/types/src/views/body.rs diff --git a/ethcore/types/src/views/header.rs b/crates/ethcore/types/src/views/header.rs similarity index 100% rename from ethcore/types/src/views/header.rs rename to crates/ethcore/types/src/views/header.rs diff --git a/ethcore/types/src/views/mod.rs b/crates/ethcore/types/src/views/mod.rs similarity index 100% rename from ethcore/types/src/views/mod.rs rename to crates/ethcore/types/src/views/mod.rs diff --git a/ethcore/types/src/views/transaction.rs b/crates/ethcore/types/src/views/transaction.rs similarity index 100% rename from ethcore/types/src/views/transaction.rs rename to crates/ethcore/types/src/views/transaction.rs diff --git a/ethcore/types/src/views/view_rlp.rs b/crates/ethcore/types/src/views/view_rlp.rs similarity index 100% rename from ethcore/types/src/views/view_rlp.rs rename to crates/ethcore/types/src/views/view_rlp.rs diff --git a/json/Cargo.toml b/crates/ethjson/Cargo.toml similarity index 100% rename from json/Cargo.toml rename to crates/ethjson/Cargo.toml diff --git a/json/src/blockchain/account.rs b/crates/ethjson/src/blockchain/account.rs similarity index 100% rename from json/src/blockchain/account.rs rename to crates/ethjson/src/blockchain/account.rs diff --git a/json/src/blockchain/block.rs b/crates/ethjson/src/blockchain/block.rs similarity index 100% rename from json/src/blockchain/block.rs rename to crates/ethjson/src/blockchain/block.rs diff --git a/json/src/blockchain/blockchain.rs b/crates/ethjson/src/blockchain/blockchain.rs similarity index 100% rename from json/src/blockchain/blockchain.rs rename to crates/ethjson/src/blockchain/blockchain.rs diff --git a/json/src/blockchain/header.rs b/crates/ethjson/src/blockchain/header.rs similarity index 100% rename from json/src/blockchain/header.rs rename to crates/ethjson/src/blockchain/header.rs diff --git a/json/src/blockchain/mod.rs 
b/crates/ethjson/src/blockchain/mod.rs similarity index 100% rename from json/src/blockchain/mod.rs rename to crates/ethjson/src/blockchain/mod.rs diff --git a/json/src/blockchain/state.rs b/crates/ethjson/src/blockchain/state.rs similarity index 99% rename from json/src/blockchain/state.rs rename to crates/ethjson/src/blockchain/state.rs index f2ebb04b3..4447f9988 100644 --- a/json/src/blockchain/state.rs +++ b/crates/ethjson/src/blockchain/state.rs @@ -21,7 +21,6 @@ use crate::{ hash::{Address, H256}, spec::{Account, Builtin}, }; -use serde::Deserialize; use std::collections::BTreeMap; #[derive(Clone, Debug, PartialEq, Deserialize)] diff --git a/json/src/blockchain/test.rs b/crates/ethjson/src/blockchain/test.rs similarity index 100% rename from json/src/blockchain/test.rs rename to crates/ethjson/src/blockchain/test.rs diff --git a/json/src/blockchain/transaction.rs b/crates/ethjson/src/blockchain/transaction.rs similarity index 100% rename from json/src/blockchain/transaction.rs rename to crates/ethjson/src/blockchain/transaction.rs diff --git a/json/src/bytes.rs b/crates/ethjson/src/bytes.rs similarity index 100% rename from json/src/bytes.rs rename to crates/ethjson/src/bytes.rs diff --git a/json/src/hash.rs b/crates/ethjson/src/hash.rs similarity index 100% rename from json/src/hash.rs rename to crates/ethjson/src/hash.rs diff --git a/json/src/lib.rs b/crates/ethjson/src/lib.rs similarity index 100% rename from json/src/lib.rs rename to crates/ethjson/src/lib.rs diff --git a/json/src/maybe.rs b/crates/ethjson/src/maybe.rs similarity index 100% rename from json/src/maybe.rs rename to crates/ethjson/src/maybe.rs diff --git a/json/src/spec/account.rs b/crates/ethjson/src/spec/account.rs similarity index 100% rename from json/src/spec/account.rs rename to crates/ethjson/src/spec/account.rs diff --git a/json/src/spec/authority_round.rs b/crates/ethjson/src/spec/authority_round.rs similarity index 100% rename from json/src/spec/authority_round.rs rename to crates/ethjson/src/spec/authority_round.rs diff --git a/json/src/spec/basic_authority.rs b/crates/ethjson/src/spec/basic_authority.rs similarity index 100% rename from json/src/spec/basic_authority.rs rename to crates/ethjson/src/spec/basic_authority.rs diff --git a/json/src/spec/builtin.rs b/crates/ethjson/src/spec/builtin.rs similarity index 100% rename from json/src/spec/builtin.rs rename to crates/ethjson/src/spec/builtin.rs diff --git a/json/src/spec/clique.rs b/crates/ethjson/src/spec/clique.rs similarity index 100% rename from json/src/spec/clique.rs rename to crates/ethjson/src/spec/clique.rs diff --git a/json/src/spec/engine.rs b/crates/ethjson/src/spec/engine.rs similarity index 100% rename from json/src/spec/engine.rs rename to crates/ethjson/src/spec/engine.rs diff --git a/json/src/spec/ethash.rs b/crates/ethjson/src/spec/ethash.rs similarity index 100% rename from json/src/spec/ethash.rs rename to crates/ethjson/src/spec/ethash.rs diff --git a/json/src/spec/genesis.rs b/crates/ethjson/src/spec/genesis.rs similarity index 100% rename from json/src/spec/genesis.rs rename to crates/ethjson/src/spec/genesis.rs diff --git a/json/src/spec/instant_seal.rs b/crates/ethjson/src/spec/instant_seal.rs similarity index 100% rename from json/src/spec/instant_seal.rs rename to crates/ethjson/src/spec/instant_seal.rs diff --git a/json/src/spec/mod.rs b/crates/ethjson/src/spec/mod.rs similarity index 100% rename from json/src/spec/mod.rs rename to crates/ethjson/src/spec/mod.rs diff --git a/json/src/spec/null_engine.rs 
b/crates/ethjson/src/spec/null_engine.rs similarity index 100% rename from json/src/spec/null_engine.rs rename to crates/ethjson/src/spec/null_engine.rs diff --git a/json/src/spec/params.rs b/crates/ethjson/src/spec/params.rs similarity index 100% rename from json/src/spec/params.rs rename to crates/ethjson/src/spec/params.rs diff --git a/json/src/spec/seal.rs b/crates/ethjson/src/spec/seal.rs similarity index 100% rename from json/src/spec/seal.rs rename to crates/ethjson/src/spec/seal.rs diff --git a/json/src/spec/spec.rs b/crates/ethjson/src/spec/spec.rs similarity index 100% rename from json/src/spec/spec.rs rename to crates/ethjson/src/spec/spec.rs diff --git a/json/src/spec/state.rs b/crates/ethjson/src/spec/state.rs similarity index 100% rename from json/src/spec/state.rs rename to crates/ethjson/src/spec/state.rs diff --git a/json/src/spec/validator_set.rs b/crates/ethjson/src/spec/validator_set.rs similarity index 100% rename from json/src/spec/validator_set.rs rename to crates/ethjson/src/spec/validator_set.rs diff --git a/json/src/state/log.rs b/crates/ethjson/src/state/log.rs similarity index 100% rename from json/src/state/log.rs rename to crates/ethjson/src/state/log.rs diff --git a/json/src/state/mod.rs b/crates/ethjson/src/state/mod.rs similarity index 100% rename from json/src/state/mod.rs rename to crates/ethjson/src/state/mod.rs diff --git a/json/src/state/state.rs b/crates/ethjson/src/state/state.rs similarity index 100% rename from json/src/state/state.rs rename to crates/ethjson/src/state/state.rs diff --git a/json/src/state/test.rs b/crates/ethjson/src/state/test.rs similarity index 100% rename from json/src/state/test.rs rename to crates/ethjson/src/state/test.rs diff --git a/json/src/state/transaction.rs b/crates/ethjson/src/state/transaction.rs similarity index 100% rename from json/src/state/transaction.rs rename to crates/ethjson/src/state/transaction.rs diff --git a/json/src/test/mod.rs b/crates/ethjson/src/test/mod.rs similarity index 100% rename from json/src/test/mod.rs rename to crates/ethjson/src/test/mod.rs diff --git a/json/src/transaction/mod.rs b/crates/ethjson/src/transaction/mod.rs similarity index 100% rename from json/src/transaction/mod.rs rename to crates/ethjson/src/transaction/mod.rs diff --git a/json/src/transaction/test.rs b/crates/ethjson/src/transaction/test.rs similarity index 100% rename from json/src/transaction/test.rs rename to crates/ethjson/src/transaction/test.rs diff --git a/json/src/transaction/transaction.rs b/crates/ethjson/src/transaction/transaction.rs similarity index 100% rename from json/src/transaction/transaction.rs rename to crates/ethjson/src/transaction/transaction.rs diff --git a/json/src/transaction/txtest.rs b/crates/ethjson/src/transaction/txtest.rs similarity index 100% rename from json/src/transaction/txtest.rs rename to crates/ethjson/src/transaction/txtest.rs diff --git a/json/src/trie/input.rs b/crates/ethjson/src/trie/input.rs similarity index 100% rename from json/src/trie/input.rs rename to crates/ethjson/src/trie/input.rs diff --git a/json/src/trie/mod.rs b/crates/ethjson/src/trie/mod.rs similarity index 100% rename from json/src/trie/mod.rs rename to crates/ethjson/src/trie/mod.rs diff --git a/json/src/trie/test.rs b/crates/ethjson/src/trie/test.rs similarity index 100% rename from json/src/trie/test.rs rename to crates/ethjson/src/trie/test.rs diff --git a/json/src/trie/trie.rs b/crates/ethjson/src/trie/trie.rs similarity index 100% rename from json/src/trie/trie.rs rename to 
crates/ethjson/src/trie/trie.rs diff --git a/json/src/uint.rs b/crates/ethjson/src/uint.rs similarity index 100% rename from json/src/uint.rs rename to crates/ethjson/src/uint.rs diff --git a/json/src/vm/call.rs b/crates/ethjson/src/vm/call.rs similarity index 100% rename from json/src/vm/call.rs rename to crates/ethjson/src/vm/call.rs diff --git a/json/src/vm/env.rs b/crates/ethjson/src/vm/env.rs similarity index 100% rename from json/src/vm/env.rs rename to crates/ethjson/src/vm/env.rs diff --git a/json/src/vm/mod.rs b/crates/ethjson/src/vm/mod.rs similarity index 100% rename from json/src/vm/mod.rs rename to crates/ethjson/src/vm/mod.rs diff --git a/json/src/vm/test.rs b/crates/ethjson/src/vm/test.rs similarity index 100% rename from json/src/vm/test.rs rename to crates/ethjson/src/vm/test.rs diff --git a/json/src/vm/transaction.rs b/crates/ethjson/src/vm/transaction.rs similarity index 100% rename from json/src/vm/transaction.rs rename to crates/ethjson/src/vm/transaction.rs diff --git a/json/src/vm/vm.rs b/crates/ethjson/src/vm/vm.rs similarity index 100% rename from json/src/vm/vm.rs rename to crates/ethjson/src/vm/vm.rs diff --git a/util/fake-fetch/Cargo.toml b/crates/net/fake-fetch/Cargo.toml similarity index 100% rename from util/fake-fetch/Cargo.toml rename to crates/net/fake-fetch/Cargo.toml diff --git a/util/fake-fetch/src/lib.rs b/crates/net/fake-fetch/src/lib.rs similarity index 100% rename from util/fake-fetch/src/lib.rs rename to crates/net/fake-fetch/src/lib.rs diff --git a/util/fetch/Cargo.toml b/crates/net/fetch/Cargo.toml similarity index 100% rename from util/fetch/Cargo.toml rename to crates/net/fetch/Cargo.toml diff --git a/util/fetch/src/client.rs b/crates/net/fetch/src/client.rs similarity index 100% rename from util/fetch/src/client.rs rename to crates/net/fetch/src/client.rs diff --git a/util/fetch/src/lib.rs b/crates/net/fetch/src/lib.rs similarity index 100% rename from util/fetch/src/lib.rs rename to crates/net/fetch/src/lib.rs diff --git a/util/network-devp2p/Cargo.toml b/crates/net/network-devp2p/Cargo.toml similarity index 89% rename from util/network-devp2p/Cargo.toml rename to crates/net/network-devp2p/Cargo.toml index 5c1fecdea..2b3873945 100644 --- a/util/network-devp2p/Cargo.toml +++ b/crates/net/network-devp2p/Cargo.toml @@ -19,12 +19,12 @@ libc = "0.2.7" parking_lot = "0.7" ansi_term = "0.10" rustc-hex = "1.0" -ethcore-io = { path = "../io", features = ["mio"] } +ethcore-io = { path = "../../runtime/io", features = ["mio"] } parity-bytes = "0.1" parity-crypto = "0.3.0" ethcore-network = { path = "../network" } ethereum-types = "0.4" -ethkey = { path = "../../accounts/ethkey" } +ethkey = { path = "../../../crates/accounts/ethkey" } rlp = { version = "0.3.0", features = ["ethereum"] } parity-path = "0.1" ipnetwork = "0.12.6" diff --git a/util/network-devp2p/src/connection.rs b/crates/net/network-devp2p/src/connection.rs similarity index 100% rename from util/network-devp2p/src/connection.rs rename to crates/net/network-devp2p/src/connection.rs diff --git a/util/network-devp2p/src/discovery.rs b/crates/net/network-devp2p/src/discovery.rs similarity index 100% rename from util/network-devp2p/src/discovery.rs rename to crates/net/network-devp2p/src/discovery.rs diff --git a/util/network-devp2p/src/handshake.rs b/crates/net/network-devp2p/src/handshake.rs similarity index 100% rename from util/network-devp2p/src/handshake.rs rename to crates/net/network-devp2p/src/handshake.rs diff --git a/util/network-devp2p/src/host.rs 
b/crates/net/network-devp2p/src/host.rs similarity index 100% rename from util/network-devp2p/src/host.rs rename to crates/net/network-devp2p/src/host.rs diff --git a/util/network-devp2p/src/ip_utils.rs b/crates/net/network-devp2p/src/ip_utils.rs similarity index 100% rename from util/network-devp2p/src/ip_utils.rs rename to crates/net/network-devp2p/src/ip_utils.rs diff --git a/util/network-devp2p/src/lib.rs b/crates/net/network-devp2p/src/lib.rs similarity index 100% rename from util/network-devp2p/src/lib.rs rename to crates/net/network-devp2p/src/lib.rs diff --git a/util/network-devp2p/src/node_table.rs b/crates/net/network-devp2p/src/node_table.rs similarity index 100% rename from util/network-devp2p/src/node_table.rs rename to crates/net/network-devp2p/src/node_table.rs diff --git a/util/network-devp2p/src/service.rs b/crates/net/network-devp2p/src/service.rs similarity index 100% rename from util/network-devp2p/src/service.rs rename to crates/net/network-devp2p/src/service.rs diff --git a/util/network-devp2p/src/session.rs b/crates/net/network-devp2p/src/session.rs similarity index 100% rename from util/network-devp2p/src/session.rs rename to crates/net/network-devp2p/src/session.rs diff --git a/util/network-devp2p/tests/tests.rs b/crates/net/network-devp2p/tests/tests.rs similarity index 100% rename from util/network-devp2p/tests/tests.rs rename to crates/net/network-devp2p/tests/tests.rs diff --git a/util/network/Cargo.toml b/crates/net/network/Cargo.toml similarity index 86% rename from util/network/Cargo.toml rename to crates/net/network/Cargo.toml index 42d004094..30827dee8 100644 --- a/util/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -9,9 +9,9 @@ authors = ["Parity Technologies "] [dependencies] error-chain = { version = "0.12", default-features = false } parity-crypto = "0.3.0" -ethcore-io = { path = "../io" } +ethcore-io = { path = "../../runtime/io" } ethereum-types = "0.4" -ethkey = { path = "../../accounts/ethkey" } +ethkey = { path = "../../../crates/accounts/ethkey" } ipnetwork = "0.12.6" lazy_static = "1.0" rlp = { version = "0.3.0", features = ["ethereum"] } diff --git a/util/network/src/client_version.rs b/crates/net/network/src/client_version.rs similarity index 100% rename from util/network/src/client_version.rs rename to crates/net/network/src/client_version.rs diff --git a/util/network/src/connection_filter.rs b/crates/net/network/src/connection_filter.rs similarity index 100% rename from util/network/src/connection_filter.rs rename to crates/net/network/src/connection_filter.rs diff --git a/util/network/src/error.rs b/crates/net/network/src/error.rs similarity index 100% rename from util/network/src/error.rs rename to crates/net/network/src/error.rs diff --git a/util/network/src/lib.rs b/crates/net/network/src/lib.rs similarity index 100% rename from util/network/src/lib.rs rename to crates/net/network/src/lib.rs diff --git a/ethcore/node-filter/Cargo.toml b/crates/net/node-filter/Cargo.toml similarity index 64% rename from ethcore/node-filter/Cargo.toml rename to crates/net/node-filter/Cargo.toml index 26b343af6..ca812ca19 100644 --- a/ethcore/node-filter/Cargo.toml +++ b/crates/net/node-filter/Cargo.toml @@ -7,9 +7,9 @@ version = "1.12.0" authors = ["Parity Technologies "] [dependencies] -ethcore = { path = ".."} -ethcore-network = { path = "../../util/network" } -ethcore-network-devp2p = { path = "../../util/network-devp2p" } +ethcore = { path = "../../ethcore"} +ethcore-network = { path = "../network" } +ethcore-network-devp2p = { path = 
"../network-devp2p" } ethereum-types = "0.4" log = "0.4" parking_lot = "0.7" @@ -19,7 +19,7 @@ ethabi-contract = "6.0" lru-cache = "0.1" [dev-dependencies] -ethcore = { path = "..", features = ["test-helpers"] } +ethcore = { path = "../../ethcore", features = ["test-helpers"] } kvdb-memorydb = "0.1" -ethcore-io = { path = "../../util/io" } -tempdir = "0.3" +ethcore-io = { path = "../../runtime/io" } +tempdir = "0.3" \ No newline at end of file diff --git a/ethcore/node-filter/res/node_filter.json b/crates/net/node-filter/res/node_filter.json similarity index 100% rename from ethcore/node-filter/res/node_filter.json rename to crates/net/node-filter/res/node_filter.json diff --git a/ethcore/node-filter/res/peer_set.json b/crates/net/node-filter/res/peer_set.json similarity index 100% rename from ethcore/node-filter/res/peer_set.json rename to crates/net/node-filter/res/peer_set.json diff --git a/ethcore/node-filter/src/lib.rs b/crates/net/node-filter/src/lib.rs similarity index 100% rename from ethcore/node-filter/src/lib.rs rename to crates/net/node-filter/src/lib.rs diff --git a/rpc/Cargo.toml b/crates/rpc/Cargo.toml similarity index 75% rename from rpc/Cargo.toml rename to crates/rpc/Cargo.toml index ae4215f01..4b2d9bdef 100644 --- a/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -32,35 +32,35 @@ jsonrpc-ipc-server = "15.0.0" jsonrpc-pubsub = "15.0.0" common-types = { path = "../ethcore/types" } -ethash = { path = "../ethash" } +ethash = { path = "../concensus/ethash" } ethcore = { path = "../ethcore" } ethcore-accounts = { path = "../accounts", optional = true } -ethcore-logger = { path = "../parity/logger" } -ethcore-miner = { path = "../miner" } -ethcore-network = { path = "../util/network" } +ethcore-logger = { path = "../../bin/oe/logger" } +ethcore-miner = { path = "../concensus/miner" } +ethcore-network = { path = "../net/network" } ethcore-sync = { path = "../ethcore/sync" } ethereum-types = "0.4" parity-bytes = "0.1" parity-crypto = "0.3.0" eip-712 = { path = "../util/EIP-712" } -ethjson = { path = "../json" } +ethjson = { path = "../ethjson" } ethkey = { path = "../accounts/ethkey" } ethstore = { path = "../accounts/ethstore" } -fetch = { path = "../util/fetch" } +fetch = { path = "../net/fetch" } keccak-hash = "0.1.2" -parity-runtime = { path = "../util/runtime" } +parity-runtime = { path = "../runtime/runtime" } parity-version = { path = "../util/version" } rlp = { version = "0.3.0", features = ["ethereum"] } stats = { path = "../util/stats" } -vm = { path = "../ethcore/vm" } +vm = { path = "../vm/vm" } [dev-dependencies] ethcore = { path = "../ethcore", features = ["test-helpers"] } ethcore-accounts = { path = "../accounts" } -ethcore-io = { path = "../util/io" } -ethcore-network = { path = "../util/network" } -fake-fetch = { path = "../util/fake-fetch" } +ethcore-io = { path = "../runtime/io" } +ethcore-network = { path = "../net/network" } +fake-fetch = { path = "../net/fake-fetch" } macros = { path = "../util/macros" } pretty_assertions = "0.1" transaction-pool = "2.0.1" diff --git a/rpc/src/authcodes.rs b/crates/rpc/src/authcodes.rs similarity index 100% rename from rpc/src/authcodes.rs rename to crates/rpc/src/authcodes.rs diff --git a/rpc/src/http_common.rs b/crates/rpc/src/http_common.rs similarity index 100% rename from rpc/src/http_common.rs rename to crates/rpc/src/http_common.rs diff --git a/rpc/src/lib.rs b/crates/rpc/src/lib.rs similarity index 100% rename from rpc/src/lib.rs rename to crates/rpc/src/lib.rs diff --git a/rpc/src/tests/helpers.rs 
b/crates/rpc/src/tests/helpers.rs similarity index 100% rename from rpc/src/tests/helpers.rs rename to crates/rpc/src/tests/helpers.rs diff --git a/rpc/src/tests/http_client.rs b/crates/rpc/src/tests/http_client.rs similarity index 100% rename from rpc/src/tests/http_client.rs rename to crates/rpc/src/tests/http_client.rs diff --git a/rpc/src/tests/mod.rs b/crates/rpc/src/tests/mod.rs similarity index 100% rename from rpc/src/tests/mod.rs rename to crates/rpc/src/tests/mod.rs diff --git a/rpc/src/tests/rpc.rs b/crates/rpc/src/tests/rpc.rs similarity index 100% rename from rpc/src/tests/rpc.rs rename to crates/rpc/src/tests/rpc.rs diff --git a/rpc/src/tests/ws.rs b/crates/rpc/src/tests/ws.rs similarity index 100% rename from rpc/src/tests/ws.rs rename to crates/rpc/src/tests/ws.rs diff --git a/rpc/src/v1/extractors.rs b/crates/rpc/src/v1/extractors.rs similarity index 100% rename from rpc/src/v1/extractors.rs rename to crates/rpc/src/v1/extractors.rs diff --git a/rpc/src/v1/helpers/block_import.rs b/crates/rpc/src/v1/helpers/block_import.rs similarity index 100% rename from rpc/src/v1/helpers/block_import.rs rename to crates/rpc/src/v1/helpers/block_import.rs diff --git a/rpc/src/v1/helpers/deprecated.rs b/crates/rpc/src/v1/helpers/deprecated.rs similarity index 100% rename from rpc/src/v1/helpers/deprecated.rs rename to crates/rpc/src/v1/helpers/deprecated.rs diff --git a/rpc/src/v1/helpers/dispatch/full.rs b/crates/rpc/src/v1/helpers/dispatch/full.rs similarity index 100% rename from rpc/src/v1/helpers/dispatch/full.rs rename to crates/rpc/src/v1/helpers/dispatch/full.rs diff --git a/rpc/src/v1/helpers/dispatch/mod.rs b/crates/rpc/src/v1/helpers/dispatch/mod.rs similarity index 100% rename from rpc/src/v1/helpers/dispatch/mod.rs rename to crates/rpc/src/v1/helpers/dispatch/mod.rs diff --git a/rpc/src/v1/helpers/dispatch/prospective_signer.rs b/crates/rpc/src/v1/helpers/dispatch/prospective_signer.rs similarity index 100% rename from rpc/src/v1/helpers/dispatch/prospective_signer.rs rename to crates/rpc/src/v1/helpers/dispatch/prospective_signer.rs diff --git a/rpc/src/v1/helpers/dispatch/signing.rs b/crates/rpc/src/v1/helpers/dispatch/signing.rs similarity index 100% rename from rpc/src/v1/helpers/dispatch/signing.rs rename to crates/rpc/src/v1/helpers/dispatch/signing.rs diff --git a/rpc/src/v1/helpers/eip191.rs b/crates/rpc/src/v1/helpers/eip191.rs similarity index 100% rename from rpc/src/v1/helpers/eip191.rs rename to crates/rpc/src/v1/helpers/eip191.rs diff --git a/rpc/src/v1/helpers/engine_signer.rs b/crates/rpc/src/v1/helpers/engine_signer.rs similarity index 100% rename from rpc/src/v1/helpers/engine_signer.rs rename to crates/rpc/src/v1/helpers/engine_signer.rs diff --git a/rpc/src/v1/helpers/errors.rs b/crates/rpc/src/v1/helpers/errors.rs similarity index 100% rename from rpc/src/v1/helpers/errors.rs rename to crates/rpc/src/v1/helpers/errors.rs diff --git a/rpc/src/v1/helpers/external_signer/mod.rs b/crates/rpc/src/v1/helpers/external_signer/mod.rs similarity index 100% rename from rpc/src/v1/helpers/external_signer/mod.rs rename to crates/rpc/src/v1/helpers/external_signer/mod.rs diff --git a/rpc/src/v1/helpers/external_signer/oneshot.rs b/crates/rpc/src/v1/helpers/external_signer/oneshot.rs similarity index 100% rename from rpc/src/v1/helpers/external_signer/oneshot.rs rename to crates/rpc/src/v1/helpers/external_signer/oneshot.rs diff --git a/rpc/src/v1/helpers/external_signer/signing_queue.rs b/crates/rpc/src/v1/helpers/external_signer/signing_queue.rs similarity index 100% 
rename from rpc/src/v1/helpers/external_signer/signing_queue.rs rename to crates/rpc/src/v1/helpers/external_signer/signing_queue.rs diff --git a/rpc/src/v1/helpers/fake_sign.rs b/crates/rpc/src/v1/helpers/fake_sign.rs similarity index 100% rename from rpc/src/v1/helpers/fake_sign.rs rename to crates/rpc/src/v1/helpers/fake_sign.rs diff --git a/rpc/src/v1/helpers/mod.rs b/crates/rpc/src/v1/helpers/mod.rs similarity index 100% rename from rpc/src/v1/helpers/mod.rs rename to crates/rpc/src/v1/helpers/mod.rs diff --git a/rpc/src/v1/helpers/network_settings.rs b/crates/rpc/src/v1/helpers/network_settings.rs similarity index 100% rename from rpc/src/v1/helpers/network_settings.rs rename to crates/rpc/src/v1/helpers/network_settings.rs diff --git a/rpc/src/v1/helpers/nonce.rs b/crates/rpc/src/v1/helpers/nonce.rs similarity index 100% rename from rpc/src/v1/helpers/nonce.rs rename to crates/rpc/src/v1/helpers/nonce.rs diff --git a/rpc/src/v1/helpers/poll_filter.rs b/crates/rpc/src/v1/helpers/poll_filter.rs similarity index 100% rename from rpc/src/v1/helpers/poll_filter.rs rename to crates/rpc/src/v1/helpers/poll_filter.rs diff --git a/rpc/src/v1/helpers/poll_manager.rs b/crates/rpc/src/v1/helpers/poll_manager.rs similarity index 100% rename from rpc/src/v1/helpers/poll_manager.rs rename to crates/rpc/src/v1/helpers/poll_manager.rs diff --git a/rpc/src/v1/helpers/requests.rs b/crates/rpc/src/v1/helpers/requests.rs similarity index 100% rename from rpc/src/v1/helpers/requests.rs rename to crates/rpc/src/v1/helpers/requests.rs diff --git a/rpc/src/v1/helpers/secretstore.rs b/crates/rpc/src/v1/helpers/secretstore.rs similarity index 100% rename from rpc/src/v1/helpers/secretstore.rs rename to crates/rpc/src/v1/helpers/secretstore.rs diff --git a/rpc/src/v1/helpers/signature.rs b/crates/rpc/src/v1/helpers/signature.rs similarity index 100% rename from rpc/src/v1/helpers/signature.rs rename to crates/rpc/src/v1/helpers/signature.rs diff --git a/rpc/src/v1/helpers/subscribers.rs b/crates/rpc/src/v1/helpers/subscribers.rs similarity index 100% rename from rpc/src/v1/helpers/subscribers.rs rename to crates/rpc/src/v1/helpers/subscribers.rs diff --git a/rpc/src/v1/helpers/subscription_manager.rs b/crates/rpc/src/v1/helpers/subscription_manager.rs similarity index 100% rename from rpc/src/v1/helpers/subscription_manager.rs rename to crates/rpc/src/v1/helpers/subscription_manager.rs diff --git a/rpc/src/v1/helpers/work.rs b/crates/rpc/src/v1/helpers/work.rs similarity index 100% rename from rpc/src/v1/helpers/work.rs rename to crates/rpc/src/v1/helpers/work.rs diff --git a/rpc/src/v1/impls/debug.rs b/crates/rpc/src/v1/impls/debug.rs similarity index 100% rename from rpc/src/v1/impls/debug.rs rename to crates/rpc/src/v1/impls/debug.rs diff --git a/rpc/src/v1/impls/eth.rs b/crates/rpc/src/v1/impls/eth.rs similarity index 100% rename from rpc/src/v1/impls/eth.rs rename to crates/rpc/src/v1/impls/eth.rs diff --git a/rpc/src/v1/impls/eth_filter.rs b/crates/rpc/src/v1/impls/eth_filter.rs similarity index 100% rename from rpc/src/v1/impls/eth_filter.rs rename to crates/rpc/src/v1/impls/eth_filter.rs diff --git a/rpc/src/v1/impls/eth_pubsub.rs b/crates/rpc/src/v1/impls/eth_pubsub.rs similarity index 100% rename from rpc/src/v1/impls/eth_pubsub.rs rename to crates/rpc/src/v1/impls/eth_pubsub.rs diff --git a/rpc/src/v1/impls/mod.rs b/crates/rpc/src/v1/impls/mod.rs similarity index 100% rename from rpc/src/v1/impls/mod.rs rename to crates/rpc/src/v1/impls/mod.rs diff --git a/rpc/src/v1/impls/net.rs 
b/crates/rpc/src/v1/impls/net.rs similarity index 100% rename from rpc/src/v1/impls/net.rs rename to crates/rpc/src/v1/impls/net.rs diff --git a/rpc/src/v1/impls/parity.rs b/crates/rpc/src/v1/impls/parity.rs similarity index 100% rename from rpc/src/v1/impls/parity.rs rename to crates/rpc/src/v1/impls/parity.rs diff --git a/rpc/src/v1/impls/parity_accounts.rs b/crates/rpc/src/v1/impls/parity_accounts.rs similarity index 100% rename from rpc/src/v1/impls/parity_accounts.rs rename to crates/rpc/src/v1/impls/parity_accounts.rs diff --git a/rpc/src/v1/impls/parity_set.rs b/crates/rpc/src/v1/impls/parity_set.rs similarity index 100% rename from rpc/src/v1/impls/parity_set.rs rename to crates/rpc/src/v1/impls/parity_set.rs diff --git a/rpc/src/v1/impls/personal.rs b/crates/rpc/src/v1/impls/personal.rs similarity index 100% rename from rpc/src/v1/impls/personal.rs rename to crates/rpc/src/v1/impls/personal.rs diff --git a/rpc/src/v1/impls/pubsub.rs b/crates/rpc/src/v1/impls/pubsub.rs similarity index 100% rename from rpc/src/v1/impls/pubsub.rs rename to crates/rpc/src/v1/impls/pubsub.rs diff --git a/rpc/src/v1/impls/secretstore.rs b/crates/rpc/src/v1/impls/secretstore.rs similarity index 100% rename from rpc/src/v1/impls/secretstore.rs rename to crates/rpc/src/v1/impls/secretstore.rs diff --git a/rpc/src/v1/impls/signer.rs b/crates/rpc/src/v1/impls/signer.rs similarity index 100% rename from rpc/src/v1/impls/signer.rs rename to crates/rpc/src/v1/impls/signer.rs diff --git a/rpc/src/v1/impls/signing.rs b/crates/rpc/src/v1/impls/signing.rs similarity index 100% rename from rpc/src/v1/impls/signing.rs rename to crates/rpc/src/v1/impls/signing.rs diff --git a/rpc/src/v1/impls/signing_unsafe.rs b/crates/rpc/src/v1/impls/signing_unsafe.rs similarity index 100% rename from rpc/src/v1/impls/signing_unsafe.rs rename to crates/rpc/src/v1/impls/signing_unsafe.rs diff --git a/rpc/src/v1/impls/traces.rs b/crates/rpc/src/v1/impls/traces.rs similarity index 100% rename from rpc/src/v1/impls/traces.rs rename to crates/rpc/src/v1/impls/traces.rs diff --git a/rpc/src/v1/impls/web3.rs b/crates/rpc/src/v1/impls/web3.rs similarity index 100% rename from rpc/src/v1/impls/web3.rs rename to crates/rpc/src/v1/impls/web3.rs diff --git a/rpc/src/v1/informant.rs b/crates/rpc/src/v1/informant.rs similarity index 100% rename from rpc/src/v1/informant.rs rename to crates/rpc/src/v1/informant.rs diff --git a/rpc/src/v1/metadata.rs b/crates/rpc/src/v1/metadata.rs similarity index 100% rename from rpc/src/v1/metadata.rs rename to crates/rpc/src/v1/metadata.rs diff --git a/rpc/src/v1/mod.rs b/crates/rpc/src/v1/mod.rs similarity index 100% rename from rpc/src/v1/mod.rs rename to crates/rpc/src/v1/mod.rs diff --git a/rpc/src/v1/tests/eth.rs b/crates/rpc/src/v1/tests/eth.rs similarity index 100% rename from rpc/src/v1/tests/eth.rs rename to crates/rpc/src/v1/tests/eth.rs diff --git a/rpc/src/v1/tests/helpers/miner_service.rs b/crates/rpc/src/v1/tests/helpers/miner_service.rs similarity index 100% rename from rpc/src/v1/tests/helpers/miner_service.rs rename to crates/rpc/src/v1/tests/helpers/miner_service.rs diff --git a/rpc/src/v1/tests/helpers/mod.rs b/crates/rpc/src/v1/tests/helpers/mod.rs similarity index 100% rename from rpc/src/v1/tests/helpers/mod.rs rename to crates/rpc/src/v1/tests/helpers/mod.rs diff --git a/rpc/src/v1/tests/helpers/snapshot_service.rs b/crates/rpc/src/v1/tests/helpers/snapshot_service.rs similarity index 100% rename from rpc/src/v1/tests/helpers/snapshot_service.rs rename to 
crates/rpc/src/v1/tests/helpers/snapshot_service.rs diff --git a/rpc/src/v1/tests/helpers/sync_provider.rs b/crates/rpc/src/v1/tests/helpers/sync_provider.rs similarity index 100% rename from rpc/src/v1/tests/helpers/sync_provider.rs rename to crates/rpc/src/v1/tests/helpers/sync_provider.rs diff --git a/rpc/src/v1/tests/mocked/debug.rs b/crates/rpc/src/v1/tests/mocked/debug.rs similarity index 100% rename from rpc/src/v1/tests/mocked/debug.rs rename to crates/rpc/src/v1/tests/mocked/debug.rs diff --git a/rpc/src/v1/tests/mocked/eth.rs b/crates/rpc/src/v1/tests/mocked/eth.rs similarity index 100% rename from rpc/src/v1/tests/mocked/eth.rs rename to crates/rpc/src/v1/tests/mocked/eth.rs diff --git a/rpc/src/v1/tests/mocked/eth_pubsub.rs b/crates/rpc/src/v1/tests/mocked/eth_pubsub.rs similarity index 100% rename from rpc/src/v1/tests/mocked/eth_pubsub.rs rename to crates/rpc/src/v1/tests/mocked/eth_pubsub.rs diff --git a/rpc/src/v1/tests/mocked/manage_network.rs b/crates/rpc/src/v1/tests/mocked/manage_network.rs similarity index 100% rename from rpc/src/v1/tests/mocked/manage_network.rs rename to crates/rpc/src/v1/tests/mocked/manage_network.rs diff --git a/rpc/src/v1/tests/mocked/mod.rs b/crates/rpc/src/v1/tests/mocked/mod.rs similarity index 100% rename from rpc/src/v1/tests/mocked/mod.rs rename to crates/rpc/src/v1/tests/mocked/mod.rs diff --git a/rpc/src/v1/tests/mocked/net.rs b/crates/rpc/src/v1/tests/mocked/net.rs similarity index 100% rename from rpc/src/v1/tests/mocked/net.rs rename to crates/rpc/src/v1/tests/mocked/net.rs diff --git a/rpc/src/v1/tests/mocked/parity.rs b/crates/rpc/src/v1/tests/mocked/parity.rs similarity index 100% rename from rpc/src/v1/tests/mocked/parity.rs rename to crates/rpc/src/v1/tests/mocked/parity.rs diff --git a/rpc/src/v1/tests/mocked/parity_accounts.rs b/crates/rpc/src/v1/tests/mocked/parity_accounts.rs similarity index 100% rename from rpc/src/v1/tests/mocked/parity_accounts.rs rename to crates/rpc/src/v1/tests/mocked/parity_accounts.rs diff --git a/rpc/src/v1/tests/mocked/parity_set.rs b/crates/rpc/src/v1/tests/mocked/parity_set.rs similarity index 100% rename from rpc/src/v1/tests/mocked/parity_set.rs rename to crates/rpc/src/v1/tests/mocked/parity_set.rs diff --git a/rpc/src/v1/tests/mocked/personal.rs b/crates/rpc/src/v1/tests/mocked/personal.rs similarity index 100% rename from rpc/src/v1/tests/mocked/personal.rs rename to crates/rpc/src/v1/tests/mocked/personal.rs diff --git a/rpc/src/v1/tests/mocked/pubsub.rs b/crates/rpc/src/v1/tests/mocked/pubsub.rs similarity index 100% rename from rpc/src/v1/tests/mocked/pubsub.rs rename to crates/rpc/src/v1/tests/mocked/pubsub.rs diff --git a/rpc/src/v1/tests/mocked/secretstore.rs b/crates/rpc/src/v1/tests/mocked/secretstore.rs similarity index 100% rename from rpc/src/v1/tests/mocked/secretstore.rs rename to crates/rpc/src/v1/tests/mocked/secretstore.rs diff --git a/rpc/src/v1/tests/mocked/signer.rs b/crates/rpc/src/v1/tests/mocked/signer.rs similarity index 100% rename from rpc/src/v1/tests/mocked/signer.rs rename to crates/rpc/src/v1/tests/mocked/signer.rs diff --git a/rpc/src/v1/tests/mocked/signing.rs b/crates/rpc/src/v1/tests/mocked/signing.rs similarity index 100% rename from rpc/src/v1/tests/mocked/signing.rs rename to crates/rpc/src/v1/tests/mocked/signing.rs diff --git a/rpc/src/v1/tests/mocked/signing_unsafe.rs b/crates/rpc/src/v1/tests/mocked/signing_unsafe.rs similarity index 100% rename from rpc/src/v1/tests/mocked/signing_unsafe.rs rename to crates/rpc/src/v1/tests/mocked/signing_unsafe.rs 
diff --git a/rpc/src/v1/tests/mocked/traces.rs b/crates/rpc/src/v1/tests/mocked/traces.rs similarity index 100% rename from rpc/src/v1/tests/mocked/traces.rs rename to crates/rpc/src/v1/tests/mocked/traces.rs diff --git a/rpc/src/v1/tests/mocked/web3.rs b/crates/rpc/src/v1/tests/mocked/web3.rs similarity index 100% rename from rpc/src/v1/tests/mocked/web3.rs rename to crates/rpc/src/v1/tests/mocked/web3.rs diff --git a/rpc/src/v1/tests/mod.rs b/crates/rpc/src/v1/tests/mod.rs similarity index 93% rename from rpc/src/v1/tests/mod.rs rename to crates/rpc/src/v1/tests/mod.rs index e695bc4cc..794bbce8a 100644 --- a/rpc/src/v1/tests/mod.rs +++ b/crates/rpc/src/v1/tests/mod.rs @@ -31,7 +31,7 @@ pub mod helpers; macro_rules! extract_chain { (iter $file:expr) => {{ const RAW_DATA: &'static [u8] = - include_bytes!(concat!("../../../../ethcore/res/ethereum/tests/LegacyTests/Constantinople/", $file, ".json")); + include_bytes!(concat!("../../../../ethcore/res/json_tests/LegacyTests/Constantinople/", $file, ".json")); ::ethjson::blockchain::Test::load(RAW_DATA).unwrap().into_iter() }}; diff --git a/rpc/src/v1/traits/debug.rs b/crates/rpc/src/v1/traits/debug.rs similarity index 100% rename from rpc/src/v1/traits/debug.rs rename to crates/rpc/src/v1/traits/debug.rs diff --git a/rpc/src/v1/traits/eth.rs b/crates/rpc/src/v1/traits/eth.rs similarity index 100% rename from rpc/src/v1/traits/eth.rs rename to crates/rpc/src/v1/traits/eth.rs diff --git a/rpc/src/v1/traits/eth_pubsub.rs b/crates/rpc/src/v1/traits/eth_pubsub.rs similarity index 100% rename from rpc/src/v1/traits/eth_pubsub.rs rename to crates/rpc/src/v1/traits/eth_pubsub.rs diff --git a/rpc/src/v1/traits/eth_signing.rs b/crates/rpc/src/v1/traits/eth_signing.rs similarity index 100% rename from rpc/src/v1/traits/eth_signing.rs rename to crates/rpc/src/v1/traits/eth_signing.rs diff --git a/rpc/src/v1/traits/mod.rs b/crates/rpc/src/v1/traits/mod.rs similarity index 100% rename from rpc/src/v1/traits/mod.rs rename to crates/rpc/src/v1/traits/mod.rs diff --git a/rpc/src/v1/traits/net.rs b/crates/rpc/src/v1/traits/net.rs similarity index 100% rename from rpc/src/v1/traits/net.rs rename to crates/rpc/src/v1/traits/net.rs diff --git a/rpc/src/v1/traits/parity.rs b/crates/rpc/src/v1/traits/parity.rs similarity index 100% rename from rpc/src/v1/traits/parity.rs rename to crates/rpc/src/v1/traits/parity.rs diff --git a/rpc/src/v1/traits/parity_accounts.rs b/crates/rpc/src/v1/traits/parity_accounts.rs similarity index 100% rename from rpc/src/v1/traits/parity_accounts.rs rename to crates/rpc/src/v1/traits/parity_accounts.rs diff --git a/rpc/src/v1/traits/parity_set.rs b/crates/rpc/src/v1/traits/parity_set.rs similarity index 100% rename from rpc/src/v1/traits/parity_set.rs rename to crates/rpc/src/v1/traits/parity_set.rs diff --git a/rpc/src/v1/traits/parity_signing.rs b/crates/rpc/src/v1/traits/parity_signing.rs similarity index 100% rename from rpc/src/v1/traits/parity_signing.rs rename to crates/rpc/src/v1/traits/parity_signing.rs diff --git a/rpc/src/v1/traits/personal.rs b/crates/rpc/src/v1/traits/personal.rs similarity index 100% rename from rpc/src/v1/traits/personal.rs rename to crates/rpc/src/v1/traits/personal.rs diff --git a/rpc/src/v1/traits/pubsub.rs b/crates/rpc/src/v1/traits/pubsub.rs similarity index 100% rename from rpc/src/v1/traits/pubsub.rs rename to crates/rpc/src/v1/traits/pubsub.rs diff --git a/rpc/src/v1/traits/secretstore.rs b/crates/rpc/src/v1/traits/secretstore.rs similarity index 100% rename from rpc/src/v1/traits/secretstore.rs 
rename to crates/rpc/src/v1/traits/secretstore.rs diff --git a/rpc/src/v1/traits/signer.rs b/crates/rpc/src/v1/traits/signer.rs similarity index 100% rename from rpc/src/v1/traits/signer.rs rename to crates/rpc/src/v1/traits/signer.rs diff --git a/rpc/src/v1/traits/traces.rs b/crates/rpc/src/v1/traits/traces.rs similarity index 100% rename from rpc/src/v1/traits/traces.rs rename to crates/rpc/src/v1/traits/traces.rs diff --git a/rpc/src/v1/traits/web3.rs b/crates/rpc/src/v1/traits/web3.rs similarity index 100% rename from rpc/src/v1/traits/web3.rs rename to crates/rpc/src/v1/traits/web3.rs diff --git a/rpc/src/v1/types/account_info.rs b/crates/rpc/src/v1/types/account_info.rs similarity index 100% rename from rpc/src/v1/types/account_info.rs rename to crates/rpc/src/v1/types/account_info.rs diff --git a/rpc/src/v1/types/block.rs b/crates/rpc/src/v1/types/block.rs similarity index 100% rename from rpc/src/v1/types/block.rs rename to crates/rpc/src/v1/types/block.rs diff --git a/rpc/src/v1/types/block_number.rs b/crates/rpc/src/v1/types/block_number.rs similarity index 100% rename from rpc/src/v1/types/block_number.rs rename to crates/rpc/src/v1/types/block_number.rs diff --git a/rpc/src/v1/types/bytes.rs b/crates/rpc/src/v1/types/bytes.rs similarity index 100% rename from rpc/src/v1/types/bytes.rs rename to crates/rpc/src/v1/types/bytes.rs diff --git a/rpc/src/v1/types/call_request.rs b/crates/rpc/src/v1/types/call_request.rs similarity index 100% rename from rpc/src/v1/types/call_request.rs rename to crates/rpc/src/v1/types/call_request.rs diff --git a/rpc/src/v1/types/confirmations.rs b/crates/rpc/src/v1/types/confirmations.rs similarity index 100% rename from rpc/src/v1/types/confirmations.rs rename to crates/rpc/src/v1/types/confirmations.rs diff --git a/rpc/src/v1/types/derivation.rs b/crates/rpc/src/v1/types/derivation.rs similarity index 100% rename from rpc/src/v1/types/derivation.rs rename to crates/rpc/src/v1/types/derivation.rs diff --git a/rpc/src/v1/types/eip191.rs b/crates/rpc/src/v1/types/eip191.rs similarity index 100% rename from rpc/src/v1/types/eip191.rs rename to crates/rpc/src/v1/types/eip191.rs diff --git a/rpc/src/v1/types/eth_types.rs b/crates/rpc/src/v1/types/eth_types.rs similarity index 100% rename from rpc/src/v1/types/eth_types.rs rename to crates/rpc/src/v1/types/eth_types.rs diff --git a/rpc/src/v1/types/filter.rs b/crates/rpc/src/v1/types/filter.rs similarity index 100% rename from rpc/src/v1/types/filter.rs rename to crates/rpc/src/v1/types/filter.rs diff --git a/rpc/src/v1/types/histogram.rs b/crates/rpc/src/v1/types/histogram.rs similarity index 100% rename from rpc/src/v1/types/histogram.rs rename to crates/rpc/src/v1/types/histogram.rs diff --git a/rpc/src/v1/types/index.rs b/crates/rpc/src/v1/types/index.rs similarity index 100% rename from rpc/src/v1/types/index.rs rename to crates/rpc/src/v1/types/index.rs diff --git a/rpc/src/v1/types/log.rs b/crates/rpc/src/v1/types/log.rs similarity index 100% rename from rpc/src/v1/types/log.rs rename to crates/rpc/src/v1/types/log.rs diff --git a/rpc/src/v1/types/mod.rs b/crates/rpc/src/v1/types/mod.rs similarity index 100% rename from rpc/src/v1/types/mod.rs rename to crates/rpc/src/v1/types/mod.rs diff --git a/rpc/src/v1/types/node_kind.rs b/crates/rpc/src/v1/types/node_kind.rs similarity index 100% rename from rpc/src/v1/types/node_kind.rs rename to crates/rpc/src/v1/types/node_kind.rs diff --git a/rpc/src/v1/types/provenance.rs b/crates/rpc/src/v1/types/provenance.rs similarity index 100% rename from 
rpc/src/v1/types/provenance.rs rename to crates/rpc/src/v1/types/provenance.rs diff --git a/rpc/src/v1/types/pubsub.rs b/crates/rpc/src/v1/types/pubsub.rs similarity index 100% rename from rpc/src/v1/types/pubsub.rs rename to crates/rpc/src/v1/types/pubsub.rs diff --git a/rpc/src/v1/types/receipt.rs b/crates/rpc/src/v1/types/receipt.rs similarity index 100% rename from rpc/src/v1/types/receipt.rs rename to crates/rpc/src/v1/types/receipt.rs diff --git a/rpc/src/v1/types/rpc_settings.rs b/crates/rpc/src/v1/types/rpc_settings.rs similarity index 100% rename from rpc/src/v1/types/rpc_settings.rs rename to crates/rpc/src/v1/types/rpc_settings.rs diff --git a/rpc/src/v1/types/secretstore.rs b/crates/rpc/src/v1/types/secretstore.rs similarity index 100% rename from rpc/src/v1/types/secretstore.rs rename to crates/rpc/src/v1/types/secretstore.rs diff --git a/rpc/src/v1/types/sync.rs b/crates/rpc/src/v1/types/sync.rs similarity index 100% rename from rpc/src/v1/types/sync.rs rename to crates/rpc/src/v1/types/sync.rs diff --git a/rpc/src/v1/types/trace.rs b/crates/rpc/src/v1/types/trace.rs similarity index 100% rename from rpc/src/v1/types/trace.rs rename to crates/rpc/src/v1/types/trace.rs diff --git a/rpc/src/v1/types/trace_filter.rs b/crates/rpc/src/v1/types/trace_filter.rs similarity index 100% rename from rpc/src/v1/types/trace_filter.rs rename to crates/rpc/src/v1/types/trace_filter.rs diff --git a/rpc/src/v1/types/transaction.rs b/crates/rpc/src/v1/types/transaction.rs similarity index 100% rename from rpc/src/v1/types/transaction.rs rename to crates/rpc/src/v1/types/transaction.rs diff --git a/rpc/src/v1/types/transaction_condition.rs b/crates/rpc/src/v1/types/transaction_condition.rs similarity index 100% rename from rpc/src/v1/types/transaction_condition.rs rename to crates/rpc/src/v1/types/transaction_condition.rs diff --git a/rpc/src/v1/types/transaction_request.rs b/crates/rpc/src/v1/types/transaction_request.rs similarity index 100% rename from rpc/src/v1/types/transaction_request.rs rename to crates/rpc/src/v1/types/transaction_request.rs diff --git a/rpc/src/v1/types/work.rs b/crates/rpc/src/v1/types/work.rs similarity index 100% rename from rpc/src/v1/types/work.rs rename to crates/rpc/src/v1/types/work.rs diff --git a/util/io/Cargo.toml b/crates/runtime/io/Cargo.toml similarity index 100% rename from util/io/Cargo.toml rename to crates/runtime/io/Cargo.toml diff --git a/util/io/src/lib.rs b/crates/runtime/io/src/lib.rs similarity index 100% rename from util/io/src/lib.rs rename to crates/runtime/io/src/lib.rs diff --git a/util/io/src/service_mio.rs b/crates/runtime/io/src/service_mio.rs similarity index 100% rename from util/io/src/service_mio.rs rename to crates/runtime/io/src/service_mio.rs diff --git a/util/io/src/service_non_mio.rs b/crates/runtime/io/src/service_non_mio.rs similarity index 100% rename from util/io/src/service_non_mio.rs rename to crates/runtime/io/src/service_non_mio.rs diff --git a/util/io/src/worker.rs b/crates/runtime/io/src/worker.rs similarity index 100% rename from util/io/src/worker.rs rename to crates/runtime/io/src/worker.rs diff --git a/util/runtime/Cargo.toml b/crates/runtime/runtime/Cargo.toml similarity index 100% rename from util/runtime/Cargo.toml rename to crates/runtime/runtime/Cargo.toml diff --git a/util/runtime/src/lib.rs b/crates/runtime/runtime/src/lib.rs similarity index 100% rename from util/runtime/src/lib.rs rename to crates/runtime/runtime/src/lib.rs diff --git a/util/EIP-152/Cargo.toml b/crates/util/EIP-152/Cargo.toml similarity 
index 100% rename from util/EIP-152/Cargo.toml rename to crates/util/EIP-152/Cargo.toml diff --git a/util/EIP-152/src/lib.rs b/crates/util/EIP-152/src/lib.rs similarity index 100% rename from util/EIP-152/src/lib.rs rename to crates/util/EIP-152/src/lib.rs diff --git a/util/EIP-712/Cargo.toml b/crates/util/EIP-712/Cargo.toml similarity index 100% rename from util/EIP-712/Cargo.toml rename to crates/util/EIP-712/Cargo.toml diff --git a/util/EIP-712/README.md b/crates/util/EIP-712/README.md similarity index 100% rename from util/EIP-712/README.md rename to crates/util/EIP-712/README.md diff --git a/util/EIP-712/src/eip712.rs b/crates/util/EIP-712/src/eip712.rs similarity index 100% rename from util/EIP-712/src/eip712.rs rename to crates/util/EIP-712/src/eip712.rs diff --git a/util/EIP-712/src/encode.rs b/crates/util/EIP-712/src/encode.rs similarity index 100% rename from util/EIP-712/src/encode.rs rename to crates/util/EIP-712/src/encode.rs diff --git a/util/EIP-712/src/error.rs b/crates/util/EIP-712/src/error.rs similarity index 100% rename from util/EIP-712/src/error.rs rename to crates/util/EIP-712/src/error.rs diff --git a/util/EIP-712/src/lib.rs b/crates/util/EIP-712/src/lib.rs similarity index 100% rename from util/EIP-712/src/lib.rs rename to crates/util/EIP-712/src/lib.rs diff --git a/util/EIP-712/src/parser.rs b/crates/util/EIP-712/src/parser.rs similarity index 100% rename from util/EIP-712/src/parser.rs rename to crates/util/EIP-712/src/parser.rs diff --git a/cli-signer/Cargo.toml b/crates/util/cli-signer/Cargo.toml similarity index 90% rename from cli-signer/Cargo.toml rename to crates/util/cli-signer/Cargo.toml index bc833fca4..afc3352ca 100644 --- a/cli-signer/Cargo.toml +++ b/crates/util/cli-signer/Cargo.toml @@ -10,5 +10,5 @@ authors = ["Parity "] ethereum-types = "0.4" futures = "0.1" rpassword = "1.0" -parity-rpc = { path = "../rpc" } +parity-rpc = { path = "../../rpc" } parity-rpc-client = { path = "rpc-client" } diff --git a/cli-signer/rpc-client/Cargo.toml b/crates/util/cli-signer/rpc-client/Cargo.toml similarity index 91% rename from cli-signer/rpc-client/Cargo.toml rename to crates/util/cli-signer/rpc-client/Cargo.toml index 90b8d73b6..19d2c61a8 100644 --- a/cli-signer/rpc-client/Cargo.toml +++ b/crates/util/cli-signer/rpc-client/Cargo.toml @@ -17,5 +17,5 @@ matches = "0.1" parking_lot = "0.9" jsonrpc-core = "15.0.0" jsonrpc-ws-server = "15.0.0" -parity-rpc = { path = "../../rpc" } +parity-rpc = { path = "../../../rpc" } keccak-hash = "0.1" diff --git a/cli-signer/rpc-client/src/client.rs b/crates/util/cli-signer/rpc-client/src/client.rs similarity index 100% rename from cli-signer/rpc-client/src/client.rs rename to crates/util/cli-signer/rpc-client/src/client.rs diff --git a/cli-signer/rpc-client/src/lib.rs b/crates/util/cli-signer/rpc-client/src/lib.rs similarity index 100% rename from cli-signer/rpc-client/src/lib.rs rename to crates/util/cli-signer/rpc-client/src/lib.rs diff --git a/cli-signer/rpc-client/src/signer_client.rs b/crates/util/cli-signer/rpc-client/src/signer_client.rs similarity index 100% rename from cli-signer/rpc-client/src/signer_client.rs rename to crates/util/cli-signer/rpc-client/src/signer_client.rs diff --git a/cli-signer/src/lib.rs b/crates/util/cli-signer/src/lib.rs similarity index 100% rename from cli-signer/src/lib.rs rename to crates/util/cli-signer/src/lib.rs diff --git a/util/dir/Cargo.toml b/crates/util/dir/Cargo.toml similarity index 83% rename from util/dir/Cargo.toml rename to crates/util/dir/Cargo.toml index 04fa52634..449805c38 
100644 --- a/util/dir/Cargo.toml +++ b/crates/util/dir/Cargo.toml @@ -6,6 +6,6 @@ license = "GPL3" [dependencies] ethereum-types = "0.4" -journaldb = { path = "../journaldb" } +journaldb = { path = "../../db/journaldb" } app_dirs = { git = "https://github.com/openethereum/app-dirs-rs" } home = "0.3" diff --git a/util/dir/src/helpers.rs b/crates/util/dir/src/helpers.rs similarity index 100% rename from util/dir/src/helpers.rs rename to crates/util/dir/src/helpers.rs diff --git a/util/dir/src/lib.rs b/crates/util/dir/src/lib.rs similarity index 100% rename from util/dir/src/lib.rs rename to crates/util/dir/src/lib.rs diff --git a/util/fastmap/Cargo.toml b/crates/util/fastmap/Cargo.toml similarity index 100% rename from util/fastmap/Cargo.toml rename to crates/util/fastmap/Cargo.toml diff --git a/util/fastmap/src/lib.rs b/crates/util/fastmap/src/lib.rs similarity index 100% rename from util/fastmap/src/lib.rs rename to crates/util/fastmap/src/lib.rs diff --git a/util/keccak-hasher/Cargo.toml b/crates/util/keccak-hasher/Cargo.toml similarity index 100% rename from util/keccak-hasher/Cargo.toml rename to crates/util/keccak-hasher/Cargo.toml diff --git a/util/keccak-hasher/src/lib.rs b/crates/util/keccak-hasher/src/lib.rs similarity index 100% rename from util/keccak-hasher/src/lib.rs rename to crates/util/keccak-hasher/src/lib.rs diff --git a/util/len-caching-lock/Cargo.toml b/crates/util/len-caching-lock/Cargo.toml similarity index 100% rename from util/len-caching-lock/Cargo.toml rename to crates/util/len-caching-lock/Cargo.toml diff --git a/util/len-caching-lock/src/lib.rs b/crates/util/len-caching-lock/src/lib.rs similarity index 100% rename from util/len-caching-lock/src/lib.rs rename to crates/util/len-caching-lock/src/lib.rs diff --git a/util/len-caching-lock/src/mutex.rs b/crates/util/len-caching-lock/src/mutex.rs similarity index 100% rename from util/len-caching-lock/src/mutex.rs rename to crates/util/len-caching-lock/src/mutex.rs diff --git a/util/len-caching-lock/src/rwlock.rs b/crates/util/len-caching-lock/src/rwlock.rs similarity index 100% rename from util/len-caching-lock/src/rwlock.rs rename to crates/util/len-caching-lock/src/rwlock.rs diff --git a/util/macros/Cargo.toml b/crates/util/macros/Cargo.toml similarity index 100% rename from util/macros/Cargo.toml rename to crates/util/macros/Cargo.toml diff --git a/util/macros/src/lib.rs b/crates/util/macros/src/lib.rs similarity index 100% rename from util/macros/src/lib.rs rename to crates/util/macros/src/lib.rs diff --git a/util/memory-cache/Cargo.toml b/crates/util/memory-cache/Cargo.toml similarity index 100% rename from util/memory-cache/Cargo.toml rename to crates/util/memory-cache/Cargo.toml diff --git a/util/memory-cache/src/lib.rs b/crates/util/memory-cache/src/lib.rs similarity index 100% rename from util/memory-cache/src/lib.rs rename to crates/util/memory-cache/src/lib.rs diff --git a/util/memzero/Cargo.toml b/crates/util/memzero/Cargo.toml similarity index 100% rename from util/memzero/Cargo.toml rename to crates/util/memzero/Cargo.toml diff --git a/util/memzero/src/lib.rs b/crates/util/memzero/src/lib.rs similarity index 100% rename from util/memzero/src/lib.rs rename to crates/util/memzero/src/lib.rs diff --git a/util/panic-hook/Cargo.toml b/crates/util/panic-hook/Cargo.toml similarity index 100% rename from util/panic-hook/Cargo.toml rename to crates/util/panic-hook/Cargo.toml diff --git a/util/panic-hook/src/lib.rs b/crates/util/panic-hook/src/lib.rs similarity index 100% rename from util/panic-hook/src/lib.rs 
rename to crates/util/panic-hook/src/lib.rs diff --git a/util/rlp-compress/Cargo.toml b/crates/util/rlp-compress/Cargo.toml similarity index 100% rename from util/rlp-compress/Cargo.toml rename to crates/util/rlp-compress/Cargo.toml diff --git a/util/rlp-compress/src/common.rs b/crates/util/rlp-compress/src/common.rs similarity index 100% rename from util/rlp-compress/src/common.rs rename to crates/util/rlp-compress/src/common.rs diff --git a/util/rlp-compress/src/lib.rs b/crates/util/rlp-compress/src/lib.rs similarity index 100% rename from util/rlp-compress/src/lib.rs rename to crates/util/rlp-compress/src/lib.rs diff --git a/util/rlp-compress/tests/compress.rs b/crates/util/rlp-compress/tests/compress.rs similarity index 100% rename from util/rlp-compress/tests/compress.rs rename to crates/util/rlp-compress/tests/compress.rs diff --git a/util/rlp-derive/Cargo.toml b/crates/util/rlp-derive/Cargo.toml similarity index 100% rename from util/rlp-derive/Cargo.toml rename to crates/util/rlp-derive/Cargo.toml diff --git a/util/rlp-derive/src/de.rs b/crates/util/rlp-derive/src/de.rs similarity index 100% rename from util/rlp-derive/src/de.rs rename to crates/util/rlp-derive/src/de.rs diff --git a/util/rlp-derive/src/en.rs b/crates/util/rlp-derive/src/en.rs similarity index 100% rename from util/rlp-derive/src/en.rs rename to crates/util/rlp-derive/src/en.rs diff --git a/util/rlp-derive/src/lib.rs b/crates/util/rlp-derive/src/lib.rs similarity index 100% rename from util/rlp-derive/src/lib.rs rename to crates/util/rlp-derive/src/lib.rs diff --git a/util/rlp-derive/tests/rlp.rs b/crates/util/rlp-derive/tests/rlp.rs similarity index 100% rename from util/rlp-derive/tests/rlp.rs rename to crates/util/rlp-derive/tests/rlp.rs diff --git a/util/stats/Cargo.toml b/crates/util/stats/Cargo.toml similarity index 100% rename from util/stats/Cargo.toml rename to crates/util/stats/Cargo.toml diff --git a/util/stats/src/lib.rs b/crates/util/stats/src/lib.rs similarity index 100% rename from util/stats/src/lib.rs rename to crates/util/stats/src/lib.rs diff --git a/util/time-utils/Cargo.toml b/crates/util/time-utils/Cargo.toml similarity index 100% rename from util/time-utils/Cargo.toml rename to crates/util/time-utils/Cargo.toml diff --git a/util/time-utils/src/lib.rs b/crates/util/time-utils/src/lib.rs similarity index 100% rename from util/time-utils/src/lib.rs rename to crates/util/time-utils/src/lib.rs diff --git a/util/triehash-ethereum/Cargo.toml b/crates/util/triehash-ethereum/Cargo.toml similarity index 100% rename from util/triehash-ethereum/Cargo.toml rename to crates/util/triehash-ethereum/Cargo.toml diff --git a/util/triehash-ethereum/src/lib.rs b/crates/util/triehash-ethereum/src/lib.rs similarity index 100% rename from util/triehash-ethereum/src/lib.rs rename to crates/util/triehash-ethereum/src/lib.rs diff --git a/util/unexpected/Cargo.toml b/crates/util/unexpected/Cargo.toml similarity index 100% rename from util/unexpected/Cargo.toml rename to crates/util/unexpected/Cargo.toml diff --git a/util/unexpected/src/lib.rs b/crates/util/unexpected/src/lib.rs similarity index 100% rename from util/unexpected/src/lib.rs rename to crates/util/unexpected/src/lib.rs diff --git a/util/version/Cargo.toml b/crates/util/version/Cargo.toml similarity index 100% rename from util/version/Cargo.toml rename to crates/util/version/Cargo.toml diff --git a/util/version/build.rs b/crates/util/version/build.rs similarity index 100% rename from util/version/build.rs rename to crates/util/version/build.rs diff --git 
a/util/version/src/lib.rs b/crates/util/version/src/lib.rs similarity index 100% rename from util/version/src/lib.rs rename to crates/util/version/src/lib.rs diff --git a/ethcore/builtin/Cargo.toml b/crates/vm/builtin/Cargo.toml similarity index 95% rename from ethcore/builtin/Cargo.toml rename to crates/vm/builtin/Cargo.toml index 4a204f46f..540d179b0 100644 --- a/ethcore/builtin/Cargo.toml +++ b/crates/vm/builtin/Cargo.toml @@ -10,7 +10,7 @@ bn = { git = "https://github.com/paritytech/bn", default-features = false } byteorder = "1.3.2" eip-152 = { path = "../../util/EIP-152" } ethereum-types = "0.4" -ethjson = { path = "../../json" } +ethjson = { path = "../../ethjson" } ethkey = { path = "../../accounts/ethkey" } keccak-hash = "0.1.0" log = "0.4" diff --git a/ethcore/builtin/src/lib.rs b/crates/vm/builtin/src/lib.rs similarity index 100% rename from ethcore/builtin/src/lib.rs rename to crates/vm/builtin/src/lib.rs diff --git a/ethcore/call-contract/Cargo.toml b/crates/vm/call-contract/Cargo.toml similarity index 83% rename from ethcore/call-contract/Cargo.toml rename to crates/vm/call-contract/Cargo.toml index 7ee9bb7e6..08066e988 100644 --- a/ethcore/call-contract/Cargo.toml +++ b/crates/vm/call-contract/Cargo.toml @@ -7,6 +7,6 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -types = { path = "../types", package = "common-types" } +types = { path = "../../ethcore/types", package = "common-types" } ethereum-types = "0.4" bytes = { version = "0.1", package = "parity-bytes" } diff --git a/ethcore/call-contract/src/call_contract.rs b/crates/vm/call-contract/src/call_contract.rs similarity index 100% rename from ethcore/call-contract/src/call_contract.rs rename to crates/vm/call-contract/src/call_contract.rs diff --git a/ethcore/call-contract/src/lib.rs b/crates/vm/call-contract/src/lib.rs similarity index 100% rename from ethcore/call-contract/src/lib.rs rename to crates/vm/call-contract/src/lib.rs diff --git a/ethcore/evm/Cargo.toml b/crates/vm/evm/Cargo.toml similarity index 100% rename from ethcore/evm/Cargo.toml rename to crates/vm/evm/Cargo.toml diff --git a/ethcore/evm/benches/basic.rs b/crates/vm/evm/benches/basic.rs similarity index 100% rename from ethcore/evm/benches/basic.rs rename to crates/vm/evm/benches/basic.rs diff --git a/ethcore/evm/src/evm.rs b/crates/vm/evm/src/evm.rs similarity index 100% rename from ethcore/evm/src/evm.rs rename to crates/vm/evm/src/evm.rs diff --git a/ethcore/evm/src/factory.rs b/crates/vm/evm/src/factory.rs similarity index 100% rename from ethcore/evm/src/factory.rs rename to crates/vm/evm/src/factory.rs diff --git a/ethcore/evm/src/instructions.rs b/crates/vm/evm/src/instructions.rs similarity index 100% rename from ethcore/evm/src/instructions.rs rename to crates/vm/evm/src/instructions.rs diff --git a/ethcore/evm/src/interpreter/gasometer.rs b/crates/vm/evm/src/interpreter/gasometer.rs similarity index 100% rename from ethcore/evm/src/interpreter/gasometer.rs rename to crates/vm/evm/src/interpreter/gasometer.rs diff --git a/ethcore/evm/src/interpreter/informant.rs b/crates/vm/evm/src/interpreter/informant.rs similarity index 100% rename from ethcore/evm/src/interpreter/informant.rs rename to crates/vm/evm/src/interpreter/informant.rs diff --git a/ethcore/evm/src/interpreter/memory.rs b/crates/vm/evm/src/interpreter/memory.rs similarity index 100% rename from ethcore/evm/src/interpreter/memory.rs rename to crates/vm/evm/src/interpreter/memory.rs diff --git a/ethcore/evm/src/interpreter/mod.rs 
b/crates/vm/evm/src/interpreter/mod.rs similarity index 100% rename from ethcore/evm/src/interpreter/mod.rs rename to crates/vm/evm/src/interpreter/mod.rs diff --git a/ethcore/evm/src/interpreter/shared_cache.rs b/crates/vm/evm/src/interpreter/shared_cache.rs similarity index 100% rename from ethcore/evm/src/interpreter/shared_cache.rs rename to crates/vm/evm/src/interpreter/shared_cache.rs diff --git a/ethcore/evm/src/interpreter/stack.rs b/crates/vm/evm/src/interpreter/stack.rs similarity index 100% rename from ethcore/evm/src/interpreter/stack.rs rename to crates/vm/evm/src/interpreter/stack.rs diff --git a/ethcore/evm/src/lib.rs b/crates/vm/evm/src/lib.rs similarity index 100% rename from ethcore/evm/src/lib.rs rename to crates/vm/evm/src/lib.rs diff --git a/ethcore/evm/src/tests.rs b/crates/vm/evm/src/tests.rs similarity index 100% rename from ethcore/evm/src/tests.rs rename to crates/vm/evm/src/tests.rs diff --git a/ethcore/evm/src/vmtype.rs b/crates/vm/evm/src/vmtype.rs similarity index 100% rename from ethcore/evm/src/vmtype.rs rename to crates/vm/evm/src/vmtype.rs diff --git a/ethcore/vm/Cargo.toml b/crates/vm/vm/Cargo.toml similarity index 72% rename from ethcore/vm/Cargo.toml rename to crates/vm/vm/Cargo.toml index 34b086bce..70a7056b3 100644 --- a/ethcore/vm/Cargo.toml +++ b/crates/vm/vm/Cargo.toml @@ -7,7 +7,7 @@ authors = ["Parity Technologies "] [dependencies] parity-bytes = "0.1" ethereum-types = "0.4" -patricia-trie-ethereum = { path = "../../util/patricia-trie-ethereum" } -ethjson = { path = "../../json" } +patricia-trie-ethereum = { path = "../../db/patricia-trie-ethereum" } +ethjson = { path = "../../ethjson" } rlp = { version = "0.3.0", features = ["ethereum"] } keccak-hash = "0.1" diff --git a/ethcore/vm/src/access_list.rs b/crates/vm/vm/src/access_list.rs similarity index 100% rename from ethcore/vm/src/access_list.rs rename to crates/vm/vm/src/access_list.rs diff --git a/ethcore/vm/src/action_params.rs b/crates/vm/vm/src/action_params.rs similarity index 100% rename from ethcore/vm/src/action_params.rs rename to crates/vm/vm/src/action_params.rs diff --git a/ethcore/vm/src/call_type.rs b/crates/vm/vm/src/call_type.rs similarity index 100% rename from ethcore/vm/src/call_type.rs rename to crates/vm/vm/src/call_type.rs diff --git a/ethcore/vm/src/env_info.rs b/crates/vm/vm/src/env_info.rs similarity index 100% rename from ethcore/vm/src/env_info.rs rename to crates/vm/vm/src/env_info.rs diff --git a/ethcore/vm/src/error.rs b/crates/vm/vm/src/error.rs similarity index 100% rename from ethcore/vm/src/error.rs rename to crates/vm/vm/src/error.rs diff --git a/ethcore/vm/src/ext.rs b/crates/vm/vm/src/ext.rs similarity index 100% rename from ethcore/vm/src/ext.rs rename to crates/vm/vm/src/ext.rs diff --git a/ethcore/vm/src/lib.rs b/crates/vm/vm/src/lib.rs similarity index 100% rename from ethcore/vm/src/lib.rs rename to crates/vm/vm/src/lib.rs diff --git a/ethcore/vm/src/return_data.rs b/crates/vm/vm/src/return_data.rs similarity index 100% rename from ethcore/vm/src/return_data.rs rename to crates/vm/vm/src/return_data.rs diff --git a/ethcore/vm/src/schedule.rs b/crates/vm/vm/src/schedule.rs similarity index 100% rename from ethcore/vm/src/schedule.rs rename to crates/vm/vm/src/schedule.rs diff --git a/ethcore/vm/src/tests.rs b/crates/vm/vm/src/tests.rs similarity index 100% rename from ethcore/vm/src/tests.rs rename to crates/vm/vm/src/tests.rs diff --git a/ethcore/wasm/Cargo.toml b/crates/vm/wasm/Cargo.toml similarity index 100% rename from ethcore/wasm/Cargo.toml 
rename to crates/vm/wasm/Cargo.toml diff --git a/ethcore/wasm/src/env.rs b/crates/vm/wasm/src/env.rs similarity index 100% rename from ethcore/wasm/src/env.rs rename to crates/vm/wasm/src/env.rs diff --git a/ethcore/wasm/src/lib.rs b/crates/vm/wasm/src/lib.rs similarity index 99% rename from ethcore/wasm/src/lib.rs rename to crates/vm/wasm/src/lib.rs index 5ebaaadb4..9601d53cd 100644 --- a/ethcore/wasm/src/lib.rs +++ b/crates/vm/wasm/src/lib.rs @@ -34,9 +34,6 @@ mod panic_payload; mod parser; mod runtime; -#[cfg(test)] -mod tests; - use vm::{ActionParams, GasLeft, ReturnData}; use wasmi::{Error as InterpreterError, Trap}; diff --git a/ethcore/wasm/src/panic_payload.rs b/crates/vm/wasm/src/panic_payload.rs similarity index 100% rename from ethcore/wasm/src/panic_payload.rs rename to crates/vm/wasm/src/panic_payload.rs diff --git a/ethcore/wasm/src/parser.rs b/crates/vm/wasm/src/parser.rs similarity index 100% rename from ethcore/wasm/src/parser.rs rename to crates/vm/wasm/src/parser.rs diff --git a/ethcore/wasm/src/runtime.rs b/crates/vm/wasm/src/runtime.rs similarity index 100% rename from ethcore/wasm/src/runtime.rs rename to crates/vm/wasm/src/runtime.rs diff --git a/ethcore/res/ethereum/runner/full.json b/ethcore/res/ethereum/runner/full.json deleted file mode 100644 index 58c25e658..000000000 --- a/ethcore/res/ethereum/runner/full.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "chain": [ - { - "path": "res/ethereum/tests/BlockchainTests", - "skip" : [] - }, - { - "path": "res/ethereum/tests/LegacyTests/Constantinople/BlockchainTests", - "skip" : [] - } - ], - "state": [ - { - "path": "res/ethereum/tests/GeneralStateTests", - "skip" : [] - - }, - { - "path": "res/ethereum/tests//LegacyTests/Constantinople/GeneralStateTests", - "skip" : [] - - } - ], - "difficulty": [ - { - "path": [ - "res/ethereum/tests/BasicTests/difficulty.json", - "res/ethereum/tests/BasicTests/difficultyMainNetwork.json" - ], - "chainspec": "Foundation" - } - ], - "executive": [ - { - "path": "res/ethereum/tests/VMTests" - } - ], - "transaction": [ - { - "path": "res/ethereum/tests/TransactionTests" - } - ], - "trie": [ - { - "path": [ - "res/ethereum/tests/TrieTests/trietest.json", - "res/ethereum/tests/TrieTests/trieanyorder.json" - ], - "triespec": "Generic" - }, - { - "path": [ - "res/ethereum/tests/TrieTests/hex_encoded_securetrie_test.json", - "res/ethereum/tests/TrieTests/trietest_secureTrie.json", - "res/ethereum/tests/TrieTests/trieanyorder_secureTrie.json" - ], - "triespec": "Secure" - } - ] -} \ No newline at end of file diff --git a/ethcore/res/ethereum/tests-issues/currents.json b/ethcore/res/ethereum/tests-issues/currents.json deleted file mode 100644 index d4d3f5e3a..000000000 --- a/ethcore/res/ethereum/tests-issues/currents.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "block": [], - "state": [] -} diff --git a/ethcore/res/wasm-tests b/ethcore/res/wasm-tests deleted file mode 160000 index 0edbf860f..000000000 --- a/ethcore/res/wasm-tests +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 0edbf860ff7ed4b6b6336097ba44836e8c6482dd diff --git a/ethcore/wasm/run/Cargo.toml b/ethcore/wasm/run/Cargo.toml deleted file mode 100644 index eee0d5fa3..000000000 --- a/ethcore/wasm/run/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -description = "Parity WASM Test Run" -name = "pwasm-run-test" -version = "0.1.0" -authors = ["Parity Technologies "] - -[dependencies] -serde = "1" -serde_json = "1" -serde_derive = "1" -ethereum-types = "0.4" -ethjson = { path = "../../../json" } -vm = { path = "../../vm" } -wasm = { path = 
"../" } -clap = "2.24" -env_logger = "0.5" -rustc-hex = "1" - -[features] -default = ["ethereum-types/std"] diff --git a/ethcore/wasm/run/res/sample-fixture.json b/ethcore/wasm/run/res/sample-fixture.json deleted file mode 100644 index 7be3e04be..000000000 --- a/ethcore/wasm/run/res/sample-fixture.json +++ /dev/null @@ -1,45 +0,0 @@ -[ - { - "caption": "Sample test", - "source": "./res/sample1.wasm", - "address": "0x1000000000000000000000000000000000000001", - "sender": "0x1000000000000000000000000000000000000002", - "value": "0x0000000000000000000000000000000000000000000000000000000000000000", - "gasLimit": 100000, - "payload": "0x", - "asserts": [ - { "Return": "0x01" }, - { "UsedGas": 17 }, - { "HasCall": { "codeAddress": "0x1000000000000000000000000000000000000002" }}, - { "HasStorage": - { - "key": "0x0000000000000000000000000000000000000000000000000000000000000001", - "value": "0x0000000000000000000000000000000000000000000000000000000000000002" - } - } - ] - }, - { - "caption": "Keccak test", - "source": "./res/sample2.wasm", - "payload": "0x736f6d657468696e67", - "gasLimit": 100000, - "asserts": [ - { "Return": "0x68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87" } - ] - }, - { - "caption": "Token total supply", - "source": { - "constructor": "./res/sample3.wasm", - "sender": "0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f", - "at": "0x0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6", - "arguments": "0x0000000000000000000000000000000000000000000000000000000010000000" - }, - "payload": "0x18160ddd", - "gasLimit": 100000, - "asserts": [ - { "Return": "0x0000000000000000000000000000000000000000000000000000000010000000" } - ] - } -] \ No newline at end of file diff --git a/ethcore/wasm/run/res/sample1.wasm b/ethcore/wasm/run/res/sample1.wasm deleted file mode 100644 index 6ea0c58cc77bec3f6ccec1a8d3616e28d48df29c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 125 zcmW-YK@Ng26b0Y=4QOdX+_?7y-hqYZfKnSbEfA{nL-{7x0(pYN9dJ=*qckFEHlHgH8L0 NOWJR4p3-Vv`T}Mx8bkm9 diff --git a/ethcore/wasm/run/res/sample2.wasm b/ethcore/wasm/run/res/sample2.wasm deleted file mode 100644 index baf9d17aa98fd185167448c58cc0388993dfab0a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 15410 zcmd6O3v^u7dG6WgHD~6`jEnM@Hm|#)@B!pne@&F#$l06zb zxNXqLcpURI+(+Y;+z@ha8Xkp)WVK03XrOBq0%-!Ks}!21ft5>c)3xper%Bs1O>n<& zpBX)jA?;mvt=j?3-v8eF-+TX${r~^|_gI^pJ0LVo6We1G#{B%en4i$qx0u%^L8!iEGq>b- zADG;~e|AbUmbGLK%w_VL8gK7DI61R#N;BCVRR}lcGMR9K*zW!Nj~v*Y&rKfGtQGiE z2alHU4`dF^=8o#3pcmAlR(^8N{){*%YHHM!3LKrB%W#8hQo3!Lx-d;k*M!hbTbL2u z)Eps%7S-{k3)==C)@W`dDy-d8*t`~rHVAEvaBJ38uJ_ujs^e={wZ&UnrEJG1_VRV9 zFGobv{2JX)tkHwBMz&3tiQu%J^=+B(qo~Vl$_l*zPFQ$hQLm3ktz9=$!c-N_uK@0InAej6bL~L0@GE77UnM=? 
z#c~U`Yb%R&`;cE)YpmR8w2a5BqFVS|tI@E}{R>M=cb6Bguf0>dy|6fO#P}>#vZ?J> z?Y0SeUffu&-@a!YeaCWRd9`)VbsoA8^`$|(c73Vg+{TxB#e(PMT-WpRxg5TDj|uZd zCs?SX;ikXz*X2Uqqz?;GCH!XI&8_?I?s4K^kCQ%hntL5F`&>V9&-f=(FDir*q#u}O z>;R88V-HYm#yJUMp9M{9#7*U#EgXn*%1nC3Lcv00p(?OEYIaW5?6Su8DU-9TI%V?Wq=jbUGGiN!D%(n<%C^v`vduKAY!i(t8>CTX z12n2EvPPrKXn)seU)N}*YqZ=o8g`AAx<-p#qXlcUni57jIJK>qPR$?#-%5RAvF^&h9U9PEl3`}c!!j9RHexBj-VRMNq*9+ra!{lA`B!3o+F!@;|4`iG!YqT*i7)M5u=&P z3X{kO+h?K)6A6Yp%T=bi%A!e|yU$c7OqJ!v%kTW6nT!Q{}$(tP9T%yZzaK; zf6|5p2R-LYfJV?N>Ke&Q5aoyZLrsU{`ivUiEFucQ|Gd;`C(fL|E-t)1al_QSj_eN# zQC}$DhETRx@HPEoxu@~UALF%x1-EPJ(qSsi{ z&^ghl6h0H-gU~y9#UNt&9DmW(+N+Tpo^|{u+f|MczTm+K&VbDb%nnJxcD;QQhnJq~ zPq=0DO(on4i%}6n`QPOUI%N9Zu?bTS!{40nxio}7cE95rv)dT+o@WfG`8j@e0{;zo z75FPA;@`c%m_guIfMwtz{4whrc&>g(`uCAOfb>Ve82Ik5T5IFA8_SC;<74$&>*gwI-dtR3x9+LAo2Y!}`dEE+ z`M{w@y*_dH*s&Yek2UH?55F*eeevevO8dZ(vBP5rweTH0;*G9>BZrP>(%+WC-#X`3 z?$(wTZ%FO>qAaf5UA@&{JKMk&6z=4rg>SFkkhR5|E4B8WR-<0KU8^s)?`<^o$J=rAf`PJL(Vl;?zD}3*m>8P*$g~!yF4-p3K_2DAS{U@hF zmGip=Sg_DoTD@td){|mP6<4LJ&{~b!4Xe630!<5a1)55)9)YIMPYE>DW4!_`_lX3W z@8>U>C1;Rjn*IjYScZq-cOBIMl^cg?P@fi8tprU2q6OgZV(3iRD-Hb zE2keg6mI48hd>WVAr}B4lEAi8R{?-SNoC}C$ ze)_jx{HrhJ5tm*4j|9!T)#n%|EIVQPNE_490+a*@Is_{MO(qwx?2N+s5$H#agQfsn zYM4>uQEiMGpHgPjP>ld`aUUU%SPv+~{e)s%A(Ud69yMV+Kq$w9gi5@L&=+qe^v7EW z5d-6>8Hl$M2IFmnO)+%osM#EECv1taaEzL*@eabac$iQ{zXG<$I|)PaF2as@H(@vy z!p=A*?1~evz?sPPB%#FfB&NReL@4e&fdKXaP`?2H(l-Zy$jghT3yDAjQ_MmDQ^`X8 zQpQ61Qo};`Qous*QoTa#Qo2GlvNCk5FHpUZ2Lj2yR*#fakR(k_hBHNmY=^HO!>KHLSeX=?}Zp zQ0L{7(=gxJmz41GZ2ZMd2Oz~qtRO`PXHpSt%tXgIPgxc{HYoVQtei5Hlwesou&f-` zc6J;9ma~tvGT5~;7~MT($|z9bE+Cf~Ri;M4GNWLb(U0*V!3aRg_rKQnM9xBy*`&+DB>hTNVhTxt@hT@(_hUA_{ z4b8NS@4<_@A==+aW60`xMDO%IGIaOk!g%i^LwQdwLwe7nhW3pYWg-5tBLA;1*CULc z_WkK!o^?6~G}}EZ)7`T&-90Pg?iukl$N1`uIA+pe>%a<=4%ZT}Xwuwix6; zg`<(j)}-#y2z;UY7BD>;L0Wb+0+t<(fOU>W?0Tl307vChN6PgsLl9CY-`Y$j$&Z3>YbM9Yr@-Me$H_78((TRU zCGr#CL(Sx6@>jrjG?SC$Dfn>rWCmc#yGIcPcTACxi6VSkKblJ?Qy5u@hE`DqXDIMrVB$U zWT8Nrh61o`jRgC{bYUp!ixp8f91!)uO`^`XMbz)MiMrf&QE%HJ>S#MfeQcMgdx@xL zC8AEXSJa=L6?LV3qF(g8r~{3P`pyASw;31pm_wq@@`9+JyeR4-lcL^nOw=)si~7V% zqVDjrs3)8hb%Il(^Z!-Rss9?LerML7lh-&XXXRDG897BbEhh=7yi9mSULu^3;{;cZ z5vF94a8zC-Ovnp_BXWpvSjGtlV@3wK5i^oaE!|7KY4=j|QsiE0R!W3R zPoC*>L9(IPtGd{mBQ8iZ0QKa@u6xq{_fCp?rqYo#sXtfYv8l3Ko*9 z53o0!maFrKf#**-53of9%*Pe%gHY<4ZWxm{=5f3*jgl3dTjLZWt=@@U?Ap@*bv}kYXb7o$UzEP8fkioQ2 z-*_^8Uk@^Ja!uZNkX+-^AIf<`QLYe5a+xqMSMQ@?^ZdH$Q_g+#&Ge~vui>QOO?l&f zGB0oP!_dp!kCK^qdgH@pW$u1*A^WiV$&6fJ(`GlCrUhn@vhhim%-TH*o5Mk1g9%nob)6U^1NlNp=;}q1GQCIw%cLY$e|QZoDEp|3Bq-az zU{ndx77v1g^dGR^xWMnN^E4+&^>#H#uk$n~NMGk^PLO^JnYg1#{{w{!Kt$Cd;0H*? zT9y7gl6f0^->zARq!m1U6}f%D?*jXQUjs%!tnLGVZ{u+g`1vWb3GlD5u^EWCy#??$ z$lVIWGQ16dAWId0XUc2`{03!*fWHRru*t&!$bg-IzeCksz||?U8vqfGvCjHNnf8q` zU6z$)%km>`9qmA1F7QxESK{-q^J`{xm*S6|1@>?Y?%kPIu!w%sC+sx%#DlGiorA5p z&cW8z&cW6j_FyYW8KDIie{O`k%(y?maT#um5O~Az_c%hsC-6<2)x!jKjLU<)-GY_x z@iVhea;?}Wm~6aE0`y!f_Q@x_R_y!quN8|Qa;=DqMhyB#UMnIZ{-M{3MP4iBQvx_@ie;wMxV3WctW`h43JjEai$LJ{n)*(z*p4vd64B_$s|3(3zeB<. - -use ethjson::{ - bytes::Bytes, - hash::{Address, H256}, - uint::Uint, -}; -use std::borrow::Cow; - -#[derive(Deserialize)] -#[serde(untagged)] -pub enum Source { - Raw(Cow<'static, String>), - Constructor { - #[serde(rename = "constructor")] - source: Cow<'static, String>, - arguments: Bytes, - sender: Address, - at: Address, - }, -} - -impl Source { - pub fn as_ref(&self) -> &str { - match *self { - Source::Raw(ref r) => r.as_ref(), - Source::Constructor { ref source, .. 
} => source.as_ref(), - } - } -} - -#[derive(Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Fixture { - pub caption: Cow<'static, String>, - pub source: Source, - pub address: Option
<Address>, - pub sender: Option<Address>, - pub value: Option<Uint>, - pub gas_limit: Option<u64>, - pub payload: Option<Bytes>, - pub storage: Option<Vec<StorageEntry>>, - pub asserts: Vec<Assert>, -} - -#[derive(Deserialize, Debug)] -pub struct StorageEntry { - pub key: Uint, - pub value: Uint, -} - -#[derive(Deserialize, Debug, Clone)] -#[serde(rename_all = "camelCase")] -pub struct CallLocator { - pub sender: Option<Address>, - pub receiver: Option<Address>, - pub value: Option<Uint>, - pub data: Option<Bytes>, - pub code_address: Option<Address>
, -} - -#[derive(Deserialize, Debug)] -pub struct StorageAssert { - pub key: H256, - pub value: H256, -} - -#[derive(Deserialize, Debug)] -pub enum Assert { - HasCall(CallLocator), - HasStorage(StorageAssert), - UsedGas(u64), - Return(Bytes), -} diff --git a/ethcore/wasm/run/src/main.rs b/ethcore/wasm/run/src/main.rs deleted file mode 100644 index 63e2de00b..000000000 --- a/ethcore/wasm/run/src/main.rs +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. - -// OpenEthereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// OpenEthereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with OpenEthereum. If not, see . - -extern crate serde; -extern crate serde_json; -#[macro_use] -extern crate serde_derive; -extern crate clap; -extern crate env_logger; -extern crate ethereum_types; -extern crate ethjson; -extern crate rustc_hex; -extern crate vm; -extern crate wasm; - -mod fixture; -mod runner; - -use clap::{App, Arg}; -use fixture::Fixture; -use std::fs; - -fn main() { - ::env_logger::init(); - - let matches = App::new("pwasm-run-test") - .arg( - Arg::with_name("target") - .index(1) - .required(true) - .multiple(true) - .help("JSON fixture"), - ) - .get_matches(); - - let mut exit_code = 0; - - for target in matches.values_of("target").expect("No target parameter") { - let mut f = fs::File::open(target).expect("Failed to open file"); - let fixtures: Vec = - serde_json::from_reader(&mut f).expect("Failed to deserialize json"); - - for fixture in fixtures.into_iter() { - let fails = runner::run_fixture(&fixture); - for fail in fails.iter() { - exit_code = 1; - println!( - "Failed assert in test \"{}\" ('{}'): {}", - fixture.caption.as_ref(), - target, - fail - ); - } - } - } - - std::process::exit(exit_code); -} diff --git a/ethcore/wasm/run/src/runner.rs b/ethcore/wasm/run/src/runner.rs deleted file mode 100644 index a348c44e2..000000000 --- a/ethcore/wasm/run/src/runner.rs +++ /dev/null @@ -1,345 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. - -// OpenEthereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// OpenEthereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with OpenEthereum. If not, see . 
- -use ethereum_types::{H160, H256, U256}; -use fixture::{Assert, CallLocator, Fixture, Source}; -use rustc_hex::ToHex; -use std::{ - fmt, fs, - io::{self, Read}, - path, - sync::Arc, -}; -use vm::{self, tests::FakeExt, ActionParams, ActionValue, Exec, GasLeft, ParamsType}; -use wasm::WasmInterpreter; - -fn load_code>(p: P) -> io::Result> { - let mut result = Vec::new(); - let mut f = fs::File::open(p)?; - f.read_to_end(&mut result)?; - Ok(result) -} - -fn wasm_interpreter(params: ActionParams) -> Box { - Box::new(WasmInterpreter::new(params)) -} - -#[derive(Debug)] -pub enum SpecNonconformity { - Address, -} - -#[derive(Debug)] -pub enum Fail { - Return { - expected: Vec, - actual: Vec, - }, - UsedGas { - expected: u64, - actual: u64, - }, - Runtime(String), - Load(io::Error), - NoCall(CallLocator), - StorageMismatch { - key: H256, - expected: H256, - actual: Option, - }, - Nonconformity(SpecNonconformity), -} - -impl Fail { - fn runtime(err: vm::Error) -> Vec { - vec![Fail::Runtime(format!("{}", err))] - } - - fn load(err: io::Error) -> Vec { - vec![Fail::Load(err)] - } - - fn nononformity(kind: SpecNonconformity) -> Vec { - vec![Fail::Nonconformity(kind)] - } -} - -impl fmt::Display for Fail { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use self::Fail::*; - match *self { - Return { - ref expected, - ref actual, - } => write!( - f, - "Expected to return result: 0x{} ({} bytes), but got 0x{} ({} bytes)", - expected.to_hex(), - expected.len(), - actual.to_hex(), - actual.len() - ), - - UsedGas { expected, actual } => write!( - f, - "Expected to use gas: {}, but got actual gas used: {}", - expected, actual - ), - - Runtime(ref s) => write!(f, "WASM Runtime error: {}", s), - - Load(ref e) => write!(f, "Load i/o error: {}", e), - - NoCall(ref call) => write!(f, "Call not found: {:?}", call), - - StorageMismatch { - ref key, - ref expected, - actual: Some(ref actual), - } => write!( - f, - "Storage key {} value mismatch, expected {}, got: {}", - key.to_vec().to_hex(), - expected.to_vec().to_hex(), - actual.to_vec().to_hex(), - ), - - StorageMismatch { - ref key, - ref expected, - actual: None, - } => write!( - f, - "No expected storage value for key {} found, expected {}", - key.to_vec().to_hex(), - expected.to_vec().to_hex(), - ), - - Nonconformity(SpecNonconformity::Address) => { - write!(f, "Cannot use address when constructor is specified!") - } - } - } -} - -pub fn construct( - ext: &mut dyn vm::Ext, - source: Vec, - arguments: Vec, - sender: H160, - at: H160, -) -> Result, vm::Error> { - let mut params = ActionParams::default(); - params.sender = sender; - params.address = at; - params.gas = U256::from(100_000_000); - params.data = Some(arguments); - params.code = Some(Arc::new(source)); - params.params_type = ParamsType::Separate; - - Ok( - match wasm_interpreter(params) - .exec(ext) - .ok() - .expect("Wasm interpreter always calls with trap=false; trap never happens; qed")? - { - GasLeft::Known(_) => Vec::new(), - GasLeft::NeedsReturn { data, .. } => data.to_vec(), - }, - ) -} - -pub fn run_fixture(fixture: &Fixture) -> Vec { - let mut params = ActionParams::default(); - - let source = match load_code(fixture.source.as_ref()) { - Ok(code) => code, - Err(e) => { - return Fail::load(e); - } - }; - - let mut ext = FakeExt::new().with_wasm(); - params.code = Some(Arc::new( - if let Source::Constructor { - ref arguments, - ref sender, - ref at, - .. 
- } = fixture.source - { - match construct( - &mut ext, - source, - arguments.clone().into(), - sender.clone().into(), - at.clone().into(), - ) { - Ok(code) => code, - Err(e) => { - return Fail::runtime(e); - } - } - } else { - source - }, - )); - - if let Some(ref sender) = fixture.sender { - params.sender = sender.clone().into(); - } - - if let Some(ref address) = fixture.address { - if let Source::Constructor { .. } = fixture.source { - return Fail::nononformity(SpecNonconformity::Address); - } - - params.address = address.clone().into(); - } else if let Source::Constructor { ref at, .. } = fixture.source { - params.address = at.clone().into(); - } - - if let Some(gas_limit) = fixture.gas_limit { - params.gas = U256::from(gas_limit); - } - - if let Some(ref data) = fixture.payload { - params.data = Some(data.clone().into()) - } - - if let Some(value) = fixture.value { - params.value = ActionValue::Transfer(value.clone().into()) - } - - if let Some(ref storage) = fixture.storage { - for storage_entry in storage.iter() { - let key: U256 = storage_entry.key.into(); - let val: U256 = storage_entry.value.into(); - ext.store.insert(key.into(), val.into()); - } - } - - let interpreter = wasm_interpreter(params); - - let interpreter_return = match interpreter - .exec(&mut ext) - .ok() - .expect("Wasm interpreter always calls with trap=false; trap never happens; qed") - { - Ok(ret) => ret, - Err(e) => { - return Fail::runtime(e); - } - }; - let (gas_left, result) = match interpreter_return { - GasLeft::Known(gas) => (gas, Vec::new()), - GasLeft::NeedsReturn { - gas_left: gas, - data: result, - apply_state: _apply, - } => (gas, result.to_vec()), - }; - - let mut fails = Vec::new(); - - for assert in fixture.asserts.iter() { - match *assert { - Assert::Return(ref data) => { - if &data[..] != &result[..] { - fails.push(Fail::Return { - expected: (&data[..]).to_vec(), - actual: (&result[..]).to_vec(), - }) - } - } - Assert::UsedGas(gas) => { - let used_gas = fixture.gas_limit.unwrap_or(0) - gas_left.low_u64(); - if gas != used_gas { - fails.push(Fail::UsedGas { - expected: gas, - actual: used_gas, - }); - } - } - Assert::HasCall(ref locator) => { - let mut found = false; - - for fake_call in ext.calls.iter() { - let mut match_ = true; - if let Some(ref data) = locator.data { - if data.as_ref() != &fake_call.data[..] 
{ - match_ = false; - } - } - - if let Some(ref code_addr) = locator.code_address { - if fake_call.code_address.unwrap_or(H160::zero()) - != code_addr.clone().into() - { - match_ = false - } - } - - if let Some(ref sender) = locator.sender { - if fake_call.sender_address.unwrap_or(H160::zero()) != sender.clone().into() - { - match_ = false - } - } - - if let Some(ref receiver) = locator.receiver { - if fake_call.receive_address.unwrap_or(H160::zero()) - != receiver.clone().into() - { - match_ = false - } - } - - if match_ { - found = true; - break; - } - } - - if !found { - fails.push(Fail::NoCall(locator.clone())) - } - } - Assert::HasStorage(ref storage_entry) => { - let expected_storage_key: H256 = storage_entry.key.clone().into(); - let expected_storage_value: H256 = storage_entry.value.clone().into(); - let val = ext.store.get(&expected_storage_key); - - if let Some(val) = val { - if val != &expected_storage_value { - fails.push(Fail::StorageMismatch { - key: expected_storage_key, - expected: expected_storage_value, - actual: Some(val.clone()), - }) - } - } else { - fails.push(Fail::StorageMismatch { - key: expected_storage_key, - expected: expected_storage_value, - actual: None, - }) - } - } - } - } - fails -} diff --git a/ethcore/wasm/src/tests.rs b/ethcore/wasm/src/tests.rs deleted file mode 100644 index 00cf53419..000000000 --- a/ethcore/wasm/src/tests.rs +++ /dev/null @@ -1,1181 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. - -// OpenEthereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// OpenEthereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with OpenEthereum. If not, see . - -use byteorder::{ByteOrder, LittleEndian}; -use ethereum_types::{Address, H256, U256}; -use std::{collections::HashMap, sync::Arc}; - -use super::WasmInterpreter; -use vm::{ - self, - tests::{FakeCall, FakeCallType, FakeExt}, - ActionParams, ActionValue, CreateContractAddress, Exec, GasLeft, -}; - -macro_rules! load_sample { - ($name: expr) => { - include_bytes!(concat!("../../res/wasm-tests/compiled/", $name)).to_vec() - }; -} - -macro_rules! 
reqrep_test { - ($name: expr, $input: expr) => { - reqrep_test!($name, $input, vm::EnvInfo::default(), HashMap::new()) - }; - ($name: expr, $input: expr, $info: expr, $block_hashes: expr) => {{ - let _ = ::env_logger::try_init(); - let code = load_sample!($name); - - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.data = Some($input); - - let mut fake_ext = FakeExt::new().with_wasm(); - fake_ext.info = $info; - fake_ext.blockhashes = $block_hashes; - - let interpreter = wasm_interpreter(params); - interpreter - .exec(&mut fake_ext) - .ok() - .unwrap() - .map(|result| match result { - GasLeft::Known(_) => { - panic!("Test is expected to return payload to check"); - } - GasLeft::NeedsReturn { - gas_left: gas, - data: result, - apply_state: _apply, - } => (gas, result.to_vec()), - }) - }}; -} - -fn test_finalize(res: Result) -> Result { - match res { - Ok(GasLeft::Known(gas)) => Ok(gas), - Ok(GasLeft::NeedsReturn { .. }) => unimplemented!(), // since ret is unimplemented. - Err(e) => Err(e), - } -} - -fn wasm_interpreter(params: ActionParams) -> Box { - Box::new(WasmInterpreter::new(params)) -} - -/// Empty contract does almost nothing except producing 1 (one) local node debug log message -#[test] -fn empty() { - let code = load_sample!("empty.wasm"); - let address: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); - - let mut params = ActionParams::default(); - params.address = address.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new().with_wasm(); - - let gas_left = { - let interpreter = wasm_interpreter(params); - test_finalize(interpreter.exec(&mut ext).ok().unwrap()).unwrap() - }; - - assert_eq!(gas_left, U256::from(96_926)); -} - -// This test checks if the contract deserializes payload header properly. -// Contract is provided with receiver(address), sender, origin and transaction value -// logger.wasm writes all these provided fixed header fields to some arbitrary storage keys. 
-#[test] -fn logger() { - let _ = ::env_logger::try_init(); - - let code = load_sample!("logger.wasm"); - let address: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); - let sender: Address = "0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d".parse().unwrap(); - let origin: Address = "0102030405060708090a0b0c0d0e0f1011121314".parse().unwrap(); - - let mut params = ActionParams::default(); - params.address = address.clone(); - params.sender = sender.clone(); - params.origin = origin.clone(); - params.gas = U256::from(100_000); - params.value = ActionValue::transfer(1_000_000_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new().with_wasm(); - - let gas_left = { - let interpreter = wasm_interpreter(params); - test_finalize(interpreter.exec(&mut ext).ok().unwrap()).unwrap() - }; - - let address_val: H256 = address.into(); - assert_eq!( - ext.store - .get( - &"0100000000000000000000000000000000000000000000000000000000000000" - .parse() - .unwrap() - ) - .expect("storage key to exist"), - &address_val, - "Logger sets 0x01 key to the provided address" - ); - let sender_val: H256 = sender.into(); - assert_eq!( - ext.store - .get( - &"0200000000000000000000000000000000000000000000000000000000000000" - .parse() - .unwrap() - ) - .expect("storage key to exist"), - &sender_val, - "Logger sets 0x02 key to the provided sender" - ); - let origin_val: H256 = origin.into(); - assert_eq!( - ext.store - .get( - &"0300000000000000000000000000000000000000000000000000000000000000" - .parse() - .unwrap() - ) - .expect("storage key to exist"), - &origin_val, - "Logger sets 0x03 key to the provided origin" - ); - assert_eq!( - U256::from( - ext.store - .get( - &"0400000000000000000000000000000000000000000000000000000000000000" - .parse() - .unwrap() - ) - .expect("storage key to exist") - ), - U256::from(1_000_000_000), - "Logger sets 0x04 key to the trasferred value" - ); - assert_eq!(gas_left, U256::from(17_716)); -} - -// This test checks if the contract can allocate memory and pass pointer to the result stream properly. -// 1. Contract is being provided with the call descriptor ptr -// 2. Descriptor ptr is 16 byte length -// 3. The last 8 bytes of call descriptor is the space for the contract to fill [result_ptr[4], result_len[4]] -// if it has any result. -#[test] -fn identity() { - let _ = ::env_logger::try_init(); - - let code = load_sample!("identity.wasm"); - let sender: Address = "01030507090b0d0f11131517191b1d1f21232527".parse().unwrap(); - - let mut params = ActionParams::default(); - params.sender = sender.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new().with_wasm(); - - let (gas_left, result) = { - let interpreter = wasm_interpreter(params); - let result = interpreter - .exec(&mut ext) - .ok() - .unwrap() - .expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { - panic!("Identity contract should return payload"); - } - GasLeft::NeedsReturn { - gas_left: gas, - data: result, - apply_state: _apply, - } => (gas, result.to_vec()), - } - }; - - assert_eq!( - Address::from_slice(&result), - sender, - "Idenity test contract does not return the sender passed" - ); - assert_eq!(gas_left, U256::from(98_419)); -} - -// Dispersion test sends byte array and expect the contract to 'disperse' the original elements with -// their modulo 19 dopant. -// The result is always twice as long as the input. -// This also tests byte-perfect memory allocation and in/out ptr lifecycle. 
-#[test] -fn dispersion() { - let _ = ::env_logger::try_init(); - - let code = load_sample!("dispersion.wasm"); - - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.data = Some(vec![0u8, 125, 197, 255, 19]); - let mut ext = FakeExt::new().with_wasm(); - - let (gas_left, result) = { - let interpreter = wasm_interpreter(params); - let result = interpreter - .exec(&mut ext) - .ok() - .unwrap() - .expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { - panic!("Dispersion routine should return payload"); - } - GasLeft::NeedsReturn { - gas_left: gas, - data: result, - apply_state: _apply, - } => (gas, result.to_vec()), - } - }; - - assert_eq!(result, vec![0u8, 0, 125, 11, 197, 7, 255, 8, 19, 0]); - assert_eq!(gas_left, U256::from(92_377)); -} - -#[test] -fn suicide_not() { - let code = load_sample!("suicidal.wasm"); - - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.data = Some(vec![0u8]); - let mut ext = FakeExt::new().with_wasm(); - - let (gas_left, result) = { - let interpreter = wasm_interpreter(params); - let result = interpreter - .exec(&mut ext) - .ok() - .unwrap() - .expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { - panic!( - "Suicidal contract should return payload when had not actualy killed himself" - ); - } - GasLeft::NeedsReturn { - gas_left: gas, - data: result, - apply_state: _apply, - } => (gas, result.to_vec()), - } - }; - - assert_eq!(result, vec![0u8]); - assert_eq!(gas_left, U256::from(93_378)); -} - -#[test] -fn suicide() { - let _ = ::env_logger::try_init(); - - let code = load_sample!("suicidal.wasm"); - - let refund: Address = "01030507090b0d0f11131517191b1d1f21232527".parse().unwrap(); - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - - let mut args = vec![127u8]; - args.extend(refund.to_vec()); - params.data = Some(args); - - let mut ext = FakeExt::new().with_wasm(); - - let gas_left = { - let interpreter = wasm_interpreter(params); - let result = interpreter - .exec(&mut ext) - .ok() - .unwrap() - .expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(gas) => gas, - GasLeft::NeedsReturn { .. 
} => { - panic!("Suicidal contract should not return anything when had killed itself"); - } - } - }; - - assert!(ext.suicides.contains(&refund)); - assert_eq!(gas_left, U256::from(93_346)); -} - -#[test] -fn create() { - let _ = ::env_logger::try_init(); - - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(load_sample!("creator.wasm"))); - params.data = Some(vec![0u8, 2, 4, 8, 16, 32, 64, 128]); - params.value = ActionValue::transfer(1_000_000_000); - - let mut ext = FakeExt::new().with_wasm(); - ext.schedule.wasm.as_mut().unwrap().have_create2 = true; - - let gas_left = { - let interpreter = wasm_interpreter(params); - let result = interpreter - .exec(&mut ext) - .ok() - .unwrap() - .expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { - panic!("Create contract always return 40 bytes of the creation address, or in the case where it fails, return 40 bytes of zero."); - } - GasLeft::NeedsReturn { - gas_left, - data, - apply_state, - } => { - assert!(apply_state); - assert_eq!(data.as_ref(), [0u8; 40].as_ref()); // FakeExt never succeeds in create. - gas_left - } - } - }; - - trace!(target: "wasm", "fake_calls: {:?}", &ext.calls); - assert!(ext.calls.contains(&FakeCall { - call_type: FakeCallType::Create, - create_scheme: Some(CreateContractAddress::FromSenderAndCodeHash), - gas: U256::from(49_674), - sender_address: None, - receive_address: None, - value: Some((1_000_000_000 / 2).into()), - data: vec![0u8, 2, 4, 8, 16, 32, 64, 128], - code_address: None, - })); - assert!(ext.calls.contains(&FakeCall { - call_type: FakeCallType::Create, - create_scheme: Some(CreateContractAddress::FromSenderSaltAndCodeHash( - H256::from([5u8].as_ref()) - )), - gas: U256::from(6039), - sender_address: None, - receive_address: None, - value: Some((1_000_000_000 / 2).into()), - data: vec![0u8, 2, 4, 8, 16, 32, 64, 128], - code_address: None, - })); - assert_eq!(gas_left, U256::from(5974)); -} - -#[test] -fn call_msg() { - let _ = ::env_logger::try_init(); - - let sender: Address = "01030507090b0d0f11131517191b1d1f21232527".parse().unwrap(); - let receiver: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); - let contract_address: Address = "0d461d4174b4ae35775c4a342f1e5e1e4e6c4db5".parse().unwrap(); - - let mut params = ActionParams::default(); - params.sender = sender.clone(); - params.address = receiver.clone(); - params.code_address = contract_address.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(load_sample!("call.wasm"))); - params.data = Some(Vec::new()); - - let mut ext = FakeExt::new().with_wasm(); - ext.balances - .insert(receiver.clone(), U256::from(10000000000u64)); - - let gas_left = { - let interpreter = wasm_interpreter(params); - let result = interpreter - .exec(&mut ext) - .ok() - .unwrap() - .expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(gas_left) => gas_left, - GasLeft::NeedsReturn { .. 
} => { - panic!("Call test should not return payload"); - } - } - }; - - trace!(target: "wasm", "fake_calls: {:?}", &ext.calls); - assert!(ext.calls.contains(&FakeCall { - call_type: FakeCallType::Call, - create_scheme: None, - gas: U256::from(33_000), - sender_address: Some(receiver), - receive_address: Some(Address::from([ - 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, 11, 22, 33, 44, 55, 66, 77, 88, 99, 0 - ])), - value: Some(1000000000.into()), - data: vec![129u8, 123, 113, 107, 101, 97], - code_address: Some(Address::from([ - 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, 11, 22, 33, 44, 55, 66, 77, 88, 99, 0 - ])), - })); - - assert_eq!(gas_left, U256::from(91_672)); -} - -// The same as `call_msg`, but send a `pwasm_ethereum::gasleft` -// value as `gas` argument to the inner pwasm_ethereum::call -#[test] -fn call_msg_gasleft() { - let _ = ::env_logger::try_init(); - - let sender: Address = "01030507090b0d0f11131517191b1d1f21232527".parse().unwrap(); - let receiver: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); - let contract_address: Address = "0d461d4174b4ae35775c4a342f1e5e1e4e6c4db5".parse().unwrap(); - - let mut params = ActionParams::default(); - params.sender = sender.clone(); - params.address = receiver.clone(); - params.code_address = contract_address.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(load_sample!("call_gasleft.wasm"))); - params.data = Some(Vec::new()); - - let mut ext = FakeExt::new().with_wasm(); - ext.schedule.wasm.as_mut().unwrap().have_gasleft = true; - ext.balances - .insert(receiver.clone(), U256::from(10000000000u64)); - - let gas_left = { - let interpreter = wasm_interpreter(params); - let result = interpreter - .exec(&mut ext) - .ok() - .unwrap() - .expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(gas_left) => gas_left, - GasLeft::NeedsReturn { .. 
} => { - panic!("Call test should not return payload"); - } - } - }; - - trace!(target: "wasm", "fake_calls: {:?}", &ext.calls); - assert!(ext.calls.contains(&FakeCall { - call_type: FakeCallType::Call, - create_scheme: None, - gas: U256::from(91_165), - sender_address: Some(receiver), - receive_address: Some(Address::from([ - 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, 11, 22, 33, 44, 55, 66, 77, 88, 99, 0 - ])), - value: Some(1000000000.into()), - data: vec![129u8, 123, 113, 107, 101, 97], - code_address: Some(Address::from([ - 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, 11, 22, 33, 44, 55, 66, 77, 88, 99, 0 - ])), - })); - - assert_eq!(gas_left, U256::from(91_671)); -} - -#[test] -fn call_code() { - let _ = ::env_logger::try_init(); - - let sender: Address = "01030507090b0d0f11131517191b1d1f21232527".parse().unwrap(); - let receiver: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); - - let mut params = ActionParams::default(); - params.sender = sender.clone(); - params.address = receiver.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(load_sample!("call_code.wasm"))); - params.data = Some(Vec::new()); - params.value = ActionValue::transfer(1_000_000_000); - - let mut ext = FakeExt::new().with_wasm(); - - let (gas_left, result) = { - let interpreter = wasm_interpreter(params); - let result = interpreter - .exec(&mut ext) - .ok() - .unwrap() - .expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { - panic!("Call test should return payload"); - } - GasLeft::NeedsReturn { - gas_left: gas, - data: result, - apply_state: _apply, - } => (gas, result.to_vec()), - } - }; - - trace!(target: "wasm", "fake_calls: {:?}", &ext.calls); - assert!(ext.calls.contains(&FakeCall { - call_type: FakeCallType::Call, - create_scheme: None, - gas: U256::from(20_000), - sender_address: Some(sender), - receive_address: Some(receiver), - value: None, - data: vec![1u8, 2, 3, 5, 7, 11], - code_address: Some("0d13710000000000000000000000000000000000".parse().unwrap()), - })); - - // siphash result - let res = LittleEndian::read_u32(&result[..]); - assert_eq!(res, 4198595614); - assert_eq!(gas_left, U256::from(90_037)); -} - -#[test] -fn call_static() { - let _ = ::env_logger::try_init(); - - let sender: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); - let receiver: Address = "01030507090b0d0f11131517191b1d1f21232527".parse().unwrap(); - let contract_address: Address = "0d461d4174b4ae35775c4a342f1e5e1e4e6c4db5".parse().unwrap(); - - let mut params = ActionParams::default(); - params.sender = sender.clone(); - params.address = receiver.clone(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(load_sample!("call_static.wasm"))); - params.data = Some(Vec::new()); - params.value = ActionValue::transfer(1_000_000_000); - params.code_address = contract_address.clone(); - - let mut ext = FakeExt::new().with_wasm(); - - let (gas_left, result) = { - let interpreter = wasm_interpreter(params); - let result = interpreter - .exec(&mut ext) - .ok() - .unwrap() - .expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { - panic!("Static call test should return payload"); - } - GasLeft::NeedsReturn { - gas_left: gas, - data: result, - apply_state: _apply, - } => (gas, result.to_vec()), - } - }; - - trace!(target: "wasm", "fake_calls: {:?}", &ext.calls); - assert!(ext.calls.contains(&FakeCall { - call_type: FakeCallType::Call, - create_scheme: None, - gas: U256::from(20_000), - 
sender_address: Some(receiver), - receive_address: Some("13077bfb00000000000000000000000000000000".parse().unwrap()), - value: None, - data: vec![1u8, 2, 3, 5, 7, 11], - code_address: Some("13077bfb00000000000000000000000000000000".parse().unwrap()), - })); - - // siphash result - let res = LittleEndian::read_u32(&result[..]); - assert_eq!(res, 317632590); - - assert_eq!(gas_left, U256::from(90_042)); -} - -// Realloc test -#[test] -fn realloc() { - let code = load_sample!("realloc.wasm"); - - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.data = Some(vec![0u8]); - let mut ext = FakeExt::new().with_wasm(); - - let (gas_left, result) = { - let interpreter = wasm_interpreter(params); - let result = interpreter - .exec(&mut ext) - .ok() - .unwrap() - .expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { - panic!("Realloc should return payload"); - } - GasLeft::NeedsReturn { - gas_left: gas, - data: result, - apply_state: _apply, - } => (gas, result.to_vec()), - } - }; - assert_eq!(result, vec![0u8; 2]); - assert_eq!(gas_left, U256::from(92_848)); -} - -#[test] -fn alloc() { - let code = load_sample!("alloc.wasm"); - - let mut params = ActionParams::default(); - params.gas = U256::from(10_000_000); - params.code = Some(Arc::new(code)); - params.data = Some(vec![0u8]); - let mut ext = FakeExt::new().with_wasm(); - - let (gas_left, result) = { - let interpreter = wasm_interpreter(params); - let result = interpreter - .exec(&mut ext) - .ok() - .unwrap() - .expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { - panic!("alloc test should return payload"); - } - GasLeft::NeedsReturn { - gas_left: gas, - data: result, - apply_state: _apply, - } => (gas, result.to_vec()), - } - }; - assert_eq!(result, vec![5u8; 1024 * 400]); - assert_eq!(gas_left, U256::from(6_893_881)); -} - -// Tests that contract's ability to read from a storage -// Test prepopulates address into storage, than executes a contract which read that address from storage and write this address into result -#[test] -fn storage_read() { - let _ = ::env_logger::try_init(); - - let code = load_sample!("storage_read.wasm"); - let address: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); - - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - let mut ext = FakeExt::new().with_wasm(); - ext.store.insert( - "0100000000000000000000000000000000000000000000000000000000000000".into(), - address.into(), - ); - - let (gas_left, result) = { - let interpreter = wasm_interpreter(params); - let result = interpreter - .exec(&mut ext) - .ok() - .unwrap() - .expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { - panic!("storage_read should return payload"); - } - GasLeft::NeedsReturn { - gas_left: gas, - data: result, - apply_state: _apply, - } => (gas, result.to_vec()), - } - }; - - assert_eq!(Address::from(&result[12..32]), address); - assert_eq!(gas_left, U256::from(98_369)); -} - -// Tests keccak calculation -// keccak.wasm runs wasm-std::keccak function on data param and returns hash -#[test] -fn keccak() { - let _ = ::env_logger::try_init(); - let code = load_sample!("keccak.wasm"); - - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.data = Some(b"something".to_vec()); - let mut ext = 
FakeExt::new().with_wasm(); - - let (gas_left, result) = { - let interpreter = wasm_interpreter(params); - let result = interpreter - .exec(&mut ext) - .ok() - .unwrap() - .expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { - panic!("keccak should return payload"); - } - GasLeft::NeedsReturn { - gas_left: gas, - data: result, - apply_state: _apply, - } => (gas, result.to_vec()), - } - }; - - assert_eq!( - H256::from_slice(&result), - H256::from("68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87") - ); - assert_eq!(gas_left, U256::from(85_949)); -} - -// math_* tests check the ability of wasm contract to perform big integer operations -// - addition -// - multiplication -// - substraction -// - division - -// addition -#[test] -fn math_add() { - let (gas_left, result) = reqrep_test!("math.wasm", { - let mut args = [0u8; 65]; - let arg_a = U256::from_dec_str("999999999999999999999999999999").unwrap(); - let arg_b = U256::from_dec_str("888888888888888888888888888888").unwrap(); - arg_a.to_big_endian(&mut args[1..33]); - arg_b.to_big_endian(&mut args[33..65]); - args.to_vec() - }) - .expect("Interpreter to execute without any errors"); - - assert_eq!( - U256::from_dec_str("1888888888888888888888888888887").unwrap(), - (&result[..]).into() - ); - assert_eq!(gas_left, U256::from(92_072)); -} - -// multiplication -#[test] -fn math_mul() { - let (gas_left, result) = reqrep_test!("math.wasm", { - let mut args = [1u8; 65]; - let arg_a = U256::from_dec_str("888888888888888888888888888888").unwrap(); - let arg_b = U256::from_dec_str("999999999999999999999999999999").unwrap(); - arg_a.to_big_endian(&mut args[1..33]); - arg_b.to_big_endian(&mut args[33..65]); - args.to_vec() - }) - .expect("Interpreter to execute without any errors"); - - assert_eq!( - U256::from_dec_str("888888888888888888888888888887111111111111111111111111111112").unwrap(), - (&result[..]).into() - ); - assert_eq!(gas_left, U256::from(91_400)); -} - -// subtraction -#[test] -fn math_sub() { - let (gas_left, result) = reqrep_test!("math.wasm", { - let mut args = [2u8; 65]; - let arg_a = U256::from_dec_str("999999999999999999999999999999").unwrap(); - let arg_b = U256::from_dec_str("888888888888888888888888888888").unwrap(); - arg_a.to_big_endian(&mut args[1..33]); - arg_b.to_big_endian(&mut args[33..65]); - args.to_vec() - }) - .expect("Interpreter to execute without any errors"); - - assert_eq!( - U256::from_dec_str("111111111111111111111111111111").unwrap(), - (&result[..]).into() - ); - assert_eq!(gas_left, U256::from(92_072)); -} - -// subtraction with overflow -#[test] -fn math_sub_with_overflow() { - let result = reqrep_test!("math.wasm", { - let mut args = [2u8; 65]; - let arg_a = U256::from_dec_str("888888888888888888888888888888").unwrap(); - let arg_b = U256::from_dec_str("999999999999999999999999999999").unwrap(); - arg_a.to_big_endian(&mut args[1..33]); - arg_b.to_big_endian(&mut args[33..65]); - args.to_vec() - }); - - match result { - Err(vm::Error::Wasm(_)) => {} - _ => panic!("Unexpected result {:?}", result), - } -} - -#[test] -fn math_div() { - let (gas_left, result) = reqrep_test!("math.wasm", { - let mut args = [3u8; 65]; - let arg_a = U256::from_dec_str("999999999999999999999999999999").unwrap(); - let arg_b = U256::from_dec_str("888888888888888888888888").unwrap(); - arg_a.to_big_endian(&mut args[1..33]); - arg_b.to_big_endian(&mut args[33..65]); - args.to_vec() - }) - .expect("Interpreter to execute without any errors"); - - 
assert_eq!(U256::from_dec_str("1125000").unwrap(), (&result[..]).into()); - assert_eq!(gas_left, U256::from(85_700)); -} - -#[test] -fn storage_metering() { - let _ = ::env_logger::try_init(); - - // #1 - let mut ext = FakeExt::new().with_wasm(); - - let code = Arc::new(load_sample!("setter.wasm")); - let address: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); - - let mut params = ActionParams::default(); - params.address = address.clone(); - params.gas = U256::from(100_000); - params.code = Some(code.clone()); - params.data = Some(vec![ - 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, - 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, - 0x9d, 0x9d, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, - 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, - 0x7b, 0x7b, 0x7b, 0x7b, - ]); - - let gas_left = { - let interpreter = wasm_interpreter(params); - test_finalize(interpreter.exec(&mut ext).ok().unwrap()).unwrap() - }; - - // 0 -> not 0 - assert_eq!(gas_left, U256::from(72_164)); - - // #2 - - let mut params = ActionParams::default(); - params.address = address.clone(); - params.gas = U256::from(100_000); - params.code = Some(code.clone()); - params.data = Some(vec![ - 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, - 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, - 0x9d, 0x9d, 0x6b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, - 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, - 0x7b, 0x7b, 0x7b, 0x7b, - ]); - - let gas_left = { - let interpreter = wasm_interpreter(params); - test_finalize(interpreter.exec(&mut ext).ok().unwrap()).unwrap() - }; - - // not 0 -> not 0 - assert_eq!(gas_left, U256::from(87_164)); -} - -// This test checks the ability of wasm contract to invoke -// varios blockchain runtime methods -#[test] -fn externs() { - let (gas_left, result) = reqrep_test!( - "externs.wasm", - Vec::new(), - vm::EnvInfo { - number: 0x9999999999u64.into(), - author: "efefefefefefefefefefefefefefefefefefefef".parse().unwrap(), - timestamp: 0x8888888888u64.into(), - difficulty: H256::from( - "0f1f2f3f4f5f6f7f8f9fafbfcfdfefff0d1d2d3d4d5d6d7d8d9dadbdcdddedfd" - ) - .into(), - gas_limit: 0x777777777777u64.into(), - last_hashes: Default::default(), - gas_used: 0.into(), - }, - { - let mut hashes = HashMap::new(); - hashes.insert( - U256::from(0), - H256::from("9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d"), - ); - hashes.insert( - U256::from(1), - H256::from("7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b"), - ); - hashes - } - ) - .expect("Interpreter to execute without any errors"); - - assert_eq!( - &result[0..64].to_vec(), - &vec![ - 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, - 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, - 0x9d, 0x9d, 0x9d, 0x9d, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, - 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, - 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, - ], - "Block hashes requested and returned do not match" - ); - - assert_eq!( - &result[64..84].to_vec(), - &vec![ - 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, - 0xef, 0xef, 
0xef, 0xef, 0xef, 0xef, - ], - "Coinbase requested and returned does not match" - ); - - assert_eq!( - &result[84..92].to_vec(), - &vec![0x88, 0x88, 0x88, 0x88, 0x88, 0x00, 0x00, 0x00], - "Timestamp requested and returned does not match" - ); - - assert_eq!( - &result[92..100].to_vec(), - &vec![0x99, 0x99, 0x99, 0x99, 0x99, 0x00, 0x00, 0x00], - "Block number requested and returned does not match" - ); - - assert_eq!( - &result[100..132].to_vec(), - &vec![ - 0x0f, 0x1f, 0x2f, 0x3f, 0x4f, 0x5f, 0x6f, 0x7f, 0x8f, 0x9f, 0xaf, 0xbf, 0xcf, 0xdf, - 0xef, 0xff, 0x0d, 0x1d, 0x2d, 0x3d, 0x4d, 0x5d, 0x6d, 0x7d, 0x8d, 0x9d, 0xad, 0xbd, - 0xcd, 0xdd, 0xed, 0xfd, - ], - "Difficulty requested and returned does not match" - ); - - assert_eq!( - &result[132..164].to_vec(), - &vec![ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x77, - 0x77, 0x77, 0x77, 0x77, - ], - "Gas limit requested and returned does not match" - ); - - assert_eq!(gas_left, U256::from(90_428)); -} - -// This test checks the ability of wasm contract to invoke gasleft -#[test] -fn gasleft() { - let _ = ::env_logger::try_init(); - - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(load_sample!("gasleft.wasm"))); - - let mut ext = FakeExt::new().with_wasm(); - ext.schedule.wasm.as_mut().unwrap().have_gasleft = true; - - let interpreter = wasm_interpreter(params); - let result = interpreter - .exec(&mut ext) - .ok() - .unwrap() - .expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => {} - GasLeft::NeedsReturn { gas_left, data, .. } => { - let gas = LittleEndian::read_u64(data.as_ref()); - assert_eq!(gas, 93_423); - assert_eq!(gas_left, U256::from(93_349)); - } - } -} - -// This test should fail because -// ext.schedule.wasm.as_mut().unwrap().have_gasleft = false; -#[test] -fn gasleft_fail() { - let _ = ::env_logger::try_init(); - - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(load_sample!("gasleft.wasm"))); - let mut ext = FakeExt::new().with_wasm(); - let interpreter = wasm_interpreter(params); - match interpreter.exec(&mut ext).ok().unwrap() { - Err(_) => {} - Ok(_) => { - panic!("interpreter.exec should return Err if ext.schedule.wasm.have_gasleft = false") - } - } -} - -#[test] -fn embedded_keccak() { - let _ = ::env_logger::try_init(); - let mut code = load_sample!("keccak.wasm"); - code.extend_from_slice(b"something"); - - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.params_type = vm::ParamsType::Embedded; - - let mut ext = FakeExt::new().with_wasm(); - - let (gas_left, result) = { - let interpreter = wasm_interpreter(params); - let result = interpreter - .exec(&mut ext) - .ok() - .unwrap() - .expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { - panic!("keccak should return payload"); - } - GasLeft::NeedsReturn { - gas_left: gas, - data: result, - apply_state: _apply, - } => (gas, result.to_vec()), - } - }; - - assert_eq!( - H256::from_slice(&result), - H256::from("68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87") - ); - assert_eq!(gas_left, U256::from(85_949)); -} - -/// This test checks the correctness of log extern -/// Target test puts one event with two topic [keccak(input), reverse(keccak(input))] -/// and reversed 
input as a data -#[test] -fn events() { - let _ = ::env_logger::try_init(); - let code = load_sample!("events.wasm"); - - let mut params = ActionParams::default(); - params.gas = U256::from(100_000); - params.code = Some(Arc::new(code)); - params.data = Some(b"something".to_vec()); - - let mut ext = FakeExt::new().with_wasm(); - - let (gas_left, result) = { - let interpreter = wasm_interpreter(params); - let result = interpreter - .exec(&mut ext) - .ok() - .unwrap() - .expect("Interpreter to execute without any errors"); - match result { - GasLeft::Known(_) => { - panic!("events should return payload"); - } - GasLeft::NeedsReturn { - gas_left: gas, - data: result, - apply_state: _apply, - } => (gas, result.to_vec()), - } - }; - - assert_eq!(ext.logs.len(), 1); - let log_entry = &ext.logs[0]; - assert_eq!(log_entry.topics.len(), 2); - assert_eq!( - &log_entry.topics[0], - &H256::from("68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87") - ); - assert_eq!( - &log_entry.topics[1], - &H256::from("871d5ea37430753faab7dff7a7187783517d83bd822c02e28a164c887e1d3768") - ); - assert_eq!(&log_entry.data, b"gnihtemos"); - - assert_eq!(&result, b"gnihtemos"); - assert_eq!(gas_left, U256::from(83_161)); -} - -#[test] -fn recursive() { - let _ = ::env_logger::try_init(); - let code = load_sample!("recursive.wasm"); - - let mut params = ActionParams::default(); - params.gas = U256::from(100_000_000); - params.code = Some(Arc::new(code)); - params.data = Some({ - // `recursive` expects only one 32-bit word in LE that - // represents an iteration count. - // - // We pick a relative big number to definitely hit stack overflow. - use byteorder::WriteBytesExt; - let mut data = vec![]; - data.write_u32::(100000).unwrap(); - data - }); - - let mut ext = FakeExt::new().with_wasm(); - - let interpreter = wasm_interpreter(params); - let result = interpreter.exec(&mut ext).ok().unwrap(); - - // We expect that stack overflow will occur and it should be generated by - // deterministic stack metering. Exceeding deterministic stack height limit - // always ends with a trap generated by `unreachable` instruction. - match result { - Err(trap) => { - let err_description = trap.to_string(); - assert!( - err_description.contains("Unreachable"), - "err_description: {} should contain 'Unreachable'", - err_description - ); - } - _ => panic!("this test should trap"), - } -} diff --git a/scripts/add_license.sh b/scripts/add_license.sh deleted file mode 100755 index 2b283590b..000000000 --- a/scripts/add_license.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env sh - -PAT_GPL="^// Copyright.*If not, see \.$" -PAT_OTHER="^// Copyright" - -for f in $(find . 
-type f | egrep '\.(c|cpp|rs)$'); do - HEADER=$(head -16 $f) - if [[ $HEADER =~ $PAT_GPL ]]; then - BODY=$(tail -n +17 $f) - cat license_header > temp - echo "$BODY" >> temp - mv temp $f - elif [[ $HEADER =~ $PAT_OTHER ]]; then - echo "Other license was found do nothing" - else - echo "$f was missing header" - cat license_header $f > temp - mv temp $f - fi -done diff --git a/scripts/doc.sh b/scripts/doc.sh deleted file mode 100755 index e005217cf..000000000 --- a/scripts/doc.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env sh -# generate documentation only for openethereum and ethcore libraries - -cargo doc --no-deps --verbose --all && - echo '' > target/doc/index.html diff --git a/scripts/evm_jsontests_bench.sh b/scripts/evm_jsontests_bench.sh index e13ff9ef4..69736cea5 100755 --- a/scripts/evm_jsontests_bench.sh +++ b/scripts/evm_jsontests_bench.sh @@ -2,15 +2,15 @@ cargo build --release -p evmbin -./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/ethereum/tests/VMTests/vmArithmeticTest -./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/ethereum/tests/VMTests/vmBitwiseLogicOperation -./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/ethereum/tests/VMTests/vmBlockInfoTest -./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/ethereum/tests/VMTests/vmEnvironmentalInfo -./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/ethereum/tests/VMTests/vmIOandFlowOperations -./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/ethereum/tests/VMTests/vmLogTest -./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/ethereum/tests/VMTests/vmPerformance -./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/ethereum/tests/VMTests/vmPushDupSwapTest -./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/ethereum/tests/VMTests/vmRandomTest -./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/ethereum/tests/VMTests/vmSha3Test -./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/ethereum/tests/VMTests/vmSystemOperations -./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/ethereum/tests/VMTests/vmTests +./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/json_tests/VMTests/vmArithmeticTest +./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/json_tests/VMTests/vmBitwiseLogicOperation +./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/json_tests/VMTests/vmBlockInfoTest +./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/json_tests/VMTests/vmEnvironmentalInfo +./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/json_tests/VMTests/vmIOandFlowOperations +./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/json_tests/VMTests/vmLogTest +./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/json_tests/VMTests/vmPerformance +./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/json_tests/VMTests/vmPushDupSwapTest +./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/json_tests/VMTests/vmRandomTest +./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/json_tests/VMTests/vmSha3Test +./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/json_tests/VMTests/vmSystemOperations +./target/release/openethereum-evm stats-jsontests-vm ./ethcore/res/json_tests/VMTests/vmTests diff --git a/scripts/hook.sh b/scripts/hook.sh deleted file mode 100755 index 739b11e7f..000000000 --- a/scripts/hook.sh 
+++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env sh -FILE=./.git/hooks/pre-push - -echo "#!/bin/sh\n" > $FILE -# Exit on any error -echo "set -e" >> $FILE -# Run release build -echo "cargo build --features dev" >> $FILE -# Build tests -echo "cargo test --no-run --features dev --all" >> $FILE -echo "" >> $FILE -chmod +x $FILE diff --git a/scripts/remove_duplicate_empty_lines.sh b/scripts/remove_duplicate_empty_lines.sh deleted file mode 100755 index 0df265ab9..000000000 --- a/scripts/remove_duplicate_empty_lines.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env sh - -for f in $(find . -name '*.rs'); do - cat -s $f > $f.temp - mv $f.temp $f -done diff --git a/secret-store/Cargo.toml b/secret-store/Cargo.toml deleted file mode 100644 index 65bc0026d..000000000 --- a/secret-store/Cargo.toml +++ /dev/null @@ -1,49 +0,0 @@ -[package] -description = "Parity Ethereum (EthCore) Secret Store" -name = "ethcore-secretstore" -version = "1.0.0" -license = "GPL-3.0" -authors = ["Parity Technologies "] - -[dependencies] -byteorder = "1.0" -common-types = { path = "../ethcore/types" } -ethabi = "6.0" -ethabi-contract = "6.0" -ethabi-derive = "6.0" -ethcore = { path = "../ethcore" } -ethcore-accounts = { path = "../accounts", optional = true} -ethcore-call-contract = { path = "../ethcore/call-contract" } -ethcore-sync = { path = "../ethcore/sync" } -ethereum-types = "0.4" -ethkey = { path = "../accounts/ethkey" } -futures = "0.1" -hyper = { version = "0.12", default-features = false } -keccak-hash = "0.1" -kvdb = "0.1" -lazy_static = "1.0" -log = "0.4" -parity-bytes = "0.1" -parity-crypto = "0.3" -parity-runtime = { path = "../util/runtime" } -parking_lot = "0.7" -rustc-hex = "1.0" -serde = "1.0" -serde_derive = "1.0" -serde_json = "1.0" -tiny-keccak = "1.4" -tokio = "0.1.22" -tokio-io = "0.1" -tokio-service = "0.1" -url = "2" -percent-encoding = "2" -jsonrpc-server-utils = "15.0.0" - -[dev-dependencies] -env_logger = "0.5" -ethcore = { path = "../ethcore", features = ["test-helpers"] } -tempdir = "0.3" -kvdb-rocksdb = "0.1.3" - -[features] -accounts = ["ethcore-accounts"] diff --git a/secret-store/res/acl_storage.json b/secret-store/res/acl_storage.json deleted file mode 100644 index cfdefd9c7..000000000 --- a/secret-store/res/acl_storage.json +++ /dev/null @@ -1,3 +0,0 @@ -[ - {"constant":true,"inputs":[{"name":"user","type":"address"},{"name":"document","type":"bytes32"}],"name":"checkPermissions","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"} -] diff --git a/secret-store/res/key_server_set.json b/secret-store/res/key_server_set.json deleted file mode 100644 index 28530e353..000000000 --- a/secret-store/res/key_server_set.json +++ /dev/null @@ -1,24 +0,0 @@ -[ - {"constant":true,"inputs":[],"name":"getMigrationMaster","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getMigrationKeyServerPublic","outputs":[{"name":"","type":"bytes"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":false,"inputs":[{"name":"id","type":"bytes32"}],"name":"startMigration","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}, - {"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getCurrentKeyServerIndex","outputs":[{"name":"","type":"uint8"}],"payable":false,"stateMutability":"view","type":"function"}, - 
{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getMigrationKeyServerAddress","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":true,"inputs":[],"name":"getMigrationId","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":true,"inputs":[],"name":"getNewKeyServers","outputs":[{"name":"","type":"address[]"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":false,"inputs":[{"name":"id","type":"bytes32"}],"name":"confirmMigration","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}, - {"constant":true,"inputs":[],"name":"getMigrationKeyServers","outputs":[{"name":"","type":"address[]"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"isMigrationConfirmed","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":true,"inputs":[],"name":"getCurrentKeyServersCount","outputs":[{"name":"","type":"uint8"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":true,"inputs":[],"name":"getCurrentKeyServers","outputs":[{"name":"","type":"address[]"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":true,"inputs":[],"name":"getCurrentLastChange","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getCurrentKeyServerPublic","outputs":[{"name":"","type":"bytes"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getNewKeyServerAddress","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getCurrentKeyServerAddress","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getNewKeyServerPublic","outputs":[{"name":"","type":"bytes"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":true,"inputs":[{"name":"index","type":"uint8"}],"name":"getCurrentKeyServer","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"}, - {"anonymous":false,"inputs":[{"indexed":false,"name":"keyServer","type":"address"}],"name":"KeyServerAdded","type":"event"}, - {"anonymous":false,"inputs":[{"indexed":false,"name":"keyServer","type":"address"}],"name":"KeyServerRemoved","type":"event"}, - {"anonymous":false,"inputs":[],"name":"MigrationStarted","type":"event"}, - {"anonymous":false,"inputs":[],"name":"MigrationCompleted","type":"event"} -] \ No newline at end of file diff --git a/secret-store/res/service.json b/secret-store/res/service.json deleted file mode 100644 index d79c38e7a..000000000 --- a/secret-store/res/service.json +++ /dev/null @@ -1,33 +0,0 @@ -[ - {"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"requireKeyServer","outputs":[{"name":"","type":"uint8"}],"payable":false,"stateMutability":"view","type":"function"}, - - 
{"constant":true,"inputs":[],"name":"serverKeyGenerationRequestsCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"}],"name":"serverKeyGenerationError","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}, - {"constant":true,"inputs":[{"name":"index","type":"uint256"}],"name":"getServerKeyGenerationRequest","outputs":[{"name":"","type":"bytes32"},{"name":"","type":"address"},{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"serverKeyPublic","type":"bytes"}],"name":"serverKeyGenerated","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}, - {"constant":true,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"keyServer","type":"address"}],"name":"isServerKeyGenerationResponseRequired","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"}, - {"anonymous":false,"inputs":[{"indexed":false,"name":"serverKeyId","type":"bytes32"},{"indexed":false,"name":"author","type":"address"},{"indexed":false,"name":"threshold","type":"uint8"}],"name":"ServerKeyGenerationRequested","type":"event"}, - - {"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"}],"name":"serverKeyRetrievalError","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}, - {"constant":true,"inputs":[],"name":"serverKeyRetrievalRequestsCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":true,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"keyServer","type":"address"}],"name":"isServerKeyRetrievalResponseRequired","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":true,"inputs":[{"name":"index","type":"uint256"}],"name":"getServerKeyRetrievalRequest","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"serverKeyPublic","type":"bytes"},{"name":"threshold","type":"uint8"}],"name":"serverKeyRetrieved","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}, - {"anonymous":false,"inputs":[{"indexed":false,"name":"serverKeyId","type":"bytes32"}],"name":"ServerKeyRetrievalRequested","type":"event"}, - - {"constant":true,"inputs":[],"name":"documentKeyStoreRequestsCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"}],"name":"documentKeyStoreError","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}, - {"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"}],"name":"documentKeyStored","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}, - {"constant":true,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"keyServer","type":"address"}],"name":"isDocumentKeyStoreResponseRequired","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"}, - 
{"constant":true,"inputs":[{"name":"index","type":"uint256"}],"name":"getDocumentKeyStoreRequest","outputs":[{"name":"","type":"bytes32"},{"name":"","type":"address"},{"name":"","type":"bytes"},{"name":"","type":"bytes"}],"payable":false,"stateMutability":"view","type":"function"}, - {"anonymous":false,"inputs":[{"indexed":false,"name":"serverKeyId","type":"bytes32"},{"indexed":false,"name":"author","type":"address"},{"indexed":false,"name":"commonPoint","type":"bytes"},{"indexed":false,"name":"encryptedPoint","type":"bytes"}],"name":"DocumentKeyStoreRequested","type":"event"}, - - {"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"requester","type":"address"},{"name":"commonPoint","type":"bytes"},{"name":"threshold","type":"uint8"}],"name":"documentKeyCommonRetrieved","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}, - {"constant":true,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"keyServer","type":"address"},{"name":"requester","type":"address"}],"name":"isDocumentKeyShadowRetrievalResponseRequired","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"requester","type":"address"},{"name":"participants","type":"uint256"},{"name":"decryptedSecret","type":"bytes"},{"name":"shadow","type":"bytes"}],"name":"documentKeyPersonalRetrieved","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}, - {"constant":false,"inputs":[{"name":"serverKeyId","type":"bytes32"},{"name":"requester","type":"address"}],"name":"documentKeyShadowRetrievalError","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}, - {"constant":true,"inputs":[],"name":"documentKeyShadowRetrievalRequestsCount","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"}, - {"constant":true,"inputs":[{"name":"index","type":"uint256"}],"name":"getDocumentKeyShadowRetrievalRequest","outputs":[{"name":"","type":"bytes32"},{"name":"","type":"bytes"},{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"}, - {"anonymous":false,"inputs":[{"indexed":false,"name":"serverKeyId","type":"bytes32"},{"indexed":false,"name":"requester","type":"address"}],"name":"DocumentKeyCommonRetrievalRequested","type":"event"}, - {"anonymous":false,"inputs":[{"indexed":false,"name":"serverKeyId","type":"bytes32"},{"indexed":false,"name":"requesterPublic","type":"bytes"}],"name":"DocumentKeyPersonalRetrievalRequested","type":"event"} -] \ No newline at end of file diff --git a/secret-store/src/acl_storage.rs b/secret-store/src/acl_storage.rs deleted file mode 100644 index 47a826800..000000000 --- a/secret-store/src/acl_storage.rs +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. - -// OpenEthereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// OpenEthereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with OpenEthereum. 
If not, see <http://www.gnu.org/licenses/>. - -use call_contract::CallContract; -use ethabi::FunctionOutputDecoder; -use ethcore::client::{BlockId, ChainNotify, NewBlocks}; -use ethereum_types::Address; -use parking_lot::{Mutex, RwLock}; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, -}; -use trusted_client::TrustedClient; -use types::{ContractAddress, Error, ServerKeyId}; - -use_contract!(acl_storage, "res/acl_storage.json"); - -const ACL_CHECKER_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_acl_checker"; - -/// ACL storage of Secret Store -pub trait AclStorage: Send + Sync { - /// Check if requestor can access document with hash `document` - fn check(&self, requester: Address, document: &ServerKeyId) -> Result<bool, Error>; -} - -/// On-chain ACL storage implementation. -pub struct OnChainAclStorage { - /// Cached on-chain contract. - contract: Mutex<CachedContract>, -} - -/// Cached on-chain ACL storage contract. -struct CachedContract { - /// Blockchain client. - client: TrustedClient, - /// Contract address source. - address_source: ContractAddress, - /// Current contract address. - contract_address: Option<Address>
, -} - -/// Dummy ACL storage implementation (check always passed). -#[derive(Default, Debug)] -pub struct DummyAclStorage { - prohibited: RwLock>>, -} - -impl OnChainAclStorage { - pub fn new( - trusted_client: TrustedClient, - address_source: ContractAddress, - ) -> Result, Error> { - let client = trusted_client.get_untrusted(); - let acl_storage = Arc::new(OnChainAclStorage { - contract: Mutex::new(CachedContract::new(trusted_client, address_source)), - }); - client - .ok_or_else(|| { - Error::Internal("Constructing OnChainAclStorage without active Client".into()) - })? - .add_notify(acl_storage.clone()); - Ok(acl_storage) - } -} - -impl AclStorage for OnChainAclStorage { - fn check(&self, requester: Address, document: &ServerKeyId) -> Result { - self.contract.lock().check(requester, document) - } -} - -impl ChainNotify for OnChainAclStorage { - // t_nb 11.5 SecretStore OnChainAclStorage. - fn new_blocks(&self, new_blocks: NewBlocks) { - if new_blocks.has_more_blocks_to_import { - return; - } - if !new_blocks.route.enacted().is_empty() || !new_blocks.route.retracted().is_empty() { - self.contract.lock().update_contract_address() - } - } -} - -impl CachedContract { - pub fn new(client: TrustedClient, address_source: ContractAddress) -> Self { - let mut contract = CachedContract { - client, - address_source, - contract_address: None, - }; - contract.update_contract_address(); - contract - } - - pub fn update_contract_address(&mut self) { - let contract_address = self.client.read_contract_address( - ACL_CHECKER_CONTRACT_REGISTRY_NAME.into(), - &self.address_source, - ); - if contract_address != self.contract_address { - trace!(target: "secretstore", "Configuring for ACL checker contract from address {:?}", - contract_address); - - self.contract_address = contract_address; - } - } - - pub fn check(&mut self, requester: Address, document: &ServerKeyId) -> Result { - if let Some(client) = self.client.get() { - // call contract to check accesss - match self.contract_address { - Some(contract_address) => { - let (encoded, decoder) = acl_storage::functions::check_permissions::call( - requester, - document.clone(), - ); - let d = client - .call_contract(BlockId::Latest, contract_address, encoded) - .map_err(|e| { - Error::Internal(format!("ACL checker call error: {}", e.to_string())) - })?; - decoder.decode(&d).map_err(|e| { - Error::Internal(format!("ACL checker call error: {}", e.to_string())) - }) - } - None => Err(Error::Internal( - "ACL checker contract is not configured".to_owned(), - )), - } - } else { - Err(Error::Internal( - "Calling ACL contract without trusted blockchain client".into(), - )) - } - } -} - -impl DummyAclStorage { - /// Prohibit given requestor access to given documents - #[cfg(test)] - pub fn prohibit(&self, requester: Address, document: ServerKeyId) { - self.prohibited - .write() - .entry(requester) - .or_insert_with(Default::default) - .insert(document); - } -} - -impl AclStorage for DummyAclStorage { - fn check(&self, requester: Address, document: &ServerKeyId) -> Result { - Ok(self - .prohibited - .read() - .get(&requester) - .map(|docs| !docs.contains(document)) - .unwrap_or(true)) - } -} diff --git a/secret-store/src/helpers.rs b/secret-store/src/helpers.rs deleted file mode 100644 index 95fcdbbdf..000000000 --- a/secret-store/src/helpers.rs +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. 
- -// OpenEthereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// OpenEthereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with OpenEthereum. If not, see . - -use ethcore::client::{BlockChainClient, BlockId, Client}; -use ethereum_types::H256; - -// TODO: Instead of a constant, make this based on consensus finality. -/// Number of confirmations required before request can be processed. -pub const REQUEST_CONFIRMATIONS_REQUIRED: u64 = 3; - -/// Get hash of the last block with at least n confirmations. -pub fn get_confirmed_block_hash(client: &Client, confirmations: u64) -> Option { - client - .block_number(BlockId::Latest) - .map(|b| b.saturating_sub(confirmations)) - .and_then(|b| client.block_hash(BlockId::Number(b))) -} diff --git a/secret-store/src/key_server.rs b/secret-store/src/key_server.rs deleted file mode 100644 index 64c4d4ebe..000000000 --- a/secret-store/src/key_server.rs +++ /dev/null @@ -1,809 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. - -// OpenEthereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// OpenEthereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with OpenEthereum. If not, see . - -use super::{acl_storage::AclStorage, key_server_set::KeyServerSet, key_storage::KeyStorage}; -use crypto::DEFAULT_MAC; -use ethkey::crypto; -use key_server_cluster::{ - math, new_network_cluster, ClusterClient, ClusterConfiguration as NetClusterConfiguration, - NetConnectionsManagerConfig, -}; -use parity_runtime::Executor; -use parking_lot::Mutex; -use std::{collections::BTreeSet, sync::Arc}; -use traits::{ - AdminSessionsServer, DocumentKeyServer, KeyServer, MessageSigner, NodeKeyPair, - ServerKeyGenerator, -}; -use types::{ - ClusterConfiguration, EncryptedDocumentKey, EncryptedDocumentKeyShadow, - EncryptedMessageSignature, Error, MessageHash, NodeId, Public, RequestSignature, Requester, - ServerKeyId, -}; - -/// Secret store key server implementation -pub struct KeyServerImpl { - data: Arc>, -} - -/// Secret store key server data. -pub struct KeyServerCore { - cluster: Arc, -} - -impl KeyServerImpl { - /// Create new key server instance - pub fn new( - config: &ClusterConfiguration, - key_server_set: Arc, - self_key_pair: Arc, - acl_storage: Arc, - key_storage: Arc, - executor: Executor, - ) -> Result { - Ok(KeyServerImpl { - data: Arc::new(Mutex::new(KeyServerCore::new( - config, - key_server_set, - self_key_pair, - acl_storage, - key_storage, - executor, - )?)), - }) - } - - /// Get cluster client reference. 
- pub fn cluster(&self) -> Arc { - self.data.lock().cluster.clone() - } -} - -impl KeyServer for KeyServerImpl {} - -impl AdminSessionsServer for KeyServerImpl { - fn change_servers_set( - &self, - old_set_signature: RequestSignature, - new_set_signature: RequestSignature, - new_servers_set: BTreeSet, - ) -> Result<(), Error> { - let servers_set_change_session = self.data.lock().cluster.new_servers_set_change_session( - None, - None, - new_servers_set, - old_set_signature, - new_set_signature, - )?; - servers_set_change_session - .as_servers_set_change() - .expect("new_servers_set_change_session creates servers_set_change_session; qed") - .wait() - .map_err(Into::into) - } -} - -impl ServerKeyGenerator for KeyServerImpl { - fn generate_key( - &self, - key_id: &ServerKeyId, - author: &Requester, - threshold: usize, - ) -> Result { - // recover requestor' public key from signature - let address = author - .address(key_id) - .map_err(Error::InsufficientRequesterData)?; - - // generate server key - let generation_session = self.data.lock().cluster.new_generation_session( - key_id.clone(), - None, - address, - threshold, - )?; - generation_session - .wait(None) - .expect("when wait is called without timeout it always returns Some; qed") - .map_err(Into::into) - } - - fn restore_key_public( - &self, - key_id: &ServerKeyId, - author: &Requester, - ) -> Result { - // recover requestor' public key from signature - let address = author - .address(key_id) - .map_err(Error::InsufficientRequesterData)?; - - // negotiate key version && retrieve common key data - let negotiation_session = self - .data - .lock() - .cluster - .new_key_version_negotiation_session(*key_id)?; - negotiation_session - .wait() - .and_then(|_| negotiation_session.common_key_data()) - .and_then(|key_share| { - if key_share.author == address { - Ok(key_share.public) - } else { - Err(Error::AccessDenied) - } - }) - .map_err(Into::into) - } -} - -impl DocumentKeyServer for KeyServerImpl { - fn store_document_key( - &self, - key_id: &ServerKeyId, - author: &Requester, - common_point: Public, - encrypted_document_key: Public, - ) -> Result<(), Error> { - // store encrypted key - let encryption_session = self.data.lock().cluster.new_encryption_session( - key_id.clone(), - author.clone(), - common_point, - encrypted_document_key, - )?; - encryption_session.wait(None).map_err(Into::into) - } - - fn generate_document_key( - &self, - key_id: &ServerKeyId, - author: &Requester, - threshold: usize, - ) -> Result { - // recover requestor' public key from signature - let public = author - .public(key_id) - .map_err(Error::InsufficientRequesterData)?; - - // generate server key - let server_key = self.generate_key(key_id, author, threshold)?; - - // generate random document key - let document_key = math::generate_random_point()?; - let encrypted_document_key = math::encrypt_secret(&document_key, &server_key)?; - - // store document key in the storage - self.store_document_key( - key_id, - author, - encrypted_document_key.common_point, - encrypted_document_key.encrypted_point, - )?; - - // encrypt document key with requestor public key - let document_key = crypto::ecies::encrypt(&public, &DEFAULT_MAC, &document_key) - .map_err(|err| Error::Internal(format!("Error encrypting document key: {}", err)))?; - Ok(document_key) - } - - fn restore_document_key( - &self, - key_id: &ServerKeyId, - requester: &Requester, - ) -> Result { - // recover requestor' public key from signature - let public = requester - .public(key_id) - 
.map_err(Error::InsufficientRequesterData)?; - - // decrypt document key - let decryption_session = self.data.lock().cluster.new_decryption_session( - key_id.clone(), - None, - requester.clone(), - None, - false, - false, - )?; - let document_key = decryption_session - .wait(None) - .expect("when wait is called without timeout it always returns Some; qed")? - .decrypted_secret; - - // encrypt document key with requestor public key - let document_key = crypto::ecies::encrypt(&public, &DEFAULT_MAC, &document_key) - .map_err(|err| Error::Internal(format!("Error encrypting document key: {}", err)))?; - Ok(document_key) - } - - fn restore_document_key_shadow( - &self, - key_id: &ServerKeyId, - requester: &Requester, - ) -> Result { - let decryption_session = self.data.lock().cluster.new_decryption_session( - key_id.clone(), - None, - requester.clone(), - None, - true, - false, - )?; - decryption_session - .wait(None) - .expect("when wait is called without timeout it always returns Some; qed") - .map_err(Into::into) - } -} - -impl MessageSigner for KeyServerImpl { - fn sign_message_schnorr( - &self, - key_id: &ServerKeyId, - requester: &Requester, - message: MessageHash, - ) -> Result { - // recover requestor' public key from signature - let public = requester - .public(key_id) - .map_err(Error::InsufficientRequesterData)?; - - // sign message - let signing_session = self.data.lock().cluster.new_schnorr_signing_session( - key_id.clone(), - requester.clone().into(), - None, - message, - )?; - let message_signature = signing_session.wait()?; - - // compose two message signature components into single one - let mut combined_signature = [0; 64]; - combined_signature[..32].clone_from_slice(&**message_signature.0); - combined_signature[32..].clone_from_slice(&**message_signature.1); - - // encrypt combined signature with requestor public key - let message_signature = crypto::ecies::encrypt(&public, &DEFAULT_MAC, &combined_signature) - .map_err(|err| { - Error::Internal(format!("Error encrypting message signature: {}", err)) - })?; - Ok(message_signature) - } - - fn sign_message_ecdsa( - &self, - key_id: &ServerKeyId, - requester: &Requester, - message: MessageHash, - ) -> Result { - // recover requestor' public key from signature - let public = requester - .public(key_id) - .map_err(Error::InsufficientRequesterData)?; - - // sign message - let signing_session = self.data.lock().cluster.new_ecdsa_signing_session( - key_id.clone(), - requester.clone().into(), - None, - message, - )?; - let message_signature = signing_session.wait()?; - - // encrypt combined signature with requestor public key - let message_signature = crypto::ecies::encrypt(&public, &DEFAULT_MAC, &*message_signature) - .map_err(|err| { - Error::Internal(format!("Error encrypting message signature: {}", err)) - })?; - Ok(message_signature) - } -} - -impl KeyServerCore { - pub fn new( - config: &ClusterConfiguration, - key_server_set: Arc, - self_key_pair: Arc, - acl_storage: Arc, - key_storage: Arc, - executor: Executor, - ) -> Result { - let cconfig = NetClusterConfiguration { - self_key_pair: self_key_pair.clone(), - key_server_set: key_server_set, - acl_storage: acl_storage, - key_storage: key_storage, - admin_public: config.admin_public, - preserve_sessions: false, - }; - let net_config = NetConnectionsManagerConfig { - listen_address: ( - config.listener_address.address.clone(), - config.listener_address.port, - ), - allow_connecting_to_higher_nodes: config.allow_connecting_to_higher_nodes, - auto_migrate_enabled: 
config.auto_migrate_enabled, - }; - - let core = new_network_cluster(executor, cconfig, net_config)?; - let cluster = core.client(); - core.run()?; - - Ok(KeyServerCore { cluster }) - } -} - -#[cfg(test)] -pub mod tests { - use super::KeyServerImpl; - use acl_storage::DummyAclStorage; - use crypto::DEFAULT_MAC; - use ethereum_types::{H256, H520}; - use ethkey::{self, crypto, verify_public, Generator, Random, Secret}; - use key_server_cluster::math; - use key_server_set::tests::MapKeyServerSet; - use key_storage::{tests::DummyKeyStorage, KeyStorage}; - use node_key_pair::PlainNodeKeyPair; - use parity_runtime::Runtime; - use std::{ - collections::{BTreeMap, BTreeSet}, - net::SocketAddr, - sync::Arc, - time, - }; - use traits::{ - AdminSessionsServer, DocumentKeyServer, KeyServer, MessageSigner, ServerKeyGenerator, - }; - use types::{ - ClusterConfiguration, EncryptedDocumentKey, EncryptedDocumentKeyShadow, - EncryptedMessageSignature, Error, MessageHash, NodeAddress, NodeId, Public, - RequestSignature, Requester, ServerKeyId, - }; - - #[derive(Default)] - pub struct DummyKeyServer; - - impl KeyServer for DummyKeyServer {} - - impl AdminSessionsServer for DummyKeyServer { - fn change_servers_set( - &self, - _old_set_signature: RequestSignature, - _new_set_signature: RequestSignature, - _new_servers_set: BTreeSet, - ) -> Result<(), Error> { - unimplemented!("test-only") - } - } - - impl ServerKeyGenerator for DummyKeyServer { - fn generate_key( - &self, - _key_id: &ServerKeyId, - _author: &Requester, - _threshold: usize, - ) -> Result { - unimplemented!("test-only") - } - - fn restore_key_public( - &self, - _key_id: &ServerKeyId, - _author: &Requester, - ) -> Result { - unimplemented!("test-only") - } - } - - impl DocumentKeyServer for DummyKeyServer { - fn store_document_key( - &self, - _key_id: &ServerKeyId, - _author: &Requester, - _common_point: Public, - _encrypted_document_key: Public, - ) -> Result<(), Error> { - unimplemented!("test-only") - } - - fn generate_document_key( - &self, - _key_id: &ServerKeyId, - _author: &Requester, - _threshold: usize, - ) -> Result { - unimplemented!("test-only") - } - - fn restore_document_key( - &self, - _key_id: &ServerKeyId, - _requester: &Requester, - ) -> Result { - unimplemented!("test-only") - } - - fn restore_document_key_shadow( - &self, - _key_id: &ServerKeyId, - _requester: &Requester, - ) -> Result { - unimplemented!("test-only") - } - } - - impl MessageSigner for DummyKeyServer { - fn sign_message_schnorr( - &self, - _key_id: &ServerKeyId, - _requester: &Requester, - _message: MessageHash, - ) -> Result { - unimplemented!("test-only") - } - - fn sign_message_ecdsa( - &self, - _key_id: &ServerKeyId, - _requester: &Requester, - _message: MessageHash, - ) -> Result { - unimplemented!("test-only") - } - } - - fn make_key_servers( - start_port: u16, - num_nodes: usize, - ) -> (Vec, Vec>, Runtime) { - let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect(); - let configs: Vec<_> = (0..num_nodes) - .map(|i| ClusterConfiguration { - listener_address: NodeAddress { - address: "127.0.0.1".into(), - port: start_port + (i as u16), - }, - nodes: key_pairs - .iter() - .enumerate() - .map(|(j, kp)| { - ( - kp.public().clone(), - NodeAddress { - address: "127.0.0.1".into(), - port: start_port + (j as u16), - }, - ) - }) - .collect(), - key_server_set_contract_address: None, - allow_connecting_to_higher_nodes: false, - admin_public: None, - auto_migrate_enabled: false, - }) - .collect(); - let key_servers_set: BTreeMap = 
configs[0] - .nodes - .iter() - .map(|(k, a)| { - ( - k.clone(), - format!("{}:{}", a.address, a.port).parse().unwrap(), - ) - }) - .collect(); - let key_storages = (0..num_nodes) - .map(|_| Arc::new(DummyKeyStorage::default())) - .collect::>(); - let runtime = Runtime::with_thread_count(4); - let key_servers: Vec<_> = configs - .into_iter() - .enumerate() - .map(|(i, cfg)| { - KeyServerImpl::new( - &cfg, - Arc::new(MapKeyServerSet::new(false, key_servers_set.clone())), - Arc::new(PlainNodeKeyPair::new(key_pairs[i].clone())), - Arc::new(DummyAclStorage::default()), - key_storages[i].clone(), - runtime.executor(), - ) - .unwrap() - }) - .collect(); - - // wait until connections are established. It is fast => do not bother with events here - let start = time::Instant::now(); - let mut tried_reconnections = false; - loop { - if key_servers - .iter() - .all(|ks| ks.cluster().is_fully_connected()) - { - break; - } - - let old_tried_reconnections = tried_reconnections; - let mut fully_connected = true; - for key_server in &key_servers { - if !key_server.cluster().is_fully_connected() { - fully_connected = false; - if !old_tried_reconnections { - tried_reconnections = true; - key_server.cluster().connect(); - } - } - } - if fully_connected { - break; - } - if time::Instant::now() - start > time::Duration::from_millis(3000) { - panic!("connections are not established in 3000ms"); - } - } - - (key_servers, key_storages, runtime) - } - - #[test] - fn document_key_generation_and_retrievement_works_over_network_with_single_node() { - let _ = ::env_logger::try_init(); - let (key_servers, _, runtime) = make_key_servers(6070, 1); - - // generate document key - let threshold = 0; - let document = Random.generate().unwrap().secret().clone(); - let secret = Random.generate().unwrap().secret().clone(); - let signature = ethkey::sign(&secret, &document).unwrap(); - let generated_key = key_servers[0] - .generate_document_key(&document, &signature.clone().into(), threshold) - .unwrap(); - let generated_key = crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &generated_key).unwrap(); - - // now let's try to retrieve key back - for key_server in key_servers.iter() { - let retrieved_key = key_server - .restore_document_key(&document, &signature.clone().into()) - .unwrap(); - let retrieved_key = - crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &retrieved_key).unwrap(); - assert_eq!(retrieved_key, generated_key); - } - drop(runtime); - } - - #[test] - fn document_key_generation_and_retrievement_works_over_network_with_3_nodes() { - let _ = ::env_logger::try_init(); - let (key_servers, key_storages, runtime) = make_key_servers(6080, 3); - - let test_cases = [0, 1, 2]; - for threshold in &test_cases { - // generate document key - let document = Random.generate().unwrap().secret().clone(); - let secret = Random.generate().unwrap().secret().clone(); - let signature = ethkey::sign(&secret, &document).unwrap(); - let generated_key = key_servers[0] - .generate_document_key(&document, &signature.clone().into(), *threshold) - .unwrap(); - let generated_key = - crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &generated_key).unwrap(); - - // now let's try to retrieve key back - for (i, key_server) in key_servers.iter().enumerate() { - let retrieved_key = key_server - .restore_document_key(&document, &signature.clone().into()) - .unwrap(); - let retrieved_key = - crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &retrieved_key).unwrap(); - assert_eq!(retrieved_key, generated_key); - - let key_share = 
key_storages[i].get(&document).unwrap().unwrap(); - assert!(key_share.common_point.is_some()); - assert!(key_share.encrypted_point.is_some()); - } - } - drop(runtime); - } - - #[test] - fn server_key_generation_and_storing_document_key_works_over_network_with_3_nodes() { - let _ = ::env_logger::try_init(); - let (key_servers, _, runtime) = make_key_servers(6090, 3); - - let test_cases = [0, 1, 2]; - for threshold in &test_cases { - // generate server key - let server_key_id = Random.generate().unwrap().secret().clone(); - let requestor_secret = Random.generate().unwrap().secret().clone(); - let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap(); - let server_public = key_servers[0] - .generate_key(&server_key_id, &signature.clone().into(), *threshold) - .unwrap(); - - // generate document key (this is done by KS client so that document key is unknown to any KS) - let generated_key = Random.generate().unwrap().public().clone(); - let encrypted_document_key = - math::encrypt_secret(&generated_key, &server_public).unwrap(); - - // store document key - key_servers[0] - .store_document_key( - &server_key_id, - &signature.clone().into(), - encrypted_document_key.common_point, - encrypted_document_key.encrypted_point, - ) - .unwrap(); - - // now let's try to retrieve key back - for key_server in key_servers.iter() { - let retrieved_key = key_server - .restore_document_key(&server_key_id, &signature.clone().into()) - .unwrap(); - let retrieved_key = - crypto::ecies::decrypt(&requestor_secret, &DEFAULT_MAC, &retrieved_key) - .unwrap(); - let retrieved_key = Public::from_slice(&retrieved_key); - assert_eq!(retrieved_key, generated_key); - } - } - drop(runtime); - } - - #[test] - fn server_key_generation_and_message_signing_works_over_network_with_3_nodes() { - let _ = ::env_logger::try_init(); - let (key_servers, _, runtime) = make_key_servers(6100, 3); - - let test_cases = [0, 1, 2]; - for threshold in &test_cases { - // generate server key - let server_key_id = Random.generate().unwrap().secret().clone(); - let requestor_secret = Random.generate().unwrap().secret().clone(); - let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap(); - let server_public = key_servers[0] - .generate_key(&server_key_id, &signature.clone().into(), *threshold) - .unwrap(); - - // sign message - let message_hash = H256::from(42); - let combined_signature = key_servers[0] - .sign_message_schnorr(&server_key_id, &signature.into(), message_hash.clone()) - .unwrap(); - let combined_signature = - crypto::ecies::decrypt(&requestor_secret, &DEFAULT_MAC, &combined_signature) - .unwrap(); - let signature_c = Secret::from_slice(&combined_signature[..32]).unwrap(); - let signature_s = Secret::from_slice(&combined_signature[32..]).unwrap(); - - // check signature - assert_eq!( - math::verify_schnorr_signature( - &server_public, - &(signature_c, signature_s), - &message_hash - ), - Ok(true) - ); - } - drop(runtime); - } - - #[test] - fn decryption_session_is_delegated_when_node_does_not_have_key_share() { - let _ = ::env_logger::try_init(); - let (key_servers, key_storages, runtime) = make_key_servers(6110, 3); - - // generate document key - let threshold = 0; - let document = Random.generate().unwrap().secret().clone(); - let secret = Random.generate().unwrap().secret().clone(); - let signature = ethkey::sign(&secret, &document).unwrap(); - let generated_key = key_servers[0] - .generate_document_key(&document, &signature.clone().into(), threshold) - .unwrap(); - let generated_key = 
crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &generated_key).unwrap(); - - // remove key from node0 - key_storages[0].remove(&document).unwrap(); - - // now let's try to retrieve key back by requesting it from node0, so that session must be delegated - let retrieved_key = key_servers[0] - .restore_document_key(&document, &signature.into()) - .unwrap(); - let retrieved_key = crypto::ecies::decrypt(&secret, &DEFAULT_MAC, &retrieved_key).unwrap(); - assert_eq!(retrieved_key, generated_key); - drop(runtime); - } - - #[test] - fn schnorr_signing_session_is_delegated_when_node_does_not_have_key_share() { - let _ = ::env_logger::try_init(); - let (key_servers, key_storages, runtime) = make_key_servers(6114, 3); - let threshold = 1; - - // generate server key - let server_key_id = Random.generate().unwrap().secret().clone(); - let requestor_secret = Random.generate().unwrap().secret().clone(); - let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap(); - let server_public = key_servers[0] - .generate_key(&server_key_id, &signature.clone().into(), threshold) - .unwrap(); - - // remove key from node0 - key_storages[0].remove(&server_key_id).unwrap(); - - // sign message - let message_hash = H256::from(42); - let combined_signature = key_servers[0] - .sign_message_schnorr(&server_key_id, &signature.into(), message_hash.clone()) - .unwrap(); - let combined_signature = - crypto::ecies::decrypt(&requestor_secret, &DEFAULT_MAC, &combined_signature).unwrap(); - let signature_c = Secret::from_slice(&combined_signature[..32]).unwrap(); - let signature_s = Secret::from_slice(&combined_signature[32..]).unwrap(); - - // check signature - assert_eq!( - math::verify_schnorr_signature( - &server_public, - &(signature_c, signature_s), - &message_hash - ), - Ok(true) - ); - drop(runtime); - } - - #[test] - fn ecdsa_signing_session_is_delegated_when_node_does_not_have_key_share() { - let _ = ::env_logger::try_init(); - let (key_servers, key_storages, runtime) = make_key_servers(6117, 4); - let threshold = 1; - - // generate server key - let server_key_id = Random.generate().unwrap().secret().clone(); - let requestor_secret = Random.generate().unwrap().secret().clone(); - let signature = ethkey::sign(&requestor_secret, &server_key_id).unwrap(); - let server_public = key_servers[0] - .generate_key(&server_key_id, &signature.clone().into(), threshold) - .unwrap(); - - // remove key from node0 - key_storages[0].remove(&server_key_id).unwrap(); - - // sign message - let message_hash = H256::random(); - let signature = key_servers[0] - .sign_message_ecdsa(&server_key_id, &signature.into(), message_hash.clone()) - .unwrap(); - let signature = - crypto::ecies::decrypt(&requestor_secret, &DEFAULT_MAC, &signature).unwrap(); - let signature: H520 = signature[0..65].into(); - - // check signature - assert!(verify_public(&server_public, &signature.into(), &message_hash).unwrap()); - drop(runtime); - } - - #[test] - fn servers_set_change_session_works_over_network() { - // TODO [Test] - } -} diff --git a/secret-store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs b/secret-store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs deleted file mode 100644 index 442600a06..000000000 --- a/secret-store/src/key_server_cluster/admin_sessions/key_version_negotiation_session.rs +++ /dev/null @@ -1,1258 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. 
- -// OpenEthereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// OpenEthereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with OpenEthereum. If not, see . - -use ethereum_types::{Address, H256}; -use ethkey::Secret; -use key_server_cluster::{ - admin_sessions::ShareChangeSessionMeta, - cluster::Cluster, - cluster_sessions::{ClusterSession, SessionIdWithSubSession}, - decryption_session::SessionImpl as DecryptionSession, - message::{ - CommonKeyData, FailedKeyVersionContinueAction, KeyVersionNegotiationMessage, KeyVersions, - KeyVersionsError, Message, RequestKeyVersions, - }, - signing_session_ecdsa::SessionImpl as EcdsaSigningSession, - signing_session_schnorr::SessionImpl as SchnorrSigningSession, - DocumentKeyShare, Error, NodeId, SessionId, -}; -use parking_lot::{Condvar, Mutex}; -use std::{ - collections::{BTreeMap, BTreeSet}, - sync::Arc, -}; - -// TODO [Opt]: change sessions so that versions are sent by chunks. -/// Number of versions sent in single message. -const VERSIONS_PER_MESSAGE: usize = 32; - -/// Key version negotiation transport. -pub trait SessionTransport { - /// Broadcast message to all nodes. - fn broadcast(&self, message: KeyVersionNegotiationMessage) -> Result<(), Error>; - /// Send message to given node. - fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error>; -} - -/// Key version negotiation result computer. -pub trait SessionResultComputer: Send + Sync { - /// Compute result of session, if possible. - fn compute_result( - &self, - threshold: Option, - confirmations: &BTreeSet, - versions: &BTreeMap>, - ) -> Option>; -} - -/// Key discovery session API. -pub struct SessionImpl { - /// Session core. - core: SessionCore, - /// Session data. - data: Mutex, -} - -/// Action after key version is negotiated. -#[derive(Clone)] -pub enum ContinueAction { - /// Decryption session + origin + is_shadow_decryption + is_broadcast_decryption. - Decrypt(Arc, Option
, bool, bool), - /// Schnorr signing session + message hash. - SchnorrSign(Arc<SchnorrSigningSession>, H256), - /// ECDSA signing session + message hash. - EcdsaSign(Arc<EcdsaSigningSession>, H256), -} - -/// Failed action after key version is negotiated. -#[derive(Clone, Debug, PartialEq)] -pub enum FailedContinueAction { - /// Decryption origin + requester. - Decrypt(Option<Address>
, Address), -} - -/// Immutable session data. -struct SessionCore { - /// Session meta. - pub meta: ShareChangeSessionMeta, - /// Sub-session id. - pub sub_session: Secret, - /// Key share. - pub key_share: Option, - /// Session result computer. - pub result_computer: Arc, - /// Session transport. - pub transport: T, - /// Session nonce. - pub nonce: u64, - /// SessionImpl completion condvar. - pub completed: Condvar, -} - -/// Mutable session data. -struct SessionData { - /// Session state. - pub state: SessionState, - /// Initialization confirmations. - pub confirmations: Option>, - /// Common key data that nodes have agreed upon. - pub key_share: Option, - /// { Version => Nodes } - pub versions: Option>>, - /// Session result. - pub result: Option, Error>>, - /// Continue action. - pub continue_with: Option, - /// Failed continue action (reported in error message by master node). - pub failed_continue_with: Option, -} - -/// SessionImpl creation parameters -pub struct SessionParams { - /// Session meta. - pub meta: ShareChangeSessionMeta, - /// Sub-session id. - pub sub_session: Secret, - /// Key share. - pub key_share: Option, - /// Session result computer. - pub result_computer: Arc, - /// Session transport to communicate to other cluster nodes. - pub transport: T, - /// Session nonce. - pub nonce: u64, -} - -/// Signing session state. -#[derive(Debug, PartialEq)] -enum SessionState { - /// Waiting for initialization. - WaitingForInitialization, - /// Waiting for responses. - WaitingForResponses, - /// Session is completed. - Finished, -} - -/// Isolated session transport. -pub struct IsolatedSessionTransport { - /// Cluster. - pub cluster: Arc, - /// Key id. - pub key_id: SessionId, - /// Sub session id. - pub sub_session: Secret, - /// Session-level nonce. - pub nonce: u64, -} - -/// Fastest session result computer. Computes first possible version that can be recovered on this node. -/// If there's no such version, selects version with the most support. -pub struct FastestResultComputer { - /// This node id. - self_node_id: NodeId, - /// Threshold (if known). - threshold: Option, - /// Count of all configured key server nodes. - configured_nodes_count: usize, - /// Count of all connected key server nodes. - connected_nodes_count: usize, -} - -/// Selects version with most support, waiting for responses from all nodes. -pub struct LargestSupportResultComputer; - -impl SessionImpl -where - T: SessionTransport, -{ - /// Create new session. - pub fn new(params: SessionParams) -> Self { - SessionImpl { - core: SessionCore { - meta: params.meta, - sub_session: params.sub_session, - key_share: params.key_share.clone(), - result_computer: params.result_computer, - transport: params.transport, - nonce: params.nonce, - completed: Condvar::new(), - }, - data: Mutex::new(SessionData { - state: SessionState::WaitingForInitialization, - confirmations: None, - key_share: params.key_share.map(|key_share| DocumentKeyShare { - threshold: key_share.threshold, - author: key_share.author, - public: key_share.public, - ..Default::default() - }), - versions: None, - result: None, - continue_with: None, - failed_continue_with: None, - }), - } - } - - /// Return session meta. - pub fn meta(&self) -> &ShareChangeSessionMeta { - &self.core.meta - } - - /// Return result computer reference. - pub fn version_holders(&self, version: &H256) -> Result, Error> { - Ok(self - .data - .lock() - .versions - .as_ref() - .ok_or(Error::InvalidStateForRequest)? - .get(version) - .ok_or(Error::ServerKeyIsNotFound)? 
- .clone()) - } - - /// Set continue action. - pub fn set_continue_action(&self, action: ContinueAction) { - self.data.lock().continue_with = Some(action); - } - - /// Take continue action. - pub fn take_continue_action(&self) -> Option { - self.data.lock().continue_with.take() - } - - /// Take failed continue action. - pub fn take_failed_continue_action(&self) -> Option { - self.data.lock().failed_continue_with.take() - } - - /// Wait for session completion. - pub fn wait(&self) -> Result, Error> { - Self::wait_session(&self.core.completed, &self.data, None, |data| { - data.result.clone() - }) - .expect("wait_session returns Some if called without timeout; qed") - } - - /// Retrieve common key data (author, threshold, public), if available. - pub fn common_key_data(&self) -> Result { - self.data - .lock() - .key_share - .clone() - .ok_or(Error::InvalidStateForRequest) - } - - /// Initialize session. - pub fn initialize(&self, connected_nodes: BTreeSet) -> Result<(), Error> { - // check state - let mut data = self.data.lock(); - if data.state != SessionState::WaitingForInitialization { - return Err(Error::InvalidStateForRequest); - } - - // update state - let mut confirmations = connected_nodes; - let mut versions: BTreeMap> = BTreeMap::new(); - let received_own_confirmation = confirmations.remove(&self.core.meta.self_node_id); - if received_own_confirmation { - if let Some(key_share) = self.core.key_share.as_ref() { - for version in &key_share.versions { - versions - .entry(version.hash.clone()) - .or_insert_with(Default::default) - .insert(self.core.meta.self_node_id.clone()); - } - } - } - - // update state - let no_confirmations_required = confirmations.is_empty(); - data.state = SessionState::WaitingForResponses; - data.confirmations = Some(confirmations); - data.versions = Some(versions); - - // try to complete session - Self::try_complete(&self.core, &mut *data); - if no_confirmations_required && data.state != SessionState::Finished { - return Err(Error::ServerKeyIsNotFound); - } else if data.state == SessionState::Finished { - return Ok(()); - } - - // send requests - let confirmations = data - .confirmations - .as_ref() - .expect("dilled couple of lines above; qed"); - for connected_node in confirmations { - self.core.transport.send( - connected_node, - KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions { - session: self.core.meta.id.clone().into(), - sub_session: self.core.sub_session.clone().into(), - session_nonce: self.core.nonce, - }), - )?; - } - - Ok(()) - } - - /// Process single message. - pub fn process_message( - &self, - sender: &NodeId, - message: &KeyVersionNegotiationMessage, - ) -> Result<(), Error> { - if self.core.nonce != message.session_nonce() { - return Err(Error::ReplayProtection); - } - - match message { - &KeyVersionNegotiationMessage::RequestKeyVersions(ref message) => { - self.on_key_versions_request(sender, message) - } - &KeyVersionNegotiationMessage::KeyVersions(ref message) => { - self.on_key_versions(sender, message) - } - &KeyVersionNegotiationMessage::KeyVersionsError(ref message) => { - // remember failed continue action - if let Some(FailedKeyVersionContinueAction::Decrypt( - Some(ref origin), - ref requester, - )) = message.continue_with - { - self.data.lock().failed_continue_with = Some(FailedContinueAction::Decrypt( - Some(origin.clone().into()), - requester.clone().into(), - )); - } - - self.on_session_error(sender, message.error.clone()); - Ok(()) - } - } - } - - /// Process key versions request. 
- pub fn on_key_versions_request( - &self, - sender: &NodeId, - _message: &RequestKeyVersions, - ) -> Result<(), Error> { - debug_assert!(sender != &self.core.meta.self_node_id); - - // check message - if *sender != self.core.meta.master_node_id { - return Err(Error::InvalidMessage); - } - - // check state - let mut data = self.data.lock(); - if data.state != SessionState::WaitingForInitialization { - return Err(Error::InvalidStateForRequest); - } - - // send response - self.core.transport.send( - sender, - KeyVersionNegotiationMessage::KeyVersions(KeyVersions { - session: self.core.meta.id.clone().into(), - sub_session: self.core.sub_session.clone().into(), - session_nonce: self.core.nonce, - key_common: self.core.key_share.as_ref().map(|key_share| CommonKeyData { - threshold: key_share.threshold, - author: key_share.author.into(), - public: key_share.public.into(), - }), - versions: self - .core - .key_share - .as_ref() - .map(|key_share| { - key_share - .versions - .iter() - .rev() - .filter(|v| v.id_numbers.contains_key(sender)) - .chain( - key_share - .versions - .iter() - .rev() - .filter(|v| !v.id_numbers.contains_key(sender)), - ) - .map(|v| v.hash.clone().into()) - .take(VERSIONS_PER_MESSAGE) - .collect() - }) - .unwrap_or_else(|| Default::default()), - }), - )?; - - // update state - data.state = SessionState::Finished; - data.result = Some(Ok(None)); - self.core.completed.notify_all(); - - Ok(()) - } - - /// Process key versions response. - pub fn on_key_versions(&self, sender: &NodeId, message: &KeyVersions) -> Result<(), Error> { - debug_assert!(sender != &self.core.meta.self_node_id); - - // check state - let mut data = self.data.lock(); - if data.state != SessionState::WaitingForResponses && data.state != SessionState::Finished { - return Err(Error::InvalidStateForRequest); - } - let reason = "this field is filled on master node when initializing; this is initialized master node; qed"; - if !data.confirmations.as_mut().expect(reason).remove(sender) { - return Err(Error::InvalidMessage); - } - - // remember versions that sender have - { - match message.key_common.as_ref() { - Some(key_common) if data.key_share.is_none() => { - data.key_share = Some(DocumentKeyShare { - threshold: key_common.threshold, - author: key_common.author.clone().into(), - public: key_common.public.clone().into(), - ..Default::default() - }); - } - Some(key_common) => { - let prev_key_share = data - .key_share - .as_ref() - .expect("data.key_share.is_none() is matched by previous branch; qed"); - if prev_key_share.threshold != key_common.threshold - || prev_key_share.author[..] != key_common.author[..] - || prev_key_share.public[..] != key_common.public[..] - { - return Err(Error::InvalidMessage); - } - } - None if message.versions.is_empty() => (), - None => return Err(Error::InvalidMessage), - } - - let versions = data.versions.as_mut().expect(reason); - for version in &message.versions { - versions - .entry(version.clone().into()) - .or_insert_with(Default::default) - .insert(sender.clone()); - } - } - - // try to compute result - if data.state != SessionState::Finished { - Self::try_complete(&self.core, &mut *data); - } - - Ok(()) - } - - /// Try to complete result && finish session. 
- fn try_complete(core: &SessionCore, data: &mut SessionData) { - let reason = "this field is filled on master node when initializing; try_complete is only called on initialized master node; qed"; - let confirmations = data.confirmations.as_ref().expect(reason); - let versions = data.versions.as_ref().expect(reason); - let threshold = data.key_share.as_ref().map(|key_share| key_share.threshold); - if let Some(result) = - core.result_computer - .compute_result(threshold, confirmations, versions) - { - // when the master node processing decryption service request, it starts with a key version negotiation session - // if the negotiation fails, only master node knows about it - // => if the error is fatal, only the master will know about it and report it to the contract && the request will never be rejected - // => let's broadcast fatal error so that every other node know about it, and, if it trusts to master node - // will report error to the contract - if let (Some(continue_with), Err(error)) = - (data.continue_with.as_ref(), result.as_ref()) - { - let origin = match *continue_with { - ContinueAction::Decrypt(_, origin, _, _) => origin.clone(), - _ => None, - }; - - let requester = match *continue_with { - ContinueAction::Decrypt(ref session, _, _, _) => session - .requester() - .and_then(|r| r.address(&core.meta.id).ok()), - _ => None, - }; - - if origin.is_some() && requester.is_some() && !error.is_non_fatal() { - let requester = requester.expect("checked in above condition; qed"); - data.failed_continue_with = Some(FailedContinueAction::Decrypt( - origin.clone(), - requester.clone(), - )); - - let send_result = - core.transport - .broadcast(KeyVersionNegotiationMessage::KeyVersionsError( - KeyVersionsError { - session: core.meta.id.clone().into(), - sub_session: core.sub_session.clone().into(), - session_nonce: core.nonce, - error: error.clone(), - continue_with: Some(FailedKeyVersionContinueAction::Decrypt( - origin.map(Into::into), - requester.into(), - )), - }, - )); - - if let Err(send_error) = send_result { - warn!(target: "secretstore_net", "{}: failed to broadcast key version negotiation error {}: {}", - core.meta.self_node_id, error, send_error); - } - } - } - - data.state = SessionState::Finished; - data.result = Some(result.map(Some)); - core.completed.notify_all(); - } - } -} - -impl ClusterSession for SessionImpl -where - T: SessionTransport, -{ - type Id = SessionIdWithSubSession; - - fn type_name() -> &'static str { - "version negotiation" - } - - fn id(&self) -> SessionIdWithSubSession { - SessionIdWithSubSession::new(self.core.meta.id.clone(), self.core.sub_session.clone()) - } - - fn is_finished(&self) -> bool { - self.data.lock().state == SessionState::Finished - } - - fn on_session_timeout(&self) { - let mut data = self.data.lock(); - - if data.confirmations.is_some() { - data.confirmations - .as_mut() - .expect("checked a line above; qed") - .clear(); - Self::try_complete(&self.core, &mut *data); - if data.state != SessionState::Finished { - warn!(target: "secretstore_net", "{}: key version negotiation session failed with timeout", self.core.meta.self_node_id); - - data.result = Some(Err(Error::ConsensusTemporaryUnreachable)); - self.core.completed.notify_all(); - } - } - } - - fn on_node_timeout(&self, node: &NodeId) { - self.on_session_error(node, Error::NodeDisconnected) - } - - fn on_session_error(&self, node: &NodeId, error: Error) { - let mut data = self.data.lock(); - - if data.confirmations.is_some() { - let is_waiting_for_confirmation = data - .confirmations - 
.as_mut() - .expect("checked a line above; qed") - .remove(node); - if !is_waiting_for_confirmation { - return; - } - - Self::try_complete(&self.core, &mut *data); - if data.state == SessionState::Finished { - return; - } - } - - warn!(target: "secretstore_net", "{}: key version negotiation session failed because of {} from {}", - self.core.meta.self_node_id, error, node); - - data.state = SessionState::Finished; - data.result = Some(Err(error)); - self.core.completed.notify_all(); - } - - fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { - match *message { - Message::KeyVersionNegotiation(ref message) => self.process_message(sender, message), - _ => unreachable!("cluster checks message to be correct before passing; qed"), - } - } -} - -impl SessionTransport for IsolatedSessionTransport { - fn broadcast(&self, message: KeyVersionNegotiationMessage) -> Result<(), Error> { - self.cluster - .broadcast(Message::KeyVersionNegotiation(message)) - } - - fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error> { - self.cluster - .send(node, Message::KeyVersionNegotiation(message)) - } -} - -impl FastestResultComputer { - pub fn new( - self_node_id: NodeId, - key_share: Option<&DocumentKeyShare>, - configured_nodes_count: usize, - connected_nodes_count: usize, - ) -> Self { - let threshold = key_share.map(|ks| ks.threshold); - FastestResultComputer { - self_node_id, - threshold, - configured_nodes_count, - connected_nodes_count, - } - } -} - -impl SessionResultComputer for FastestResultComputer { - fn compute_result( - &self, - threshold: Option, - confirmations: &BTreeSet, - versions: &BTreeMap>, - ) -> Option> { - match self.threshold.or(threshold) { - // if there's no versions at all && we're not waiting for confirmations anymore - _ if confirmations.is_empty() && versions.is_empty() => { - Some(Err(Error::ServerKeyIsNotFound)) - } - // if we have key share on this node - Some(threshold) => { - // select version this node have, with enough participants - let has_key_share = self.threshold.is_some(); - let version = versions.iter().find(|&(_, ref n)| { - !has_key_share || n.contains(&self.self_node_id) && n.len() >= threshold + 1 - }); - // if there's no such version, wait for more confirmations - match version { - Some((version, nodes)) => Some(Ok(( - version.clone(), - if has_key_share { - self.self_node_id.clone() - } else { - nodes.iter().cloned().nth(0).expect( - "version is only inserted when there's at least one owner; qed", - ) - }, - ))), - None if !confirmations.is_empty() => None, - // otherwise - try to find any version - None => Some( - versions - .iter() - .find(|&(_, ref n)| n.len() >= threshold + 1) - .map(|(version, nodes)| { - Ok((version.clone(), nodes.iter().cloned().nth(0) - .expect("version is only inserted when there's at least one owner; qed"))) - }) - // if there's no version consensus among all connected nodes - // AND we're connected to ALL configured nodes - // OR there are less than required nodes for key restore - // => this means that we can't restore key with CURRENT configuration => respond with fatal error - // otherwise we could try later, after all nodes are connected - .unwrap_or_else(|| { - Err( - if self.configured_nodes_count == self.connected_nodes_count - || self.configured_nodes_count < threshold + 1 - { - Error::ConsensusUnreachable - } else { - Error::ConsensusTemporaryUnreachable - }, - ) - }), - ), - } - } - // if we do not have share, then wait for all confirmations - None if 
!confirmations.is_empty() => None, - // ...and select version with largest support - None => Some( - versions - .iter() - .max_by_key(|&(_, ref n)| n.len()) - .map(|(version, nodes)| { - Ok(( - version.clone(), - nodes.iter().cloned().nth(0).expect( - "version is only inserted when there's at least one owner; qed", - ), - )) - }) - .unwrap_or_else(|| { - Err( - if self.configured_nodes_count == self.connected_nodes_count { - Error::ConsensusUnreachable - } else { - Error::ConsensusTemporaryUnreachable - }, - ) - }), - ), - } - } -} - -impl SessionResultComputer for LargestSupportResultComputer { - fn compute_result( - &self, - _threshold: Option, - confirmations: &BTreeSet, - versions: &BTreeMap>, - ) -> Option> { - if !confirmations.is_empty() { - return None; - } - if versions.is_empty() { - return Some(Err(Error::ServerKeyIsNotFound)); - } - - versions - .iter() - .max_by_key(|&(_, ref n)| n.len()) - .map(|(version, nodes)| { - Ok(( - version.clone(), - nodes - .iter() - .cloned() - .nth(0) - .expect("version is only inserted when there's at least one owner; qed"), - )) - }) - } -} - -#[cfg(test)] -mod tests { - use super::{ - ContinueAction, FailedContinueAction, FastestResultComputer, LargestSupportResultComputer, - SessionImpl, SessionParams, SessionResultComputer, SessionState, SessionTransport, - }; - use ethereum_types::{H160, H512}; - use ethkey::public_to_address; - use key_server_cluster::{ - admin_sessions::ShareChangeSessionMeta, - cluster::{tests::DummyCluster, Cluster}, - cluster_sessions::ClusterSession, - decryption_session::create_default_decryption_session, - math, - message::{ - CommonKeyData, KeyVersionNegotiationMessage, KeyVersions, Message, RequestKeyVersions, - }, - DocumentKeyShare, DocumentKeyShareVersion, DummyKeyStorage, Error, KeyStorage, NodeId, - SessionId, - }; - use std::{ - collections::{BTreeMap, BTreeSet, VecDeque}, - sync::Arc, - }; - - struct DummyTransport { - cluster: Arc, - } - - impl SessionTransport for DummyTransport { - fn broadcast(&self, message: KeyVersionNegotiationMessage) -> Result<(), Error> { - self.cluster - .broadcast(Message::KeyVersionNegotiation(message)) - } - - fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error> { - self.cluster - .send(node, Message::KeyVersionNegotiation(message)) - } - } - - struct Node { - pub cluster: Arc, - pub key_storage: Arc, - pub session: SessionImpl, - } - - struct MessageLoop { - pub session_id: SessionId, - pub nodes: BTreeMap, - pub queue: VecDeque<(NodeId, NodeId, Message)>, - } - - impl MessageLoop { - pub fn prepare_nodes(nodes_num: usize) -> BTreeMap> { - (0..nodes_num) - .map(|_| { - ( - math::generate_random_point().unwrap(), - Arc::new(DummyKeyStorage::default()), - ) - }) - .collect() - } - - pub fn empty(nodes_num: usize) -> Self { - Self::new(Self::prepare_nodes(nodes_num)) - } - - pub fn new(nodes: BTreeMap>) -> Self { - let master_node_id = nodes.keys().cloned().nth(0).unwrap(); - let sub_sesion = math::generate_random_scalar().unwrap(); - let all_nodes_ids: BTreeSet<_> = nodes.keys().cloned().collect(); - MessageLoop { - session_id: Default::default(), - nodes: nodes - .iter() - .map(|(node_id, key_storage)| { - let cluster = Arc::new(DummyCluster::new(node_id.clone())); - cluster.add_nodes(all_nodes_ids.iter().cloned()); - ( - node_id.clone(), - Node { - cluster: cluster.clone(), - key_storage: key_storage.clone(), - session: SessionImpl::new(SessionParams { - meta: ShareChangeSessionMeta { - id: Default::default(), - self_node_id: 
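// A minimal sketch of the two result computers above, using simplified stand-in types
// (u64 ids instead of the crate's H256/H512) rather than the actual API. The "fastest" rule
// accepts the first version reported by at least threshold + 1 holders and keeps waiting while
// confirmations are outstanding; the "largest support" rule waits for every confirmation and
// then picks the version with the most holders.

use std::collections::{BTreeMap, BTreeSet};

type NodeId = u64;
type VersionId = u64;

// None = keep waiting; Some(Ok(v)) = negotiated version; Some(Err(_)) = negotiation failed.
fn fastest(
    threshold: usize,
    pending_confirmations: &BTreeSet<NodeId>,
    versions: &BTreeMap<VersionId, BTreeSet<NodeId>>,
) -> Option<Result<VersionId, &'static str>> {
    // any version already reported by threshold + 1 holders can be used immediately
    if let Some((version, _)) = versions
        .iter()
        .find(|(_, holders)| holders.len() >= threshold + 1)
    {
        return Some(Ok(*version));
    }
    if !pending_confirmations.is_empty() {
        return None; // some nodes have not answered yet
    }
    Some(Err("consensus unreachable")) // everyone answered, no version has enough holders
}

fn largest_support(
    pending_confirmations: &BTreeSet<NodeId>,
    versions: &BTreeMap<VersionId, BTreeSet<NodeId>>,
) -> Option<Result<VersionId, &'static str>> {
    if !pending_confirmations.is_empty() {
        return None;
    }
    versions
        .iter()
        .max_by_key(|(_, holders)| holders.len())
        .map(|(version, _)| Ok(*version))
        .or(Some(Err("server key is not found")))
}

fn main() {
    let mut versions = BTreeMap::new();
    versions.insert(1u64, vec![10u64, 11].into_iter().collect::<BTreeSet<_>>());
    assert_eq!(fastest(1, &BTreeSet::new(), &versions), Some(Ok(1)));
    assert_eq!(largest_support(&BTreeSet::new(), &versions), Some(Ok(1)));
}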
node_id.clone(), - master_node_id: master_node_id.clone(), - configured_nodes_count: nodes.len(), - connected_nodes_count: nodes.len(), - }, - sub_session: sub_sesion.clone(), - key_share: key_storage.get(&Default::default()).unwrap(), - result_computer: Arc::new(FastestResultComputer::new( - node_id.clone(), - key_storage.get(&Default::default()).unwrap().as_ref(), - nodes.len(), - nodes.len(), - )), - transport: DummyTransport { cluster: cluster }, - nonce: 0, - }), - }, - ) - }) - .collect(), - queue: VecDeque::new(), - } - } - - pub fn node_id(&self, idx: usize) -> &NodeId { - self.nodes.keys().nth(idx).unwrap() - } - - pub fn session(&self, idx: usize) -> &SessionImpl { - &self.nodes.values().nth(idx).unwrap().session - } - - pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> { - self.nodes - .values() - .filter_map(|n| { - n.cluster - .take_message() - .map(|m| (n.session.meta().self_node_id.clone(), m.0, m.1)) - }) - .nth(0) - .or_else(|| self.queue.pop_front()) - } - - pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> { - match msg.2 { - Message::KeyVersionNegotiation(message) => { - self.nodes[&msg.1].session.process_message(&msg.0, &message) - } - _ => panic!("unexpected"), - } - } - - pub fn run(&mut self) { - while let Some((from, to, message)) = self.take_message() { - self.process_message((from, to, message)).unwrap(); - } - } - } - - #[test] - fn negotiation_fails_if_initialized_twice() { - let ml = MessageLoop::empty(1); - assert_eq!(ml.session(0).initialize(BTreeSet::new()), Ok(())); - assert_eq!( - ml.session(0).initialize(BTreeSet::new()), - Err(Error::InvalidStateForRequest) - ); - } - - #[test] - fn negotiation_fails_if_message_contains_wrong_nonce() { - let ml = MessageLoop::empty(2); - assert_eq!( - ml.session(1).process_message( - ml.node_id(0), - &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 100, - }) - ), - Err(Error::ReplayProtection) - ); - } - - #[test] - fn negotiation_fails_if_versions_request_received_from_non_master() { - let ml = MessageLoop::empty(3); - assert_eq!( - ml.session(2).process_message( - ml.node_id(1), - &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 0, - }) - ), - Err(Error::InvalidMessage) - ); - } - - #[test] - fn negotiation_fails_if_versions_request_received_twice() { - let ml = MessageLoop::empty(2); - assert_eq!( - ml.session(1).process_message( - ml.node_id(0), - &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 0, - }) - ), - Ok(()) - ); - assert_eq!( - ml.session(1).process_message( - ml.node_id(0), - &KeyVersionNegotiationMessage::RequestKeyVersions(RequestKeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 0, - }) - ), - Err(Error::InvalidStateForRequest) - ); - } - - #[test] - fn negotiation_fails_if_versions_received_before_initialization() { - let ml = MessageLoop::empty(2); - assert_eq!( - ml.session(1).process_message( - ml.node_id(0), - &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 0, - 
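// A minimal sketch of the test-harness pattern used above, with invented stand-in types: each
// node's dummy cluster records outgoing messages, and run() keeps delivering the next
// (from, to, message) triple to the destination until nothing is left in flight.

use std::collections::VecDeque;

type NodeId = usize;
type Message = &'static str;

struct MessageLoopSketch {
    queue: VecDeque<(NodeId, NodeId, Message)>,
    delivered: Vec<(NodeId, NodeId, Message)>,
}

impl MessageLoopSketch {
    fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> {
        self.queue.pop_front()
    }

    fn process_message(&mut self, msg: (NodeId, NodeId, Message)) {
        // a real harness routes this into the destination session's process_message
        self.delivered.push(msg);
    }

    fn run(&mut self) {
        while let Some(msg) = self.take_message() {
            self.process_message(msg);
        }
    }
}

fn main() {
    let mut ml = MessageLoopSketch {
        queue: VecDeque::from(vec![(0, 1, "RequestKeyVersions"), (1, 0, "KeyVersions")]),
        delivered: Vec::new(),
    };
    ml.run();
    assert_eq!(ml.delivered.len(), 2);
}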
key_common: Some(CommonKeyData { - threshold: 10, - author: Default::default(), - public: Default::default(), - }), - versions: Vec::new(), - }) - ), - Err(Error::InvalidStateForRequest) - ); - } - - #[test] - fn negotiation_does_not_fails_if_versions_received_after_completion() { - let ml = MessageLoop::empty(3); - ml.session(0) - .initialize(ml.nodes.keys().cloned().collect()) - .unwrap(); - assert_eq!( - ml.session(0).data.lock().state, - SessionState::WaitingForResponses - ); - - let version_id = (*math::generate_random_scalar().unwrap()).clone(); - assert_eq!( - ml.session(0).process_message( - ml.node_id(1), - &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 0, - key_common: Some(CommonKeyData { - threshold: 0, - author: Default::default(), - public: Default::default(), - }), - - versions: vec![version_id.clone().into()] - }) - ), - Ok(()) - ); - assert_eq!(ml.session(0).data.lock().state, SessionState::Finished); - - assert_eq!( - ml.session(0).process_message( - ml.node_id(2), - &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 0, - key_common: Some(CommonKeyData { - threshold: 0, - author: Default::default(), - public: Default::default(), - }), - - versions: vec![version_id.clone().into()] - }) - ), - Ok(()) - ); - assert_eq!(ml.session(0).data.lock().state, SessionState::Finished); - } - - #[test] - fn negotiation_fails_if_wrong_common_data_sent() { - fn run_test(key_common: CommonKeyData) { - let ml = MessageLoop::empty(3); - ml.session(0) - .initialize(ml.nodes.keys().cloned().collect()) - .unwrap(); - - let version_id = (*math::generate_random_scalar().unwrap()).clone(); - assert_eq!( - ml.session(0).process_message( - ml.node_id(1), - &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 0, - key_common: Some(CommonKeyData { - threshold: 1, - author: Default::default(), - public: Default::default(), - }), - versions: vec![version_id.clone().into()] - }) - ), - Ok(()) - ); - assert_eq!( - ml.session(0).process_message( - ml.node_id(2), - &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 0, - key_common: Some(key_common), - versions: vec![version_id.clone().into()] - }) - ), - Err(Error::InvalidMessage) - ); - } - - run_test(CommonKeyData { - threshold: 2, - author: Default::default(), - public: Default::default(), - }); - - run_test(CommonKeyData { - threshold: 1, - author: H160::from(1).into(), - public: Default::default(), - }); - - run_test(CommonKeyData { - threshold: 1, - author: H160::from(2).into(), - public: Default::default(), - }); - } - - #[test] - fn negotiation_fails_if_threshold_empty_when_versions_are_not_empty() { - let ml = MessageLoop::empty(2); - ml.session(0) - .initialize(ml.nodes.keys().cloned().collect()) - .unwrap(); - - let version_id = (*math::generate_random_scalar().unwrap()).clone(); - assert_eq!( - ml.session(0).process_message( - ml.node_id(1), - &KeyVersionNegotiationMessage::KeyVersions(KeyVersions { - session: Default::default(), - sub_session: math::generate_random_scalar().unwrap().into(), - session_nonce: 0, - key_common: None, - versions: vec![version_id.clone().into()] - }) - ), 
- Err(Error::InvalidMessage) - ); - } - - #[test] - fn fast_negotiation_does_not_completes_instantly_when_enough_share_owners_are_connected() { - let nodes = MessageLoop::prepare_nodes(2); - let version_id = (*math::generate_random_scalar().unwrap()).clone(); - nodes - .values() - .nth(0) - .unwrap() - .insert( - Default::default(), - DocumentKeyShare { - author: H160::from(2), - threshold: 1, - public: H512::from(3), - common_point: None, - encrypted_point: None, - versions: vec![DocumentKeyShareVersion { - hash: version_id, - id_numbers: vec![( - nodes.keys().cloned().nth(0).unwrap(), - math::generate_random_scalar().unwrap(), - )] - .into_iter() - .collect(), - secret_share: math::generate_random_scalar().unwrap(), - }], - }, - ) - .unwrap(); - let ml = MessageLoop::new(nodes); - ml.session(0) - .initialize(ml.nodes.keys().cloned().collect()) - .unwrap(); - // we can't be sure that node has given key version because previous ShareAdd session could fail - assert!(ml.session(0).data.lock().state != SessionState::Finished); - - // check that upon completion, commmon key data is known - assert_eq!( - ml.session(0).common_key_data(), - Ok(DocumentKeyShare { - author: H160::from(2), - threshold: 1, - public: H512::from(3), - ..Default::default() - }) - ); - } - - #[test] - fn fastest_computer_returns_missing_share_if_no_versions_returned() { - let computer = FastestResultComputer { - self_node_id: Default::default(), - threshold: None, - configured_nodes_count: 1, - connected_nodes_count: 1, - }; - assert_eq!( - computer.compute_result(Some(10), &Default::default(), &Default::default()), - Some(Err(Error::ServerKeyIsNotFound)) - ); - } - - #[test] - fn largest_computer_returns_missing_share_if_no_versions_returned() { - let computer = LargestSupportResultComputer; - assert_eq!( - computer.compute_result(Some(10), &Default::default(), &Default::default()), - Some(Err(Error::ServerKeyIsNotFound)) - ); - } - - #[test] - fn fatal_error_is_not_broadcasted_if_started_without_origin() { - let mut ml = MessageLoop::empty(3); - ml.session(0).set_continue_action(ContinueAction::Decrypt( - create_default_decryption_session(), - None, - false, - false, - )); - ml.session(0) - .initialize(ml.nodes.keys().cloned().collect()) - .unwrap(); - ml.run(); - - assert!(ml - .nodes - .values() - .all(|n| n.session.is_finished() && n.session.take_failed_continue_action().is_none())); - } - - #[test] - fn fatal_error_is_broadcasted_if_started_with_origin() { - let mut ml = MessageLoop::empty(3); - ml.session(0).set_continue_action(ContinueAction::Decrypt( - create_default_decryption_session(), - Some(1.into()), - true, - true, - )); - ml.session(0) - .initialize(ml.nodes.keys().cloned().collect()) - .unwrap(); - ml.run(); - - // on all nodes session is completed - assert!(ml.nodes.values().all(|n| n.session.is_finished())); - - // slave nodes have non-empty failed continue action - assert!(ml - .nodes - .values() - .skip(1) - .all(|n| n.session.take_failed_continue_action() - == Some(FailedContinueAction::Decrypt( - Some(1.into()), - public_to_address(&2.into()) - )))); - } -} diff --git a/secret-store/src/key_server_cluster/admin_sessions/mod.rs b/secret-store/src/key_server_cluster/admin_sessions/mod.rs deleted file mode 100644 index a7daa73a6..000000000 --- a/secret-store/src/key_server_cluster/admin_sessions/mod.rs +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. 
-
-// OpenEthereum is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// OpenEthereum is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with OpenEthereum. If not, see <http://www.gnu.org/licenses/>.
-
-pub mod key_version_negotiation_session;
-pub mod servers_set_change_session;
-pub mod share_add_session;
-pub mod share_change_session;
-
-mod sessions_queue;
-
-use key_server_cluster::{Error, NodeId, SessionId, SessionMeta};
-
-/// Share change session metadata.
-#[derive(Debug, Clone)]
-pub struct ShareChangeSessionMeta {
-    /// Key id.
-    pub id: SessionId,
-    /// Id of node, which has started this session.
-    pub master_node_id: NodeId,
-    /// Id of node, on which this session is running.
-    pub self_node_id: NodeId,
-    /// Count of all configured key server nodes.
-    pub configured_nodes_count: usize,
-    /// Count of all connected key server nodes.
-    pub connected_nodes_count: usize,
-}
-
-impl ShareChangeSessionMeta {
-    /// Convert to consensus session meta. `all_nodes_set` is the union of `old_nodes_set` && `new_nodes_set`.
-    pub fn into_consensus_meta(self, all_nodes_set_len: usize) -> Result<SessionMeta, Error> {
-        Ok(SessionMeta {
-            id: self.id,
-            master_node_id: self.master_node_id,
-            self_node_id: self.self_node_id,
-            threshold: all_nodes_set_len
-                .checked_sub(1)
-                .ok_or(Error::ConsensusUnreachable)?,
-            configured_nodes_count: self.configured_nodes_count,
-            connected_nodes_count: self.connected_nodes_count,
-        })
-    }
-}
diff --git a/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs b/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs
deleted file mode 100644
index 26b689e92..000000000
--- a/secret-store/src/key_server_cluster/admin_sessions/servers_set_change_session.rs
+++ /dev/null
@@ -1,1832 +0,0 @@
-// Copyright 2015-2020 Parity Technologies (UK) Ltd.
-// This file is part of OpenEthereum.
- -use ethereum_types::H256; -use ethkey::{Public, Signature}; -use key_server_cluster::{ - admin_sessions::{sessions_queue::SessionsQueue, ShareChangeSessionMeta}, - cluster::Cluster, - cluster_sessions::ClusterSession, - jobs::{ - consensus_session::{ConsensusSession, ConsensusSessionParams, ConsensusSessionState}, - job_session::JobTransport, - servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest}, - unknown_sessions_job::UnknownSessionsJob, - }, - key_version_negotiation_session::{ - LargestSupportResultComputer, SessionImpl as KeyVersionNegotiationSessionImpl, - SessionParams as KeyVersionNegotiationSessionParams, - SessionTransport as KeyVersionNegotiationTransport, - }, - math, - message::{ - ConfirmConsensusInitialization, ConfirmShareChangeSessionInitialization, - ConsensusMessageWithServersSet, InitializeConsensusSessionWithServersSet, - InitializeShareChangeSession, KeyVersionNegotiationMessage, Message, - ServersSetChangeCompleted, ServersSetChangeConsensusMessage, ServersSetChangeDelegate, - ServersSetChangeDelegateResponse, ServersSetChangeError, ServersSetChangeMessage, - ServersSetChangeShareAddMessage, ShareChangeKeyVersionNegotiation, UnknownSessions, - UnknownSessionsRequest, - }, - share_change_session::{ - prepare_share_change_session_plan, ShareChangeSession, ShareChangeSessionParams, - ShareChangeSessionPlan, - }, - Error, KeyStorage, NodeId, SessionId, -}; -use parking_lot::{Condvar, Mutex}; -use std::{ - collections::{btree_map::Entry, BTreeMap, BTreeSet}, - sync::Arc, -}; - -/// Maximal number of active share change sessions. -const MAX_ACTIVE_KEY_SESSIONS: usize = 64; - -/// Servers set change session. -/// Brief overview: -/// 1) consensus establishing -/// 2) master node requests all other nodes for sessions he is not participating (aka unknown sessions) -/// 3) every slave node responds with sessions id => we are able to collect Map of unknown sessions on master -/// 4) for every known session (i.e. session that master participates in): -/// 4.1) share change plan is created = nodes to add shares for, nodes to move shares from/to, nodes to remove shares from -/// 4.2) share change session is started. Share change session = sequential execution of ShareAdd, then ShareMove && then ShareRemove sessions (order matters here) for single key -/// 5) for every unknown session: -/// 5.1) sub_master is selected from sessions participants -/// 5.2) share change session is delegated from master to this sub_master -/// 5.3) share change session is executed by this sub_master -/// 5.4) share change confirm is sent from sub_master to master -/// 6) upon completing all known share change sessions && receiving confirmations for all unknown share change sessions, session completion signal is sent to all slave nodes && session is completed -pub struct SessionImpl { - /// Session core. - core: SessionCore, - /// Session data. - data: Mutex, -} - -/// Session state. -#[derive(Debug, PartialEq)] -enum SessionState { - /// Establishing consensus. - EstablishingConsensus, - /// Running share change sessions. - RunningShareChangeSessions, - /// Session is completed. - Finished, -} - -/// Immutable session data. -struct SessionCore { - /// Servers set change session meta (id is computed from new_nodes_set). - pub meta: ShareChangeSessionMeta, - /// Cluster which allows this node to send messages to other nodes in the cluster. - pub cluster: Arc, - /// Keys storage. - pub key_storage: Arc, - /// Session-level nonce. 
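// A minimal sketch of the phase ordering described in the session overview above, using invented
// stand-in types rather than the crate's API: consensus over the new server set comes first, then
// per-key share change sessions run (locally or delegated to a sub-master), and the whole session
// finishes only once nothing is left active.

#[derive(Debug, PartialEq)]
enum Phase {
    EstablishingConsensus,
    RunningShareChangeSessions,
    Finished,
}

struct MasterSession {
    phase: Phase,
    active_share_changes: usize,    // per-key sessions running on this node
    delegated_share_changes: usize, // per-key sessions delegated to a sub-master
}

impl MasterSession {
    // consensus established => start collecting unknown sessions and running share changes
    fn on_consensus_established(&mut self) {
        assert_eq!(self.phase, Phase::EstablishingConsensus);
        self.phase = Phase::RunningShareChangeSessions;
    }

    // one per-key session finished (locally, or reported back by its sub-master)
    fn on_share_change_completed(&mut self, was_delegated: bool) {
        if was_delegated {
            self.delegated_share_changes -= 1;
        } else {
            self.active_share_changes -= 1;
        }
        if self.active_share_changes == 0 && self.delegated_share_changes == 0 {
            // at this point the real session broadcasts a completion message to all slaves
            self.phase = Phase::Finished;
        }
    }
}

fn main() {
    let mut session = MasterSession {
        phase: Phase::EstablishingConsensus,
        active_share_changes: 1,
        delegated_share_changes: 1,
    };
    session.on_consensus_established();
    session.on_share_change_completed(false);
    session.on_share_change_completed(true);
    assert_eq!(session.phase, Phase::Finished);
}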
- pub nonce: u64, - /// All known nodes. - pub all_nodes_set: BTreeSet, - /// Administrator public key. - pub admin_public: Public, - /// Migration id (if this session is a part of auto-migration process). - pub migration_id: Option, - /// SessionImpl completion condvar. - pub completed: Condvar, -} - -/// Servers set change consensus session type. -type ServersSetChangeConsensusSession = ConsensusSession< - ServersSetChangeAccessJob, - ServersSetChangeConsensusTransport, - UnknownSessionsJob, - UnknownSessionsJobTransport, ->; - -/// Mutable session data. -struct SessionData { - /// Session state. - pub state: SessionState, - /// Consensus-based servers set change session. - pub consensus_session: Option, - /// New nodes set. - pub new_nodes_set: Option>, - /// Share change sessions queue (valid on master nodes only). - pub sessions_queue: Option, - /// Share change sessions key version negotiation. - pub negotiation_sessions: BTreeMap< - SessionId, - KeyVersionNegotiationSessionImpl, - >, - /// Share change sessions initialization state (valid on master nodes only). - pub sessions_initialization_state: BTreeMap, - /// Sessions delegated to other nodes (valid on master node only). - pub delegated_key_sessions: BTreeMap, - /// Active share change sessions. - pub active_key_sessions: BTreeMap, - /// Servers set change result. - pub result: Option>, -} - -/// Session initialization data. -struct SessionInitializationData { - /// Master node id. - pub master: NodeId, - /// Nodes that have confirmed session initialization request. - pub confirmations: BTreeSet, -} - -/// SessionImpl creation parameters -pub struct SessionParams { - /// Session meta (artificial). - pub meta: ShareChangeSessionMeta, - /// Cluster. - pub cluster: Arc, - /// Keys storage. - pub key_storage: Arc, - /// Session nonce. - pub nonce: u64, - /// All known nodes. - pub all_nodes_set: BTreeSet, - /// Administrator public key. - pub admin_public: Public, - /// Migration id (if this session is a part of auto-migration process). - pub migration_id: Option, -} - -/// Servers set change consensus transport. -struct ServersSetChangeConsensusTransport { - /// Session id. - id: SessionId, - /// Session-level nonce. - nonce: u64, - /// Migration id (if part of auto-migration process). - migration_id: Option, - /// Cluster. - cluster: Arc, -} - -/// Unknown sessions job transport. -struct UnknownSessionsJobTransport { - /// Session id. - id: SessionId, - /// Session-level nonce. - nonce: u64, - /// Cluster. - cluster: Arc, -} - -/// Key version negotiation transport. -struct ServersSetChangeKeyVersionNegotiationTransport { - /// Session id. - id: SessionId, - /// Session-level nonce. - nonce: u64, - /// Cluster. - cluster: Arc, -} - -impl SessionImpl { - /// Create new servers set change session. - pub fn new(params: SessionParams) -> Result { - Ok(SessionImpl { - core: SessionCore { - meta: params.meta, - cluster: params.cluster, - key_storage: params.key_storage, - nonce: params.nonce, - all_nodes_set: params.all_nodes_set, - admin_public: params.admin_public, - migration_id: params.migration_id, - completed: Condvar::new(), - }, - data: Mutex::new(SessionData { - state: SessionState::EstablishingConsensus, - consensus_session: None, - new_nodes_set: None, - sessions_queue: None, - negotiation_sessions: BTreeMap::new(), - sessions_initialization_state: BTreeMap::new(), - delegated_key_sessions: BTreeMap::new(), - active_key_sessions: BTreeMap::new(), - result: None, - }), - }) - } - - /// Get session id. 
- pub fn id(&self) -> &SessionId { - &self.core.meta.id - } - - /// Get migration id. - pub fn migration_id(&self) -> Option<&H256> { - self.core.migration_id.as_ref() - } - - /// Wait for session completion. - pub fn wait(&self) -> Result<(), Error> { - Self::wait_session(&self.core.completed, &self.data, None, |data| { - data.result.clone() - }) - .expect("wait_session returns Some if called without timeout; qed") - } - - /// Initialize servers set change session on master node. - pub fn initialize( - &self, - new_nodes_set: BTreeSet, - all_set_signature: Signature, - new_set_signature: Signature, - ) -> Result<(), Error> { - check_nodes_set(&self.core.all_nodes_set, &new_nodes_set)?; - - let mut data = self.data.lock(); - if data.state != SessionState::EstablishingConsensus || data.consensus_session.is_some() { - return Err(Error::InvalidStateForRequest); - } - - let mut consensus_session = ConsensusSession::new(ConsensusSessionParams { - meta: self - .core - .meta - .clone() - .into_consensus_meta(self.core.all_nodes_set.len())?, - consensus_executor: ServersSetChangeAccessJob::new_on_master( - self.core.admin_public.clone(), - self.core.all_nodes_set.clone(), - new_nodes_set.clone(), - all_set_signature, - new_set_signature, - ), - consensus_transport: ServersSetChangeConsensusTransport { - id: self.core.meta.id.clone(), - nonce: self.core.nonce, - migration_id: self.core.migration_id.clone(), - cluster: self.core.cluster.clone(), - }, - })?; - - consensus_session.initialize(self.core.all_nodes_set.clone())?; - - let is_finished = consensus_session.state() == ConsensusSessionState::ConsensusEstablished; - data.consensus_session = Some(consensus_session); - data.new_nodes_set = Some(new_nodes_set); - - // this is the case when all other nodes are isolated - if is_finished { - Self::complete_session(&self.core, &mut *data)?; - } - - Ok(()) - } - - /// Process servers set change message. 
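// A minimal, hedged sketch of the initialization guards above, with invented stand-in types: the
// proposed new set must be a subset of the known nodes, a session may only be initialized once,
// and when no other node is reachable the consensus step completes immediately.

use std::collections::BTreeSet;

type NodeId = u64;

struct SessionSketch {
    all_nodes_set: BTreeSet<NodeId>,
    initialized: bool,
    finished: bool,
}

impl SessionSketch {
    fn initialize(&mut self, new_nodes_set: &BTreeSet<NodeId>) -> Result<(), &'static str> {
        // every node in the new set must already be known/connected
        if new_nodes_set.iter().any(|n| !self.all_nodes_set.contains(n)) {
            return Err("node disconnected");
        }
        if self.initialized {
            return Err("invalid state for request");
        }
        self.initialized = true;
        // with a single reachable node, consensus is established right away
        if self.all_nodes_set.len() == 1 {
            self.finished = true;
        }
        Ok(())
    }
}

fn main() {
    let mut session = SessionSketch {
        all_nodes_set: vec![1u64].into_iter().collect(),
        initialized: false,
        finished: false,
    };
    let new_set: BTreeSet<NodeId> = vec![1u64].into_iter().collect();
    assert!(session.initialize(&new_set).is_ok());
    assert!(session.finished);
    assert_eq!(
        session.initialize(&BTreeSet::new()),
        Err("invalid state for request")
    );
}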
- pub fn process_message( - &self, - sender: &NodeId, - message: &ServersSetChangeMessage, - ) -> Result<(), Error> { - if self.core.nonce != message.session_nonce() { - return Err(Error::ReplayProtection); - } - - match message { - &ServersSetChangeMessage::ServersSetChangeConsensusMessage(ref message) => { - self.on_consensus_message(sender, message) - } - &ServersSetChangeMessage::UnknownSessionsRequest(ref message) => { - self.on_unknown_sessions_requested(sender, message) - } - &ServersSetChangeMessage::UnknownSessions(ref message) => { - self.on_unknown_sessions(sender, message) - } - &ServersSetChangeMessage::ShareChangeKeyVersionNegotiation(ref message) => { - self.on_key_version_negotiation(sender, message) - } - &ServersSetChangeMessage::InitializeShareChangeSession(ref message) => { - self.on_initialize_share_change_session(sender, message) - } - &ServersSetChangeMessage::ConfirmShareChangeSessionInitialization(ref message) => { - self.on_share_change_session_confirmation(sender, message) - } - &ServersSetChangeMessage::ServersSetChangeDelegate(ref message) => { - self.on_sessions_delegation(sender, message) - } - &ServersSetChangeMessage::ServersSetChangeDelegateResponse(ref message) => { - self.on_delegated_session_completed(sender, message) - } - &ServersSetChangeMessage::ServersSetChangeShareAddMessage(ref message) => { - self.on_share_add_message(sender, message) - } - &ServersSetChangeMessage::ServersSetChangeError(ref message) => { - self.on_session_error(sender, message.error.clone()); - Ok(()) - } - &ServersSetChangeMessage::ServersSetChangeCompleted(ref message) => { - self.on_session_completed(sender, message) - } - } - } - - /// When consensus-related message is received. - pub fn on_consensus_message( - &self, - sender: &NodeId, - message: &ServersSetChangeConsensusMessage, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - - // check state - let mut data = self.data.lock(); - if data.state != SessionState::EstablishingConsensus { - return Err(Error::InvalidStateForRequest); - } - - // start slave consensus session if needed - if self.core.meta.self_node_id != self.core.meta.master_node_id { - if data.consensus_session.is_none() { - match &message.message { - &ConsensusMessageWithServersSet::InitializeConsensusSession(_) => { - data.consensus_session = - Some(ConsensusSession::new(ConsensusSessionParams { - meta: self - .core - .meta - .clone() - .into_consensus_meta(self.core.all_nodes_set.len())?, - consensus_executor: ServersSetChangeAccessJob::new_on_slave( - self.core.admin_public.clone(), - ), - consensus_transport: ServersSetChangeConsensusTransport { - id: self.core.meta.id.clone(), - nonce: self.core.nonce, - migration_id: self.core.migration_id.clone(), - cluster: self.core.cluster.clone(), - }, - })?); - } - _ => return Err(Error::InvalidStateForRequest), - } - } - } - - // process consensus message - let consensus_session = data - .consensus_session - .as_mut() - .ok_or(Error::InvalidMessage)?; - let is_establishing_consensus = - consensus_session.state() == ConsensusSessionState::EstablishingConsensus; - match &message.message { - &ConsensusMessageWithServersSet::InitializeConsensusSession(ref message) => { - consensus_session.on_consensus_partial_request( - sender, - ServersSetChangeAccessRequest::from(message), - )? - } - &ConsensusMessageWithServersSet::ConfirmConsensusInitialization(ref message) => { - consensus_session.on_consensus_partial_response(sender, message.is_confirmed)? 
- } - } - - // when consensus is established => request unknown sessions - let is_consensus_established = - consensus_session.state() == ConsensusSessionState::ConsensusEstablished; - if self.core.meta.self_node_id != self.core.meta.master_node_id - || !is_establishing_consensus - || !is_consensus_established - { - return Ok(()); - } - - let unknown_sessions_job = UnknownSessionsJob::new_on_master( - self.core.key_storage.clone(), - self.core.meta.self_node_id.clone(), - ); - consensus_session - .disseminate_jobs( - unknown_sessions_job, - self.unknown_sessions_transport(), - false, - ) - .map(|_| ()) - } - - /// When unknown sessions are requested. - pub fn on_unknown_sessions_requested( - &self, - sender: &NodeId, - message: &UnknownSessionsRequest, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); - - let mut data = self.data.lock(); - - let new_nodes_set = { - let consensus_session = data - .consensus_session - .as_mut() - .ok_or(Error::InvalidMessage)?; - let unknown_sessions_job = - UnknownSessionsJob::new_on_slave(self.core.key_storage.clone()); - let unknown_sessions_transport = self.unknown_sessions_transport(); - - // and respond with unknown sessions - consensus_session.on_job_request( - &sender, - sender.clone(), - unknown_sessions_job, - unknown_sessions_transport, - )?; - - consensus_session.consensus_job().executor() - .new_servers_set() - .expect("consensus session is now completed; new_servers_set is intermediate result of consensus session; qed") - .clone() - }; - - // update state - data.state = SessionState::RunningShareChangeSessions; - data.new_nodes_set = Some(new_nodes_set); - - Ok(()) - } - - /// When unknown sessions are received. - pub fn on_unknown_sessions( - &self, - sender: &NodeId, - message: &UnknownSessions, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); - - // check state - let mut data = self.data.lock(); - if data.state != SessionState::EstablishingConsensus { - return Err(Error::InvalidStateForRequest); - } - - // process message - let unknown_sessions = { - let consensus_session = data - .consensus_session - .as_mut() - .ok_or(Error::InvalidMessage)?; - consensus_session.on_job_response( - sender, - message - .unknown_sessions - .iter() - .cloned() - .map(Into::into) - .collect(), - )?; - if consensus_session.state() != ConsensusSessionState::Finished { - return Ok(()); - } - - // all nodes have reported their unknown sessions - // => we are ready to start adding/moving/removing shares - consensus_session.result()? - }; - - // initialize sessions queue - data.state = SessionState::RunningShareChangeSessions; - data.sessions_queue = Some(SessionsQueue::new( - &self.core.key_storage, - unknown_sessions.keys().cloned().collect(), - )); - - // and disseminate session initialization requests - Self::disseminate_session_initialization_requests(&self.core, &mut *data) - } - - /// When key version negotiation message is received. 
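// A rough, hedged sketch of the unknown-sessions exchange handled above, with simplified
// stand-in types: every slave reports the key ids it holds that the master does not participate
// in, and the master aggregates them into a key id -> reporting nodes map before planning the
// per-key share changes.

use std::collections::{BTreeMap, BTreeSet};

type NodeId = u64;
type KeyId = u64;

fn collect_unknown_sessions(
    master_keys: &BTreeSet<KeyId>,
    slave_reports: &BTreeMap<NodeId, BTreeSet<KeyId>>,
) -> BTreeMap<KeyId, BTreeSet<NodeId>> {
    let mut unknown: BTreeMap<KeyId, BTreeSet<NodeId>> = BTreeMap::new();
    for (node, keys) in slave_reports {
        for key in keys {
            if !master_keys.contains(key) {
                unknown.entry(*key).or_default().insert(*node);
            }
        }
    }
    unknown
}

fn main() {
    let master_keys: BTreeSet<KeyId> = vec![1].into_iter().collect();
    let mut reports: BTreeMap<NodeId, BTreeSet<KeyId>> = BTreeMap::new();
    reports.insert(10, vec![1, 2].into_iter().collect());
    let unknown = collect_unknown_sessions(&master_keys, &reports);
    assert!(unknown.contains_key(&2) && !unknown.contains_key(&1));
}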
- pub fn on_key_version_negotiation( - &self, - sender: &NodeId, - message: &ShareChangeKeyVersionNegotiation, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); - - // check state - let mut data = self.data.lock(); - if data.state != SessionState::RunningShareChangeSessions { - return Err(Error::InvalidStateForRequest); - } - - // process message - match &message.message { - &KeyVersionNegotiationMessage::RequestKeyVersions(ref message) - if sender == &self.core.meta.master_node_id => - { - let key_id = message.session.clone().into(); - let key_share = self.core.key_storage.get(&key_id)?; - let negotiation_session = - KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams { - meta: ShareChangeSessionMeta { - id: key_id.clone(), - self_node_id: self.core.meta.self_node_id.clone(), - master_node_id: sender.clone(), - configured_nodes_count: self.core.meta.configured_nodes_count, - connected_nodes_count: self.core.meta.connected_nodes_count, - }, - sub_session: message.sub_session.clone().into(), - key_share: key_share, - result_computer: Arc::new(LargestSupportResultComputer {}), - transport: ServersSetChangeKeyVersionNegotiationTransport { - id: self.core.meta.id.clone(), - nonce: self.core.nonce, - cluster: self.core.cluster.clone(), - }, - nonce: message.session_nonce, - }); - negotiation_session.on_key_versions_request(sender, message)?; - debug_assert!(negotiation_session.is_finished()); - Ok(()) - } - &KeyVersionNegotiationMessage::KeyVersions(ref message) - if self.core.meta.self_node_id == self.core.meta.master_node_id => - { - let key_id = message.session.clone().into(); - { - let negotiation_session = data - .negotiation_sessions - .get(&key_id) - .ok_or(Error::InvalidMessage)?; - negotiation_session.on_key_versions(sender, message)?; - if !negotiation_session.is_finished() { - return Ok(()); - } - } - - // else prepare plan && start share change session - if !Self::initialize_share_change_session(&self.core, &mut *data, key_id)? { - Self::disseminate_session_initialization_requests(&self.core, &mut *data)?; - } - - Ok(()) - } - _ => Err(Error::InvalidMessage), - } - } - - /// When share change session initialization is requested. 
- pub fn on_initialize_share_change_session( - &self, - sender: &NodeId, - message: &InitializeShareChangeSession, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); - - // we only accept delegation requests from master node - if sender != &self.core.meta.master_node_id { - return Err(Error::InvalidMessage); - } - - // check state - let mut data = self.data.lock(); - if data.state != SessionState::RunningShareChangeSessions { - return Err(Error::InvalidStateForRequest); - } - - // insert new session - let key_id = message.key_id.clone().into(); - match data.active_key_sessions.contains_key(&key_id) { - true => return Err(Error::InvalidMessage), - false => { - let master_plan = ShareChangeSessionPlan { - key_version: message.version.clone().into(), - version_holders: message - .version_holders - .iter() - .cloned() - .map(Into::into) - .collect(), - consensus_group: message - .consensus_group - .iter() - .cloned() - .map(Into::into) - .collect(), - new_nodes_map: message - .new_nodes_map - .iter() - .map(|(k, v)| (k.clone().into(), v.clone().map(Into::into))) - .collect(), - }; - - // if master plan is empty, it is cheating - if master_plan.is_empty() { - return Err(Error::InvalidMessage); - } - - // on nodes, holding selected key share version, we could check if master node plan is correct - let master_node_id = message.master_node_id.clone().into(); - if let Some(key_share) = self.core.key_storage.get(&key_id)? { - let version = message.version.clone().into(); - let key_share_owners = message - .version_holders - .iter() - .cloned() - .map(Into::into) - .collect(); - let new_nodes_set = data.new_nodes_set.as_ref() - .expect("new_nodes_set is filled during consensus establishing; change sessions are running after this; qed"); - let local_plan = prepare_share_change_session_plan( - &self.core.all_nodes_set, - key_share.threshold, - &key_id, - version, - &master_node_id, - &key_share_owners, - new_nodes_set, - )?; - - if local_plan.new_nodes_map.keys().collect::>() - != master_plan.new_nodes_map.keys().collect::>() - { - return Err(Error::InvalidMessage); - } - } - - let session = Self::create_share_change_session( - &self.core, - key_id, - master_node_id, - master_plan, - )?; - if !session.is_finished() { - data.active_key_sessions.insert(key_id.clone(), session); - } - } - }; - - // send confirmation - self.core.cluster.send( - sender, - Message::ServersSetChange( - ServersSetChangeMessage::ConfirmShareChangeSessionInitialization( - ConfirmShareChangeSessionInitialization { - session: message.session.clone(), - session_nonce: message.session_nonce.clone(), - key_id: message.key_id.clone(), - }, - ), - ), - ) - } - - /// When share change session initialization is confirmed. 
- pub fn on_share_change_session_confirmation( - &self, - sender: &NodeId, - message: &ConfirmShareChangeSessionInitialization, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); - - // we only accept delegation requests from master node - if self.core.meta.self_node_id != self.core.meta.master_node_id { - return Err(Error::InvalidMessage); - } - - // check state - let mut data = self.data.lock(); - if data.state != SessionState::RunningShareChangeSessions { - return Err(Error::InvalidStateForRequest); - } - - // add confirmation - let key_id = message.key_id.clone().into(); - let session_master = { - let session_init_data = data - .sessions_initialization_state - .get_mut(&key_id) - .ok_or(Error::InvalidMessage)?; - if !session_init_data.confirmations.remove(sender) { - return Err(Error::InvalidMessage); - } - - if !session_init_data.confirmations.is_empty() { - return Ok(()); - } - - session_init_data.master.clone() - }; - - // and start/delegate session if required - data.sessions_initialization_state.remove(&key_id); - if self.core.meta.self_node_id != session_master { - data.delegated_key_sessions - .insert(key_id, session_master.clone()); - return self.core.cluster.send( - &session_master, - Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegate( - ServersSetChangeDelegate { - session: self.core.meta.id.clone().into(), - session_nonce: self.core.nonce, - key_id: key_id.into(), - }, - )), - ); - } - - // initialize share change session - { - let key_session = data - .active_key_sessions - .get_mut(&key_id) - .ok_or(Error::InvalidMessage)?; - key_session.initialize()?; - if !key_session.is_finished() { - return Ok(()); - } - } - - // complete key session - Self::complete_key_session(&self.core, &mut *data, true, key_id) - } - - /// When sessions execution is delegated to this node. - pub fn on_sessions_delegation( - &self, - sender: &NodeId, - message: &ServersSetChangeDelegate, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); - - // we only accept delegation requests from master node - if sender != &self.core.meta.master_node_id { - return Err(Error::InvalidMessage); - } - - // check state - let mut data = self.data.lock(); - if data.state != SessionState::RunningShareChangeSessions { - return Err(Error::InvalidStateForRequest); - } - - // start session - let key_session = data - .active_key_sessions - .get_mut(&message.key_id.clone().into()) - .ok_or(Error::InvalidMessage)?; - key_session.initialize() - } - - /// When delegated session execution is completed. 
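// A minimal sketch of the confirmation tracking above, with simplified stand-in types: the master
// records which nodes still have to confirm a per-key session; when the last confirmation arrives
// it either runs the session itself or delegates it to the selected sub-master.

use std::collections::BTreeSet;

type NodeId = u64;

struct PendingInit {
    master: NodeId,
    confirmations: BTreeSet<NodeId>,
}

enum Next {
    Wait,
    RunLocally,
    Delegate(NodeId),
}

fn on_confirmation(self_node: NodeId, pending: &mut PendingInit, from: NodeId) -> Result<Next, &'static str> {
    // a confirmation from a node we were not waiting for is treated as an invalid message
    if !pending.confirmations.remove(&from) {
        return Err("invalid message");
    }
    if !pending.confirmations.is_empty() {
        return Ok(Next::Wait);
    }
    Ok(if pending.master == self_node {
        Next::RunLocally
    } else {
        Next::Delegate(pending.master)
    })
}

fn main() {
    let mut pending = PendingInit {
        master: 7,
        confirmations: vec![1u64, 2].into_iter().collect(),
    };
    assert!(matches!(on_confirmation(7, &mut pending, 1), Ok(Next::Wait)));
    assert!(matches!(on_confirmation(7, &mut pending, 2), Ok(Next::RunLocally)));
}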
- pub fn on_delegated_session_completed( - &self, - sender: &NodeId, - message: &ServersSetChangeDelegateResponse, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); - - // we only accept delegation requests on master node - if self.core.meta.self_node_id != self.core.meta.master_node_id { - return Err(Error::InvalidMessage); - } - - // check state - let mut data = self.data.lock(); - if data.state != SessionState::RunningShareChangeSessions { - return Err(Error::InvalidStateForRequest); - } - - // forget delegated session - let key_id = message.key_id.clone().into(); - match data.delegated_key_sessions.entry(key_id) { - Entry::Occupied(entry) => { - if entry.get() == sender { - entry.remove() - } else { - return Err(Error::InvalidMessage); - } - } - _ => return Err(Error::InvalidMessage), - }; - - // check if we need to complete the whole change session - Self::disseminate_session_initialization_requests(&self.core, &mut *data) - } - - /// When share add message is received. - pub fn on_share_add_message( - &self, - sender: &NodeId, - message: &ServersSetChangeShareAddMessage, - ) -> Result<(), Error> { - self.on_share_change_message(message.message.session_id().clone().into(), |session| { - session.on_share_add_message(sender, &message.message) - }) - } - - /// When session completion message is received. - pub fn on_session_completed( - &self, - sender: &NodeId, - message: &ServersSetChangeCompleted, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); - - if sender != &self.core.meta.master_node_id { - return Err(Error::InvalidMessage); - } - - let mut data = self.data.lock(); - data.result = Some(Ok(())); - if data.active_key_sessions.len() != 0 { - return Err(Error::TooEarlyForRequest); - } - - // if we are on the set of nodes that are being removed from the cluster, let's clear database - if !data.new_nodes_set.as_ref() - .expect("new_nodes_set is filled during initialization; session is completed after initialization; qed") - .contains(&self.core.meta.self_node_id) { - self.core.key_storage.clear()?; - } - - data.state = SessionState::Finished; - self.core.completed.notify_all(); - - Ok(()) - } - - /// Create unknown sessions transport. - fn unknown_sessions_transport(&self) -> UnknownSessionsJobTransport { - UnknownSessionsJobTransport { - id: self.core.meta.id.clone(), - nonce: self.core.nonce, - cluster: self.core.cluster.clone(), - } - } - - /// When share change message is received. - fn on_share_change_message Result<(), Error>>( - &self, - session_id: SessionId, - message_processor: F, - ) -> Result<(), Error> { - // check state - let mut data = self.data.lock(); - if data.state != SessionState::RunningShareChangeSessions { - return Err(Error::InvalidStateForRequest); - } - - // process message - let (is_finished, is_master) = { - let key_session = data - .active_key_sessions - .get_mut(&session_id) - .ok_or(Error::InvalidMessage)?; - message_processor(key_session)?; - (key_session.is_finished(), key_session.is_master()) - }; - - if is_finished { - Self::complete_key_session(&self.core, &mut *data, is_master, session_id)?; - } - - Ok(()) - } - - /// Create share change session. 
- fn create_share_change_session( - core: &SessionCore, - key_id: SessionId, - master_node_id: NodeId, - session_plan: ShareChangeSessionPlan, - ) -> Result { - ShareChangeSession::new(ShareChangeSessionParams { - session_id: core.meta.id.clone(), - nonce: core.nonce, - meta: ShareChangeSessionMeta { - id: key_id, - self_node_id: core.meta.self_node_id.clone(), - master_node_id: master_node_id, - configured_nodes_count: core.meta.configured_nodes_count, - connected_nodes_count: core.meta.connected_nodes_count, - }, - cluster: core.cluster.clone(), - key_storage: core.key_storage.clone(), - plan: session_plan, - }) - } - - /// Disseminate session initialization requests. - fn disseminate_session_initialization_requests( - core: &SessionCore, - data: &mut SessionData, - ) -> Result<(), Error> { - debug_assert_eq!(core.meta.self_node_id, core.meta.master_node_id); - if data.sessions_queue.is_some() { - let number_of_sessions_active = data.active_key_sessions.len() - + data.delegated_key_sessions.len() - + data.negotiation_sessions.len(); - let mut number_of_sessions_to_start = - MAX_ACTIVE_KEY_SESSIONS.saturating_sub(number_of_sessions_active); - while number_of_sessions_to_start > 0 { - let key_id = match data - .sessions_queue - .as_mut() - .expect("checked before beginning of the loop; qed") - .next() - { - None => break, // complete session - Some(Err(e)) => return Err(e), - Some(Ok(key_id)) => key_id, - }; - - let key_share = core.key_storage.get(&key_id)?; - let negotiation_session = - KeyVersionNegotiationSessionImpl::new(KeyVersionNegotiationSessionParams { - meta: ShareChangeSessionMeta { - id: key_id, - self_node_id: core.meta.self_node_id.clone(), - master_node_id: core.meta.self_node_id.clone(), - configured_nodes_count: core.meta.configured_nodes_count, - connected_nodes_count: core.meta.connected_nodes_count, - }, - sub_session: math::generate_random_scalar()?, - key_share: key_share, - result_computer: Arc::new(LargestSupportResultComputer {}), // TODO [Opt]: could use modified Fast version - transport: ServersSetChangeKeyVersionNegotiationTransport { - id: core.meta.id.clone(), - nonce: core.nonce, - cluster: core.cluster.clone(), - }, - nonce: 0, - }); - negotiation_session.initialize(core.cluster.nodes())?; - if !negotiation_session.is_finished() { - data.negotiation_sessions - .insert(key_id, negotiation_session); - continue; - } - - if !Self::initialize_share_change_session(core, data, key_id)? { - continue; - } - - number_of_sessions_to_start = number_of_sessions_to_start - 1; - } - - // if iteration is not yet finished => return - if number_of_sessions_to_start == 0 { - return Ok(()); - } - } - - // iteration is finished => complete session - if data.state != SessionState::Finished { - data.sessions_queue = None; - if data.active_key_sessions.len() == 0 - && data.delegated_key_sessions.len() == 0 - && data.negotiation_sessions.len() == 0 - { - Self::complete_session(core, data)?; - } - } - - Ok(()) - } - - /// Initialize share change session. - fn initialize_share_change_session( - core: &SessionCore, - data: &mut SessionData, - key_id: SessionId, - ) -> Result { - // get selected version && old nodes set from key negotiation session - let negotiation_session = data - .negotiation_sessions - .remove(&key_id) - .expect("share change session is only initialized when negotiation is completed; qed"); - let (selected_version, selected_master) = negotiation_session - .wait()? 
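// A hedged sketch of the dissemination loop above, with simplified types: the master walks a
// queue of key ids and never lets more than a fixed number of share-change, delegated, or
// negotiation sessions run at once; once the queue is drained and nothing is active, the whole
// servers-set-change session can complete. The queue-length cap is a simplification of the
// real loop, which simply stops pulling from the queue when it is exhausted.

const MAX_ACTIVE: usize = 64;

fn sessions_to_start(queue_len: usize, active: usize, delegated: usize, negotiating: usize) -> usize {
    let running = active + delegated + negotiating;
    MAX_ACTIVE.saturating_sub(running).min(queue_len)
}

fn main() {
    assert_eq!(sessions_to_start(100, 60, 3, 1), 0); // already at the cap
    assert_eq!(sessions_to_start(100, 10, 0, 0), 54); // room for 54 more
    assert_eq!(sessions_to_start(5, 0, 0, 0), 5); // limited by the queue itself
}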
- .expect("initialize_share_change_session is only called on share change master; negotiation session completes with some on master; qed"); - let selected_version_holders = negotiation_session.version_holders(&selected_version)?; - let selected_version_threshold = negotiation_session.common_key_data()?.threshold; - - // prepare session change plan && check if something needs to be changed - let old_nodes_set = selected_version_holders; - let new_nodes_set = data.new_nodes_set.as_ref() - .expect("this method is called after consensus estabished; new_nodes_set is a result of consensus session; qed"); - let session_plan = prepare_share_change_session_plan( - &core.all_nodes_set, - selected_version_threshold, - &key_id, - selected_version.clone(), - &selected_master, - &old_nodes_set, - new_nodes_set, - )?; - if session_plan.is_empty() { - return Ok(false); - } - - // send key session initialization requests - let mut confirmations: BTreeSet<_> = session_plan.new_nodes_map.keys().cloned().collect(); - let need_create_session = confirmations.remove(&core.meta.self_node_id); - let initialization_message = Message::ServersSetChange( - ServersSetChangeMessage::InitializeShareChangeSession(InitializeShareChangeSession { - session: core.meta.id.clone().into(), - session_nonce: core.nonce, - key_id: key_id.clone().into(), - version: selected_version.into(), - version_holders: old_nodes_set.iter().cloned().map(Into::into).collect(), - master_node_id: selected_master.clone().into(), - consensus_group: session_plan - .consensus_group - .iter() - .cloned() - .map(Into::into) - .collect(), - new_nodes_map: session_plan - .new_nodes_map - .iter() - .map(|(n, nid)| (n.clone().into(), nid.clone().map(Into::into))) - .collect(), - }), - ); - for node in &confirmations { - core.cluster.send(&node, initialization_message.clone())?; - } - - // create session on this node if required - if need_create_session { - data.active_key_sessions.insert( - key_id.clone(), - Self::create_share_change_session( - core, - key_id, - selected_master.clone(), - session_plan, - )?, - ); - } - - // initialize session if required - let wait_for_confirmations = !confirmations.is_empty(); - if !wait_for_confirmations { - data.active_key_sessions.get_mut(&key_id) - .expect("!wait_for_confirmations is true only if this is the only session participant; if this is session participant, session is created above; qed") - .initialize()?; - } else { - data.sessions_initialization_state.insert( - key_id, - SessionInitializationData { - master: selected_master, - confirmations: confirmations, - }, - ); - } - - Ok(true) - } - - /// Return delegated session to master. - fn return_delegated_session(core: &SessionCore, key_id: &SessionId) -> Result<(), Error> { - assert!(core.meta.self_node_id != core.meta.master_node_id); - core.cluster.send( - &core.meta.master_node_id, - Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeDelegateResponse( - ServersSetChangeDelegateResponse { - session: core.meta.id.clone().into(), - session_nonce: core.nonce, - key_id: key_id.clone().into(), - }, - )), - ) - } - - /// Complete key session. 
- fn complete_key_session( - core: &SessionCore, - data: &mut SessionData, - is_master: bool, - session_id: SessionId, - ) -> Result<(), Error> { - data.active_key_sessions.remove(&session_id); - let is_general_master = core.meta.self_node_id == core.meta.master_node_id; - if is_master && !is_general_master { - Self::return_delegated_session(core, &session_id)?; - } - if is_general_master { - Self::disseminate_session_initialization_requests(core, data)?; - } - - if data.result.is_some() && data.active_key_sessions.len() == 0 { - data.state = SessionState::Finished; - core.completed.notify_all(); - } - - Ok(()) - } - - /// Complete servers set change session. - fn complete_session(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { - debug_assert_eq!(core.meta.self_node_id, core.meta.master_node_id); - - // send completion notification - core.cluster.broadcast(Message::ServersSetChange( - ServersSetChangeMessage::ServersSetChangeCompleted(ServersSetChangeCompleted { - session: core.meta.id.clone().into(), - session_nonce: core.nonce, - }), - ))?; - - // if we are on the set of nodes that are being removed from the cluster, let's clear database - if !data.new_nodes_set.as_ref() - .expect("new_nodes_set is filled during initialization; session is completed after initialization; qed") - .contains(&core.meta.self_node_id) { - core.key_storage.clear()?; - } - - data.state = SessionState::Finished; - data.result = Some(Ok(())); - core.completed.notify_all(); - - Ok(()) - } -} - -impl ClusterSession for SessionImpl { - type Id = SessionId; - - fn type_name() -> &'static str { - "servers set change" - } - - fn id(&self) -> SessionId { - self.core.meta.id.clone() - } - - fn is_finished(&self) -> bool { - self.data.lock().state == SessionState::Finished - } - - fn on_session_timeout(&self) { - self.on_session_error(&self.core.meta.self_node_id, Error::NodeDisconnected); - } - - fn on_node_timeout(&self, node: &NodeId) { - self.on_session_error(node, Error::NodeDisconnected); - } - - fn on_session_error(&self, node: &NodeId, error: Error) { - // error in generation session is considered fatal - // => broadcast error if error occured on this node - if *node == self.core.meta.self_node_id { - // do not bother processing send error, as we already processing error - let _ = self.core.cluster.broadcast(Message::ServersSetChange( - ServersSetChangeMessage::ServersSetChangeError(ServersSetChangeError { - session: self.core.meta.id.clone().into(), - session_nonce: self.core.nonce, - error: error.clone().into(), - }), - )); - } - - let mut data = self.data.lock(); - - warn!(target: "secretstore_net", "{}: servers set change session failed: {} on {}", - self.core.meta.self_node_id, error, node); - - data.state = SessionState::Finished; - data.result = Some(Err(error)); - self.core.completed.notify_all(); - } - - fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { - match *message { - Message::ServersSetChange(ref message) => self.process_message(sender, message), - _ => unreachable!("cluster checks message to be correct before passing; qed"), - } - } -} - -impl JobTransport for ServersSetChangeConsensusTransport { - type PartialJobRequest = ServersSetChangeAccessRequest; - type PartialJobResponse = bool; - - fn send_partial_request( - &self, - node: &NodeId, - request: ServersSetChangeAccessRequest, - ) -> Result<(), Error> { - self.cluster.send( - node, - Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage( - 
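// A small, hedged sketch of the completion step above (simplified stand-in types): when the
// servers-set-change session finishes, a node that is not part of the new server set wipes its
// local key shares, since it is being removed from the cluster.

use std::collections::BTreeSet;

fn on_completed(self_node: u64, new_nodes_set: &BTreeSet<u64>, key_shares: &mut Vec<u64>) {
    if !new_nodes_set.contains(&self_node) {
        key_shares.clear(); // this node is leaving, its shares are no longer needed
    }
}

fn main() {
    let new_set: BTreeSet<u64> = vec![1, 2].into_iter().collect();
    let mut shares = vec![100, 101];
    on_completed(3, &new_set, &mut shares);
    assert!(shares.is_empty());
}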
ServersSetChangeConsensusMessage { - session: self.id.clone().into(), - session_nonce: self.nonce, - message: ConsensusMessageWithServersSet::InitializeConsensusSession( - InitializeConsensusSessionWithServersSet { - migration_id: self.migration_id.clone().map(Into::into), - old_nodes_set: request - .old_servers_set - .into_iter() - .map(Into::into) - .collect(), - new_nodes_set: request - .new_servers_set - .into_iter() - .map(Into::into) - .collect(), - old_set_signature: request.old_set_signature.into(), - new_set_signature: request.new_set_signature.into(), - }, - ), - }, - )), - ) - } - - fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> { - self.cluster.send( - node, - Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeConsensusMessage( - ServersSetChangeConsensusMessage { - session: self.id.clone().into(), - session_nonce: self.nonce, - message: ConsensusMessageWithServersSet::ConfirmConsensusInitialization( - ConfirmConsensusInitialization { - is_confirmed: response, - }, - ), - }, - )), - ) - } -} - -impl JobTransport for UnknownSessionsJobTransport { - type PartialJobRequest = NodeId; - type PartialJobResponse = BTreeSet; - - fn send_partial_request(&self, node: &NodeId, _request: NodeId) -> Result<(), Error> { - self.cluster.send( - node, - Message::ServersSetChange(ServersSetChangeMessage::UnknownSessionsRequest( - UnknownSessionsRequest { - session: self.id.clone().into(), - session_nonce: self.nonce, - }, - )), - ) - } - - fn send_partial_response( - &self, - node: &NodeId, - response: BTreeSet, - ) -> Result<(), Error> { - self.cluster.send( - node, - Message::ServersSetChange(ServersSetChangeMessage::UnknownSessions(UnknownSessions { - session: self.id.clone().into(), - session_nonce: self.nonce, - unknown_sessions: response.into_iter().map(Into::into).collect(), - })), - ) - } -} - -impl KeyVersionNegotiationTransport for ServersSetChangeKeyVersionNegotiationTransport { - fn broadcast(&self, message: KeyVersionNegotiationMessage) -> Result<(), Error> { - self.cluster.broadcast(Message::ServersSetChange( - ServersSetChangeMessage::ShareChangeKeyVersionNegotiation( - ShareChangeKeyVersionNegotiation { - session: self.id.clone().into(), - session_nonce: self.nonce, - message: message, - }, - ), - )) - } - - fn send(&self, node: &NodeId, message: KeyVersionNegotiationMessage) -> Result<(), Error> { - self.cluster.send( - node, - Message::ServersSetChange(ServersSetChangeMessage::ShareChangeKeyVersionNegotiation( - ShareChangeKeyVersionNegotiation { - session: self.id.clone().into(), - session_nonce: self.nonce, - message: message, - }, - )), - ) - } -} - -fn check_nodes_set( - all_nodes_set: &BTreeSet, - new_nodes_set: &BTreeSet, -) -> Result<(), Error> { - // all_nodes_set is the set of nodes we're currently connected to (and configured for) - match new_nodes_set.iter().any(|n| !all_nodes_set.contains(n)) { - true => Err(Error::NodeDisconnected), - false => Ok(()), - } -} - -#[cfg(test)] -pub mod tests { - use super::{SessionImpl, SessionParams}; - use ethereum_types::H256; - use ethkey::{sign, Generator, KeyPair, Public, Random, Signature}; - use key_server_cluster::{ - admin_sessions::ShareChangeSessionMeta, cluster::tests::MessageLoop as ClusterMessageLoop, - cluster_sessions::ClusterSession, - generation_session::tests::MessageLoop as GenerationMessageLoop, - jobs::servers_set_change_access_job::ordered_nodes_hash, math, message::Message, Error, - KeyStorage, NodeId, NodeKeyPair, PlainNodeKeyPair, SessionId, - }; - use 
std::{ - collections::{BTreeMap, BTreeSet, VecDeque}, - sync::Arc, - }; - - pub trait AdminSessionAdapter { - const SIGN_NEW_NODES: bool; - - fn create( - meta: ShareChangeSessionMeta, - admin_public: Public, - all_nodes_set: BTreeSet, - ml: &ClusterMessageLoop, - idx: usize, - ) -> S; - } - - pub struct MessageLoop { - pub ml: ClusterMessageLoop, - pub admin_key_pair: KeyPair, - pub original_key_pair: KeyPair, - pub original_key_version: H256, - pub all_nodes_set: BTreeSet, - pub new_nodes_set: BTreeSet, - pub all_set_signature: Signature, - pub new_set_signature: Signature, - pub sessions: BTreeMap, - pub queue: VecDeque<(NodeId, NodeId, Message)>, - } - - impl ::std::fmt::Debug for MessageLoop { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - write!(f, "{:?}", self.ml) - } - } - - struct Adapter; - - impl AdminSessionAdapter for Adapter { - const SIGN_NEW_NODES: bool = true; - - fn create( - mut meta: ShareChangeSessionMeta, - admin_public: Public, - all_nodes_set: BTreeSet, - ml: &ClusterMessageLoop, - idx: usize, - ) -> SessionImpl { - meta.self_node_id = *ml.node_key_pair(idx).public(); - SessionImpl::new(SessionParams { - meta: meta, - all_nodes_set: all_nodes_set, - cluster: ml.cluster(idx).view().unwrap(), - key_storage: ml.key_storage(idx).clone(), - nonce: 1, - admin_public: admin_public, - migration_id: None, - }) - .unwrap() - } - } - - impl MessageLoop { - pub fn with_gml>( - gml: GenerationMessageLoop, - master: NodeId, - add: Option>, - removed_nodes_ids: Option>, - isolated_nodes_ids: Option>, - ) -> Self { - // read generated key data - let original_key_pair = gml.compute_key_pair(); - let original_key_version = gml.key_version(); - Self::with_ml::( - gml.0, - original_key_pair, - original_key_version, - master, - add, - removed_nodes_ids, - isolated_nodes_ids, - ) - } - - pub fn and_then>( - self, - master: NodeId, - add: Option>, - removed_nodes_ids: Option>, - isolated_nodes_ids: Option>, - ) -> Self { - Self::with_ml::( - self.ml, - self.original_key_pair, - self.original_key_version, - master, - add, - removed_nodes_ids, - isolated_nodes_ids, - ) - } - - pub fn with_ml>( - mut ml: ClusterMessageLoop, - original_key_pair: KeyPair, - original_key_version: H256, - master: NodeId, - add: Option>, - removed_nodes_ids: Option>, - isolated_nodes_ids: Option>, - ) -> Self { - let add = add.unwrap_or_default(); - let removed_nodes_ids = removed_nodes_ids.unwrap_or_default(); - let isolated_nodes_ids = isolated_nodes_ids.unwrap_or_default(); - - // generate admin key pair - let admin_key_pair = Random.generate().unwrap(); - let admin_public = admin_key_pair.public().clone(); - - // all active nodes set - let mut all_nodes_set: BTreeSet<_> = ml - .nodes() - .into_iter() - .filter(|n| !isolated_nodes_ids.contains(n)) - .collect(); - // new nodes set includes all old nodes, except nodes being removed + all nodes being added - let new_nodes_set: BTreeSet = all_nodes_set - .iter() - .cloned() - .chain(add.iter().map(|kp| *kp.public())) - .filter(|n| !removed_nodes_ids.contains(n)) - .collect(); - let mut old_set_to_sign = all_nodes_set.clone(); - all_nodes_set.extend(add.iter().map(|kp| *kp.public())); - if C::SIGN_NEW_NODES { - old_set_to_sign.extend(add.iter().map(|kp| *kp.public())); - } - for isolated_node_id in &isolated_nodes_ids { - all_nodes_set.remove(isolated_node_id); - } - - let meta = ShareChangeSessionMeta { - self_node_id: master, - master_node_id: master, - id: SessionId::default(), - configured_nodes_count: all_nodes_set.len(), - 
connected_nodes_count: all_nodes_set.len(), - }; - - // include new nodes in the cluster - for node_key_pair in &add { - ml.include(Arc::new(PlainNodeKeyPair::new(node_key_pair.clone()))); - } - // isolate nodes from the cluster - for isolated_node_id in &isolated_nodes_ids { - let idx = ml - .nodes() - .iter() - .position(|n| n == isolated_node_id) - .unwrap(); - ml.exclude(idx); - } - - // prepare set of nodes - let sessions: BTreeMap<_, _> = (0..ml.nodes().len()) - .map(|idx| { - ( - ml.node(idx), - C::create(meta.clone(), admin_public, all_nodes_set.clone(), &ml, idx), - ) - }) - .collect(); - - let all_set_signature = sign( - admin_key_pair.secret(), - &ordered_nodes_hash(&old_set_to_sign), - ) - .unwrap(); - let new_set_signature = - sign(admin_key_pair.secret(), &ordered_nodes_hash(&new_nodes_set)).unwrap(); - - MessageLoop { - ml, - admin_key_pair: admin_key_pair, - original_key_pair, - original_key_version, - all_nodes_set: all_nodes_set.clone(), - new_nodes_set: new_nodes_set, - all_set_signature: all_set_signature, - new_set_signature: new_set_signature, - sessions, - queue: Default::default(), - } - } - - pub fn run(&mut self) { - // run session until completion - while let Some((from, to, message)) = self.take_message() { - self.process_message((from, to, message)).unwrap(); - } - - // check that all sessions have finished - assert!(self.sessions.values().all(|s| s.is_finished())); - } - - pub fn take_message(&mut self) -> Option<(NodeId, NodeId, Message)> { - self.ml.take_message().or_else(|| self.queue.pop_front()) - } - - pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> { - match self.sessions[&msg.1].on_message(&msg.0, &msg.2) { - Ok(_) => Ok(()), - Err(Error::TooEarlyForRequest) => { - self.queue.push_back(msg); - Ok(()) - } - Err(err) => Err(err), - } - } - - /// This only works for schemes where threshold = 1 - pub fn check_secret_is_preserved<'a, I: IntoIterator>(&self, nodes: I) { - let nodes: Vec<_> = nodes.into_iter().collect(); - let key_storages: Vec<_> = nodes.iter().map(|n| self.ml.key_storage_of(n)).collect(); - let n = nodes.len(); - let document_secret_plain = math::generate_random_point().unwrap(); - for n1 in 0..n { - for n2 in n1 + 1..n { - let share1 = key_storages[n1].get(&SessionId::default()).unwrap(); - let share2 = key_storages[n2].get(&SessionId::default()).unwrap(); - - let id_number1 = share1.as_ref().unwrap().last_version().unwrap().id_numbers - [nodes[n1]] - .clone(); - let id_number2 = share1.as_ref().unwrap().last_version().unwrap().id_numbers - [nodes[n2]] - .clone(); - // now encrypt and decrypt data - let (document_secret_decrypted, document_secret_decrypted_test) = - math::tests::do_encryption_and_decryption( - 1, - self.original_key_pair.public(), - &[id_number1, id_number2], - &[ - share1.unwrap().last_version().unwrap().secret_share.clone(), - share2.unwrap().last_version().unwrap().secret_share.clone(), - ], - Some(self.original_key_pair.secret()), - document_secret_plain.clone(), - ); - - assert_eq!(document_secret_plain, document_secret_decrypted_test); - assert_eq!(document_secret_plain, document_secret_decrypted); - } - } - } - } - - impl MessageLoop { - pub fn run_at(mut self, master: NodeId) -> Self { - self.sessions[&master] - .initialize( - self.new_nodes_set.clone(), - self.all_set_signature.clone(), - self.new_set_signature.clone(), - ) - .unwrap(); - self.run(); - self - } - } - - pub fn generate_key(num_nodes: usize, threshold: usize) -> GenerationMessageLoop { - let gml = 
GenerationMessageLoop::new(num_nodes) - .init(threshold) - .unwrap(); - gml.0.loop_until(|| gml.0.is_empty()); - gml - } - - #[test] - fn node_added_using_servers_set_change() { - // initial 2-of-3 session - let gml = generate_key(3, 1); - - // add 1 node so that it becames 2-of-4 session - let add = vec![Random.generate().unwrap()]; - let master = gml.0.node(0); - let ml = - MessageLoop::with_gml::(gml, master, Some(add), None, None).run_at(master); - - // try to recover secret for every possible combination of nodes && check that secret is the same - ml.check_secret_is_preserved(ml.sessions.keys()); - } - - #[test] - fn node_added_using_server_set_change_from_this_node() { - // initial 2-of-3 session - let gml = generate_key(3, 1); - - // insert 1 node so that it becames 2-of-4 session - // master node is the node we are adding => - // 1) add session is delegated to one of old nodes - // 2) key share is pushed to new node - // 3) delegated session is returned back to added node - let add = vec![Random.generate().unwrap()]; - let master = add[0].public().clone(); - let ml = - MessageLoop::with_gml::(gml, master, Some(add), None, None).run_at(master); - - // try to recover secret for every possible combination of nodes && check that secret is the same - ml.check_secret_is_preserved(ml.sessions.keys()); - } - - #[test] - fn node_moved_using_servers_set_change() { - // initial 2-of-3 session - let gml = generate_key(3, 1); - - // remove 1 node && insert 1 node so that one share is moved - let master = gml.0.node(0); - let remove: BTreeSet<_> = ::std::iter::once(gml.0.node(1)).collect(); - let add = vec![Random.generate().unwrap()]; - let ml = - MessageLoop::with_gml::(gml, master, Some(add), Some(remove.clone()), None) - .run_at(master); - - // check that secret is still the same as before moving the share - ml.check_secret_is_preserved(ml.sessions.keys().filter(|k| !remove.contains(k))); - - // check that all removed nodes do not own key share - assert!(ml.sessions.keys().filter(|k| remove.contains(k)).all(|k| ml - .ml - .key_storage_of(k) - .get(&SessionId::default()) - .unwrap() - .is_none())); - } - - #[test] - fn node_removed_using_servers_set_change() { - // initial 2-of-3 session - let gml = generate_key(3, 1); - - // remove 1 node so that session becames 2-of-2 - let remove: BTreeSet<_> = ::std::iter::once(gml.0.node(0)).collect(); - let master = gml.0.node(0); - let ml = MessageLoop::with_gml::(gml, master, None, Some(remove.clone()), None) - .run_at(master); - - // try to recover secret for every possible combination of nodes && check that secret is the same - ml.check_secret_is_preserved(ml.sessions.keys().filter(|k| !remove.contains(k))); - - // check that all removed nodes do not own key share - assert!(ml.sessions.keys().filter(|k| remove.contains(k)).all(|k| ml - .ml - .key_storage_of(k) - .get(&SessionId::default()) - .unwrap() - .is_none())); - } - - #[test] - fn isolated_node_removed_using_servers_set_change() { - // initial 2-of-3 session - let gml = generate_key(3, 1); - - // remove 1 node so that session becames 2-of-2 - let isolate: BTreeSet<_> = ::std::iter::once(gml.0.node(1)).collect(); - let master = gml.0.node(0); - let ml = MessageLoop::with_gml::(gml, master, None, None, Some(isolate.clone())) - .run_at(master); - - // try to recover secret for every possible combination of nodes && check that secret is the same - ml.check_secret_is_preserved(ml.sessions.keys().filter(|k| !isolate.contains(k))); - - // check that all isolated nodes still OWN key share - 
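// A minimal, self-contained sketch of the delivery loop used by the test
// MessageLoop above: a message the session rejects as arriving too early is
// re-queued and retried once later traffic has been processed. The types and
// names here (pump, DeliveryError, u32 messages) are simplified stand-ins for
// illustration only, not the real cluster/session types.
use std::collections::VecDeque;

#[derive(Debug)]
enum DeliveryError { TooEarly, Fatal }

fn pump<F>(mut pending: VecDeque<u32>, mut deliver: F) -> Result<(), DeliveryError>
where
    F: FnMut(u32) -> Result<(), DeliveryError>,
{
    while let Some(msg) = pending.pop_front() {
        match deliver(msg) {
            Ok(()) => (),
            // the session is not ready for this message yet: retry it later
            Err(DeliveryError::TooEarly) => pending.push_back(msg),
            Err(err) => return Err(err),
        }
    }
    Ok(())
}

fn main() {
    // message 7 only succeeds once message 1 has been processed
    let mut seen_one = false;
    let result = pump([7, 1].into_iter().collect(), |msg| match msg {
        1 => { seen_one = true; Ok(()) }
        7 if seen_one => Ok(()),
        7 => Err(DeliveryError::TooEarly),
        _ => Err(DeliveryError::Fatal),
    });
    assert!(result.is_ok());
}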
assert!(ml - .sessions - .keys() - .filter(|k| isolate.contains(k)) - .all(|k| ml - .ml - .key_storage_of(k) - .get(&SessionId::default()) - .unwrap() - .is_some())); - } - - #[test] - fn having_less_than_required_nodes_after_change_does_not_fail_change_session() { - // initial 2-of-3 session - let gml = generate_key(3, 1); - - // remove 2 nodes so that key becomes irrecoverable (make sure the session is completed - // even though key is irrecoverable) - let remove: BTreeSet<_> = gml.0.nodes().into_iter().skip(1).take(2).collect(); - let master = gml.0.node(0); - let ml = MessageLoop::with_gml::(gml, master, None, Some(remove.clone()), None) - .run_at(master); - - // check that all removed nodes do not own key share - assert!(ml.sessions.keys().filter(|k| remove.contains(k)).all(|k| ml - .ml - .key_storage_of(k) - .get(&SessionId::default()) - .unwrap() - .is_none())); - - // and now let's add new node (make sure the session is completed, even though key is still irrecoverable) - // isolated here are not actually isolated, but removed on the previous step - let add = vec![Random.generate().unwrap()]; - let master = add[0].public().clone(); - let ml = ml - .and_then::(master, Some(add.clone()), None, Some(remove)) - .run_at(master); - - // check that all added nodes do not own key share (there's not enough nodes to run share add session) - assert!(ml - .sessions - .keys() - .filter(|k| add.iter().any(|n| n.public() == *k)) - .all(|k| ml - .ml - .key_storage_of(k) - .get(&SessionId::default()) - .unwrap() - .is_none())); - } - - #[test] - fn removing_node_from_cluster_of_2_works() { - // initial 2-of-2 session - let gml = generate_key(2, 1); - - // make 2nd node isolated so that key becomes irrecoverable (make sure the session is completed, - // even though key is irrecoverable) - let isolate: BTreeSet<_> = gml.0.nodes().into_iter().skip(1).take(1).collect(); - let master = gml.0.node(0); - MessageLoop::with_gml::(gml, master, None, None, Some(isolate)).run_at(master); - } - - #[test] - fn adding_node_that_has_lost_its_database_works() { - // initial 2-of-2 session - let gml = generate_key(2, 1); - - // insert 1 node so that it becames 2-of-3 session - let add = vec![Random.generate().unwrap()]; - let master = gml.0.node(0); - let ml = MessageLoop::with_gml::(gml, master, Some(add.clone()), None, None) - .run_at(master); - - // now let's say new node has lost its db and we're trying to join it again - ml.ml.key_storage_of(add[0].public()).clear().unwrap(); - - // this time old nodes have version, where new node is mentioned, but it doesn't report it when negotiating - let ml = ml - .and_then::(master, Some(add), None, None) - .run_at(master); - - // try to recover secret for every possible combination of nodes && check that secret is the same - ml.check_secret_is_preserved(ml.sessions.keys()); - } -} diff --git a/secret-store/src/key_server_cluster/admin_sessions/sessions_queue.rs b/secret-store/src/key_server_cluster/admin_sessions/sessions_queue.rs deleted file mode 100644 index 8ebe8f6e8..000000000 --- a/secret-store/src/key_server_cluster/admin_sessions/sessions_queue.rs +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. - -// OpenEthereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// OpenEthereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with OpenEthereum. If not, see . - -use key_server_cluster::{Error, KeyStorage, SessionId}; -use std::{ - collections::{BTreeSet, VecDeque}, - sync::Arc, -}; - -/// Queue of share change sessions. -pub struct SessionsQueue { - /// Sessions, known on this node. - known_sessions: VecDeque, - /// Unknown sessions. - unknown_sessions: VecDeque, -} - -impl SessionsQueue { - /// Create new sessions queue. - pub fn new(key_storage: &Arc, unknown_sessions: BTreeSet) -> Self { - // TODO [Opt]: - // 1) known sessions - change to iter - // 2) unknown sesions - request chunk-by-chunk - SessionsQueue { - known_sessions: key_storage.iter().map(|(k, _)| k).collect(), - unknown_sessions: unknown_sessions.into_iter().collect(), - } - } -} - -impl Iterator for SessionsQueue { - type Item = Result; - - fn next(&mut self) -> Option { - if let Some(known_session) = self.known_sessions.pop_front() { - return Some(Ok(known_session)); - } - - if let Some(unknown_session) = self.unknown_sessions.pop_front() { - return Some(Ok(unknown_session)); - } - - None - } -} diff --git a/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs b/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs deleted file mode 100644 index df60e2f39..000000000 --- a/secret-store/src/key_server_cluster/admin_sessions/share_add_session.rs +++ /dev/null @@ -1,1463 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. - -// OpenEthereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// OpenEthereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with OpenEthereum. If not, see . - -use ethereum_types::{Address, H256}; -use ethkey::{Public, Secret, Signature}; -use key_server_cluster::{ - admin_sessions::ShareChangeSessionMeta, - cluster::Cluster, - cluster_sessions::ClusterSession, - jobs::{ - consensus_session::{ConsensusSession, ConsensusSessionParams, ConsensusSessionState}, - dummy_job::{DummyJob, DummyJobTransport}, - job_session::JobTransport, - servers_set_change_access_job::{ServersSetChangeAccessJob, ServersSetChangeAccessRequest}, - }, - math, - message::{ - CommonKeyData, ConfirmConsensusInitialization, ConsensusMessageOfShareAdd, - InitializeConsensusSessionOfShareAdd, KeyShareCommon, Message, NewKeysDissemination, - ShareAddConsensusMessage, ShareAddError, ShareAddMessage, - }, - DocumentKeyShare, DocumentKeyShareVersion, Error, KeyStorage, NodeId, SessionId, -}; -use parking_lot::{Condvar, Mutex}; -use std::{ - collections::{BTreeMap, BTreeSet}, - sync::Arc, -}; - -/// Share addition session transport. -pub trait SessionTransport: - Clone + JobTransport -{ - /// Get all connected nodes. 
Since ShareAdd session requires all cluster nodes to be connected, this set equals to all known cluster nodes set. - fn nodes(&self) -> BTreeSet; - /// Send message to given node. - fn send(&self, node: &NodeId, message: ShareAddMessage) -> Result<(), Error>; - /// Set data for master node (sent to slave nodes in consensus session initialization message). - fn set_master_data( - &mut self, - consensus_group: BTreeSet, - version_holders: BTreeSet, - id_numbers: BTreeMap>, - ); -} - -/// Share addition session. -/// Based on "Efficient Multi-Party Digital Signature using Adaptive Secret Sharing for Low-Power Devices in Wireless Networks" paper: -/// http://www.wu.ece.ufl.edu/mypapers/msig.pdf -/// Brief overview: -/// 1) initialization: master node (which has received request for shares addition the message) asks all other nodes to support addition -/// 2) key refreshing distribution (KRD): node generates new random polynom && sends required data to all other nodes -/// 3) key refreshing verification (KRV): node verifies received data -/// 4) node updates its own key share using generated (&& received) data -pub struct SessionImpl { - /// Session core. - core: SessionCore, - /// Session data. - data: Mutex>, -} - -/// Immutable session data. -struct SessionCore { - /// Session metadata. - pub meta: ShareChangeSessionMeta, - /// Session-level nonce. - pub nonce: u64, - /// Original key share (for old nodes only). - pub key_share: Option, - /// Session transport to communicate to other cluster nodes. - pub transport: T, - /// Key storage. - pub key_storage: Arc, - /// Administrator public key. - pub admin_public: Option, - /// SessionImpl completion condvar. - pub completed: Condvar, -} - -/// Share add consensus session type. -type ShareAddChangeConsensusSession = - ConsensusSession; - -/// Mutable session data. -struct SessionData { - /// Session state. - pub state: SessionState, - /// Key version to use for decryption. - pub version: Option, - /// Consensus session. - pub consensus_session: Option>, - /// Holders of key version. - pub version_holders: Option>, - /// NewKeyShare (for nodes being added). - pub new_key_share: Option, - /// Nodes id numbers. - pub id_numbers: Option>>, - /// Secret subshares received from nodes. - pub secret_subshares: Option>>, - /// Share add change result. - pub result: Option>, -} - -/// New key share. -struct NewKeyShare { - /// NewKeyShare: threshold. - pub threshold: usize, - /// NewKeyShare: author. - pub author: Address, - /// NewKeyShare: joint public. - pub joint_public: Public, - /// NewKeyShare: Common (shared) encryption point. - pub common_point: Option, - /// NewKeyShare: Encrypted point. - pub encrypted_point: Option, -} - -/// Session state. -#[derive(Debug, PartialEq)] -enum SessionState { - /// State when consensus is establishing. - ConsensusEstablishing, - /// Waiting for keys dissemination. - WaitingForKeysDissemination, - /// Session is completed. - Finished, -} - -/// SessionImpl creation parameters -pub struct SessionParams { - /// Session metadata. - pub meta: ShareChangeSessionMeta, - /// Session transport. - pub transport: T, - /// Key storage. - pub key_storage: Arc, - /// Administrator public key. - pub admin_public: Option, - /// Session nonce. - pub nonce: u64, -} - -/// Isolated ShareAdd session transport. -#[derive(Clone)] -pub struct IsolatedSessionTransport { - /// Key id. - session: SessionId, - /// Key version. - version: Option, - /// Session-level nonce. - nonce: u64, - /// Holders of key version. 
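// A minimal sketch of the session state progression described in the ShareAdd
// overview above: consensus establishment, key refreshing dissemination, then
// completion once every expected subshare has arrived. The trigger names are
// illustrative assumptions, not the real message types of this module.
#[derive(Debug, PartialEq)]
enum State { ConsensusEstablishing, WaitingForKeysDissemination, Finished }

enum Trigger { ConsensusEstablished, AllSubsharesReceived }

fn advance(state: State, trigger: Trigger) -> State {
    match (state, trigger) {
        (State::ConsensusEstablishing, Trigger::ConsensusEstablished) => {
            State::WaitingForKeysDissemination
        }
        (State::WaitingForKeysDissemination, Trigger::AllSubsharesReceived) => State::Finished,
        // any other combination leaves the session where it was
        (state, _) => state,
    }
}

fn main() {
    let s = advance(State::ConsensusEstablishing, Trigger::ConsensusEstablished);
    let s = advance(s, Trigger::AllSubsharesReceived);
    assert_eq!(s, State::Finished);
}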
- version_holders: Option>, - /// Consensus group. - consensus_group: Option>, - /// Id numbers of all new nodes. - id_numbers: Option>>, - /// Cluster. - cluster: Arc, -} - -impl SessionImpl -where - T: SessionTransport, -{ - /// Create new share addition session. - pub fn new(params: SessionParams) -> Result { - let key_share = params.key_storage.get(¶ms.meta.id)?; - - Ok(SessionImpl { - core: SessionCore { - meta: params.meta, - nonce: params.nonce, - key_share: key_share, - transport: params.transport, - key_storage: params.key_storage, - admin_public: params.admin_public, - completed: Condvar::new(), - }, - data: Mutex::new(SessionData { - state: SessionState::ConsensusEstablishing, - version: None, - consensus_session: None, - version_holders: None, - new_key_share: None, - id_numbers: None, - secret_subshares: None, - result: None, - }), - }) - } - - /// Set pre-established consensus data. - pub fn set_consensus_output( - &self, - version: &H256, - consensus_group: BTreeSet, - version_holders: BTreeSet, - mut new_nodes_map: BTreeMap>, - ) -> Result<(), Error> { - let mut data = self.data.lock(); - - // check state - if data.state != SessionState::ConsensusEstablishing - || data.consensus_session.is_some() - || data.id_numbers.is_some() - || data.secret_subshares.is_some() - { - return Err(Error::InvalidStateForRequest); - } - - // key share version is required on ShareAdd master node - if let Some(key_share) = self.core.key_share.as_ref() { - if let Ok(key_version) = key_share.version(version) { - let non_isolated_nodes = self.core.transport.nodes(); - for (node, id_number) in &key_version.id_numbers { - { - let external_id_number = new_nodes_map.get(node); - match external_id_number { - Some(&Some(ref external_id_number)) => { - if !version_holders.contains(node) { - // possible when joining version holder, that has lost its database - // and haven't reported version ownership - continue; - } - if external_id_number == id_number { - continue; - } - - return Err(Error::ConsensusUnreachable); - } - Some(&None) => (), - None => { - if non_isolated_nodes.contains(node) { - return Err(Error::ConsensusUnreachable); - } - continue; - } - } - } - - new_nodes_map.insert(node.clone(), Some(id_number.clone())); - } - - // check that all id_numbers are filled - if new_nodes_map.values().any(Option::is_none) { - return Err(Error::ConsensusUnreachable); - } - } - } - - // check passed consensus data - Self::check_nodes_map( - &self.core, - version, - &consensus_group, - &version_holders, - &new_nodes_map, - )?; - - // update data - data.version = Some(version.clone()); - data.id_numbers = Some(new_nodes_map); - data.secret_subshares = Some(consensus_group.into_iter().map(|n| (n, None)).collect()); - data.version_holders = Some(version_holders); - - Ok(()) - } - - /// Initialize share add session on master node. 
- pub fn initialize( - &self, - version: Option, - new_nodes_set: Option>, - old_set_signature: Option, - new_set_signature: Option, - ) -> Result<(), Error> { - debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id); - - let mut data = self.data.lock(); - - // check state - if data.state != SessionState::ConsensusEstablishing || data.consensus_session.is_some() { - return Err(Error::InvalidStateForRequest); - } - - // if consensus is pre-established => start sending ShareAdd-specific messages - let is_consensus_pre_established = data.id_numbers.is_some(); - if is_consensus_pre_established { - return Self::on_consensus_established(&self.core, &mut *data); - } - - // else => prepare to start consensus session - // require all initialization params for consensus session - let version = version.ok_or(Error::InvalidMessage)?; - let old_set_signature = old_set_signature.ok_or(Error::InvalidMessage)?; - let new_set_signature = new_set_signature.ok_or(Error::InvalidMessage)?; - let new_nodes_set = new_nodes_set.ok_or(Error::InvalidMessage)?; - let admin_public = self - .core - .admin_public - .as_ref() - .cloned() - .ok_or(Error::ConsensusUnreachable)?; - - // key share version is required on ShareAdd master node - let key_share = self - .core - .key_share - .as_ref() - .ok_or_else(|| Error::ServerKeyIsNotFound)?; - let key_version = key_share.version(&version)?; - - // old nodes set is all non-isolated owners of version holders - let non_isolated_nodes = self.core.transport.nodes(); - let old_nodes_set: BTreeSet<_> = key_version - .id_numbers - .keys() - .filter(|n| non_isolated_nodes.contains(n)) - .cloned() - .collect(); - - // new nodes map contains previous id_numbers for old nodes && random number for new nodes - let mut new_nodes_map = BTreeMap::new(); - for new_node in new_nodes_set - .into_iter() - .filter(|n| non_isolated_nodes.contains(n)) - { - new_nodes_map.insert( - new_node, - match key_version.id_numbers.get(&new_node) { - Some(old_id_number) => Some(old_id_number.clone()), - None => Some(math::generate_random_scalar()?), - }, - ); - } - - // let's select consensus group - let consensus_group: BTreeSet<_> = ::std::iter::once(self.core.meta.self_node_id.clone()) - .chain( - old_nodes_set - .iter() - .filter(|n| { - **n != self.core.meta.self_node_id && non_isolated_nodes.contains(*n) - }) - .take(key_share.threshold) - .cloned(), - ) - .collect(); - let version_holders = &old_nodes_set; - - // now check nodes map - Self::check_nodes_map( - &self.core, - &version, - &consensus_group, - version_holders, - &new_nodes_map, - )?; - - // prepare consensus session transport - let mut consensus_transport = self.core.transport.clone(); - consensus_transport.set_master_data( - consensus_group.clone(), - version_holders.clone(), - new_nodes_map.clone(), - ); - - // create && initialize consensus session - let mut consensus_session = ConsensusSession::new(ConsensusSessionParams { - meta: self - .core - .meta - .clone() - .into_consensus_meta(new_nodes_map.len())?, - consensus_executor: ServersSetChangeAccessJob::new_on_master( - admin_public, - old_nodes_set.clone(), - new_nodes_map.keys().cloned().collect(), - old_set_signature, - new_set_signature, - ), - consensus_transport: consensus_transport, - })?; - - consensus_session.initialize(new_nodes_map.keys().cloned().collect())?; - - // update data - data.version = Some(version); - data.consensus_session = Some(consensus_session); - data.id_numbers = Some(new_nodes_map); - data.secret_subshares = 
Some(consensus_group.into_iter().map(|n| (n, None)).collect()); - data.version_holders = Some(version_holders.clone()); - - Ok(()) - } - - /// Process single message. - pub fn process_message(&self, sender: &NodeId, message: &ShareAddMessage) -> Result<(), Error> { - if self.core.nonce != message.session_nonce() { - return Err(Error::ReplayProtection); - } - - match message { - &ShareAddMessage::ShareAddConsensusMessage(ref message) => { - self.on_consensus_message(sender, message) - } - &ShareAddMessage::KeyShareCommon(ref message) => { - self.on_common_key_share_data(sender, message) - } - &ShareAddMessage::NewKeysDissemination(ref message) => { - self.on_new_keys_dissemination(sender, message) - } - &ShareAddMessage::ShareAddError(ref message) => { - self.on_session_error(sender, message.error.clone()); - Ok(()) - } - } - } - - /// When consensus-related message is received. - pub fn on_consensus_message( - &self, - sender: &NodeId, - message: &ShareAddConsensusMessage, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); - - // start slave consensus session if needed - let mut data = self.data.lock(); - match &message.message { - &ConsensusMessageOfShareAdd::InitializeConsensusSession(ref message) - if data.consensus_session.is_none() && sender == &self.core.meta.master_node_id => - { - let admin_public = self - .core - .admin_public - .as_ref() - .cloned() - .ok_or(Error::ConsensusUnreachable)?; - data.consensus_session = Some(ConsensusSession::new(ConsensusSessionParams { - meta: self - .core - .meta - .clone() - .into_consensus_meta(message.new_nodes_map.len())?, - consensus_executor: ServersSetChangeAccessJob::new_on_slave(admin_public), - consensus_transport: self.core.transport.clone(), - })?); - } - _ => (), - }; - - // process consensus message - let ( - is_establishing_consensus, - is_consensus_established, - version, - new_nodes_map, - consensus_group, - version_holders, - ) = { - let consensus_session = data - .consensus_session - .as_mut() - .ok_or(Error::InvalidMessage)?; - let is_establishing_consensus = - consensus_session.state() == ConsensusSessionState::EstablishingConsensus; - - let (version, new_nodes_map, consensus_group, version_holders) = match &message.message - { - &ConsensusMessageOfShareAdd::InitializeConsensusSession(ref message) => { - consensus_session.on_consensus_partial_request( - sender, - ServersSetChangeAccessRequest::from(message), - )?; - - let version = message.version.clone().into(); - let consensus_group = message - .consensus_group - .iter() - .cloned() - .map(Into::into) - .collect(); - let version_holders = message - .version_holders - .iter() - .cloned() - .map(Into::into) - .collect(); - let new_nodes_map: BTreeMap<_, _> = message - .new_nodes_map - .iter() - .map(|(n, nn)| (n.clone().into(), Some(nn.clone().into()))) - .collect(); - - // check that all id_numbers are filled - if new_nodes_map.values().any(Option::is_none) { - return Err(Error::ConsensusUnreachable); - } - - // check old set of nodes - Self::check_nodes_map( - &self.core, - &version, - &consensus_group, - &version_holders, - &new_nodes_map, - )?; - - ( - Some(version), - Some(new_nodes_map), - Some(consensus_group), - Some(version_holders), - ) - } - &ConsensusMessageOfShareAdd::ConfirmConsensusInitialization(ref message) => { - consensus_session - .on_consensus_partial_response(sender, message.is_confirmed)?; - (None, None, None, None) - } - }; - - ( - is_establishing_consensus, - 
consensus_session.state() == ConsensusSessionState::ConsensusEstablished, - version, - new_nodes_map, - consensus_group, - version_holders, - ) - }; - - // update data - if let Some(version) = version { - data.version = Some(version); - } - if let Some(new_nodes_map) = new_nodes_map { - data.id_numbers = Some(new_nodes_map); - } - if let Some(consensus_group) = consensus_group { - data.secret_subshares = Some(consensus_group.into_iter().map(|n| (n, None)).collect()); - } - if let Some(version_holders) = version_holders { - data.version_holders = Some(version_holders); - } - - // if consensus is stablished, proceed - if !is_establishing_consensus - || !is_consensus_established - || self.core.meta.self_node_id != self.core.meta.master_node_id - { - return Ok(()); - } - - Self::on_consensus_established(&self.core, &mut *data) - } - - /// When common key share data is received by new node. - pub fn on_common_key_share_data( - &self, - sender: &NodeId, - message: &KeyShareCommon, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); - - // only master can send this message - if sender != &self.core.meta.master_node_id { - return Err(Error::InvalidMessage); - } - - let mut data = self.data.lock(); - - // check state - if data.state != SessionState::ConsensusEstablishing || data.id_numbers.is_none() { - return Ok(()); - } - - // we only expect this message once - if data.new_key_share.is_some() { - return Err(Error::InvalidStateForRequest); - } - - // check if we actually waiting for this message - { - let version = data.version.as_ref().ok_or(Error::InvalidStateForRequest)?; - let key_version = self - .core - .key_share - .as_ref() - .and_then(|ks| ks.version(version).ok()); - if key_version.is_some() { - return Ok(()); - } - } - - // update data - data.state = SessionState::WaitingForKeysDissemination; - data.new_key_share = Some(NewKeyShare { - threshold: message.key_common.threshold, - author: message.key_common.author.clone().into(), - joint_public: message.key_common.public.clone().into(), - common_point: message.common_point.clone().map(Into::into), - encrypted_point: message.encrypted_point.clone().map(Into::into), - }); - - let id_numbers = data.id_numbers.as_mut() - .expect("common key share data is expected after initialization; id_numbers are filled during initialization; qed"); - for (node, id_number) in &message.id_numbers { - let id_number: Secret = id_number.clone().into(); - { - let local_id_number = id_numbers.get(&node.clone().into()); - match local_id_number { - Some(&Some(ref local_id_number)) => { - if *local_id_number == id_number { - continue; - } - - return Err(Error::ConsensusUnreachable); - } - Some(&None) => (), - None => continue, // can happen for isolated nodes - } - } - - id_numbers.insert(node.clone().into(), Some(id_number)); - } - - Ok(()) - } - - /// When keys dissemination message is received. 
- pub fn on_new_keys_dissemination( - &self, - sender: &NodeId, - message: &NewKeysDissemination, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(sender != &self.core.meta.self_node_id); - - let mut data = self.data.lock(); - - // check state - if data.state == SessionState::ConsensusEstablishing && data.secret_subshares.is_some() { - data.state = SessionState::WaitingForKeysDissemination; - } else if data.state != SessionState::WaitingForKeysDissemination { - return Err(Error::InvalidStateForRequest); - } - - // update data - let explanation = "secret_subshares is filled during initialization; keys are disseminated after initialization; qed"; - { - match data - .secret_subshares - .as_ref() - .expect(explanation) - .get(sender) - { - None => return Err(Error::InvalidMessage), - Some(&Some(_)) => return Err(Error::InvalidMessage), - Some(&None) => (), - }; - - let secret_subshare = Self::compute_secret_subshare( - &self.core, - &mut *data, - sender, - &message.secret_subshare.clone().into(), - )?; - *data - .secret_subshares - .as_mut() - .expect(explanation) - .get_mut(sender) - .expect("checked couple of lines above; qed") = Some(secret_subshare); - } - - // if we have received subshare from master node, it means that we should start dissemination - if sender == &self.core.meta.master_node_id { - Self::on_consensus_established(&self.core, &mut *data)?; - } - - // check if shares from all nodes are received - if data - .secret_subshares - .as_ref() - .expect(explanation) - .values() - .any(|v| v.is_none()) - { - return Ok(()); - } - - // TODO [Trust]: find a way to verificate keys - Self::complete_session(&self.core, &mut *data) - } - - /// Check nodes map. - fn check_nodes_map( - core: &SessionCore, - version: &H256, - consensus_group: &BTreeSet, - version_holders: &BTreeSet, - new_nodes_map: &BTreeMap>, - ) -> Result<(), Error> { - // check if this node has given version - let has_this_version = match core.key_share.as_ref() { - Some(key_share) => key_share.version(version).is_ok(), - None => false, - }; - - // check && update passed data - match has_this_version { - true => { - // check if version exists - let explanation = - "has_this_version is true; it is true if we have given version of the key; qed"; - let key_share = core.key_share.as_ref().expect(explanation); - let key_version = key_share.version(version).expect(explanation); - - // there must be exactly thresold + 1 nodes in consensus group - if consensus_group.len() != key_share.threshold + 1 { - return Err(Error::ConsensusUnreachable); - } - - // every non-isolated node must be a part of new_nodes_set - let non_isolated_nodes = core.transport.nodes(); - if key_version - .id_numbers - .keys() - .any(|n| non_isolated_nodes.contains(n) && !new_nodes_map.contains_key(n)) - { - return Err(Error::ConsensusUnreachable); - } - - // there must be at least one new node in new_nodes_map - if key_version - .id_numbers - .keys() - .filter(|n| non_isolated_nodes.contains(n) && version_holders.contains(n)) - .count() - >= new_nodes_map.len() - { - return Err(Error::ConsensusUnreachable); - } - } - false => { - // if we do not have a share, we should not be a part of consenus group - // but we must be on new nodes set, since this is a ShareAdd session - if consensus_group.contains(&core.meta.self_node_id) - || !new_nodes_map.contains_key(&core.meta.self_node_id) - { - return Err(Error::ConsensusUnreachable); - } - } - } - - // master node must always be a part of consensus group - if 
!consensus_group.contains(&core.meta.master_node_id) { - return Err(Error::ConsensusUnreachable); - } - - // master node must always be a part of new_nodes_map - if !new_nodes_map.contains_key(&core.meta.master_node_id) { - return Err(Error::ConsensusUnreachable); - } - - Ok(()) - } - - /// Start sending ShareAdd-specific messages, when consensus is established. - fn on_consensus_established( - core: &SessionCore, - data: &mut SessionData, - ) -> Result<(), Error> { - // update state - data.state = SessionState::WaitingForKeysDissemination; - - // if we're not a part of consensus group, wait for secret subshares - let explanation = - "secret_subshares is a result of consensus job; consensus is established; qed"; - let is_consensus_group_node = data - .secret_subshares - .as_ref() - .expect(explanation) - .contains_key(&core.meta.self_node_id); - if !is_consensus_group_node { - return Ok(()); - } - - // else if master => send shared data to every new node - if core.meta.self_node_id == core.meta.master_node_id { - Self::disseminate_common_share_data(core, data)?; - } - - // ...and then disseminate keys - Self::disseminate_keys(core, data)?; - - // ..and check if session could be completed - if data - .secret_subshares - .as_ref() - .expect(explanation) - .values() - .any(|v| v.is_none()) - { - return Ok(()); - } - - // TODO [Trust]: find a way to verificate keys - Self::complete_session(core, data) - } - - /// Send common share data to evey new node. - fn disseminate_common_share_data( - core: &SessionCore, - data: &SessionData, - ) -> Result<(), Error> { - let explanation = "disseminate_common_share_data is only called on master node; master node has specified version of the key; qed"; - let old_key_share = core.key_share.as_ref().expect(explanation); - let old_key_version = old_key_share - .version(data.version.as_ref().expect(explanation)) - .expect(explanation); - let version_holders = data.version_holders.as_ref() - .expect("disseminate_common_share_data is only called on master node; version holders is created during initialization on master node; qed"); - let consensus_group = data.secret_subshares.as_ref() - .expect("disseminate_common_share_data is only called on master node; consensus group is created during initialization on master node; qed"); - let nodes = data.id_numbers.as_ref() - .expect("nodes are filled during consensus establishing; common share data sent after consensus is established; qed") - .keys() - .filter(|n| !consensus_group.contains_key(n)); - for new_node in nodes { - core.transport.send( - new_node, - ShareAddMessage::KeyShareCommon(KeyShareCommon { - session: core.meta.id.clone().into(), - session_nonce: core.nonce, - key_common: CommonKeyData { - threshold: old_key_share.threshold, - author: old_key_share.author.into(), - public: old_key_share.public.into(), - }, - common_point: old_key_share.common_point.clone().map(Into::into), - encrypted_point: old_key_share.encrypted_point.clone().map(Into::into), - id_numbers: old_key_version - .id_numbers - .iter() - .filter(|&(k, _)| version_holders.contains(k)) - .map(|(k, v)| (k.clone().into(), v.clone().into())) - .collect(), - }), - )?; - } - - Ok(()) - } - - /// Disseminate key refreshing data. 
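// A minimal sketch restating two of the invariants enforced by check_nodes_map
// above over plain integer node ids: the refresh group must contain exactly
// threshold + 1 members, and the master node must both participate and receive
// a share. The error enum and helper name are illustrative, not part of the
// secret-store API.
use std::collections::{BTreeMap, BTreeSet};

#[derive(Debug, PartialEq)]
enum CheckError { ConsensusUnreachable }

fn check_plan(
    threshold: usize,
    master: u64,
    consensus_group: &BTreeSet<u64>,
    new_nodes_map: &BTreeMap<u64, Option<u64>>,
) -> Result<(), CheckError> {
    // exactly threshold + 1 nodes may take part in the key-refresh group
    if consensus_group.len() != threshold + 1 {
        return Err(CheckError::ConsensusUnreachable);
    }
    // the master node always participates and always receives a share
    if !consensus_group.contains(&master) || !new_nodes_map.contains_key(&master) {
        return Err(CheckError::ConsensusUnreachable);
    }
    Ok(())
}

fn main() {
    let group: BTreeSet<u64> = [1, 2].iter().copied().collect();
    let nodes: BTreeMap<u64, Option<u64>> =
        [(1, Some(10)), (2, Some(20)), (3, Some(30))].iter().copied().collect();
    assert!(check_plan(1, 1, &group, &nodes).is_ok());
    assert_eq!(check_plan(2, 1, &group, &nodes), Err(CheckError::ConsensusUnreachable));
}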
- fn disseminate_keys(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { - // generate random polynom with secret share as absolute term - let explanation = "disseminate_keys is only called on consensus group nodes; consensus group nodes have specified version of the key; qed"; - let key_share = core.key_share.as_ref().expect(explanation); - let key_version = key_share - .version(data.version.as_ref().expect(explanation)) - .expect(explanation); - let mut secret_share_polynom = math::generate_random_polynom(key_share.threshold)?; - secret_share_polynom[0] = key_version.secret_share.clone(); - - // calculate secret subshare for every new node (including this node) - let explanation = "disseminate_keys is called after initialization has completed; this field is filled during initialization; qed"; - for (new_node, new_node_number) in data.id_numbers.as_ref().expect(explanation).iter() { - let new_node_number = new_node_number.as_ref().ok_or(Error::InvalidMessage)?; - let secret_subshare = math::compute_polynom(&secret_share_polynom, new_node_number)?; - if new_node != &core.meta.self_node_id { - core.transport.send( - new_node, - ShareAddMessage::NewKeysDissemination(NewKeysDissemination { - session: core.meta.id.clone().into(), - session_nonce: core.nonce, - secret_subshare: secret_subshare.into(), - }), - )?; - } else { - let secret_subshare = - Self::compute_secret_subshare(core, data, new_node, &secret_subshare)?; - *data.secret_subshares.as_mut().expect(explanation) - .get_mut(&core.meta.self_node_id) - .expect("disseminate_keys is only calle on consensus group nodes; there's entry for every consensus node in secret_subshares; qed") - = Some(secret_subshare); - } - } - - Ok(()) - } - - /// Compute secret subshare from passed secret value. - fn compute_secret_subshare( - core: &SessionCore, - data: &SessionData, - sender: &NodeId, - secret_value: &Secret, - ) -> Result { - let explanation = "this field is a result of consensus job; compute_secret_subshare is called after consensus is established"; - let id_numbers = data.id_numbers.as_ref().expect(explanation); - let secret_subshares = data.secret_subshares.as_ref().expect(explanation); - let threshold = core - .key_share - .as_ref() - .map(|ks| ks.threshold) - .unwrap_or_else(|| { - data.new_key_share.as_ref() - .expect("computation occurs after receiving key share threshold if not having one already; qed") - .threshold - }); - - let explanation = "id_numbers are checked to have Some value for every consensus group node when consensus is establishe; qed"; - let sender_id_number = id_numbers[sender].as_ref().expect(explanation); - let other_id_numbers = secret_subshares - .keys() - .filter(|k| *k != sender) - .map(|n| id_numbers[n].as_ref().expect(explanation)); - math::compute_secret_subshare(threshold, secret_value, sender_id_number, other_id_numbers) - } - - /// Complete session. 
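// A minimal, self-contained sketch of the key-refresh arithmetic that
// disseminate_keys describes: each consensus-group node builds a degree-t
// polynomial with its current secret share as the absolute term and evaluates
// it at every node's id number; receivers then recombine the subshares,
// assuming the standard Lagrange-weighted recombination from the referenced
// scheme. Toy u64 arithmetic over a small prime field stands in for the
// secp256k1 operations of the real `math` module; all ids, shares and the
// modulus are hypothetical values chosen only for illustration.
const P: u64 = 2_147_483_647; // a small prime modulus (2^31 - 1)

fn add(a: u64, b: u64) -> u64 { ((a as u128 + b as u128) % P as u128) as u64 }
fn mul(a: u64, b: u64) -> u64 { ((a as u128 * b as u128) % P as u128) as u64 }
fn sub(a: u64, b: u64) -> u64 { add(a, P - b % P) }
fn pow(mut b: u64, mut e: u64) -> u64 {
    let mut r = 1;
    while e > 0 {
        if e & 1 == 1 { r = mul(r, b); }
        b = mul(b, b);
        e >>= 1;
    }
    r
}
fn inv(a: u64) -> u64 { pow(a, P - 2) } // Fermat's little theorem

// Evaluate a polynomial (lowest coefficient first) at x.
fn eval(poly: &[u64], x: u64) -> u64 {
    poly.iter().rev().fold(0, |acc, c| add(mul(acc, x), *c))
}

// Lagrange coefficient of `holder` at x = 0 over the given group of id numbers.
fn lagrange_at_zero(holder: u64, group: &[u64]) -> u64 {
    group.iter().filter(|id| **id != holder).fold(1, |acc, id| {
        mul(acc, mul(*id, inv(sub(*id, holder))))
    })
}

fn main() {
    // threshold t = 1: a 2-of-2 key, refreshed onto three nodes (2-of-3).
    let secret = 42;
    let f = [secret, 3]; // original sharing polynomial f(x) = secret + 3x
    let old_ids = [5u64, 11]; // id numbers of the t + 1 consensus-group nodes
    let old_shares: Vec<u64> = old_ids.iter().map(|id| eval(&f, *id)).collect();

    // key refreshing dissemination: each holder's polynomial has its own share
    // as the absolute term ("random" higher coefficients fixed for determinism).
    let new_ids = [5u64, 11, 23]; // the enlarged node set
    let g = [
        [old_shares[0], 7u64],
        [old_shares[1], 9u64],
    ];

    // every node combines the received subshares, weighting each sender by its
    // Lagrange coefficient at zero and summing the results.
    let refreshed: Vec<u64> = new_ids.iter().map(|id| {
        old_ids.iter().zip(&g).fold(0, |acc, (sender, poly)| {
            add(acc, mul(lagrange_at_zero(*sender, &old_ids), eval(poly, *id)))
        })
    }).collect();

    // any t + 1 refreshed shares still interpolate to the original secret.
    let recovered = new_ids.iter().zip(&refreshed).take(2).fold(0, |acc, (id, share)| {
        add(acc, mul(lagrange_at_zero(*id, &new_ids[..2]), *share))
    });
    assert_eq!(recovered, secret);
    println!("secret preserved: {}", recovered);
}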
- fn complete_session(core: &SessionCore, data: &mut SessionData) -> Result<(), Error> { - // if already completed, do nothing - if data.state == SessionState::Finished { - return Ok(()); - } - - // compose updated key share - let explanation = "this field is a result of consensus job; complete_session is called after consensus is established"; - let id_numbers = data.id_numbers.as_ref().expect(explanation); - let secret_subshares = data.secret_subshares.as_ref() - .expect("nodes are filled during consensus establishing; session is completed after consensus is established; qed"); - let secret_share = math::compute_secret_share(secret_subshares.values().map(|ss| { - ss.as_ref().expect( - "complete_session is only called when subshares from all nodes are received; qed", - ) - }))?; - - let refreshed_key_version = DocumentKeyShareVersion::new(id_numbers.clone().into_iter().map(|(k, v)| (k.clone(), - v.expect("id_numbers are checked to have Some value for every consensus group node when consensus is establishe; qed"))).collect(), - secret_share); - let mut refreshed_key_share = core.key_share.as_ref().cloned().unwrap_or_else(|| { - let new_key_share = data.new_key_share.as_ref() - .expect("this is new node; on new nodes this field is filled before KRD; session is completed after KRD; qed"); - DocumentKeyShare { - author: new_key_share.author.clone(), - threshold: new_key_share.threshold, - public: new_key_share.joint_public.clone(), - common_point: new_key_share.common_point.clone(), - encrypted_point: new_key_share.encrypted_point.clone(), - versions: Vec::new(), - } - }); - refreshed_key_share.versions.push(refreshed_key_version); - - // save encrypted data to the key storage - data.state = SessionState::Finished; - if core.key_share.is_some() { - core.key_storage - .update(core.meta.id.clone(), refreshed_key_share.clone())?; - } else { - core.key_storage - .insert(core.meta.id.clone(), refreshed_key_share.clone())?; - } - - // signal session completion - data.state = SessionState::Finished; - data.result = Some(Ok(())); - core.completed.notify_all(); - - Ok(()) - } -} - -impl ClusterSession for SessionImpl -where - T: SessionTransport, -{ - type Id = SessionId; - - fn type_name() -> &'static str { - "share add" - } - - fn id(&self) -> SessionId { - self.core.meta.id.clone() - } - - fn is_finished(&self) -> bool { - self.data.lock().state == SessionState::Finished - } - - fn on_session_timeout(&self) { - self.on_session_error(&self.core.meta.self_node_id, Error::NodeDisconnected) - } - - fn on_node_timeout(&self, node: &NodeId) { - self.on_session_error(node, Error::NodeDisconnected) - } - - fn on_session_error(&self, node: &NodeId, error: Error) { - // error in generation session is considered fatal - // => broadcast error if error occured on this node - if *node == self.core.meta.self_node_id { - for node in self.core.transport.nodes() { - // do not bother processing send error, as we already processing error - let _ = self.core.transport.send( - &node, - ShareAddMessage::ShareAddError(ShareAddError { - session: self.core.meta.id.clone().into(), - session_nonce: self.core.nonce, - error: error.clone().into(), - }), - ); - } - } - - let mut data = self.data.lock(); - - warn!(target: "secretstore_net", "{}: share add session failed: {} on {}", - self.core.meta.self_node_id, error, node); - - data.state = SessionState::Finished; - data.result = Some(Err(error)); - self.core.completed.notify_all(); - } - - fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { - match 
*message { - Message::ShareAdd(ref message) => self.process_message(sender, message), - _ => unreachable!("cluster checks message to be correct before passing; qed"), - } - } -} - -impl IsolatedSessionTransport { - pub fn new( - session_id: SessionId, - version: Option, - nonce: u64, - cluster: Arc, - ) -> Self { - IsolatedSessionTransport { - session: session_id, - version: version, - nonce: nonce, - cluster: cluster, - id_numbers: None, - version_holders: None, - consensus_group: None, - } - } -} - -impl JobTransport for IsolatedSessionTransport { - type PartialJobRequest = ServersSetChangeAccessRequest; - type PartialJobResponse = bool; - - fn send_partial_request( - &self, - node: &NodeId, - request: ServersSetChangeAccessRequest, - ) -> Result<(), Error> { - let explanation = "partial requests are sent from master node only; on master node this field is filled during creation; qed"; - let id_numbers = self.id_numbers.as_ref().expect(explanation); - - self.cluster.send(node, Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage(ShareAddConsensusMessage { - session: self.session.clone().into(), - session_nonce: self.nonce, - message: ConsensusMessageOfShareAdd::InitializeConsensusSession(InitializeConsensusSessionOfShareAdd { - version: self.version.clone().expect(explanation).into(), - version_holders: self.version_holders.as_ref().expect(explanation).iter().cloned().map(Into::into).collect(), - consensus_group: self.consensus_group.as_ref().expect(explanation).iter().cloned().map(Into::into).collect(), - old_nodes_set: request.old_servers_set.into_iter().map(Into::into).collect(), - new_nodes_map: request.new_servers_set.into_iter() - .filter_map(|n| id_numbers.get(&n) - .map(|id| (n.into(), id.clone() - .expect("partial requests are sent from master node only after consensus is established; - on master id_numbers are initialized with Some id_number for every consensus group node; qed").into()))) - .collect(), - old_set_signature: request.old_set_signature.into(), - new_set_signature: request.new_set_signature.into(), - }), - }))) - } - - fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> { - self.cluster.send( - node, - Message::ShareAdd(ShareAddMessage::ShareAddConsensusMessage( - ShareAddConsensusMessage { - session: self.session.clone().into(), - session_nonce: self.nonce, - message: ConsensusMessageOfShareAdd::ConfirmConsensusInitialization( - ConfirmConsensusInitialization { - is_confirmed: response, - }, - ), - }, - )), - ) - } -} - -impl SessionTransport for IsolatedSessionTransport { - fn nodes(&self) -> BTreeSet { - self.cluster.nodes() - } - - fn set_master_data( - &mut self, - consensus_group: BTreeSet, - version_holders: BTreeSet, - id_numbers: BTreeMap>, - ) { - self.version_holders = Some(version_holders); - self.consensus_group = Some(consensus_group); - self.id_numbers = Some(id_numbers); - } - - fn send(&self, node: &NodeId, message: ShareAddMessage) -> Result<(), Error> { - self.cluster.send(node, Message::ShareAdd(message)) - } -} - -#[cfg(test)] -pub mod tests { - use super::{IsolatedSessionTransport, SessionImpl, SessionParams}; - use ethkey::{Generator, Public, Random}; - use key_server_cluster::{ - admin_sessions::ShareChangeSessionMeta, - cluster::tests::MessageLoop as ClusterMessageLoop, - servers_set_change_session::tests::{generate_key, AdminSessionAdapter, MessageLoop}, - Error, KeyStorage, NodeId, NodeKeyPair, - }; - use std::collections::BTreeSet; - - struct Adapter; - - impl AdminSessionAdapter> for Adapter { - 
const SIGN_NEW_NODES: bool = false; - - fn create( - mut meta: ShareChangeSessionMeta, - admin_public: Public, - _: BTreeSet, - ml: &ClusterMessageLoop, - idx: usize, - ) -> SessionImpl { - let key_storage = ml.key_storage(idx).clone(); - let key_version = key_storage - .get(&meta.id) - .unwrap() - .map(|ks| ks.last_version().unwrap().hash); - - meta.self_node_id = *ml.node_key_pair(idx).public(); - SessionImpl::new(SessionParams { - meta: meta.clone(), - transport: IsolatedSessionTransport::new( - meta.id, - key_version, - 1, - ml.cluster(idx).view().unwrap(), - ), - key_storage, - admin_public: Some(admin_public), - nonce: 1, - }) - .unwrap() - } - } - - impl MessageLoop> { - pub fn init_at(self, master: NodeId) -> Result { - self.sessions[&master].initialize( - Some(self.original_key_version), - Some(self.new_nodes_set.clone()), - Some(self.all_set_signature.clone()), - Some(self.new_set_signature.clone()), - )?; - Ok(self) - } - - pub fn run_at(self, master: NodeId) -> Result { - let mut ml = self.init_at(master)?; - ml.run(); - Ok(ml) - } - } - - #[test] - fn node_add_fails_if_nodes_removed() { - // initial 2-of-3 session - let gml = generate_key(3, 1); - - // try to remove 1 node - let add = vec![Random.generate().unwrap()]; - let remove: BTreeSet<_> = ::std::iter::once(gml.0.node(1)).collect(); - let master = gml.0.node(0); - assert_eq!( - MessageLoop::with_gml::(gml, master, Some(add), Some(remove), None) - .run_at(master) - .unwrap_err(), - Error::ConsensusUnreachable - ); - } - - #[test] - fn node_add_fails_if_no_nodes_added() { - // initial 2-of-3 session - let gml = generate_key(3, 1); - - // try to add 0 nodes - let add = vec![]; - let master = gml.0.node(0); - assert_eq!( - MessageLoop::with_gml::(gml, master, Some(add), None, None) - .run_at(master) - .unwrap_err(), - Error::ConsensusUnreachable - ); - } - - #[test] - fn node_add_fails_if_started_on_adding_node() { - // initial 2-of-3 session - let gml = generate_key(3, 1); - - // try to add 1 node using this node as a master node - let add = vec![Random.generate().unwrap()]; - let master = *add[0].public(); - assert_eq!( - MessageLoop::with_gml::(gml, master, Some(add), None, None) - .run_at(master) - .unwrap_err(), - Error::ServerKeyIsNotFound - ); - } - - #[test] - fn node_add_fails_if_initialized_twice() { - // initial 2-of-3 session - let gml = generate_key(3, 1); - - // try to add 1 node using this node as a master node - let add = vec![Random.generate().unwrap()]; - let master = gml.0.node(0); - assert_eq!( - MessageLoop::with_gml::(gml, master, Some(add), None, None) - .init_at(master) - .unwrap() - .init_at(master) - .unwrap_err(), - Error::InvalidStateForRequest - ); - } - - #[test] - fn node_add_fails_if_started_without_signatures() { - // initial 2-of-3 session - let gml = generate_key(3, 1); - - // try to add 1 node using this node as a master node - let add = vec![Random.generate().unwrap()]; - let master = gml.0.node(0); - assert_eq!( - MessageLoop::with_gml::(gml, master, Some(add), None, None).sessions[&master] - .initialize(None, None, None, None) - .unwrap_err(), - Error::InvalidMessage - ); - } - - #[test] - fn nodes_added_using_share_add() { - let test_cases = vec![(3, 1), (3, 3)]; - for (n, add) in test_cases { - // generate key - let gml = generate_key(n, 1); - - // run share add session - let add = (0..add).map(|_| Random.generate().unwrap()).collect(); - let master = gml.0.node(0); - let ml = MessageLoop::with_gml::(gml, master, Some(add), None, None) - .run_at(master) - .unwrap(); - - // check that 
secret is still the same as before adding the share - ml.check_secret_is_preserved(ml.sessions.keys()); - } - } - - #[test] - fn nodes_added_using_share_add_with_isolated_nodes() { - let (n, add) = (3, 3); - - // generate key - let gml = generate_key(n, 1); - - // run share add session - let master = gml.0.node(0); - let node_to_isolate = gml.0.node(1); - let add = (0..add).map(|_| Random.generate().unwrap()).collect(); - let isolate = ::std::iter::once(node_to_isolate).collect(); - let ml = MessageLoop::with_gml::(gml, master, Some(add), None, Some(isolate)) - .run_at(master) - .unwrap(); - - // check that secret is still the same as before adding the share - ml.check_secret_is_preserved(ml.sessions.keys()); - } - - #[test] - fn nodes_add_to_the_node_with_obsolete_version() { - let (n, add) = (3, 3); - - // generate key - let gml = generate_key(n, 1); - - // run share add session - let master = gml.0.node(0); - let node_to_isolate_key_pair = gml.0.node_key_pair(1).clone(); - let node_to_isolate = gml.0.node(1); - let isolated_key_storage = gml.0.key_storage(1).clone(); - let mut oldest_nodes_set = gml.0.nodes(); - oldest_nodes_set.remove(&node_to_isolate); - let add = (0..add) - .map(|_| Random.generate().unwrap()) - .collect::>(); - let newest_nodes_set = add.iter().map(|kp| *kp.public()).collect::>(); - let isolate = ::std::iter::once(node_to_isolate).collect(); - let ml = MessageLoop::with_gml::(gml, master, Some(add), None, Some(isolate)) - .run_at(master) - .unwrap(); - let new_key_version = ml - .ml - .key_storage(0) - .get(&Default::default()) - .unwrap() - .unwrap() - .last_version() - .unwrap() - .hash; - - // now let's add back old node so that key becames 2-of-6 - let add = vec![node_to_isolate_key_pair.key_pair().clone()]; - let mut ml = ml.and_then::(master.clone(), Some(add), None, None); - ml.original_key_version = new_key_version; - ml.ml - .replace_key_storage_of(&node_to_isolate, isolated_key_storage.clone()); - ml.sessions - .get_mut(&node_to_isolate) - .unwrap() - .core - .key_share = isolated_key_storage.get(&Default::default()).unwrap(); - ml.sessions - .get_mut(&node_to_isolate) - .unwrap() - .core - .key_storage = isolated_key_storage; - let ml = ml.run_at(master).unwrap(); - - // check that secret is still the same as before adding the share - ml.check_secret_is_preserved(ml.sessions.keys()); - - // check that all oldest nodes have versions A, B, C - // isolated node has version A, C - // new nodes have versions B, C - let oldest_key_share = ml - .ml - .key_storage_of(oldest_nodes_set.iter().nth(0).unwrap()) - .get(&Default::default()) - .unwrap() - .unwrap(); - debug_assert_eq!(oldest_key_share.versions.len(), 3); - let version_a = oldest_key_share.versions[0].hash.clone(); - let version_b = oldest_key_share.versions[1].hash.clone(); - let version_c = oldest_key_share.versions[2].hash.clone(); - debug_assert!(version_a != version_b && version_b != version_c); - - debug_assert!(oldest_nodes_set.iter().all(|n| vec![ - version_a.clone(), - version_b.clone(), - version_c.clone() - ] == ml - .ml - .key_storage_of(n) - .get(&Default::default()) - .unwrap() - .unwrap() - .versions - .iter() - .map(|v| v.hash) - .collect::>())); - debug_assert!(::std::iter::once(&node_to_isolate).all(|n| vec![ - version_a.clone(), - version_c.clone() - ] == ml - .ml - .key_storage_of(n) - .get(&Default::default()) - .unwrap() - .unwrap() - .versions - .iter() - .map(|v| v.hash) - .collect::>())); - debug_assert!(newest_nodes_set - .iter() - .all(|n| vec![version_b.clone(), 
version_c.clone()] - == ml - .ml - .key_storage_of(n) - .get(&Default::default()) - .unwrap() - .unwrap() - .versions - .iter() - .map(|v| v.hash) - .collect::>())); - } - - #[test] - fn nodes_add_fails_when_not_enough_share_owners_are_connected() { - let (n, add) = (3, 3); - - // generate key - let gml = generate_key(n, 1); - - // run share add session - let master = gml.0.node(0); - let add = (0..add) - .map(|_| Random.generate().unwrap()) - .collect::>(); - let isolate = vec![gml.0.node(1), gml.0.node(2)].into_iter().collect(); - assert_eq!( - MessageLoop::with_gml::(gml, master, Some(add), None, Some(isolate)) - .run_at(master) - .unwrap_err(), - Error::ConsensusUnreachable - ); - } -} diff --git a/secret-store/src/key_server_cluster/admin_sessions/share_change_session.rs b/secret-store/src/key_server_cluster/admin_sessions/share_change_session.rs deleted file mode 100644 index 336a2e0d8..000000000 --- a/secret-store/src/key_server_cluster/admin_sessions/share_change_session.rs +++ /dev/null @@ -1,431 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. - -// OpenEthereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// OpenEthereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with OpenEthereum. If not, see . - -use ethereum_types::H256; -use ethkey::Secret; -use key_server_cluster::{ - admin_sessions::ShareChangeSessionMeta, - cluster::Cluster, - cluster_sessions::ClusterSession, - jobs::{ - job_session::JobTransport, servers_set_change_access_job::ServersSetChangeAccessRequest, - }, - math, - message::{Message, ServersSetChangeMessage, ServersSetChangeShareAddMessage, ShareAddMessage}, - share_add_session::{ - SessionImpl as ShareAddSessionImpl, SessionParams as ShareAddSessionParams, - SessionTransport as ShareAddSessionTransport, - }, - Error, KeyStorage, NodeId, ServerKeyId, SessionId, -}; -use std::{ - collections::{BTreeMap, BTreeSet}, - sync::Arc, -}; - -/// Single session meta-change session. Brief overview: -/// 1) nodes that have been already removed from cluster (isolated nodes) are removed from session -/// 2) new shares are added to the session -/// 3) shares are moved between nodes -/// 4) shares are removed from nodes -pub struct ShareChangeSession { - /// Servers set change session id. - session_id: SessionId, - /// Session nonce. - nonce: u64, - /// Share change session meta. - meta: ShareChangeSessionMeta, - /// Cluster. - cluster: Arc, - /// Key storage. - key_storage: Arc, - /// Key version. - key_version: H256, - /// Nodes that have reported version ownership. - version_holders: Option>, - /// Consensus group to use in ShareAdd session. - consensus_group: Option>, - /// Nodes to add shares for. - new_nodes_map: Option>>, - /// Share add session. - share_add_session: Option>, - /// Is finished. - is_finished: bool, -} - -/// Share change session plan. -#[derive(Debug)] -pub struct ShareChangeSessionPlan { - /// Key version that plan is valid for. - pub key_version: H256, - /// Nodes that have reported version ownership. 
- pub version_holders: BTreeSet, - /// Consensus group to use in ShareAdd session. - pub consensus_group: BTreeSet, - /// Nodes to add shares for. - pub new_nodes_map: BTreeMap>, -} - -/// Session parameters. -pub struct ShareChangeSessionParams { - /// Servers set change session id. - pub session_id: SessionId, - /// Session nonce. - pub nonce: u64, - /// Share change session meta. - pub meta: ShareChangeSessionMeta, - /// Cluster. - pub cluster: Arc, - /// Keys storage. - pub key_storage: Arc, - /// Session plan. - pub plan: ShareChangeSessionPlan, -} - -/// Share add session transport. -#[derive(Clone)] -pub struct ShareChangeTransport { - /// Servers set change session id. - session_id: SessionId, - /// Session nonce. - nonce: u64, - /// Cluster. - cluster: Arc, -} - -impl ShareChangeSession { - /// Create new share change session. - pub fn new(params: ShareChangeSessionParams) -> Result { - // we can't create sessions right now, because key share is read when session is created, but it can change in previous session - let key_version = params.plan.key_version; - let consensus_group = if !params.plan.consensus_group.is_empty() { - Some(params.plan.consensus_group) - } else { - None - }; - let version_holders = if !params.plan.version_holders.is_empty() { - Some(params.plan.version_holders) - } else { - None - }; - let new_nodes_map = if !params.plan.new_nodes_map.is_empty() { - Some(params.plan.new_nodes_map) - } else { - None - }; - debug_assert!(new_nodes_map.is_some()); - - let is_finished = new_nodes_map.is_none(); - Ok(ShareChangeSession { - session_id: params.session_id, - nonce: params.nonce, - meta: params.meta, - cluster: params.cluster, - key_storage: params.key_storage, - key_version: key_version, - version_holders: version_holders, - consensus_group: consensus_group, - new_nodes_map: new_nodes_map, - share_add_session: None, - is_finished: is_finished, - }) - } - - /// Is finished?. - pub fn is_finished(&self) -> bool { - self.is_finished - } - - /// Is master node?. - pub fn is_master(&self) -> bool { - self.meta.self_node_id == self.meta.master_node_id - } - - /// Initialize session (on master node). - pub fn initialize(&mut self) -> Result<(), Error> { - self.proceed_to_next_state() - } - - /// When share-add message is received. - pub fn on_share_add_message( - &mut self, - sender: &NodeId, - message: &ShareAddMessage, - ) -> Result<(), Error> { - if self.share_add_session.is_none() { - self.create_share_add_session()?; - } - - let change_state_needed = self - .share_add_session - .as_ref() - .map(|share_add_session| { - let was_finished = share_add_session.is_finished(); - share_add_session - .process_message(sender, message) - .map(|_| share_add_session.is_finished() && !was_finished) - }) - .unwrap_or(Err(Error::InvalidMessage))?; - if change_state_needed { - self.proceed_to_next_state()?; - } - - Ok(()) - } - - /// Create new share add session. 
- fn create_share_add_session(&mut self) -> Result<(), Error> { - let consensus_group = self - .consensus_group - .take() - .ok_or(Error::InvalidStateForRequest)?; - let version_holders = self - .version_holders - .take() - .ok_or(Error::InvalidStateForRequest)?; - let new_nodes_map = self - .new_nodes_map - .take() - .ok_or(Error::InvalidStateForRequest)?; - let share_add_session = ShareAddSessionImpl::new(ShareAddSessionParams { - meta: self.meta.clone(), - nonce: self.nonce, - transport: ShareChangeTransport::new(self.session_id, self.nonce, self.cluster.clone()), - key_storage: self.key_storage.clone(), - admin_public: None, - })?; - share_add_session.set_consensus_output( - &self.key_version, - consensus_group, - version_holders, - new_nodes_map, - )?; - self.share_add_session = Some(share_add_session); - Ok(()) - } - - /// Proceed to the next state. - fn proceed_to_next_state(&mut self) -> Result<(), Error> { - if self.meta.self_node_id != self.meta.master_node_id { - if self.new_nodes_map.is_none() { - self.is_finished = true; - } - return Ok(()); - } - - if self.new_nodes_map.is_some() { - self.create_share_add_session()?; - return self - .share_add_session - .as_ref() - .expect("either create_share_add_session fails, or session is created; qed") - .initialize(None, None, None, None); - } - - self.is_finished = true; - - Ok(()) - } -} - -impl ShareChangeTransport { - pub fn new(session_id: SessionId, nonce: u64, cluster: Arc) -> Self { - ShareChangeTransport { - session_id: session_id, - nonce: nonce, - cluster: cluster, - } - } -} - -impl JobTransport for ShareChangeTransport { - type PartialJobRequest = ServersSetChangeAccessRequest; - type PartialJobResponse = bool; - - fn send_partial_request( - &self, - _node: &NodeId, - _request: ServersSetChangeAccessRequest, - ) -> Result<(), Error> { - unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed") - } - - fn send_partial_response(&self, _node: &NodeId, _response: bool) -> Result<(), Error> { - unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed") - } -} - -impl ShareAddSessionTransport for ShareChangeTransport { - fn nodes(&self) -> BTreeSet { - self.cluster.nodes() - } - - fn set_master_data( - &mut self, - _consensus_group: BTreeSet, - _version_holders: BTreeSet, - _id_numbers: BTreeMap>, - ) { - unreachable!("only called when establishing consensus; this transport is never used for establishing consensus; qed") - } - - fn send(&self, node: &NodeId, message: ShareAddMessage) -> Result<(), Error> { - self.cluster.send( - node, - Message::ServersSetChange(ServersSetChangeMessage::ServersSetChangeShareAddMessage( - ServersSetChangeShareAddMessage { - session: self.session_id.clone().into(), - session_nonce: self.nonce, - message: message, - }, - )), - ) - } -} - -/// Prepare share change plan for moving from old `old_key_version_owners` to `new_nodes_set`. 
-pub fn prepare_share_change_session_plan( - cluster_nodes: &BTreeSet, - threshold: usize, - key_id: &ServerKeyId, - key_version: H256, - master: &NodeId, - old_key_version_owners: &BTreeSet, - new_nodes_set: &BTreeSet, -) -> Result { - // we can't do anything if there are not enough shares - if old_key_version_owners.len() < threshold + 1 { - warn!( - "cannot add shares to key {} with threshold {}: only {} shares owners are available", - key_id, - threshold, - old_key_version_owners.len() - ); - return Ok(ShareChangeSessionPlan { - key_version: key_version, - version_holders: Default::default(), - consensus_group: Default::default(), - new_nodes_map: Default::default(), - }); - } - - // warn if we're losing the key - if new_nodes_set.len() < threshold + 1 { - warn!( - "losing key {} with threshold {}: only {} nodes left after servers set change session", - key_id, - threshold, - new_nodes_set.len() - ); - } - - // make new nodes map, so that: - // all non-isolated old nodes will have their id number preserved - // all new nodes will have new id number - let mut new_nodes_map = new_nodes_set - .difference(&old_key_version_owners) - .map(|n| math::generate_random_scalar().map(|id| (n.clone(), Some(id)))) - .collect::, _>>()?; - if !new_nodes_map.is_empty() { - for old_node in old_key_version_owners - .iter() - .filter(|n| cluster_nodes.contains(n)) - { - new_nodes_map.insert(old_node.clone(), None); - } - } - - // select consensus group if there are some nodes to add - let consensus_group = if !new_nodes_map.is_empty() { - ::std::iter::once(master.clone()) - .chain( - old_key_version_owners - .iter() - .filter(|n| *n != master && cluster_nodes.contains(*n)) - .take(threshold) - .cloned(), - ) - .collect() - } else { - BTreeSet::new() - }; - - Ok(ShareChangeSessionPlan { - key_version: key_version, - version_holders: old_key_version_owners.clone(), - consensus_group: consensus_group, - new_nodes_map: new_nodes_map, - }) -} - -impl ShareChangeSessionPlan { - /// Is empty (nothing-to-do) plan?
- pub fn is_empty(&self) -> bool { - self.new_nodes_map.is_empty() - } -} - -#[cfg(test)] -mod tests { - use super::prepare_share_change_session_plan; - use key_server_cluster::math; - - #[test] - fn share_change_plan_creates_empty_plan() { - let cluster_nodes: Vec<_> = (0..3) - .map(|_| math::generate_random_point().unwrap()) - .collect(); - let master = cluster_nodes[0].clone(); - let old_key_version_owners = cluster_nodes.iter().cloned().collect(); - let new_nodes_set = cluster_nodes.iter().cloned().collect(); - let plan = prepare_share_change_session_plan( - &cluster_nodes.iter().cloned().collect(), - 1, - &Default::default(), - Default::default(), - &master, - &old_key_version_owners, - &new_nodes_set, - ) - .unwrap(); - - assert!(plan.is_empty()); - } - - #[test] - fn share_change_plan_adds_new_nodes() { - let cluster_nodes: Vec<_> = (0..3) - .map(|_| math::generate_random_point().unwrap()) - .collect(); - let master = cluster_nodes[0].clone(); - let old_key_version_owners = cluster_nodes[0..2].iter().cloned().collect(); - let new_nodes_set = cluster_nodes.iter().cloned().collect(); - let plan = prepare_share_change_session_plan( - &cluster_nodes.iter().cloned().collect(), - 1, - &Default::default(), - Default::default(), - &master, - &old_key_version_owners, - &new_nodes_set, - ) - .unwrap(); - - assert!(!plan.is_empty()); - assert_eq!(old_key_version_owners, plan.consensus_group); - assert_eq!(new_nodes_set, plan.new_nodes_map.keys().cloned().collect()); - } -} diff --git a/secret-store/src/key_server_cluster/client_sessions/decryption_session.rs b/secret-store/src/key_server_cluster/client_sessions/decryption_session.rs deleted file mode 100644 index 6cd01b606..000000000 --- a/secret-store/src/key_server_cluster/client_sessions/decryption_session.rs +++ /dev/null @@ -1,2211 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. - -// OpenEthereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// OpenEthereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with OpenEthereum. If not, see . 
- -use ethereum_types::{Address, H256}; -use ethkey::Secret; -use key_server_cluster::{ - cluster::Cluster, - cluster_sessions::{ClusterSession, SessionIdWithSubSession}, - jobs::{ - consensus_session::{ConsensusSession, ConsensusSessionParams, ConsensusSessionState}, - decryption_job::{DecryptionJob, PartialDecryptionRequest, PartialDecryptionResponse}, - job_session::{JobSession, JobSessionState, JobTransport}, - key_access_job::KeyAccessJob, - }, - message::{ - ConfirmConsensusInitialization, ConsensusMessage, DecryptionConsensusMessage, - DecryptionMessage, DecryptionSessionCompleted, DecryptionSessionDelegation, - DecryptionSessionDelegationCompleted, DecryptionSessionError, InitializeConsensusSession, - Message, PartialDecryption, RequestPartialDecryption, - }, - AclStorage, DocumentKeyShare, EncryptedDocumentKeyShadow, Error, NodeId, Requester, SessionId, - SessionMeta, -}; -use parking_lot::{Condvar, Mutex}; -use std::{ - collections::{BTreeMap, BTreeSet}, - sync::Arc, - time, -}; - -/// Distributed decryption session. -/// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper: -/// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.124.4128&rep=rep1&type=pdf -/// Brief overview: -/// 1) initialization: master node (which has received request for decrypting the secret) requests all other nodes to decrypt the secret -/// 2) ACL check: all nodes which have received the request are querying ACL-contract to check if requestor has access to the document -/// 3) partial decryption: every node which has successfully checked access for the requestor does a partial decryption -/// 4) decryption: master node receives all partial decryptions of the secret and restores the secret -pub struct SessionImpl { - /// Session core. - core: SessionCore, - /// Session data. - data: Mutex, -} - -/// Immutable session data. -struct SessionCore { - /// Session metadata. - pub meta: SessionMeta, - /// Decryption session access key. - pub access_key: Secret, - /// Key share. - pub key_share: Option, - /// Cluster which allows this node to send messages to other nodes in the cluster. - pub cluster: Arc, - /// Session-level nonce. - pub nonce: u64, - /// SessionImpl completion condvar. - pub completed: Condvar, -} - -/// Decryption consensus session type. -type DecryptionConsensusSession = ConsensusSession< - KeyAccessJob, - DecryptionConsensusTransport, - DecryptionJob, - DecryptionJobTransport, ->; -/// Broadcast decryption job session type. -type BroadcastDecryptionJobSession = JobSession; - -/// Mutable session data. -struct SessionData { - /// Key version to use for decryption. - pub version: Option, - /// Session origin (if any). - pub origin: Option
, - /// Consensus-based decryption session. - pub consensus_session: DecryptionConsensusSession, - /// Broadcast decryption job. - pub broadcast_job_session: Option, - /// Is shadow decryption requested? - pub is_shadow_decryption: Option, - /// Decryption result must be reconstructed on all participating nodes. This is useful - /// for service contract API so that all nodes from consensus group can confirm decryption. - pub is_broadcast_session: Option, - /// Delegation status. - pub delegation_status: Option, - /// Decryption result. - pub result: Option>, -} - -/// SessionImpl creation parameters -pub struct SessionParams { - /// Session metadata. - pub meta: SessionMeta, - /// Session access key. - pub access_key: Secret, - /// Key share. - pub key_share: Option, - /// ACL storage. - pub acl_storage: Arc, - /// Cluster. - pub cluster: Arc, - /// Session nonce. - pub nonce: u64, -} - -/// Decryption consensus transport. -struct DecryptionConsensusTransport { - /// Session id. - id: SessionId, - /// Session access key. - access_key: Secret, - /// Session-level nonce. - nonce: u64, - /// Session origin (if any). - origin: Option
, - /// Selected key version (on master node). - version: Option, - /// Cluster. - cluster: Arc, -} - -/// Decryption job transport -struct DecryptionJobTransport { - /// Session id. - id: SessionId, - /// Session access key. - access_key: Secret, - /// Session-level nonce. - nonce: u64, - /// Is this a broadcast transport? If true, requests are not sent and responses are sent only to non-master nodes. - is_broadcast_transport: bool, - /// Master node id. - master_node_id: NodeId, - /// Cluster. - cluster: Arc, -} - -/// Session delegation status. -enum DelegationStatus { - /// Delegated to other node. - DelegatedTo(NodeId), - /// Delegated from other node. - DelegatedFrom(NodeId, u64), -} - -impl SessionImpl { - /// Create new decryption session. - pub fn new(params: SessionParams, requester: Option) -> Result { - debug_assert_eq!( - params.meta.threshold, - params - .key_share - .as_ref() - .map(|ks| ks.threshold) - .unwrap_or_default() - ); - - // check that common_point and encrypted_point are already set - if let Some(key_share) = params.key_share.as_ref() { - // encrypted data must be set - if key_share.common_point.is_none() || key_share.encrypted_point.is_none() { - return Err(Error::DocumentKeyIsNotFound); - } - } - - let consensus_transport = DecryptionConsensusTransport { - id: params.meta.id.clone(), - access_key: params.access_key.clone(), - nonce: params.nonce, - origin: None, - version: None, - cluster: params.cluster.clone(), - }; - let consensus_session = ConsensusSession::new(ConsensusSessionParams { - meta: params.meta.clone(), - consensus_executor: match requester { - Some(requester) => KeyAccessJob::new_on_master( - params.meta.id.clone(), - params.acl_storage.clone(), - requester, - ), - None => { - KeyAccessJob::new_on_slave(params.meta.id.clone(), params.acl_storage.clone()) - } - }, - consensus_transport: consensus_transport, - })?; - - Ok(SessionImpl { - core: SessionCore { - meta: params.meta, - access_key: params.access_key, - key_share: params.key_share, - cluster: params.cluster, - nonce: params.nonce, - completed: Condvar::new(), - }, - data: Mutex::new(SessionData { - version: None, - origin: None, - consensus_session: consensus_session, - broadcast_job_session: None, - is_shadow_decryption: None, - is_broadcast_session: None, - delegation_status: None, - result: None, - }), - }) - } - - /// Get this node id. - #[cfg(test)] - pub fn node(&self) -> &NodeId { - &self.core.meta.self_node_id - } - - /// Get this session access key. - #[cfg(test)] - pub fn access_key(&self) -> &Secret { - &self.core.access_key - } - - /// Get session state. - #[cfg(test)] - pub fn state(&self) -> ConsensusSessionState { - self.data.lock().consensus_session.state() - } - - /// Get decrypted secret - #[cfg(test)] - pub fn decrypted_secret(&self) -> Option> { - self.data.lock().result.clone() - } - - /// Get key requester. - pub fn requester(&self) -> Option { - self.data - .lock() - .consensus_session - .consensus_job() - .executor() - .requester() - .cloned() - } - - /// Get session origin. - pub fn origin(&self) -> Option
{ - self.data.lock().origin.clone() - } - - /// Wait for session completion. - pub fn wait( - &self, - timeout: Option, - ) -> Option> { - Self::wait_session(&self.core.completed, &self.data, timeout, |data| { - data.result.clone() - }) - } - - /// Get broadcasted shadows. - pub fn broadcast_shadows(&self) -> Option>> { - let data = self.data.lock(); - - if data.result.is_none() - || (data.is_broadcast_session, data.is_shadow_decryption) != (Some(true), Some(true)) - { - return None; - } - - let proof = "data.is_shadow_decryption is true; decrypt_shadow.is_some() is checked in DecryptionJob::check_partial_response; qed"; - Some(match self.core.meta.master_node_id == self.core.meta.self_node_id { - true => data.consensus_session.computation_job().responses().iter() - .map(|(n, r)| (n.clone(), r.decrypt_shadow.clone().expect(proof))) - .collect(), - false => data.broadcast_job_session.as_ref().expect("session completed; is_shadow_decryption == true; we're on non-master node; qed").responses().iter() - .map(|(n, r)| (n.clone(), r.decrypt_shadow.clone().expect(proof))) - .collect(), - }) - } - - /// Delegate session to other node. - pub fn delegate( - &self, - master: NodeId, - origin: Option
, - version: H256, - is_shadow_decryption: bool, - is_broadcast_session: bool, - ) -> Result<(), Error> { - if self.core.meta.master_node_id != self.core.meta.self_node_id { - return Err(Error::InvalidStateForRequest); - } - - let mut data = self.data.lock(); - if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization - || data.delegation_status.is_some() - { - return Err(Error::InvalidStateForRequest); - } - - data.consensus_session - .consensus_job_mut() - .executor_mut() - .set_has_key_share(false); - self.core.cluster.send(&master, Message::Decryption(DecryptionMessage::DecryptionSessionDelegation(DecryptionSessionDelegation { - session: self.core.meta.id.clone().into(), - sub_session: self.core.access_key.clone().into(), - session_nonce: self.core.nonce, - origin: origin.map(Into::into), - requester: data.consensus_session.consensus_job().executor().requester() - .expect("signature is passed to master node on creation; session can be delegated from master node only; qed") - .clone().into(), - version: version.into(), - is_shadow_decryption: is_shadow_decryption, - is_broadcast_session: is_broadcast_session, - })))?; - data.delegation_status = Some(DelegationStatus::DelegatedTo(master)); - Ok(()) - } - - /// Initialize decryption session on master node. - pub fn initialize( - &self, - origin: Option
, - version: H256, - is_shadow_decryption: bool, - is_broadcast_session: bool, - ) -> Result<(), Error> { - debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id); - - // check if version exists - let key_version = match self.core.key_share.as_ref() { - None => return Err(Error::InvalidMessage), - Some(key_share) => key_share.version(&version)?, - }; - - let mut data = self.data.lock(); - let non_isolated_nodes = self.core.cluster.nodes(); - let mut consensus_nodes: BTreeSet<_> = key_version - .id_numbers - .keys() - .filter(|n| non_isolated_nodes.contains(*n)) - .cloned() - .chain(::std::iter::once(self.core.meta.self_node_id.clone())) - .collect(); - if let Some(&DelegationStatus::DelegatedFrom(delegation_master, _)) = - data.delegation_status.as_ref() - { - consensus_nodes.remove(&delegation_master); - } - - data.consensus_session - .consensus_job_mut() - .transport_mut() - .version = Some(version.clone()); - data.consensus_session - .consensus_job_mut() - .transport_mut() - .origin = origin.clone(); - data.origin = origin; - data.version = Some(version.clone()); - data.is_shadow_decryption = Some(is_shadow_decryption); - data.is_broadcast_session = Some(is_broadcast_session); - data.consensus_session.initialize(consensus_nodes)?; - - if data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished { - Self::disseminate_jobs( - &self.core, - &mut *data, - &version, - is_shadow_decryption, - is_broadcast_session, - )?; - - debug_assert!(data.consensus_session.state() == ConsensusSessionState::Finished); - let result = data.consensus_session.result()?; - Self::set_decryption_result(&self.core, &mut *data, Ok(result)); - } - - Ok(()) - } - - /// Process decryption message. - pub fn process_message( - &self, - sender: &NodeId, - message: &DecryptionMessage, - ) -> Result<(), Error> { - if self.core.nonce != message.session_nonce() { - return Err(Error::ReplayProtection); - } - - match message { - &DecryptionMessage::DecryptionConsensusMessage(ref message) => { - self.on_consensus_message(sender, message) - } - &DecryptionMessage::RequestPartialDecryption(ref message) => { - self.on_partial_decryption_requested(sender, message) - } - &DecryptionMessage::PartialDecryption(ref message) => { - self.on_partial_decryption(sender, message) - } - &DecryptionMessage::DecryptionSessionError(ref message) => { - self.process_node_error(Some(&sender), message.error.clone()) - } - &DecryptionMessage::DecryptionSessionCompleted(ref message) => { - self.on_session_completed(sender, message) - } - &DecryptionMessage::DecryptionSessionDelegation(ref message) => { - self.on_session_delegated(sender, message) - } - &DecryptionMessage::DecryptionSessionDelegationCompleted(ref message) => { - self.on_session_delegation_completed(sender, message) - } - } - } - - /// When session is delegated to this node. 
- pub fn on_session_delegated( - &self, - sender: &NodeId, - message: &DecryptionSessionDelegation, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - - { - let mut data = self.data.lock(); - if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization - || data.delegation_status.is_some() - { - return Err(Error::InvalidStateForRequest); - } - - data.consensus_session - .consensus_job_mut() - .executor_mut() - .set_requester(message.requester.clone().into()); - data.delegation_status = Some(DelegationStatus::DelegatedFrom( - sender.clone(), - message.session_nonce, - )); - } - - self.initialize( - message.origin.clone().map(Into::into), - message.version.clone().into(), - message.is_shadow_decryption, - message.is_broadcast_session, - ) - } - - /// When delegated session is completed on other node. - pub fn on_session_delegation_completed( - &self, - sender: &NodeId, - message: &DecryptionSessionDelegationCompleted, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - - if self.core.meta.master_node_id != self.core.meta.self_node_id { - return Err(Error::InvalidStateForRequest); - } - - let mut data = self.data.lock(); - match data.delegation_status.as_ref() { - Some(&DelegationStatus::DelegatedTo(ref node)) if node == sender => (), - _ => return Err(Error::InvalidMessage), - } - - Self::set_decryption_result( - &self.core, - &mut *data, - Ok(EncryptedDocumentKeyShadow { - decrypted_secret: message.decrypted_secret.clone().into(), - common_point: message.common_point.clone().map(Into::into), - decrypt_shadows: message.decrypt_shadows.clone().map(Into::into), - }), - ); - - Ok(()) - } - - /// When consensus-related message is received. 
- pub fn on_consensus_message( - &self, - sender: &NodeId, - message: &DecryptionConsensusMessage, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - - let mut data = self.data.lock(); - let is_establishing_consensus = - data.consensus_session.state() == ConsensusSessionState::EstablishingConsensus; - if let &ConsensusMessage::InitializeConsensusSession(ref msg) = &message.message { - let version = msg.version.clone().into(); - let has_key_share = self - .core - .key_share - .as_ref() - .map(|ks| ks.version(&version).is_ok()) - .unwrap_or(false); - data.consensus_session - .consensus_job_mut() - .executor_mut() - .set_has_key_share(has_key_share); - data.version = Some(version); - data.origin = message.origin.clone().map(Into::into); - } - data.consensus_session - .on_consensus_message(&sender, &message.message)?; - - let is_consensus_established = - data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished; - if self.core.meta.self_node_id != self.core.meta.master_node_id - || !is_establishing_consensus - || !is_consensus_established - { - return Ok(()); - } - - let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone(); - let is_shadow_decryption = data.is_shadow_decryption - .expect("we are on master node; on master node is_shadow_decryption is filled in initialize(); on_consensus_message follows initialize (state check in consensus_session); qed"); - let is_broadcast_session = data.is_broadcast_session - .expect("we are on master node; on master node is_broadcast_session is filled in initialize(); on_consensus_message follows initialize (state check in consensus_session); qed"); - Self::disseminate_jobs( - &self.core, - &mut *data, - &version, - is_shadow_decryption, - is_broadcast_session, - ) - } - - /// When partial decryption is requested. - pub fn on_partial_decryption_requested( - &self, - sender: &NodeId, - message: &RequestPartialDecryption, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); - - let key_share = match self.core.key_share.as_ref() { - None => return Err(Error::InvalidMessage), - Some(key_share) => key_share, - }; - - let mut data = self.data.lock(); - let key_version = key_share - .version(data.version.as_ref().ok_or(Error::InvalidMessage)?)? - .hash - .clone(); - let requester_public = data - .consensus_session - .consensus_job() - .executor() - .requester() - .ok_or(Error::InvalidStateForRequest)? 
- .public(&self.core.meta.id) - .map_err(Error::InsufficientRequesterData)?; - let decryption_job = DecryptionJob::new_on_slave( - self.core.meta.self_node_id.clone(), - self.core.access_key.clone(), - requester_public.clone(), - key_share.clone(), - key_version, - )?; - let decryption_transport = self.core.decryption_transport(false); - - // update flags if not on master - if self.core.meta.self_node_id != self.core.meta.master_node_id { - data.is_shadow_decryption = Some(message.is_shadow_decryption); - data.is_broadcast_session = Some(message.is_broadcast_session); - } - - // respond to request - let partial_decryption = data.consensus_session.on_job_request( - sender, - PartialDecryptionRequest { - id: message.request_id.clone().into(), - is_shadow_decryption: message.is_shadow_decryption, - is_broadcast_session: message.is_broadcast_session, - other_nodes_ids: message.nodes.iter().cloned().map(Into::into).collect(), - }, - decryption_job, - decryption_transport, - )?; - - // ...and prepare decryption job session if we need to broadcast result - if message.is_broadcast_session { - let consensus_group: BTreeSet<_> = - message.nodes.iter().cloned().map(Into::into).collect(); - let broadcast_decryption_job = DecryptionJob::new_on_master( - self.core.meta.self_node_id.clone(), - self.core.access_key.clone(), - requester_public, - key_share.clone(), - key_version, - message.is_shadow_decryption, - message.is_broadcast_session, - )?; - Self::create_broadcast_decryption_job( - &self.core, - &mut *data, - consensus_group, - broadcast_decryption_job, - message.request_id.clone().into(), - Some(partial_decryption.take_response()), - )?; - } - - Ok(()) - } - - /// When partial decryption is received. - pub fn on_partial_decryption( - &self, - sender: &NodeId, - message: &PartialDecryption, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); - - let mut data = self.data.lock(); - let is_master_node = self.core.meta.self_node_id == self.core.meta.master_node_id; - let result = if is_master_node { - data.consensus_session.on_job_response( - sender, - PartialDecryptionResponse { - request_id: message.request_id.clone().into(), - shadow_point: message.shadow_point.clone().into(), - decrypt_shadow: message.decrypt_shadow.clone(), - }, - )?; - - if data.consensus_session.state() != ConsensusSessionState::Finished - && data.consensus_session.state() != ConsensusSessionState::Failed - { - return Ok(()); - } - - // send completion signal to all nodes, except for rejected nodes - if is_master_node { - for node in data.consensus_session.consensus_non_rejected_nodes() { - self.core.cluster.send( - &node, - Message::Decryption(DecryptionMessage::DecryptionSessionCompleted( - DecryptionSessionCompleted { - session: self.core.meta.id.clone().into(), - sub_session: self.core.access_key.clone().into(), - session_nonce: self.core.nonce, - }, - )), - )?; - } - } - - data.consensus_session.result() - } else { - match data.broadcast_job_session.as_mut() { - Some(broadcast_job_session) => { - broadcast_job_session.on_partial_response( - sender, - PartialDecryptionResponse { - request_id: message.request_id.clone().into(), - shadow_point: message.shadow_point.clone().into(), - decrypt_shadow: message.decrypt_shadow.clone(), - }, - )?; - - if broadcast_job_session.state() != JobSessionState::Finished - && broadcast_job_session.state() != JobSessionState::Failed - { - 
return Ok(()); - } - - broadcast_job_session.result() - } - None => return Err(Error::InvalidMessage), - } - }; - - Self::set_decryption_result(&self.core, &mut *data, result); - - Ok(()) - } - - /// When session is completed. - pub fn on_session_completed( - &self, - sender: &NodeId, - message: &DecryptionSessionCompleted, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); - - let mut data = self.data.lock(); - - // if it is a broadcast session, wait for all answers before completing the session - let decryption_result = match data.broadcast_job_session.as_ref() { - Some(broadcast_job_session) => { - if !broadcast_job_session.is_result_ready() { - return Err(Error::TooEarlyForRequest); - } - - Some(broadcast_job_session.result()) - } - None => None, - }; - if let Some(decryption_result) = decryption_result { - Self::set_decryption_result(&self.core, &mut *data, decryption_result); - } - - data.consensus_session.on_session_completed(sender) - } - - /// Process error from the other node. - fn process_node_error(&self, node: Option<&NodeId>, error: Error) -> Result<(), Error> { - let mut data = self.data.lock(); - let is_self_node_error = node - .map(|n| n == &self.core.meta.self_node_id) - .unwrap_or(false); - // error is always fatal if coming from this node - if is_self_node_error { - Self::set_decryption_result(&self.core, &mut *data, Err(error.clone())); - return Err(error); - } - - match { - match node { - Some(node) => data.consensus_session.on_node_error(node, error.clone()), - None => data.consensus_session.on_session_timeout(), - } - } { - Ok(false) => Ok(()), - Ok(true) => { - let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone(); - let proof = "on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when is_shadow_decryption.is_some(); qed"; - let is_shadow_decryption = data.is_shadow_decryption.expect(proof); - let is_broadcast_session = data.is_broadcast_session.expect(proof); - let disseminate_result = Self::disseminate_jobs( - &self.core, - &mut *data, - &version, - is_shadow_decryption, - is_broadcast_session, - ); - match disseminate_result { - Ok(()) => Ok(()), - Err(err) => { - warn!( - "{}: decryption session failed with error: {:?} from {:?}", - &self.core.meta.self_node_id, error, node - ); - - Self::set_decryption_result(&self.core, &mut *data, Err(err.clone())); - Err(err) - } - } - } - Err(err) => { - warn!( - "{}: decryption session failed with error: {:?} from {:?}", - &self.core.meta.self_node_id, error, node - ); - - Self::set_decryption_result(&self.core, &mut *data, Err(err.clone())); - Err(err) - } - } - } - - /// Disseminate jobs on session master. - fn disseminate_jobs( - core: &SessionCore, - data: &mut SessionData, - version: &H256, - is_shadow_decryption: bool, - is_broadcast_session: bool, - ) -> Result<(), Error> { - let key_share = match core.key_share.as_ref() { - None => return Err(Error::InvalidMessage), - Some(key_share) => key_share, - }; - - let key_version = key_share.version(version)?.hash.clone(); - let requester = data - .consensus_session - .consensus_job() - .executor() - .requester() - .ok_or(Error::InvalidStateForRequest)? 
- .clone(); - let requester_public = requester - .public(&core.meta.id) - .map_err(Error::InsufficientRequesterData)?; - let consensus_group = data.consensus_session.select_consensus_group()?.clone(); - let decryption_job = DecryptionJob::new_on_master( - core.meta.self_node_id.clone(), - core.access_key.clone(), - requester_public.clone(), - key_share.clone(), - key_version, - is_shadow_decryption, - is_broadcast_session, - )?; - let decryption_request_id = decryption_job.request_id().clone() - .expect("DecryptionJob always have request_id when created on master; it is created using new_on_master above; qed"); - let decryption_transport = core.decryption_transport(false); - let is_broadcast_session = data.is_broadcast_session - .expect("disseminate_jobs is called on master node only; on master node is_broadcast_session is filled during initialization; qed"); - let self_response = data.consensus_session.disseminate_jobs( - decryption_job, - decryption_transport, - is_broadcast_session, - )?; - - // ...and prepare decryption job session if we need to broadcast result - if is_broadcast_session { - let broadcast_decryption_job = DecryptionJob::new_on_master( - core.meta.self_node_id.clone(), - core.access_key.clone(), - requester_public, - key_share.clone(), - key_version, - is_shadow_decryption, - is_broadcast_session, - )?; - Self::create_broadcast_decryption_job( - &core, - data, - consensus_group, - broadcast_decryption_job, - decryption_request_id, - self_response, - )?; - } - - Ok(()) - } - - /// Create broadcast decryption job. - fn create_broadcast_decryption_job( - core: &SessionCore, - data: &mut SessionData, - mut consensus_group: BTreeSet, - mut job: DecryptionJob, - request_id: Secret, - self_response: Option, - ) -> Result<(), Error> { - consensus_group.insert(core.meta.self_node_id.clone()); - job.set_request_id(request_id.clone().into()); - - let transport = core.decryption_transport(true); - let mut job_session = JobSession::new( - SessionMeta { - id: core.meta.id.clone(), - master_node_id: core.meta.self_node_id.clone(), - self_node_id: core.meta.self_node_id.clone(), - threshold: core.meta.threshold, - configured_nodes_count: core.meta.configured_nodes_count, - connected_nodes_count: core.meta.connected_nodes_count, - }, - job, - transport, - ); - job_session.initialize( - consensus_group, - self_response, - core.meta.self_node_id != core.meta.master_node_id, - )?; - data.broadcast_job_session = Some(job_session); - - Ok(()) - } - - /// Set decryption result. 
- fn set_decryption_result( - core: &SessionCore, - data: &mut SessionData, - result: Result, - ) { - if let Some(DelegationStatus::DelegatedFrom(master, nonce)) = data.delegation_status.take() - { - // error means can't communicate => ignore it - let _ = match result.as_ref() { - Ok(document_key) => core.cluster.send( - &master, - Message::Decryption(DecryptionMessage::DecryptionSessionDelegationCompleted( - DecryptionSessionDelegationCompleted { - session: core.meta.id.clone().into(), - sub_session: core.access_key.clone().into(), - session_nonce: nonce, - decrypted_secret: document_key.decrypted_secret.clone().into(), - common_point: document_key.common_point.clone().map(Into::into), - decrypt_shadows: document_key.decrypt_shadows.clone(), - }, - )), - ), - Err(error) => core.cluster.send( - &master, - Message::Decryption(DecryptionMessage::DecryptionSessionError( - DecryptionSessionError { - session: core.meta.id.clone().into(), - sub_session: core.access_key.clone().into(), - session_nonce: nonce, - error: error.clone().into(), - }, - )), - ), - }; - } - - data.result = Some(result); - core.completed.notify_all(); - } -} - -impl ClusterSession for SessionImpl { - type Id = SessionIdWithSubSession; - - fn type_name() -> &'static str { - "decryption" - } - - fn id(&self) -> SessionIdWithSubSession { - SessionIdWithSubSession::new(self.core.meta.id.clone(), self.core.access_key.clone()) - } - - fn is_finished(&self) -> bool { - let data = self.data.lock(); - data.consensus_session.state() == ConsensusSessionState::Failed - || data.consensus_session.state() == ConsensusSessionState::Finished - || data.result.is_some() - } - - fn on_node_timeout(&self, node: &NodeId) { - // ignore error, only state matters - let _ = self.process_node_error(Some(node), Error::NodeDisconnected); - } - - fn on_session_timeout(&self) { - // ignore error, only state matters - let _ = self.process_node_error(None, Error::NodeDisconnected); - } - - fn on_session_error(&self, node: &NodeId, error: Error) { - let is_fatal = self.process_node_error(Some(node), error.clone()).is_err(); - let is_this_node_error = *node == self.core.meta.self_node_id; - if is_fatal || is_this_node_error { - // error in signing session is non-fatal, if occurs on slave node - // => either respond with error - // => or broadcast error - let message = Message::Decryption(DecryptionMessage::DecryptionSessionError( - DecryptionSessionError { - session: self.core.meta.id.clone().into(), - sub_session: self.core.access_key.clone().into(), - session_nonce: self.core.nonce, - error: error.clone().into(), - }, - )); - - // do not bother processing send error, as we already processing error - let _ = if self.core.meta.master_node_id == self.core.meta.self_node_id { - self.core.cluster.broadcast(message) - } else { - self.core - .cluster - .send(&self.core.meta.master_node_id, message) - }; - } - } - - fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { - match *message { - Message::Decryption(ref message) => self.process_message(sender, message), - _ => unreachable!("cluster checks message to be correct before passing; qed"), - } - } -} - -impl SessionCore { - pub fn decryption_transport(&self, is_broadcast_transport: bool) -> DecryptionJobTransport { - DecryptionJobTransport { - id: self.meta.id.clone(), - access_key: self.access_key.clone(), - nonce: self.nonce, - is_broadcast_transport: is_broadcast_transport, - master_node_id: self.meta.master_node_id.clone(), - cluster: self.cluster.clone(), - } - } -} - -impl 
JobTransport for DecryptionConsensusTransport { - type PartialJobRequest = Requester; - type PartialJobResponse = bool; - - fn send_partial_request(&self, node: &NodeId, request: Requester) -> Result<(), Error> { - let version = self.version.as_ref() - .expect("send_partial_request is called on initialized master node only; version is filled in before initialization starts on master node; qed"); - self.cluster.send( - node, - Message::Decryption(DecryptionMessage::DecryptionConsensusMessage( - DecryptionConsensusMessage { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - origin: self.origin.clone().map(Into::into), - message: ConsensusMessage::InitializeConsensusSession( - InitializeConsensusSession { - requester: request.into(), - version: version.clone().into(), - }, - ), - }, - )), - ) - } - - fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> { - self.cluster.send( - node, - Message::Decryption(DecryptionMessage::DecryptionConsensusMessage( - DecryptionConsensusMessage { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - origin: None, - message: ConsensusMessage::ConfirmConsensusInitialization( - ConfirmConsensusInitialization { - is_confirmed: response, - }, - ), - }, - )), - ) - } -} - -impl JobTransport for DecryptionJobTransport { - type PartialJobRequest = PartialDecryptionRequest; - type PartialJobResponse = PartialDecryptionResponse; - - fn send_partial_request( - &self, - node: &NodeId, - request: PartialDecryptionRequest, - ) -> Result<(), Error> { - if !self.is_broadcast_transport { - self.cluster.send( - node, - Message::Decryption(DecryptionMessage::RequestPartialDecryption( - RequestPartialDecryption { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - request_id: request.id.into(), - is_shadow_decryption: request.is_shadow_decryption, - is_broadcast_session: request.is_broadcast_session, - nodes: request - .other_nodes_ids - .into_iter() - .map(Into::into) - .collect(), - }, - )), - )?; - } - - Ok(()) - } - - fn send_partial_response( - &self, - node: &NodeId, - response: PartialDecryptionResponse, - ) -> Result<(), Error> { - if !self.is_broadcast_transport || *node != self.master_node_id { - self.cluster.send( - node, - Message::Decryption(DecryptionMessage::PartialDecryption(PartialDecryption { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - request_id: response.request_id.into(), - shadow_point: response.shadow_point.into(), - decrypt_shadow: response.decrypt_shadow, - })), - )?; - } - - Ok(()) - } -} - -#[cfg(test)] -pub fn create_default_decryption_session() -> Arc { - use acl_storage::DummyAclStorage; - use key_server_cluster::cluster::tests::DummyCluster; - - Arc::new( - SessionImpl::new( - SessionParams { - meta: SessionMeta { - id: Default::default(), - self_node_id: Default::default(), - master_node_id: Default::default(), - threshold: 0, - configured_nodes_count: 0, - connected_nodes_count: 0, - }, - access_key: Secret::zero(), - key_share: Default::default(), - acl_storage: Arc::new(DummyAclStorage::default()), - cluster: Arc::new(DummyCluster::new(Default::default())), - nonce: 0, - }, - Some(Requester::Public(2.into())), - ) - .unwrap(), - ) -} - -#[cfg(test)] -mod tests { - use acl_storage::DummyAclStorage; - use ethkey::{self, public_to_address, Generator, KeyPair, Public, 
Random, Secret}; - use key_server_cluster::{ - cluster::tests::DummyCluster, - cluster_sessions::ClusterSession, - decryption_session::{SessionImpl, SessionParams}, - jobs::consensus_session::ConsensusSessionState, - math, - message::{self, DecryptionMessage, Message}, - DocumentKeyShare, DocumentKeyShareVersion, EncryptedDocumentKeyShadow, Error, NodeId, - Requester, SessionId, SessionMeta, - }; - use std::{ - collections::{BTreeMap, VecDeque}, - sync::Arc, - }; - - const SECRET_PLAIN: &'static str = "d2b57ae7619e070af0af6bc8c703c0cd27814c54d5d6a999cacac0da34ede279ca0d9216e85991029e54e2f0c92ee0bd30237725fa765cbdbfc4529489864c5f"; - - fn prepare_decryption_sessions() -> ( - KeyPair, - Vec>, - Vec>, - Vec, - ) { - // prepare encrypted data + cluster configuration for scheme 4-of-5 - let session_id = SessionId::default(); - let access_key = Random.generate().unwrap().secret().clone(); - let secret_shares: Vec = vec![ - "834cb736f02d9c968dfaf0c37658a1d86ff140554fc8b59c9fdad5a8cf810eec" - .parse() - .unwrap(), - "5a3c1d90fafafa66bb808bcc464354a98b05e6b2c95b5f609d4511cdd1b17a0b" - .parse() - .unwrap(), - "71bf61e7848e08e3a8486c308ce521bdacfebcf9116a0151447eb301f3a2d0e9" - .parse() - .unwrap(), - "80c0e5e2bea66fa9b2e07f7ce09630a9563e8242446d5ee63221feb09c4338f4" - .parse() - .unwrap(), - "c06546b5669877ba579ca437a5602e89425c53808c708d44ccd6afcaa4610fad" - .parse() - .unwrap(), - ]; - let id_numbers: Vec<(NodeId, Secret)> = vec![ - ("b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".into(), - "281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse().unwrap()), - ("1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb".into(), - "00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b".parse().unwrap()), - ("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".into(), - "f43ac0fba42a5b6ed95707d2244659e89ba877b1c9b82c0d0a9dcf834e80fc62".parse().unwrap()), - ("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".into(), - "5a324938dfb2516800487d25ab7289ba8ec38811f77c3df602e4e65e3c9acd9f".parse().unwrap()), - ("321977760d1d8e15b047a309e4c7fe6f355c10bb5a06c68472b676926427f69f229024fa2692c10da167d14cdc77eb95d0fce68af0a0f704f0d3db36baa83bb2".into(), - "12cf422d50002d04e52bd4906fd7f5f235f051ca36abfe37e061f8da248008d8".parse().unwrap()), - ]; - let common_point: Public = "6962be696e1bcbba8e64cc7fddf140f854835354b5804f3bb95ae5a2799130371b589a131bd39699ac7174ccb35fc4342dab05331202209582fc8f3a40916ab0".into(); - let encrypted_point: Public = "b07031982bde9890e12eff154765f03c56c3ab646ad47431db5dd2d742a9297679c4c65b998557f8008469afd0c43d40b6c5f6c6a1c7354875da4115237ed87a".into(); - let encrypted_datas: Vec<_> = (0..5) - .map(|i| DocumentKeyShare { - author: Default::default(), - threshold: 3, - public: Default::default(), - common_point: Some(common_point.clone()), - encrypted_point: Some(encrypted_point.clone()), - versions: vec![DocumentKeyShareVersion { - hash: Default::default(), - id_numbers: id_numbers.clone().into_iter().collect(), - secret_share: secret_shares[i].clone(), - }], - }) - .collect(); - let acl_storages: Vec<_> = (0..5) - .map(|_| Arc::new(DummyAclStorage::default())) - .collect(); - let clusters: Vec<_> = (0..5) - .map(|i| { - let cluster = 
Arc::new(DummyCluster::new( - id_numbers.iter().nth(i).clone().unwrap().0, - )); - for id_number in &id_numbers { - cluster.add_node(id_number.0.clone()); - } - cluster - }) - .collect(); - let requester = Random.generate().unwrap(); - let signature = Some(ethkey::sign(requester.secret(), &SessionId::default()).unwrap()); - let sessions: Vec<_> = (0..5) - .map(|i| { - SessionImpl::new( - SessionParams { - meta: SessionMeta { - id: session_id.clone(), - self_node_id: id_numbers.iter().nth(i).clone().unwrap().0, - master_node_id: id_numbers.iter().nth(0).clone().unwrap().0, - threshold: encrypted_datas[i].threshold, - configured_nodes_count: 5, - connected_nodes_count: 5, - }, - access_key: access_key.clone(), - key_share: Some(encrypted_datas[i].clone()), - acl_storage: acl_storages[i].clone(), - cluster: clusters[i].clone(), - nonce: 0, - }, - if i == 0 { - signature.clone().map(Into::into) - } else { - None - }, - ) - .unwrap() - }) - .collect(); - - (requester, clusters, acl_storages, sessions) - } - - fn do_messages_exchange( - clusters: &[Arc], - sessions: &[SessionImpl], - ) -> Result<(), Error> { - do_messages_exchange_until(clusters, sessions, |_, _, _| false) - } - - fn do_messages_exchange_until( - clusters: &[Arc], - sessions: &[SessionImpl], - mut cond: F, - ) -> Result<(), Error> - where - F: FnMut(&NodeId, &NodeId, &Message) -> bool, - { - let mut queue: VecDeque<(NodeId, NodeId, Message)> = VecDeque::new(); - while let Some((mut from, mut to, mut message)) = clusters - .iter() - .filter_map(|c| c.take_message().map(|(to, msg)| (c.node(), to, msg))) - .next() - { - if cond(&from, &to, &message) { - break; - } - - let mut is_queued_message = false; - loop { - let session = &sessions[sessions.iter().position(|s| s.node() == &to).unwrap()]; - match session.on_message(&from, &message) { - Ok(_) => { - if let Some(qmessage) = queue.pop_front() { - from = qmessage.0; - to = qmessage.1; - message = qmessage.2; - is_queued_message = true; - continue; - } - break; - } - Err(Error::TooEarlyForRequest) => { - if is_queued_message { - queue.push_front((from, to, message)); - } else { - queue.push_back((from, to, message)); - } - break; - } - Err(err) => return Err(err), - } - } - } - - Ok(()) - } - - #[test] - fn constructs_in_cluster_of_single_node() { - let mut nodes = BTreeMap::new(); - let self_node_id = Random.generate().unwrap().public().clone(); - nodes.insert(self_node_id, Random.generate().unwrap().secret().clone()); - match SessionImpl::new( - SessionParams { - meta: SessionMeta { - id: SessionId::default(), - self_node_id: self_node_id.clone(), - master_node_id: self_node_id.clone(), - threshold: 0, - configured_nodes_count: 1, - connected_nodes_count: 1, - }, - access_key: Random.generate().unwrap().secret().clone(), - key_share: Some(DocumentKeyShare { - author: Default::default(), - threshold: 0, - public: Default::default(), - common_point: Some(Random.generate().unwrap().public().clone()), - encrypted_point: Some(Random.generate().unwrap().public().clone()), - versions: vec![DocumentKeyShareVersion { - hash: Default::default(), - id_numbers: nodes, - secret_share: Random.generate().unwrap().secret().clone(), - }], - }), - acl_storage: Arc::new(DummyAclStorage::default()), - cluster: Arc::new(DummyCluster::new(self_node_id.clone())), - nonce: 0, - }, - Some(Requester::Signature( - ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), - )), - ) { - Ok(_) => (), - _ => panic!("unexpected"), - } - } - - #[test] - fn 
fails_to_initialize_if_does_not_have_a_share() { - let self_node_id = Random.generate().unwrap().public().clone(); - let session = SessionImpl::new( - SessionParams { - meta: SessionMeta { - id: SessionId::default(), - self_node_id: self_node_id.clone(), - master_node_id: self_node_id.clone(), - threshold: 0, - configured_nodes_count: 1, - connected_nodes_count: 1, - }, - access_key: Random.generate().unwrap().secret().clone(), - key_share: None, - acl_storage: Arc::new(DummyAclStorage::default()), - cluster: Arc::new(DummyCluster::new(self_node_id.clone())), - nonce: 0, - }, - Some(Requester::Signature( - ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), - )), - ) - .unwrap(); - assert_eq!( - session.initialize(Default::default(), Default::default(), false, false), - Err(Error::InvalidMessage) - ); - } - - #[test] - fn fails_to_initialize_if_threshold_is_wrong() { - let mut nodes = BTreeMap::new(); - let self_node_id = Random.generate().unwrap().public().clone(); - nodes.insert( - self_node_id.clone(), - Random.generate().unwrap().secret().clone(), - ); - nodes.insert( - Random.generate().unwrap().public().clone(), - Random.generate().unwrap().secret().clone(), - ); - let session = SessionImpl::new( - SessionParams { - meta: SessionMeta { - id: SessionId::default(), - self_node_id: self_node_id.clone(), - master_node_id: self_node_id.clone(), - threshold: 2, - configured_nodes_count: 1, - connected_nodes_count: 1, - }, - access_key: Random.generate().unwrap().secret().clone(), - key_share: Some(DocumentKeyShare { - author: Default::default(), - threshold: 2, - public: Default::default(), - common_point: Some(Random.generate().unwrap().public().clone()), - encrypted_point: Some(Random.generate().unwrap().public().clone()), - versions: vec![DocumentKeyShareVersion { - hash: Default::default(), - id_numbers: nodes, - secret_share: Random.generate().unwrap().secret().clone(), - }], - }), - acl_storage: Arc::new(DummyAclStorage::default()), - cluster: Arc::new(DummyCluster::new(self_node_id.clone())), - nonce: 0, - }, - Some(Requester::Signature( - ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), - )), - ) - .unwrap(); - assert_eq!( - session.initialize(Default::default(), Default::default(), false, false), - Err(Error::ConsensusUnreachable) - ); - } - - #[test] - fn fails_to_initialize_when_already_initialized() { - let (_, _, _, sessions) = prepare_decryption_sessions(); - assert_eq!( - sessions[0] - .initialize(Default::default(), Default::default(), false, false) - .unwrap(), - () - ); - assert_eq!( - sessions[0] - .initialize(Default::default(), Default::default(), false, false) - .unwrap_err(), - Error::InvalidStateForRequest - ); - } - - #[test] - fn fails_to_accept_initialization_when_already_initialized() { - let (_, _, _, sessions) = prepare_decryption_sessions(); - assert_eq!( - sessions[0] - .initialize(Default::default(), Default::default(), false, false) - .unwrap(), - () - ); - assert_eq!( - sessions[0] - .on_consensus_message( - sessions[1].node(), - &message::DecryptionConsensusMessage { - session: SessionId::default().into(), - sub_session: sessions[0].access_key().clone().into(), - session_nonce: 0, - origin: None, - message: message::ConsensusMessage::InitializeConsensusSession( - message::InitializeConsensusSession { - requester: Requester::Signature( - ethkey::sign( - Random.generate().unwrap().secret(), - &SessionId::default() - ) - .unwrap() - ) - .into(), - version: Default::default(), - } - ), - } - ) 
- .unwrap_err(), - Error::InvalidMessage - ); - } - - #[test] - fn fails_to_partial_decrypt_if_requested_by_slave() { - let (_, _, _, sessions) = prepare_decryption_sessions(); - assert_eq!( - sessions[1] - .on_consensus_message( - sessions[0].node(), - &message::DecryptionConsensusMessage { - session: SessionId::default().into(), - sub_session: sessions[0].access_key().clone().into(), - session_nonce: 0, - origin: None, - message: message::ConsensusMessage::InitializeConsensusSession( - message::InitializeConsensusSession { - requester: Requester::Signature( - ethkey::sign( - Random.generate().unwrap().secret(), - &SessionId::default() - ) - .unwrap() - ) - .into(), - version: Default::default(), - } - ), - } - ) - .unwrap(), - () - ); - assert_eq!( - sessions[1] - .on_partial_decryption_requested( - sessions[2].node(), - &message::RequestPartialDecryption { - session: SessionId::default().into(), - sub_session: sessions[0].access_key().clone().into(), - session_nonce: 0, - request_id: Random.generate().unwrap().secret().clone().into(), - is_shadow_decryption: false, - is_broadcast_session: false, - nodes: sessions - .iter() - .map(|s| s.node().clone().into()) - .take(4) - .collect(), - } - ) - .unwrap_err(), - Error::InvalidMessage - ); - } - - #[test] - fn fails_to_partial_decrypt_if_wrong_number_of_nodes_participating() { - let (_, _, _, sessions) = prepare_decryption_sessions(); - assert_eq!( - sessions[1] - .on_consensus_message( - sessions[0].node(), - &message::DecryptionConsensusMessage { - session: SessionId::default().into(), - sub_session: sessions[0].access_key().clone().into(), - session_nonce: 0, - origin: None, - message: message::ConsensusMessage::InitializeConsensusSession( - message::InitializeConsensusSession { - requester: Requester::Signature( - ethkey::sign( - Random.generate().unwrap().secret(), - &SessionId::default() - ) - .unwrap() - ) - .into(), - version: Default::default(), - } - ), - } - ) - .unwrap(), - () - ); - assert_eq!( - sessions[1] - .on_partial_decryption_requested( - sessions[0].node(), - &message::RequestPartialDecryption { - session: SessionId::default().into(), - sub_session: sessions[0].access_key().clone().into(), - session_nonce: 0, - request_id: Random.generate().unwrap().secret().clone().into(), - is_shadow_decryption: false, - is_broadcast_session: false, - nodes: sessions - .iter() - .map(|s| s.node().clone().into()) - .take(2) - .collect(), - } - ) - .unwrap_err(), - Error::InvalidMessage - ); - } - - #[test] - fn fails_to_accept_partial_decrypt_if_not_waiting() { - let (_, _, _, sessions) = prepare_decryption_sessions(); - assert_eq!( - sessions[0] - .on_partial_decryption( - sessions[1].node(), - &message::PartialDecryption { - session: SessionId::default().into(), - sub_session: sessions[0].access_key().clone().into(), - session_nonce: 0, - request_id: Random.generate().unwrap().secret().clone().into(), - shadow_point: Random.generate().unwrap().public().clone().into(), - decrypt_shadow: None, - } - ) - .unwrap_err(), - Error::InvalidStateForRequest - ); - } - - #[test] - fn fails_to_accept_partial_decrypt_twice() { - let (_, clusters, _, sessions) = prepare_decryption_sessions(); - sessions[0] - .initialize(Default::default(), Default::default(), false, false) - .unwrap(); - - let mut pd_from = None; - let mut pd_msg = None; - do_messages_exchange_until(&clusters, &sessions, |from, _, msg| match msg { - &Message::Decryption(DecryptionMessage::PartialDecryption(ref msg)) => { - pd_from = Some(from.clone()); - pd_msg = 
Some(msg.clone()); - true - } - _ => false, - }) - .unwrap(); - - assert_eq!( - sessions[0] - .on_partial_decryption(pd_from.as_ref().unwrap(), &pd_msg.clone().unwrap()) - .unwrap(), - () - ); - assert_eq!( - sessions[0] - .on_partial_decryption(pd_from.as_ref().unwrap(), &pd_msg.unwrap()) - .unwrap_err(), - Error::InvalidNodeForRequest - ); - } - - #[test] - fn decryption_fails_on_session_timeout() { - let (_, _, _, sessions) = prepare_decryption_sessions(); - assert!(sessions[0].decrypted_secret().is_none()); - sessions[0].on_session_timeout(); - assert_eq!( - sessions[0].decrypted_secret().unwrap().unwrap_err(), - Error::ConsensusTemporaryUnreachable - ); - } - - #[test] - fn node_is_marked_rejected_when_timed_out_during_initialization_confirmation() { - let (_, _, _, sessions) = prepare_decryption_sessions(); - sessions[0] - .initialize(Default::default(), Default::default(), false, false) - .unwrap(); - - // 1 node disconnects => we still can recover secret - sessions[0].on_node_timeout(sessions[1].node()); - assert!(sessions[0] - .data - .lock() - .consensus_session - .consensus_job() - .rejects() - .contains_key(sessions[1].node())); - assert!(sessions[0].state() == ConsensusSessionState::EstablishingConsensus); - - // 2 node are disconnected => we can not recover secret - sessions[0].on_node_timeout(sessions[2].node()); - assert!(sessions[0].state() == ConsensusSessionState::Failed); - } - - #[test] - fn session_does_not_fail_if_rejected_node_disconnects() { - let (_, clusters, acl_storages, sessions) = prepare_decryption_sessions(); - let key_pair = Random.generate().unwrap(); - - acl_storages[1].prohibit(public_to_address(key_pair.public()), SessionId::default()); - sessions[0] - .initialize(Default::default(), Default::default(), false, false) - .unwrap(); - - do_messages_exchange_until(&clusters, &sessions, |_, _, _| { - sessions[0].state() == ConsensusSessionState::WaitingForPartialResults - }) - .unwrap(); - - // 1st node disconnects => ignore this - sessions[0].on_node_timeout(sessions[1].node()); - assert_eq!( - sessions[0].state(), - ConsensusSessionState::EstablishingConsensus - ); - } - - #[test] - fn session_does_not_fail_if_requested_node_disconnects() { - let (_, clusters, _, sessions) = prepare_decryption_sessions(); - sessions[0] - .initialize(Default::default(), Default::default(), false, false) - .unwrap(); - - do_messages_exchange_until(&clusters, &sessions, |_, _, _| { - sessions[0].state() == ConsensusSessionState::WaitingForPartialResults - }) - .unwrap(); - - // 1 node disconnects => we still can recover secret - sessions[0].on_node_timeout(sessions[1].node()); - assert!(sessions[0].state() == ConsensusSessionState::EstablishingConsensus); - - // 2 node are disconnected => we can not recover secret - sessions[0].on_node_timeout(sessions[2].node()); - assert!(sessions[0].state() == ConsensusSessionState::Failed); - } - - #[test] - fn session_does_not_fail_if_node_with_shadow_point_disconnects() { - let (_, clusters, _, sessions) = prepare_decryption_sessions(); - sessions[0] - .initialize(Default::default(), Default::default(), false, false) - .unwrap(); - - do_messages_exchange_until(&clusters, &sessions, |_, _, _| { - sessions[0].state() == ConsensusSessionState::WaitingForPartialResults - && sessions[0] - .data - .lock() - .consensus_session - .computation_job() - .responses() - .len() - == 2 - }) - .unwrap(); - - // disconnects from the node which has already sent us its own shadow point - let disconnected = sessions[0] - .data - .lock() - 
.consensus_session - .computation_job() - .responses() - .keys() - .filter(|n| *n != sessions[0].node()) - .cloned() - .nth(0) - .unwrap(); - sessions[0].on_node_timeout(&disconnected); - assert_eq!( - sessions[0].state(), - ConsensusSessionState::EstablishingConsensus - ); - } - - #[test] - fn session_restarts_if_confirmed_node_disconnects() { - let (_, clusters, _, sessions) = prepare_decryption_sessions(); - sessions[0] - .initialize(Default::default(), Default::default(), false, false) - .unwrap(); - - do_messages_exchange_until(&clusters, &sessions, |_, _, _| { - sessions[0].state() == ConsensusSessionState::WaitingForPartialResults - }) - .unwrap(); - - // disconnects from the node which has already confirmed its participation - let disconnected = sessions[0] - .data - .lock() - .consensus_session - .computation_job() - .requests() - .iter() - .cloned() - .nth(0) - .unwrap(); - sessions[0].on_node_timeout(&disconnected); - assert_eq!( - sessions[0].state(), - ConsensusSessionState::EstablishingConsensus - ); - assert!(sessions[0] - .data - .lock() - .consensus_session - .computation_job() - .rejects() - .contains_key(&disconnected)); - assert!(!sessions[0] - .data - .lock() - .consensus_session - .computation_job() - .requests() - .contains(&disconnected)); - } - - #[test] - fn session_does_not_fail_if_non_master_node_disconnects_from_non_master_node() { - let (_, clusters, _, sessions) = prepare_decryption_sessions(); - sessions[0] - .initialize(Default::default(), Default::default(), false, false) - .unwrap(); - - do_messages_exchange_until(&clusters, &sessions, |_, _, _| { - sessions[0].state() == ConsensusSessionState::WaitingForPartialResults - }) - .unwrap(); - - // disconnects from the node which has already confirmed its participation - sessions[1].on_node_timeout(sessions[2].node()); - assert!(sessions[0].state() == ConsensusSessionState::WaitingForPartialResults); - assert!(sessions[1].state() == ConsensusSessionState::ConsensusEstablished); - } - - #[test] - fn complete_dec_session() { - let (_, clusters, _, sessions) = prepare_decryption_sessions(); - - // now let's try to do a decryption - sessions[0] - .initialize(Default::default(), Default::default(), false, false) - .unwrap(); - - do_messages_exchange(&clusters, &sessions).unwrap(); - - // now check that: - // 1) 5 of 5 sessions are in Finished state - assert_eq!( - sessions - .iter() - .filter(|s| s.state() == ConsensusSessionState::Finished) - .count(), - 5 - ); - // 2) 1 session has decrypted key value - assert!(sessions - .iter() - .skip(1) - .all(|s| s.decrypted_secret().is_none())); - - assert_eq!( - sessions[0].decrypted_secret().unwrap().unwrap(), - EncryptedDocumentKeyShadow { - decrypted_secret: SECRET_PLAIN.into(), - common_point: None, - decrypt_shadows: None, - } - ); - } - - #[test] - fn complete_shadow_dec_session() { - let (key_pair, clusters, _, sessions) = prepare_decryption_sessions(); - - // now let's try to do a decryption - sessions[0] - .initialize(Default::default(), Default::default(), true, false) - .unwrap(); - - do_messages_exchange(&clusters, &sessions).unwrap(); - - // now check that: - // 1) 5 of 5 sessions are in Finished state - assert_eq!( - sessions - .iter() - .filter(|s| s.state() == ConsensusSessionState::Finished) - .count(), - 5 - ); - // 2) 1 session has decrypted key value - assert!(sessions - .iter() - .skip(1) - .all(|s| s.decrypted_secret().is_none())); - - let decrypted_secret = sessions[0].decrypted_secret().unwrap().unwrap(); - // check that decrypted_secret != 
SECRET_PLAIN - assert!(decrypted_secret.decrypted_secret != SECRET_PLAIN.into()); - // check that common point && shadow coefficients are returned - assert!(decrypted_secret.common_point.is_some()); - assert!(decrypted_secret.decrypt_shadows.is_some()); - // check that KS client is able to restore original secret - use crypto::DEFAULT_MAC; - use ethkey::crypto::ecies::decrypt; - let decrypt_shadows: Vec<_> = decrypted_secret - .decrypt_shadows - .unwrap() - .into_iter() - .map(|c| { - Secret::from_slice(&decrypt(key_pair.secret(), &DEFAULT_MAC, &c).unwrap()).unwrap() - }) - .collect(); - let decrypted_secret = math::decrypt_with_shadow_coefficients( - decrypted_secret.decrypted_secret, - decrypted_secret.common_point.unwrap(), - decrypt_shadows, - ) - .unwrap(); - assert_eq!(decrypted_secret, SECRET_PLAIN.into()); - } - - #[test] - fn failed_dec_session() { - let (key_pair, clusters, acl_storages, sessions) = prepare_decryption_sessions(); - - // now let's try to do a decryption - sessions[0] - .initialize(Default::default(), Default::default(), false, false) - .unwrap(); - - // we need 4 out of 5 nodes to agree to do a decryption - // let's say that 2 of these nodes are disagree - acl_storages[1].prohibit(public_to_address(key_pair.public()), SessionId::default()); - acl_storages[2].prohibit(public_to_address(key_pair.public()), SessionId::default()); - - assert_eq!( - do_messages_exchange(&clusters, &sessions).unwrap_err(), - Error::ConsensusUnreachable - ); - - // check that 3 nodes have failed state - assert_eq!(sessions[0].state(), ConsensusSessionState::Failed); - assert_eq!( - sessions - .iter() - .filter(|s| s.state() == ConsensusSessionState::Failed) - .count(), - 3 - ); - } - - #[test] - fn complete_dec_session_with_acl_check_failed_on_master() { - let (key_pair, clusters, acl_storages, sessions) = prepare_decryption_sessions(); - - // we need 4 out of 5 nodes to agree to do a decryption - // let's say that 1 of these nodes (master) is disagree - acl_storages[0].prohibit(public_to_address(key_pair.public()), SessionId::default()); - - // now let's try to do a decryption - sessions[0] - .initialize(Default::default(), Default::default(), false, false) - .unwrap(); - - do_messages_exchange(&clusters, &sessions).unwrap(); - - // now check that: - // 1) 4 of 5 sessions are in Finished state - assert_eq!( - sessions - .iter() - .filter(|s| s.state() == ConsensusSessionState::Finished) - .count(), - 5 - ); - // 2) 1 session has decrypted key value - assert!(sessions - .iter() - .skip(1) - .all(|s| s.decrypted_secret().is_none())); - assert_eq!( - sessions[0].decrypted_secret().unwrap().unwrap(), - EncryptedDocumentKeyShadow { - decrypted_secret: SECRET_PLAIN.into(), - common_point: None, - decrypt_shadows: None, - } - ); - } - - #[test] - fn decryption_message_fails_when_nonce_is_wrong() { - let (_, _, _, sessions) = prepare_decryption_sessions(); - assert_eq!( - sessions[1].process_message( - sessions[0].node(), - &message::DecryptionMessage::DecryptionSessionCompleted( - message::DecryptionSessionCompleted { - session: SessionId::default().into(), - sub_session: sessions[0].access_key().clone().into(), - session_nonce: 10, - } - ) - ), - Err(Error::ReplayProtection) - ); - } - - #[test] - fn decryption_works_when_delegated_to_other_node() { - let (_, clusters, _, mut sessions) = prepare_decryption_sessions(); - - // let's say node1 doesn't have a share && delegates decryption request to node0 - // initially session is created on node1 => node1 is master for itself, but for other 
nodes node0 is still master - sessions[1].core.meta.master_node_id = sessions[1].core.meta.self_node_id.clone(); - sessions[1] - .data - .lock() - .consensus_session - .consensus_job_mut() - .executor_mut() - .set_requester( - sessions[0] - .data - .lock() - .consensus_session - .consensus_job() - .executor() - .requester() - .unwrap() - .clone(), - ); - - // now let's try to do a decryption - sessions[1] - .delegate( - sessions[0].core.meta.self_node_id.clone(), - Default::default(), - Default::default(), - false, - false, - ) - .unwrap(); - do_messages_exchange(&clusters, &sessions).unwrap(); - - // now check that: - // 1) 4 of 5 sessions are in Finished state - assert_eq!( - sessions - .iter() - .filter(|s| s.state() == ConsensusSessionState::Finished) - .count(), - 4 - ); - // 2) 1 session has decrypted key value - assert_eq!( - sessions[1].decrypted_secret().unwrap().unwrap(), - EncryptedDocumentKeyShadow { - decrypted_secret: SECRET_PLAIN.into(), - common_point: None, - decrypt_shadows: None, - } - ); - } - - #[test] - fn decryption_works_when_share_owners_are_isolated() { - let (_, clusters, _, sessions) = prepare_decryption_sessions(); - - // we need 4 out of 5 nodes to agree to do a decryption - // let's say that 1 of these nodes (master) is isolated - let isolated_node_id = sessions[4].core.meta.self_node_id.clone(); - for cluster in &clusters { - cluster.remove_node(&isolated_node_id); - } - - // now let's try to do a decryption - sessions[0] - .initialize(Default::default(), Default::default(), false, false) - .unwrap(); - do_messages_exchange(&clusters, &sessions).unwrap(); - - assert_eq!( - sessions[0].decrypted_secret().unwrap().unwrap(), - EncryptedDocumentKeyShadow { - decrypted_secret: SECRET_PLAIN.into(), - common_point: None, - decrypt_shadows: None, - } - ); - } - - #[test] - fn decryption_result_restored_on_all_nodes_if_broadcast_session_is_completed() { - let (_, clusters, _, sessions) = prepare_decryption_sessions(); - sessions[0] - .initialize(Default::default(), Default::default(), false, true) - .unwrap(); - do_messages_exchange(&clusters, &sessions).unwrap(); - - // decryption result must be the same and available on 4 nodes - let result = sessions[0].decrypted_secret(); - assert!(result.clone().unwrap().is_ok()); - assert_eq!( - result.clone().unwrap().unwrap(), - EncryptedDocumentKeyShadow { - decrypted_secret: SECRET_PLAIN.into(), - common_point: None, - decrypt_shadows: None, - } - ); - assert_eq!( - 3, - sessions - .iter() - .skip(1) - .filter(|s| s.decrypted_secret() == result) - .count() - ); - assert_eq!( - 1, - sessions - .iter() - .skip(1) - .filter(|s| s.decrypted_secret().is_none()) - .count() - ); - } - - #[test] - fn decryption_shadows_restored_on_all_nodes_if_shadow_broadcast_session_is_completed() { - let (key_pair, clusters, _, sessions) = prepare_decryption_sessions(); - sessions[0] - .initialize(Default::default(), Default::default(), true, true) - .unwrap(); - do_messages_exchange(&clusters, &sessions).unwrap(); - - // decryption shadows must be the same and available on 4 nodes - let broadcast_shadows = sessions[0].broadcast_shadows(); - assert!(broadcast_shadows.is_some()); - assert_eq!( - 3, - sessions - .iter() - .skip(1) - .filter(|s| s.broadcast_shadows() == broadcast_shadows) - .count() - ); - assert_eq!( - 1, - sessions - .iter() - .skip(1) - .filter(|s| s.broadcast_shadows().is_none()) - .count() - ); - - // 4 nodes must be able to recover original secret - use crypto::DEFAULT_MAC; - use ethkey::crypto::ecies::decrypt; - let result = 
sessions[0].decrypted_secret().unwrap().unwrap(); - assert_eq!( - 3, - sessions - .iter() - .skip(1) - .filter(|s| s.decrypted_secret() == Some(Ok(result.clone()))) - .count() - ); - let decrypt_shadows: Vec<_> = result - .decrypt_shadows - .unwrap() - .into_iter() - .map(|c| { - Secret::from_slice(&decrypt(key_pair.secret(), &DEFAULT_MAC, &c).unwrap()).unwrap() - }) - .collect(); - let decrypted_secret = math::decrypt_with_shadow_coefficients( - result.decrypted_secret, - result.common_point.unwrap(), - decrypt_shadows, - ) - .unwrap(); - assert_eq!(decrypted_secret, SECRET_PLAIN.into()); - } - - #[test] - fn decryption_session_origin_is_known_to_all_initialized_nodes() { - let (_, clusters, _, sessions) = prepare_decryption_sessions(); - sessions[0] - .initialize(Some(1.into()), Default::default(), true, true) - .unwrap(); - do_messages_exchange(&clusters, &sessions).unwrap(); - - // all session must have origin set - assert_eq!( - 5, - sessions - .iter() - .filter(|s| s.origin() == Some(1.into())) - .count() - ); - } -} diff --git a/secret-store/src/key_server_cluster/client_sessions/encryption_session.rs b/secret-store/src/key_server_cluster/client_sessions/encryption_session.rs deleted file mode 100644 index 09176fd0c..000000000 --- a/secret-store/src/key_server_cluster/client_sessions/encryption_session.rs +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. - -// OpenEthereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// OpenEthereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with OpenEthereum. If not, see . - -use ethereum_types::Address; -use ethkey::Public; -use key_server_cluster::{ - cluster::Cluster, - cluster_sessions::ClusterSession, - message::{ - ConfirmEncryptionInitialization, EncryptionMessage, EncryptionSessionError, - InitializeEncryptionSession, Message, - }, - DocumentKeyShare, Error, KeyStorage, NodeId, Requester, ServerKeyId, SessionId, -}; -use parking_lot::{Condvar, Mutex}; -use std::{ - collections::BTreeMap, - fmt::{Debug, Error as FmtError, Formatter}, - sync::Arc, - time, -}; - -/// Encryption (distributed key generation) session. -/// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper: -/// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.124.4128&rep=rep1&type=pdf -/// Brief overview: -/// 1) initialization: master node (which has received request for storing the secret) initializes the session on all other nodes -/// 2) master node sends common_point + encrypted_point to all other nodes -/// 3) common_point + encrypted_point are saved on all nodes -/// 4) in case of error, previous values are restored -pub struct SessionImpl { - /// Unique session id. - id: SessionId, - /// Public identifier of this node. - self_node_id: NodeId, - /// Encrypted data. - encrypted_data: Option, - /// Key storage. - key_storage: Arc, - /// Cluster which allows this node to send messages to other nodes in the cluster. - cluster: Arc, - /// Session nonce. 
- nonce: u64, - /// SessionImpl completion condvar. - completed: Condvar, - /// Mutable session data. - data: Mutex, -} - -/// SessionImpl creation parameters -pub struct SessionParams { - /// SessionImpl identifier. - pub id: SessionId, - /// Id of node, on which this session is running. - pub self_node_id: Public, - /// Encrypted data (result of running generation_session::SessionImpl). - pub encrypted_data: Option, - /// Key storage. - pub key_storage: Arc, - /// Cluster - pub cluster: Arc, - /// Session nonce. - pub nonce: u64, -} - -/// Mutable data of encryption (distributed key generation) session. -#[derive(Debug)] -struct SessionData { - /// Current state of the session. - state: SessionState, - /// Nodes-specific data. - nodes: BTreeMap, - /// Encryption session result. - result: Option>, -} - -/// Mutable node-specific data. -#[derive(Debug, Clone)] -struct NodeData { - // === Values, filled during initialization phase === - /// Flags marking that node has confirmed session initialization. - pub initialization_confirmed: bool, -} - -/// Encryption (distributed key generation) session state. -#[derive(Debug, Clone, PartialEq)] -pub enum SessionState { - // === Initialization states === - /// Every node starts in this state. - WaitingForInitialization, - /// Master node waits for every other node to confirm initialization. - WaitingForInitializationConfirm, - - // === Final states of the session === - /// Encryption data is saved. - Finished, - /// Failed to save encryption data. - Failed, -} - -impl SessionImpl { - /// Create new encryption session. - pub fn new(params: SessionParams) -> Result { - check_encrypted_data(params.encrypted_data.as_ref())?; - - Ok(SessionImpl { - id: params.id, - self_node_id: params.self_node_id, - encrypted_data: params.encrypted_data, - key_storage: params.key_storage, - cluster: params.cluster, - nonce: params.nonce, - completed: Condvar::new(), - data: Mutex::new(SessionData { - state: SessionState::WaitingForInitialization, - nodes: BTreeMap::new(), - result: None, - }), - }) - } - - /// Get this node Id. - pub fn node(&self) -> &NodeId { - &self.self_node_id - } - - /// Wait for session completion. - pub fn wait(&self, timeout: Option) -> Result<(), Error> { - Self::wait_session(&self.completed, &self.data, timeout, |data| { - data.result.clone() - }) - .expect("wait_session returns Some if called without timeout; qed") - } - - /// Start new session initialization. This must be called on master node. - pub fn initialize( - &self, - requester: Requester, - common_point: Public, - encrypted_point: Public, - ) -> Result<(), Error> { - let mut data = self.data.lock(); - - // check state - if data.state != SessionState::WaitingForInitialization { - return Err(Error::InvalidStateForRequest); - } - - // update state - data.state = SessionState::WaitingForInitializationConfirm; - data.nodes.extend(self.cluster.nodes().into_iter().map(|n| { - ( - n, - NodeData { - initialization_confirmed: &n == self.node(), - }, - ) - })); - - // TODO [Sec]: id signature is not enough here, as it was already used in key generation - // TODO [Reliability]: there could be situation when some nodes have failed to store encrypted data - // => potential problems during restore. some confirmation step is needed (2pc)? 
- // save encryption data - if let Some(encrypted_data) = self.encrypted_data.clone() { - let requester_address = requester - .address(&self.id) - .map_err(Error::InsufficientRequesterData)?; - update_encrypted_data( - &self.key_storage, - self.id.clone(), - encrypted_data, - requester_address, - common_point.clone(), - encrypted_point.clone(), - )?; - } - - // start initialization - if data.nodes.len() > 1 { - self.cluster.broadcast(Message::Encryption( - EncryptionMessage::InitializeEncryptionSession(InitializeEncryptionSession { - session: self.id.clone().into(), - session_nonce: self.nonce, - requester: requester.into(), - common_point: common_point.into(), - encrypted_point: encrypted_point.into(), - }), - )) - } else { - data.state = SessionState::Finished; - data.result = Some(Ok(())); - self.completed.notify_all(); - - Ok(()) - } - } - - /// When session initialization message is received. - pub fn on_initialize_session( - &self, - sender: NodeId, - message: &InitializeEncryptionSession, - ) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(&sender != self.node()); - - let mut data = self.data.lock(); - - // check state - if data.state != SessionState::WaitingForInitialization { - return Err(Error::InvalidStateForRequest); - } - - // check that the requester is the author of the encrypted data - if let Some(encrypted_data) = self.encrypted_data.clone() { - let requester: Requester = message.requester.clone().into(); - let requester_address = requester - .address(&self.id) - .map_err(Error::InsufficientRequesterData)?; - update_encrypted_data( - &self.key_storage, - self.id.clone(), - encrypted_data, - requester_address, - message.common_point.clone().into(), - message.encrypted_point.clone().into(), - )?; - } - - // update state - data.state = SessionState::Finished; - - // send confirmation back to master node - self.cluster.send( - &sender, - Message::Encryption(EncryptionMessage::ConfirmEncryptionInitialization( - ConfirmEncryptionInitialization { - session: self.id.clone().into(), - session_nonce: self.nonce, - }, - )), - ) - } - - /// When session initialization confirmation message is reeived. 
- pub fn on_confirm_initialization( - &self, - sender: NodeId, - message: &ConfirmEncryptionInitialization, - ) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(&sender != self.node()); - - let mut data = self.data.lock(); - debug_assert!(data.nodes.contains_key(&sender)); - - // check if all nodes have confirmed initialization - data.nodes - .get_mut(&sender) - .expect("message is received from cluster; nodes contains all cluster nodes; qed") - .initialization_confirmed = true; - if !data.nodes.values().all(|n| n.initialization_confirmed) { - return Ok(()); - } - - // update state - data.state = SessionState::Finished; - data.result = Some(Ok(())); - self.completed.notify_all(); - - Ok(()) - } -} - -impl ClusterSession for SessionImpl { - type Id = SessionId; - - fn type_name() -> &'static str { - "encryption" - } - - fn id(&self) -> SessionId { - self.id.clone() - } - - fn is_finished(&self) -> bool { - let data = self.data.lock(); - data.state == SessionState::Failed || data.state == SessionState::Finished - } - - fn on_node_timeout(&self, node: &NodeId) { - let mut data = self.data.lock(); - - warn!( - "{}: encryption session failed because {} connection has timeouted", - self.node(), - node - ); - - data.state = SessionState::Failed; - data.result = Some(Err(Error::NodeDisconnected)); - self.completed.notify_all(); - } - - fn on_session_timeout(&self) { - let mut data = self.data.lock(); - - warn!("{}: encryption session failed with timeout", self.node()); - - data.state = SessionState::Failed; - data.result = Some(Err(Error::NodeDisconnected)); - self.completed.notify_all(); - } - - fn on_session_error(&self, node: &NodeId, error: Error) { - // error in encryption session is considered fatal - // => broadcast error if error occured on this node - if *node == self.self_node_id { - // do not bother processing send error, as we already processing error - let _ = self.cluster.broadcast(Message::Encryption( - EncryptionMessage::EncryptionSessionError(EncryptionSessionError { - session: self.id.clone().into(), - session_nonce: self.nonce, - error: error.clone().into(), - }), - )); - } - - let mut data = self.data.lock(); - - warn!( - "{}: encryption session failed with error: {} from {}", - self.node(), - error, - node - ); - - data.state = SessionState::Failed; - data.result = Some(Err(error)); - self.completed.notify_all(); - } - - fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { - if Some(self.nonce) != message.session_nonce() { - return Err(Error::ReplayProtection); - } - - match message { - &Message::Encryption(ref message) => match message { - &EncryptionMessage::InitializeEncryptionSession(ref message) => { - self.on_initialize_session(sender.clone(), message) - } - &EncryptionMessage::ConfirmEncryptionInitialization(ref message) => { - self.on_confirm_initialization(sender.clone(), message) - } - &EncryptionMessage::EncryptionSessionError(ref message) => { - self.on_session_error(sender, message.error.clone()); - Ok(()) - } - }, - _ => unreachable!("cluster checks message to be correct before passing; qed"), - } - } -} - -impl Debug for SessionImpl { - fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { - write!(f, "Encryption session {} on {}", self.id, self.self_node_id) - } -} - -/// Check that common_point and encrypted point are not yet set in key share. 
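The encryption session above boils down to a small amount of bookkeeping. As a rough illustration only, the standalone sketch below walks the four steps of the overview in a single process; `NodeId`, `Point` and `NodeStore` are hypothetical stand-ins rather than SecretStore types, and no real cluster messages are exchanged.

// Illustrative sketch only: initialize, distribute common_point + encrypted_point,
// save on every node, confirm back to the master.
use std::collections::BTreeMap;

type NodeId = u64;
type Point = [u8; 32]; // stand-in for a compressed EC point

#[derive(Default)]
struct NodeStore {
    common_point: Option<Point>,
    encrypted_point: Option<Point>,
}

fn main() {
    // step 1: the master (node 0) initializes the session on all other nodes
    let mut nodes: BTreeMap<NodeId, NodeStore> =
        (0u64..4).map(|id| (id, NodeStore::default())).collect();
    let (common_point, encrypted_point) = ([1u8; 32], [2u8; 32]);

    // steps 2 + 3: the master sends common_point + encrypted_point, every node saves them
    let mut confirmations = 0usize;
    for (id, store) in nodes.iter_mut() {
        store.common_point = Some(common_point);
        store.encrypted_point = Some(encrypted_point);
        if *id != 0 {
            // each slave answers with a ConfirmEncryptionInitialization-style ack
            confirmations += 1;
        }
    }

    // the master finishes once every other node has confirmed
    // (step 4 of the overview would instead roll back the stored values on error)
    assert_eq!(confirmations, nodes.len() - 1);
    println!("document key stored on {} nodes", nodes.len());
}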
-pub fn check_encrypted_data(key_share: Option<&DocumentKeyShare>) -> Result<(), Error> { - if let Some(key_share) = key_share { - // check that common_point and encrypted_point are still not set yet - if key_share.common_point.is_some() || key_share.encrypted_point.is_some() { - return Err(Error::DocumentKeyAlreadyStored); - } - } - - Ok(()) -} - -/// Update key share with encrypted document key. -pub fn update_encrypted_data( - key_storage: &Arc, - key_id: ServerKeyId, - mut key_share: DocumentKeyShare, - author: Address, - common_point: Public, - encrypted_point: Public, -) -> Result<(), Error> { - // author must be the same - if key_share.author != author { - return Err(Error::AccessDenied); - } - - // save encryption data - key_share.common_point = Some(common_point); - key_share.encrypted_point = Some(encrypted_point); - key_storage.update(key_id, key_share) -} diff --git a/secret-store/src/key_server_cluster/client_sessions/generation_session.rs b/secret-store/src/key_server_cluster/client_sessions/generation_session.rs deleted file mode 100644 index aebba1616..000000000 --- a/secret-store/src/key_server_cluster/client_sessions/generation_session.rs +++ /dev/null @@ -1,1571 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. - -// OpenEthereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// OpenEthereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with OpenEthereum. If not, see . - -use ethereum_types::Address; -use ethkey::{Public, Secret}; -use key_server_cluster::{ - cluster::Cluster, - cluster_sessions::ClusterSession, - math, - message::{ - CompleteInitialization, ConfirmInitialization, GenerationMessage, InitializeSession, - KeysDissemination, Message, PublicKeyShare, SessionCompleted, SessionError, - }, - DocumentKeyShare, DocumentKeyShareVersion, Error, KeyStorage, NodeId, SessionId, -}; -use parking_lot::{Condvar, Mutex}; -use std::{ - collections::{BTreeMap, BTreeSet, VecDeque}, - fmt::{Debug, Error as FmtError, Formatter}, - sync::Arc, - time::Duration, -}; - -/// Distributed key generation session. -/// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper: -/// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.124.4128&rep=rep1&type=pdf -/// Brief overview: -/// 1) initialization: master node (which has received request for generating joint public + secret) initializes the session on all other nodes -/// 2) key dissemination (KD): all nodes are generating secret + public values and send these to appropriate nodes -/// 3) key verification (KV): all nodes are checking values, received for other nodes -/// 4) key generation phase (KG): nodes are exchanging with information, enough to generate joint public key -pub struct SessionImpl { - /// Unique session id. - id: SessionId, - /// Public identifier of this node. - self_node_id: NodeId, - /// Key storage. - key_storage: Option>, - /// Cluster which allows this node to send messages to other nodes in the cluster. - cluster: Arc, - /// Session-level nonce. 
- nonce: u64, - /// SessionImpl completion condvar. - completed: Condvar, - /// Mutable session data. - data: Mutex, -} - -/// SessionImpl creation parameters -pub struct SessionParams { - /// SessionImpl identifier. - pub id: SessionId, - /// Id of node, on which this session is running. - pub self_node_id: Public, - /// Key storage. - pub key_storage: Option>, - /// Cluster - pub cluster: Arc, - /// Session nonce. - pub nonce: Option, -} - -/// Mutable data of distributed key generation session. -#[derive(Debug)] -struct SessionData { - /// Current state of the session. - state: SessionState, - /// Simulate faulty behaviour? - simulate_faulty_behaviour: bool, - - // === Values, filled when session initialization just starts === - /// Reference to the node, which has started this session. - master: Option, - /// Address of the creator of the session. - author: Option
<Address>, - - // === Values, filled when session initialization is completed === - /// Session origin (if any). - origin: Option<Address>
, - /// Is zero secret generation session? - is_zero: Option, - /// Threshold value for this DKG. Only `threshold + 1` will be able to collectively recreate joint secret, - /// and thus - decrypt message, encrypted with joint public. - threshold: Option, - /// Random point, jointly generated by every node in the cluster. - derived_point: Option, - /// Nodes-specific data. - nodes: BTreeMap, - - // === Values, filled during KD phase === - /// Polynom1. - polynom1: Option>, - /// Value of polynom1[0], generated by this node. - secret_coeff: Option, - - // === Values, filled during KG phase === - /// Secret share, which this node holds. Persistent + private. - secret_share: Option, - - /// === Values, filled when DKG session is completed successfully === - /// Key share. - key_share: Option>, - /// Jointly generated public key, which can be used to encrypt secret. Public. - joint_public_and_secret: Option>, -} - -/// Mutable node-specific data. -#[derive(Debug, Clone)] -struct NodeData { - /// Random unique scalar. Persistent. - pub id_number: Secret, - - // === Values, filled during KD phase === - /// Secret value1, which has been received from this node. - pub secret1: Option, - /// Secret value2, which has been received from this node. - pub secret2: Option, - /// Public values, which have been received from this node. - pub publics: Option>, - - // === Values, filled during KG phase === - /// Public share, which has been received from this node. - pub public_share: Option, - - // === Values, filled during completion phase === - /// Flags marking that node has confirmed session completion (generated key is stored). - pub completion_confirmed: bool, -} - -/// Schedule for visiting other nodes of cluster. -#[derive(Debug, Clone, PartialEq)] -pub struct EveryOtherNodeVisitor { - /// Already visited nodes. - visited: BTreeSet, - /// Not yet visited nodes. - unvisited: VecDeque, - /// Nodes, which are currently visited. - in_progress: BTreeSet, -} - -/// Distributed key generation session state. -#[derive(Debug, Clone, PartialEq)] -pub enum SessionState { - // === Initialization states === - /// Every node starts in this state. - WaitingForInitialization, - /// Master node asks every other node to confirm initialization. - /// Derived point is generated by all nodes in the cluster. - WaitingForInitializationConfirm(EveryOtherNodeVisitor), - /// Slave nodes are in this state until initialization completion is reported by master node. - WaitingForInitializationComplete, - - // === KD phase states === - /// Node is waiting for generated keys from every other node. - WaitingForKeysDissemination, - - // === KG phase states === - /// Node is waiting for joint public key share to be received from every other node. - WaitingForPublicKeyShare, - - // === Generation phase states === - /// Node is waiting for session completion/session completion confirmation. - WaitingForGenerationConfirmation, - - // === Final states of the session === - /// Joint public key generation is completed. - Finished, - /// Joint public key generation is failed. 
- Failed, -} - -pub enum InitializationNodes { - RandomNumbers(BTreeSet), - SpecificNumbers(BTreeMap), -} - -impl InitializationNodes { - pub fn set(&self) -> BTreeSet { - match *self { - InitializationNodes::RandomNumbers(ref nodes) => nodes.clone(), - InitializationNodes::SpecificNumbers(ref nodes) => nodes.keys().cloned().collect(), - } - } -} - -impl From> for InitializationNodes { - fn from(nodes: BTreeSet) -> Self { - InitializationNodes::RandomNumbers(nodes) - } -} - -impl From> for InitializationNodes { - fn from(nodes: BTreeMap) -> Self { - InitializationNodes::SpecificNumbers(nodes) - } -} - -impl SessionImpl { - /// Create new generation session. - pub fn new(params: SessionParams) -> Self { - SessionImpl { - id: params.id, - self_node_id: params.self_node_id, - key_storage: params.key_storage, - cluster: params.cluster, - // when nonce.is_nonce(), generation session is wrapped - // => nonce is checked somewhere else && we can pass any value - nonce: params.nonce.unwrap_or_default(), - completed: Condvar::new(), - data: Mutex::new(SessionData { - state: SessionState::WaitingForInitialization, - simulate_faulty_behaviour: false, - master: None, - author: None, - origin: None, - is_zero: None, - threshold: None, - derived_point: None, - nodes: BTreeMap::new(), - polynom1: None, - secret_coeff: None, - secret_share: None, - key_share: None, - joint_public_and_secret: None, - }), - } - } - - /// Get this node Id. - pub fn node(&self) -> &NodeId { - &self.self_node_id - } - - /// Get derived point. - #[cfg(test)] - pub fn derived_point(&self) -> Option { - self.data.lock().derived_point.clone() - } - - /// Simulate faulty generation session behaviour. - pub fn simulate_faulty_behaviour(&self) { - self.data.lock().simulate_faulty_behaviour = true; - } - - /// Get session state. - pub fn state(&self) -> SessionState { - self.data.lock().state.clone() - } - - /// Get session origin. - pub fn origin(&self) -> Option
{ - self.data.lock().origin.clone() - } - - /// Wait for session completion. - pub fn wait(&self, timeout: Option) -> Option> { - Self::wait_session(&self.completed, &self.data, timeout, |data| { - data.joint_public_and_secret - .clone() - .map(|r| r.map(|r| r.0.clone())) - }) - } - - /// Get generated public and secret (if any). - pub fn joint_public_and_secret(&self) -> Option> { - self.data.lock().joint_public_and_secret.clone() - } - - /// Start new session initialization. This must be called on master node. - pub fn initialize( - &self, - origin: Option
, - author: Address, - is_zero: bool, - threshold: usize, - nodes: InitializationNodes, - ) -> Result<(), Error> { - check_cluster_nodes(self.node(), &nodes.set())?; - check_threshold(threshold, &nodes.set())?; - - let mut data = self.data.lock(); - - // check state - if data.state != SessionState::WaitingForInitialization { - return Err(Error::InvalidStateForRequest); - } - - // update state - data.master = Some(self.node().clone()); - data.author = Some(author.clone()); - data.origin = origin.clone(); - data.is_zero = Some(is_zero); - data.threshold = Some(threshold); - match nodes { - InitializationNodes::RandomNumbers(nodes) => { - for node_id in nodes { - // generate node identification parameter - let node_id_number = math::generate_random_scalar()?; - data.nodes - .insert(node_id, NodeData::with_id_number(node_id_number)); - } - } - InitializationNodes::SpecificNumbers(nodes) => { - for (node_id, node_id_number) in nodes { - data.nodes - .insert(node_id, NodeData::with_id_number(node_id_number)); - } - } - } - - let mut visit_policy = EveryOtherNodeVisitor::new(self.node(), data.nodes.keys().cloned()); - let derived_point = math::generate_random_point()?; - match visit_policy.next_node() { - Some(next_node) => { - data.state = SessionState::WaitingForInitializationConfirm(visit_policy); - - // start initialization - self.cluster.send(&next_node, Message::Generation(GenerationMessage::InitializeSession(InitializeSession { - session: self.id.clone().into(), - session_nonce: self.nonce, - origin: origin.map(Into::into), - author: author.into(), - nodes: data.nodes.iter().map(|(k, v)| (k.clone().into(), v.id_number.clone().into())).collect(), - is_zero: data.is_zero.expect("is_zero is filled in initialization phase; KD phase follows initialization phase; qed"), - threshold: data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"), - derived_point: derived_point.into(), - }))) - } - None => { - drop(data); - self.complete_initialization(derived_point)?; - self.disseminate_keys()?; - self.verify_keys()?; - self.complete_generation()?; - - self.data.lock().state = SessionState::Finished; - self.completed.notify_all(); - - Ok(()) - } - } - } - - /// Process single message. - pub fn process_message( - &self, - sender: &NodeId, - message: &GenerationMessage, - ) -> Result<(), Error> { - if self.nonce != message.session_nonce() { - return Err(Error::ReplayProtection); - } - - match message { - &GenerationMessage::InitializeSession(ref message) => { - self.on_initialize_session(sender.clone(), message) - } - &GenerationMessage::ConfirmInitialization(ref message) => { - self.on_confirm_initialization(sender.clone(), message) - } - &GenerationMessage::CompleteInitialization(ref message) => { - self.on_complete_initialization(sender.clone(), message) - } - &GenerationMessage::KeysDissemination(ref message) => { - self.on_keys_dissemination(sender.clone(), message) - } - &GenerationMessage::PublicKeyShare(ref message) => { - self.on_public_key_share(sender.clone(), message) - } - &GenerationMessage::SessionError(ref message) => { - self.on_session_error(sender, message.error.clone()); - Ok(()) - } - &GenerationMessage::SessionCompleted(ref message) => { - self.on_session_completed(sender.clone(), message) - } - } - } - - /// When session initialization message is received. 
- pub fn on_initialize_session( - &self, - sender: NodeId, - message: &InitializeSession, - ) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(&sender != self.node()); - - // check message - let nodes_ids = message.nodes.keys().cloned().map(Into::into).collect(); - check_threshold(message.threshold, &nodes_ids)?; - check_cluster_nodes(self.node(), &nodes_ids)?; - - let mut data = self.data.lock(); - - // check state - if data.state != SessionState::WaitingForInitialization { - return Err(Error::InvalidStateForRequest); - } - - // update derived point with random scalar - let mut derived_point = message.derived_point.clone().into(); - math::update_random_point(&mut derived_point)?; - - // send confirmation back to master node - self.cluster.send( - &sender, - Message::Generation(GenerationMessage::ConfirmInitialization( - ConfirmInitialization { - session: self.id.clone().into(), - session_nonce: self.nonce, - derived_point: derived_point.into(), - }, - )), - )?; - - // update state - data.master = Some(sender); - data.author = Some(message.author.clone().into()); - data.state = SessionState::WaitingForInitializationComplete; - data.nodes = message - .nodes - .iter() - .map(|(id, number)| { - ( - id.clone().into(), - NodeData::with_id_number(number.clone().into()), - ) - }) - .collect(); - data.origin = message.origin.clone().map(Into::into); - data.is_zero = Some(message.is_zero); - data.threshold = Some(message.threshold); - - Ok(()) - } - - /// When session initialization confirmation message is reeived. - pub fn on_confirm_initialization( - &self, - sender: NodeId, - message: &ConfirmInitialization, - ) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(&sender != self.node()); - - let mut data = self.data.lock(); - debug_assert!(data.nodes.contains_key(&sender)); - - // check state && select new node to be initialized - let next_receiver = match data.state { - SessionState::WaitingForInitializationConfirm(ref mut visit_policy) => { - if !visit_policy.mark_visited(&sender) { - return Err(Error::InvalidStateForRequest); - } - - visit_policy.next_node() - } - _ => return Err(Error::InvalidStateForRequest), - }; - - // proceed message - if let Some(next_receiver) = next_receiver { - return self.cluster.send(&next_receiver, Message::Generation(GenerationMessage::InitializeSession(InitializeSession { - session: self.id.clone().into(), - session_nonce: self.nonce, - origin: data.origin.clone().map(Into::into), - author: data.author.as_ref().expect("author is filled on initialization step; confrm initialization follows initialization; qed").clone().into(), - nodes: data.nodes.iter().map(|(k, v)| (k.clone().into(), v.id_number.clone().into())).collect(), - is_zero: data.is_zero.expect("is_zero is filled in initialization phase; KD phase follows initialization phase; qed"), - threshold: data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"), - derived_point: message.derived_point.clone().into(), - }))); - } - - // now it is time for keys dissemination (KD) phase - drop(data); - self.complete_initialization(message.derived_point.clone().into())?; - self.disseminate_keys() - } - - /// When session initialization completion message is received. 
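As a rough illustration of the confirmation chain driven by `EveryOtherNodeVisitor` above, the sketch below reduces the round-robin walk to plain collections; node ids are plain `u64` values and the `InitializeSession`/`ConfirmInitialization` messages are only hinted at in comments.

// Illustrative sketch only: the master visits every other node one at a time.
use std::collections::{BTreeSet, VecDeque};

fn main() {
    let self_id = 0u64;
    let all_nodes = [0u64, 1, 2, 3];

    // every node except the master still has to confirm initialization
    let mut unvisited: VecDeque<u64> = all_nodes
        .iter()
        .copied()
        .filter(|n| *n != self_id)
        .collect();
    let mut visited: BTreeSet<u64> = BTreeSet::new();

    // the master contacts one node at a time; each confirmation lets it move on
    // to the next node, so the derived point is updated by every node in turn
    while let Some(next) = unvisited.pop_front() {
        // ... send InitializeSession to `next`, wait for its confirmation ...
        visited.insert(next);
    }

    // once the last confirmation arrives, CompleteInitialization is broadcast
    assert_eq!(visited.len(), all_nodes.len() - 1);
    println!("initialization confirmed by {:?}", visited);
}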
- pub fn on_complete_initialization( - &self, - sender: NodeId, - message: &CompleteInitialization, - ) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(&sender != self.node()); - - let mut data = self.data.lock(); - - // check state - if data.state != SessionState::WaitingForInitializationComplete { - return Err(Error::InvalidStateForRequest); - } - if data.master != Some(sender) { - return Err(Error::InvalidMessage); - } - - // remember passed data - data.derived_point = Some(message.derived_point.clone().into()); - - // now it is time for keys dissemination (KD) phase - drop(data); - self.disseminate_keys() - } - - /// When keys dissemination message is received. - pub fn on_keys_dissemination( - &self, - sender: NodeId, - message: &KeysDissemination, - ) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(&sender != self.node()); - - let mut data = self.data.lock(); - - // simulate failure, if required - if data.simulate_faulty_behaviour { - return Err(Error::Internal("simulated error".into())); - } - - // check state - if data.state != SessionState::WaitingForKeysDissemination { - match data.state { - SessionState::WaitingForInitializationComplete - | SessionState::WaitingForInitializationConfirm(_) => { - return Err(Error::TooEarlyForRequest) - } - _ => return Err(Error::InvalidStateForRequest), - } - } - debug_assert!(data.nodes.contains_key(&sender)); - - // check message - let is_zero = data.is_zero.expect( - "is_zero is filled in initialization phase; KD phase follows initialization phase; qed", - ); - let threshold = data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"); - if !is_zero && message.publics.len() != threshold + 1 { - return Err(Error::InvalidMessage); - } - - // update node data - { - let node_data = data.nodes.get_mut(&sender).ok_or(Error::InvalidMessage)?; - if node_data.secret1.is_some() - || node_data.secret2.is_some() - || node_data.publics.is_some() - { - return Err(Error::InvalidStateForRequest); - } - - node_data.secret1 = Some(message.secret1.clone().into()); - node_data.secret2 = Some(message.secret2.clone().into()); - node_data.publics = Some(message.publics.iter().cloned().map(Into::into).collect()); - } - - // check if we have received keys from every other node - if data.nodes.iter().any(|(node_id, node_data)| { - node_id != self.node() - && (node_data.publics.is_none() - || node_data.secret1.is_none() - || node_data.secret2.is_none()) - }) { - return Ok(()); - } - - drop(data); - self.verify_keys() - } - - /// When public key share is received. 
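For intuition about the key-dissemination arithmetic, the sketch below redoes it over a toy `u64` prime field instead of secp256k1 scalars; it shows only the first polynomial (the real phase also disseminates a second polynomial and public values used for verification), and none of the helpers are from the SecretStore `math` module.

// Illustrative sketch only: each node evaluates its random polynomial at every
// other node's id number; a node's secret share is the sum of the values it receives.
const P: u64 = 2_147_483_647; // toy modulus (2^31 - 1), small enough to avoid overflow

// Evaluate a polynomial given by its coefficients at x, modulo P (Horner's rule).
fn eval(coeffs: &[u64], x: u64) -> u64 {
    coeffs.iter().rev().fold(0, |acc, &c| (acc * x + c) % P)
}

fn main() {
    let node_ids = [3u64, 7, 11]; // the per-node "id numbers"
    // threshold = 1, so every node picks a random degree-1 polynomial
    let polynoms = [[5u64, 9], [8, 2], [4, 6]];

    // KD: node j receives eval(polynom_i, id_j) from every node i and sums them
    let shares: Vec<u64> = node_ids
        .iter()
        .map(|&id| polynoms.iter().map(|p| eval(p, id)).sum::<u64>() % P)
        .collect();

    // the joint secret is the sum of the constant terms; any threshold + 1 of the
    // shares can recover it by Lagrange interpolation at x = 0
    let joint_secret: u64 = polynoms.iter().map(|p| p[0]).sum::<u64>() % P;
    println!("shares = {:?}, joint secret = {}", shares, joint_secret);
}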
- pub fn on_public_key_share( - &self, - sender: NodeId, - message: &PublicKeyShare, - ) -> Result<(), Error> { - let mut data = self.data.lock(); - - // check state - if data.state != SessionState::WaitingForPublicKeyShare { - match data.state { - SessionState::WaitingForInitializationComplete - | SessionState::WaitingForKeysDissemination => { - return Err(Error::TooEarlyForRequest) - } - _ => return Err(Error::InvalidStateForRequest), - } - } - - // update node data with received public share - { - let node_data = &mut data.nodes.get_mut(&sender).ok_or(Error::InvalidMessage)?; - if node_data.public_share.is_some() { - return Err(Error::InvalidMessage); - } - - node_data.public_share = Some(message.public_share.clone().into()); - } - - // if there's also nodes, which has not sent us their public shares - do nothing - if data - .nodes - .iter() - .any(|(node_id, node_data)| node_id != self.node() && node_data.public_share.is_none()) - { - return Ok(()); - } - - drop(data); - self.complete_generation() - } - - /// When session completion message is received. - pub fn on_session_completed( - &self, - sender: NodeId, - message: &SessionCompleted, - ) -> Result<(), Error> { - debug_assert!(self.id == *message.session); - debug_assert!(&sender != self.node()); - - let mut data = self.data.lock(); - debug_assert!(data.nodes.contains_key(&sender)); - - // check state - if data.state != SessionState::WaitingForGenerationConfirmation { - match data.state { - SessionState::WaitingForPublicKeyShare => return Err(Error::TooEarlyForRequest), - _ => return Err(Error::InvalidStateForRequest), - } - } - - // if we are not masters, save result and respond with confirmation - if data.master.as_ref() != Some(self.node()) { - // check that we have received message from master - if data.master.as_ref() != Some(&sender) { - return Err(Error::InvalidMessage); - } - - // calculate joint public key - let is_zero = data.is_zero.expect("is_zero is filled in initialization phase; KG phase follows initialization phase; qed"); - let joint_public = if !is_zero { - let public_shares = data.nodes.values().map(|n| { - n.public_share - .as_ref() - .expect("keys received on KD phase; KG phase follows KD phase; qed") - }); - math::compute_joint_public(public_shares)? 
- } else { - Default::default() - }; - - // save encrypted data to key storage - let encrypted_data = DocumentKeyShare { - author: data.author.as_ref().expect("author is filled in initialization phase; KG phase follows initialization phase; qed").clone(), - threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"), - public: joint_public, - common_point: None, - encrypted_point: None, - versions: vec![DocumentKeyShareVersion::new( - data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(), - data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(), - )], - }; - - if let Some(ref key_storage) = self.key_storage { - key_storage.insert(self.id.clone(), encrypted_data.clone())?; - } - - // then respond with confirmation - data.state = SessionState::Finished; - return self.cluster.send( - &sender, - Message::Generation(GenerationMessage::SessionCompleted(SessionCompleted { - session: self.id.clone().into(), - session_nonce: self.nonce, - })), - ); - } - - // remember that we have received confirmation from sender node - { - let sender_node = data - .nodes - .get_mut(&sender) - .expect("node is always qualified by himself; qed"); - if sender_node.completion_confirmed { - return Err(Error::InvalidMessage); - } - - sender_node.completion_confirmed = true; - } - - // check if we have received confirmations from all cluster nodes - if data - .nodes - .iter() - .any(|(_, node_data)| !node_data.completion_confirmed) - { - return Ok(()); - } - - // we have received enough confirmations => complete session - data.state = SessionState::Finished; - self.completed.notify_all(); - - Ok(()) - } - - /// Complete initialization (when all other nodex has responded with confirmation) - fn complete_initialization(&self, mut derived_point: Public) -> Result<(), Error> { - // update point once again to make sure that derived point is not generated by last node - math::update_random_point(&mut derived_point)?; - - // remember derived point - let mut data = self.data.lock(); - data.derived_point = Some(derived_point.clone().into()); - - // broadcast derived point && other session paraeters to every other node - self.cluster.broadcast(Message::Generation( - GenerationMessage::CompleteInitialization(CompleteInitialization { - session: self.id.clone().into(), - session_nonce: self.nonce, - derived_point: derived_point.into(), - }), - )) - } - - /// Keys dissemination (KD) phase - fn disseminate_keys(&self) -> Result<(), Error> { - let mut data = self.data.lock(); - - // pick 2t + 2 random numbers as polynomial coefficients for 2 polynoms - let threshold = data.threshold.expect("threshold is filled on initialization phase; KD phase follows initialization phase; qed"); - let is_zero = data.is_zero.expect( - "is_zero is filled on initialization phase; KD phase follows initialization phase; qed", - ); - let mut polynom1 = math::generate_random_polynom(threshold)?; - if is_zero { - polynom1[0] = math::zero_scalar(); - } - let polynom2 = math::generate_random_polynom(threshold)?; - data.polynom1 = Some(polynom1.clone()); - data.secret_coeff = Some(polynom1[0].clone()); - - // compute t+1 public values - let publics = match is_zero { - false => math::public_values_generation( - threshold, - data.derived_point - .as_ref() - .expect("keys dissemination occurs after derived point is agreed; qed"), - &polynom1, - &polynom2, - )?, - true => Default::default(), - 
}; - - // compute secret values for every other node - for (node, node_data) in data.nodes.iter_mut() { - let secret1 = math::compute_polynom(&polynom1, &node_data.id_number)?; - let secret2 = math::compute_polynom(&polynom2, &node_data.id_number)?; - - // send a message containing secret1 && secret2 to other node - if node != self.node() { - self.cluster.send( - &node, - Message::Generation(GenerationMessage::KeysDissemination(KeysDissemination { - session: self.id.clone().into(), - session_nonce: self.nonce, - secret1: secret1.into(), - secret2: secret2.into(), - publics: publics.iter().cloned().map(Into::into).collect(), - })), - )?; - } else { - node_data.secret1 = Some(secret1); - node_data.secret2 = Some(secret2); - node_data.publics = Some(publics.clone()); - } - } - - // update state - data.state = SessionState::WaitingForKeysDissemination; - - Ok(()) - } - - /// Keys verification (KV) phase - fn verify_keys(&self) -> Result<(), Error> { - let mut data = self.data.lock(); - - // key verification (KV) phase: check that other nodes have passed correct secrets - let threshold = data.threshold.expect("threshold is filled in initialization phase; KV phase follows initialization phase; qed"); - let is_zero = data.is_zero.expect( - "is_zero is filled in initialization phase; KV phase follows initialization phase; qed", - ); - let self_public_share = { - if !is_zero { - let derived_point = data.derived_point.clone().expect("derived point generated on initialization phase; KV phase follows initialization phase; qed"); - let number_id = data.nodes[self.node()].id_number.clone(); - for (_, node_data) in data - .nodes - .iter_mut() - .filter(|&(node_id, _)| node_id != self.node()) - { - let secret1 = node_data - .secret1 - .as_ref() - .expect("keys received on KD phase; KV phase follows KD phase; qed"); - let secret2 = node_data - .secret2 - .as_ref() - .expect("keys received on KD phase; KV phase follows KD phase; qed"); - let publics = node_data - .publics - .as_ref() - .expect("keys received on KD phase; KV phase follows KD phase; qed"); - let is_key_verification_ok = math::keys_verification( - threshold, - &derived_point, - &number_id, - secret1, - secret2, - publics, - )?; - - if !is_key_verification_ok { - // node has sent us incorrect values. In original ECDKG protocol we should have sent complaint here. - return Err(Error::InvalidMessage); - } - } - - // calculate public share - let self_public_share = { - let self_secret_coeff = data.secret_coeff.as_ref().expect( - "secret_coeff is generated on KD phase; KG phase follows KD phase; qed", - ); - math::compute_public_share(self_secret_coeff)? - }; - - self_public_share - } else { - // TODO [Trust]: add verification when available - Default::default() - } - }; - - // calculate self secret + public shares - let self_secret_share = { - let secret_values_iter = data.nodes.values().map(|n| { - n.secret1 - .as_ref() - .expect("keys received on KD phase; KG phase follows KD phase; qed") - }); - math::compute_secret_share(secret_values_iter)? 
- }; - - // update state - data.state = SessionState::WaitingForPublicKeyShare; - data.secret_share = Some(self_secret_share); - let self_node = data - .nodes - .get_mut(self.node()) - .expect("node is always qualified by himself; qed"); - self_node.public_share = Some(self_public_share.clone()); - - // broadcast self public key share - self.cluster - .broadcast(Message::Generation(GenerationMessage::PublicKeyShare( - PublicKeyShare { - session: self.id.clone().into(), - session_nonce: self.nonce, - public_share: self_public_share.into(), - }, - ))) - } - - /// Complete generation - fn complete_generation(&self) -> Result<(), Error> { - let mut data = self.data.lock(); - - // calculate joint public key - let is_zero = data.is_zero.expect( - "is_zero is filled in initialization phase; KG phase follows initialization phase; qed", - ); - let joint_public = if !is_zero { - let public_shares = data.nodes.values().map(|n| { - n.public_share - .as_ref() - .expect("keys received on KD phase; KG phase follows KD phase; qed") - }); - math::compute_joint_public(public_shares)? - } else { - Default::default() - }; - - // prepare key data - let secret_share = data - .secret_share - .as_ref() - .expect("secret_share is filled in KG phase; we are at the end of KG phase; qed") - .clone(); - let encrypted_data = DocumentKeyShare { - author: data.author.as_ref().expect("author is filled in initialization phase; KG phase follows initialization phase; qed").clone(), - threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"), - public: joint_public.clone(), - common_point: None, - encrypted_point: None, - versions: vec![DocumentKeyShareVersion::new( - data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(), - secret_share.clone(), - )], - }; - - // if we are at the slave node - wait for session completion - let secret_coeff = data.secret_coeff.as_ref().expect("secret coeff is selected on initialization phase; current phase follows initialization; qed").clone(); - if data.master.as_ref() != Some(self.node()) { - data.key_share = Some(Ok(encrypted_data)); - data.joint_public_and_secret = Some(Ok((joint_public, secret_coeff, secret_share))); - data.state = SessionState::WaitingForGenerationConfirmation; - return Ok(()); - } - - // then save encrypted data to the key storage - if let Some(ref key_storage) = self.key_storage { - key_storage.insert(self.id.clone(), encrypted_data.clone())?; - } - - // then distribute encrypted data to every other node - self.cluster - .broadcast(Message::Generation(GenerationMessage::SessionCompleted( - SessionCompleted { - session: self.id.clone().into(), - session_nonce: self.nonce, - }, - )))?; - - // then wait for confirmation from all other nodes - { - let self_node = data - .nodes - .get_mut(self.node()) - .expect("node is always qualified by himself; qed"); - self_node.completion_confirmed = true; - } - data.key_share = Some(Ok(encrypted_data)); - data.joint_public_and_secret = Some(Ok((joint_public, secret_coeff, secret_share))); - data.state = SessionState::WaitingForGenerationConfirmation; - - Ok(()) - } -} - -impl ClusterSession for SessionImpl { - type Id = SessionId; - - fn type_name() -> &'static str { - "generation" - } - - fn id(&self) -> SessionId { - self.id.clone() - } - - fn is_finished(&self) -> bool { - let data = self.data.lock(); - data.state == SessionState::Failed || data.state == SessionState::Finished - } - - fn on_node_timeout(&self, node: 
&NodeId) { - let mut data = self.data.lock(); - - // all nodes are required for generation session - // => fail without check - warn!( - "{}: generation session failed because {} connection has timed out", - self.node(), - node - ); - - data.state = SessionState::Failed; - data.key_share = Some(Err(Error::NodeDisconnected)); - data.joint_public_and_secret = Some(Err(Error::NodeDisconnected)); - self.completed.notify_all(); - } - - fn on_session_timeout(&self) { - let mut data = self.data.lock(); - - warn!("{}: generation session failed with timeout", self.node()); - - data.state = SessionState::Failed; - data.key_share = Some(Err(Error::NodeDisconnected)); - data.joint_public_and_secret = Some(Err(Error::NodeDisconnected)); - self.completed.notify_all(); - } - - fn on_session_error(&self, node: &NodeId, error: Error) { - // error in generation session is considered fatal - // => broadcast error if error occurred on this node - if *node == self.self_node_id { - // do not bother processing send error, as we are already processing an error - let _ = self - .cluster - .broadcast(Message::Generation(GenerationMessage::SessionError( - SessionError { - session: self.id.clone().into(), - session_nonce: self.nonce, - error: error.clone().into(), - }, - ))); - } - - let mut data = self.data.lock(); - data.state = SessionState::Failed; - data.key_share = Some(Err(error.clone())); - data.joint_public_and_secret = Some(Err(error)); - self.completed.notify_all(); - } - - fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { - match *message { - Message::Generation(ref message) => self.process_message(sender, message), - _ => unreachable!("cluster checks message to be correct before passing; qed"), - } - } -} - -impl EveryOtherNodeVisitor { - pub fn new<I>(self_id: &NodeId, nodes: I) -> Self - where - I: Iterator<Item = NodeId>, - { - EveryOtherNodeVisitor { - visited: BTreeSet::new(), - unvisited: nodes.filter(|n| n != self_id).collect(), - in_progress: BTreeSet::new(), - } - } - - pub fn next_node(&mut self) -> Option<NodeId> { - let next_node = self.unvisited.pop_front(); - if let Some(ref next_node) = next_node { - self.in_progress.insert(next_node.clone()); - } - next_node - } - - pub fn mark_visited(&mut self, node: &NodeId) -> bool { - if !self.in_progress.remove(node) { - return false; - } - self.visited.insert(node.clone()) - } -} - -impl NodeData { - fn with_id_number(node_id_number: Secret) -> Self { - NodeData { - id_number: node_id_number, - secret1: None, - secret2: None, - publics: None, - public_share: None, - completion_confirmed: false, - } - } -} - -impl Debug for SessionImpl { - fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { - write!(f, "Generation session {} on {}", self.id, self.self_node_id) - } -} - -fn check_cluster_nodes(self_node_id: &NodeId, nodes: &BTreeSet<NodeId>) -> Result<(), Error> { - assert!(nodes.contains(self_node_id)); - Ok(()) -} - -fn check_threshold(threshold: usize, nodes: &BTreeSet<NodeId>) -> Result<(), Error> { - // at least threshold + 1 nodes are required to collectively decrypt message - if threshold >= nodes.len() { - return Err(Error::NotEnoughNodesForThreshold); - } - - Ok(()) -} - -#[cfg(test)] -pub mod tests { - use ethereum_types::H256; - use ethkey::{Generator, KeyPair, Random, Secret}; - use key_server_cluster::{ - cluster::tests::{make_clusters_and_preserve_sessions, MessageLoop as ClusterMessageLoop}, - cluster_sessions::ClusterSession, - generation_session::{SessionImpl, SessionState}, - math, - math::tests::do_encryption_and_decryption, - message::{ -
self, ConfirmInitialization, GenerationMessage, KeysDissemination, Message, - PublicKeyShare, - }, - Error, KeyStorage, NodeId, - }; - use std::sync::Arc; - - #[derive(Debug)] - pub struct MessageLoop(pub ClusterMessageLoop); - - impl MessageLoop { - pub fn new(num_nodes: usize) -> Self { - MessageLoop(make_clusters_and_preserve_sessions(num_nodes)) - } - - pub fn init(self, threshold: usize) -> Result { - self.0 - .cluster(0) - .client() - .new_generation_session(Default::default(), None, Default::default(), threshold) - .map(|_| self) - } - - pub fn session_at(&self, idx: usize) -> Arc { - self.0.sessions(idx).generation_sessions.first().unwrap() - } - - pub fn session_of(&self, node: &NodeId) -> Arc { - self.0 - .sessions_of(node) - .generation_sessions - .first() - .unwrap() - } - - pub fn take_message_confirm_initialization( - &self, - ) -> (NodeId, NodeId, ConfirmInitialization) { - match self.0.take_message() { - Some(( - from, - to, - Message::Generation(GenerationMessage::ConfirmInitialization(msg)), - )) => (from, to, msg), - _ => panic!("unexpected"), - } - } - - pub fn take_message_keys_dissemination(&self) -> (NodeId, NodeId, KeysDissemination) { - match self.0.take_message() { - Some(( - from, - to, - Message::Generation(GenerationMessage::KeysDissemination(msg)), - )) => (from, to, msg), - _ => panic!("unexpected"), - } - } - - pub fn take_message_public_key_share(&self) -> (NodeId, NodeId, PublicKeyShare) { - match self.0.take_message() { - Some((from, to, Message::Generation(GenerationMessage::PublicKeyShare(msg)))) => { - (from, to, msg) - } - _ => panic!("unexpected"), - } - } - - pub fn nodes_id_numbers(&self) -> Vec { - let session = self.session_at(0); - let session_data = session.data.lock(); - session_data - .nodes - .values() - .map(|n| n.id_number.clone()) - .collect() - } - - pub fn nodes_secret_shares(&self) -> Vec { - (0..self.0.nodes().len()) - .map(|i| { - let session = self.session_at(i); - let session_data = session.data.lock(); - session_data.secret_share.as_ref().unwrap().clone() - }) - .collect() - } - - pub fn compute_key_pair(&self) -> KeyPair { - let t = self - .0 - .key_storage(0) - .get(&Default::default()) - .unwrap() - .unwrap() - .threshold; - let secret_shares = self.nodes_secret_shares(); - let id_numbers = self.nodes_id_numbers(); - let secret_shares = secret_shares.iter().take(t + 1).collect::>(); - let id_numbers = id_numbers.iter().take(t + 1).collect::>(); - let joint_secret = - math::compute_joint_secret_from_shares(t, &secret_shares, &id_numbers).unwrap(); - - KeyPair::from_secret(joint_secret).unwrap() - } - - pub fn key_version(&self) -> H256 { - self.0 - .key_storage(0) - .get(&Default::default()) - .unwrap() - .unwrap() - .versions - .iter() - .last() - .unwrap() - .hash - } - } - - #[test] - fn initializes_in_cluster_of_single_node() { - MessageLoop::new(1).init(0).unwrap(); - } - - #[test] - fn fails_to_initialize_if_threshold_is_wrong() { - assert_eq!( - MessageLoop::new(2).init(2).unwrap_err(), - Error::NotEnoughNodesForThreshold - ); - } - - #[test] - fn fails_to_initialize_when_already_initialized() { - let ml = MessageLoop::new(2).init(0).unwrap(); - assert_eq!( - ml.session_at(0).initialize( - Default::default(), - Default::default(), - false, - 0, - ml.0.nodes().into() - ), - Err(Error::InvalidStateForRequest), - ); - } - - #[test] - fn fails_to_accept_initialization_when_already_initialized() { - let ml = MessageLoop::new(2).init(0).unwrap(); - let (from, to, msg) = ml.0.take_message().unwrap(); - ml.0.process_message(from, 
to, msg.clone()); - assert_eq!( - ml.session_of(&to).on_message(&from, &msg), - Err(Error::InvalidStateForRequest), - ); - } - - #[test] - fn slave_updates_derived_point_on_initialization() { - let ml = MessageLoop::new(2).init(0).unwrap(); - let original_point = match ml.0.take_message().unwrap() { - (from, to, Message::Generation(GenerationMessage::InitializeSession(msg))) => { - let original_point = msg.derived_point.clone(); - let msg = Message::Generation(GenerationMessage::InitializeSession(msg)); - ml.0.process_message(from, to, msg); - original_point - } - _ => panic!("unexpected"), - }; - - match ml.0.take_message().unwrap() { - (_, _, Message::Generation(GenerationMessage::ConfirmInitialization(msg))) => { - assert!(original_point != msg.derived_point) - } - _ => panic!("unexpected"), - } - } - - #[test] - fn fails_to_accept_initialization_confirmation_if_already_accepted_from_the_same_node() { - let ml = MessageLoop::new(3).init(0).unwrap(); - ml.0.take_and_process_message(); - - let (from, to, msg) = ml.take_message_confirm_initialization(); - ml.0.process_message( - from, - to, - Message::Generation(GenerationMessage::ConfirmInitialization(msg.clone())), - ); - assert_eq!( - ml.session_of(&to).on_confirm_initialization(from, &msg), - Err(Error::InvalidStateForRequest) - ); - } - - #[test] - fn fails_to_accept_initialization_confirmation_if_initialization_already_completed() { - let ml = MessageLoop::new(2).init(0).unwrap(); - ml.0.take_and_process_message(); - ml.0.take_and_process_message(); - assert_eq!( - ml.session_at(0).on_confirm_initialization( - ml.0.node(1), - &message::ConfirmInitialization { - session: Default::default(), - session_nonce: 0, - derived_point: math::generate_random_point().unwrap().into(), - } - ), - Err(Error::InvalidStateForRequest) - ); - } - - #[test] - fn master_updates_derived_point_on_initialization_completion() { - let ml = MessageLoop::new(2).init(0).unwrap(); - ml.0.take_and_process_message(); - let original_point = match ml.0.take_message().unwrap() { - (from, to, Message::Generation(GenerationMessage::ConfirmInitialization(msg))) => { - let original_point = msg.derived_point.clone(); - let msg = Message::Generation(GenerationMessage::ConfirmInitialization(msg)); - ml.session_of(&to).on_message(&from, &msg).unwrap(); - original_point - } - _ => panic!("unexpected"), - }; - - assert!(ml.session_at(0).derived_point().unwrap() != original_point.into()); - } - - #[test] - fn fails_to_complete_initialization_if_not_waiting_for_it() { - let ml = MessageLoop::new(2).init(0).unwrap(); - ml.0.take_and_process_message(); - assert_eq!( - ml.session_at(0).on_complete_initialization( - ml.0.node(1), - &message::CompleteInitialization { - session: Default::default(), - session_nonce: 0, - derived_point: math::generate_random_point().unwrap().into(), - } - ), - Err(Error::InvalidStateForRequest) - ); - } - - #[test] - fn fails_to_complete_initialization_from_non_master_node() { - let ml = MessageLoop::new(3).init(0).unwrap(); - ml.0.take_and_process_message(); - ml.0.take_and_process_message(); - ml.0.take_and_process_message(); - ml.0.take_and_process_message(); - assert_eq!( - ml.session_at(1).on_complete_initialization( - ml.0.node(2), - &message::CompleteInitialization { - session: Default::default(), - session_nonce: 0, - derived_point: math::generate_random_point().unwrap().into(), - } - ), - Err(Error::InvalidMessage) - ); - } - - #[test] - fn fails_to_accept_keys_dissemination_if_not_waiting_for_it() { - let ml = 
MessageLoop::new(2).init(0).unwrap(); - assert_eq!( - ml.session_at(0).on_keys_dissemination( - ml.0.node(1), - &message::KeysDissemination { - session: Default::default(), - session_nonce: 0, - secret1: math::generate_random_scalar().unwrap().into(), - secret2: math::generate_random_scalar().unwrap().into(), - publics: vec![math::generate_random_point().unwrap().into()], - } - ), - Err(Error::TooEarlyForRequest) - ); - } - - #[test] - fn fails_to_accept_keys_dissemination_if_wrong_number_of_publics_passed() { - let ml = MessageLoop::new(3).init(0).unwrap(); - ml.0.take_and_process_message(); // m -> s1: InitializeSession - ml.0.take_and_process_message(); // m -> s2: InitializeSession - ml.0.take_and_process_message(); // s1 -> m: ConfirmInitialization - ml.0.take_and_process_message(); // s2 -> m: ConfirmInitialization - ml.0.take_and_process_message(); // m -> s1: CompleteInitialization - ml.0.take_and_process_message(); // m -> s2: CompleteInitialization - - let (from, to, mut msg) = ml.take_message_keys_dissemination(); - msg.publics.clear(); - assert_eq!( - ml.session_of(&to).on_keys_dissemination(from, &msg), - Err(Error::InvalidMessage) - ); - } - - #[test] - fn fails_to_accept_keys_dissemination_second_time_from_the_same_node() { - let ml = MessageLoop::new(3).init(0).unwrap(); - ml.0.take_and_process_message(); // m -> s1: InitializeSession - ml.0.take_and_process_message(); // m -> s2: InitializeSession - ml.0.take_and_process_message(); // s1 -> m: ConfirmInitialization - ml.0.take_and_process_message(); // s2 -> m: ConfirmInitialization - ml.0.take_and_process_message(); // m -> s1: CompleteInitialization - ml.0.take_and_process_message(); // m -> s2: CompleteInitialization - - let (from, to, msg) = ml.take_message_keys_dissemination(); - ml.0.process_message( - from, - to, - Message::Generation(GenerationMessage::KeysDissemination(msg.clone())), - ); - assert_eq!( - ml.session_of(&to).on_keys_dissemination(from, &msg), - Err(Error::InvalidStateForRequest) - ); - } - - #[test] - fn should_not_accept_public_key_share_when_is_not_waiting_for_it() { - let ml = MessageLoop::new(3).init(1).unwrap(); - assert_eq!( - ml.session_at(0).on_public_key_share( - ml.0.node(1), - &message::PublicKeyShare { - session: Default::default(), - session_nonce: 0, - public_share: math::generate_random_point().unwrap().into(), - } - ), - Err(Error::InvalidStateForRequest) - ); - } - - #[test] - fn should_not_accept_public_key_share_when_receiving_twice() { - let ml = MessageLoop::new(3).init(0).unwrap(); - ml.0.take_and_process_message(); // m -> s1: InitializeSession - ml.0.take_and_process_message(); // m -> s2: InitializeSession - ml.0.take_and_process_message(); // s1 -> m: ConfirmInitialization - ml.0.take_and_process_message(); // s2 -> m: ConfirmInitialization - ml.0.take_and_process_message(); // m -> s1: CompleteInitialization - ml.0.take_and_process_message(); // m -> s2: CompleteInitialization - ml.0.take_and_process_message(); // m -> s1: KeysDissemination - ml.0.take_and_process_message(); // m -> s2: KeysDissemination - ml.0.take_and_process_message(); // s1 -> m: KeysDissemination - ml.0.take_and_process_message(); // s1 -> s2: KeysDissemination - ml.0.take_and_process_message(); // s2 -> m: KeysDissemination - ml.0.take_and_process_message(); // s2 -> s1: KeysDissemination - - let (from, to, msg) = ml.take_message_public_key_share(); - ml.0.process_message( - from, - to, - Message::Generation(GenerationMessage::PublicKeyShare(msg.clone())), - ); - assert_eq!( - 
ml.session_of(&to).on_public_key_share(from, &msg), - Err(Error::InvalidMessage) - ); - } - - #[test] - fn encryption_fails_on_session_timeout() { - let ml = MessageLoop::new(2).init(0).unwrap(); - assert!(ml.session_at(0).joint_public_and_secret().is_none()); - ml.session_at(0).on_session_timeout(); - assert_eq!( - ml.session_at(0).joint_public_and_secret().unwrap(), - Err(Error::NodeDisconnected) - ); - } - - #[test] - fn encryption_fails_on_node_timeout() { - let ml = MessageLoop::new(2).init(0).unwrap(); - assert!(ml.session_at(0).joint_public_and_secret().is_none()); - ml.session_at(0).on_node_timeout(&ml.0.node(1)); - assert_eq!( - ml.session_at(0).joint_public_and_secret().unwrap(), - Err(Error::NodeDisconnected) - ); - } - - #[test] - fn complete_enc_dec_session() { - let test_cases = [(0, 5), (2, 5), (3, 5)]; - for &(threshold, num_nodes) in &test_cases { - let ml = MessageLoop::new(num_nodes).init(threshold).unwrap(); - ml.0.loop_until(|| ml.0.is_empty()); - - // check that all nodes has finished joint public generation - let joint_public_key = ml - .session_at(0) - .joint_public_and_secret() - .unwrap() - .unwrap() - .0; - for i in 0..num_nodes { - let session = ml.session_at(i); - assert_eq!(session.state(), SessionState::Finished); - assert_eq!( - session.joint_public_and_secret().map(|p| p.map(|p| p.0)), - Some(Ok(joint_public_key)) - ); - } - - // now let's encrypt some secret (which is a point on EC) - let document_secret_plain = Random.generate().unwrap().public().clone(); - let all_nodes_id_numbers = ml.nodes_id_numbers(); - let all_nodes_secret_shares = ml.nodes_secret_shares(); - let document_secret_decrypted = do_encryption_and_decryption( - threshold, - &joint_public_key, - &all_nodes_id_numbers, - &all_nodes_secret_shares, - None, - document_secret_plain.clone(), - ) - .0; - assert_eq!(document_secret_plain, document_secret_decrypted); - } - } - - #[test] - fn generation_message_fails_when_nonce_is_wrong() { - let ml = MessageLoop::new(2).init(0).unwrap(); - ml.0.take_and_process_message(); - - let msg = message::GenerationMessage::KeysDissemination(message::KeysDissemination { - session: Default::default(), - session_nonce: 10, - secret1: math::generate_random_scalar().unwrap().into(), - secret2: math::generate_random_scalar().unwrap().into(), - publics: vec![math::generate_random_point().unwrap().into()], - }); - assert_eq!( - ml.session_at(1) - .process_message(&ml.0.node(0), &msg) - .unwrap_err(), - Error::ReplayProtection - ); - } -} diff --git a/secret-store/src/key_server_cluster/client_sessions/mod.rs b/secret-store/src/key_server_cluster/client_sessions/mod.rs deleted file mode 100644 index 4ac68dcc1..000000000 --- a/secret-store/src/key_server_cluster/client_sessions/mod.rs +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. - -// OpenEthereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// OpenEthereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with OpenEthereum. If not, see . 
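The generation-session tests above recombine t + 1 secret shares into the joint secret with math::compute_joint_secret_from_shares. A minimal, self-contained sketch of the same Shamir-style idea, using a small prime field and illustrative names in place of the secp256k1-order arithmetic the real math module provides:

// Toy (t, n) secret sharing: each node's share is an evaluation of a random
// degree-t polynomial, and any t + 1 shares recover the constant term by
// Lagrange interpolation at x = 0. Small prime field for demonstration only.
const P: i128 = 2_147_483_647; // small prime stand-in for the real group order

fn mod_pow(mut base: i128, mut exp: i128, modulus: i128) -> i128 {
    let mut acc = 1;
    base %= modulus;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc * base % modulus;
        }
        base = base * base % modulus;
        exp >>= 1;
    }
    acc
}

fn mod_inv(a: i128) -> i128 {
    mod_pow(a.rem_euclid(P), P - 2, P) // Fermat inverse, P is prime
}

// Evaluate a polynomial (coefficients low-to-high) at x, mod P.
fn eval(poly: &[i128], x: i128) -> i128 {
    poly.iter().rev().fold(0, |acc, c| (acc * x + c).rem_euclid(P))
}

// Recover f(0) from t + 1 points (x_i, y_i) by Lagrange interpolation at zero.
fn interpolate_at_zero(points: &[(i128, i128)]) -> i128 {
    let mut secret = 0;
    for (i, &(xi, yi)) in points.iter().enumerate() {
        let mut num = 1;
        let mut den = 1;
        for (j, &(xj, _)) in points.iter().enumerate() {
            if i != j {
                num = num * (-xj).rem_euclid(P) % P;
                den = den * (xi - xj).rem_euclid(P) % P;
            }
        }
        secret = (secret + yi * num % P * mod_inv(den)) % P;
    }
    secret.rem_euclid(P)
}

fn main() {
    let t = 2;
    let poly = vec![42, 7, 13]; // degree t; poly[0] is the shared secret
    // each node i receives the share (i, f(i)), as in the KD phase
    let shares: Vec<(i128, i128)> = (1..=5).map(|x| (x, eval(&poly, x))).collect();
    // any t + 1 shares recover the secret, as in compute_joint_secret_from_shares
    assert_eq!(interpolate_at_zero(&shares[..t + 1]), 42);
}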
- -pub mod decryption_session; -pub mod encryption_session; -pub mod generation_session; -pub mod signing_session_ecdsa; -pub mod signing_session_schnorr; diff --git a/secret-store/src/key_server_cluster/client_sessions/signing_session_ecdsa.rs b/secret-store/src/key_server_cluster/client_sessions/signing_session_ecdsa.rs deleted file mode 100644 index 1c066a461..000000000 --- a/secret-store/src/key_server_cluster/client_sessions/signing_session_ecdsa.rs +++ /dev/null @@ -1,1626 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. - -// OpenEthereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// OpenEthereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with OpenEthereum. If not, see . - -use ethereum_types::H256; -use ethkey::{sign, Public, Secret, Signature}; -use key_server_cluster::{ - cluster::Cluster, - cluster_sessions::{ClusterSession, SessionIdWithSubSession}, - generation_session::{ - SessionImpl as GenerationSession, SessionParams as GenerationSessionParams, - SessionState as GenerationSessionState, - }, - jobs::{ - consensus_session::{ConsensusSession, ConsensusSessionParams, ConsensusSessionState}, - job_session::JobTransport, - key_access_job::KeyAccessJob, - signing_job_ecdsa::{ - EcdsaPartialSigningRequest, EcdsaPartialSigningResponse, EcdsaSigningJob, - }, - }, - math, - message::{ - ConfirmConsensusInitialization, ConsensusMessage, EcdsaInversionNonceGenerationMessage, - EcdsaInversionZeroGenerationMessage, EcdsaPartialSignature, EcdsaRequestPartialSignature, - EcdsaSignatureNonceGenerationMessage, EcdsaSigningConsensusMessage, - EcdsaSigningInversedNonceCoeffShare, EcdsaSigningMessage, EcdsaSigningSessionCompleted, - EcdsaSigningSessionDelegation, EcdsaSigningSessionDelegationCompleted, - EcdsaSigningSessionError, GenerationMessage, InitializeConsensusSession, Message, - }, - AclStorage, DocumentKeyShare, Error, NodeId, Requester, SessionId, SessionMeta, -}; -use parking_lot::{Condvar, Mutex}; -use std::{ - collections::{btree_map::Entry, BTreeMap, BTreeSet}, - sync::Arc, -}; - -/// Distributed ECDSA-signing session. -/// Based on "A robust threshold elliptic curve digital signature providing a new verifiable secret sharing scheme" paper. -/// WARNING: can only be used if 2*t < N is true for key generation scheme -pub struct SessionImpl { - /// Session core. - core: SessionCore, - /// Session data. - data: Mutex, -} - -/// Immutable session data. -struct SessionCore { - /// Session metadata. - pub meta: SessionMeta, - /// Signing session access key. - pub access_key: Secret, - /// Key share. - pub key_share: Option, - /// Cluster which allows this node to send messages to other nodes in the cluster. - pub cluster: Arc, - /// Session-level nonce. - pub nonce: u64, - /// SessionImpl completion condvar. - pub completed: Condvar, -} - -/// Signing consensus session type. -type SigningConsensusSession = - ConsensusSession; - -/// Mutable session data. -struct SessionData { - /// Session state. - pub state: SessionState, - /// Message hash. 
- pub message_hash: Option, - /// Key version to use for decryption. - pub version: Option, - /// Consensus-based signing session. - pub consensus_session: SigningConsensusSession, - /// Signature nonce generation session. - pub sig_nonce_generation_session: Option, - /// Inversion nonce generation session. - pub inv_nonce_generation_session: Option, - /// Inversion zero generation session. - pub inv_zero_generation_session: Option, - /// Inversed nonce coefficient shares. - pub inversed_nonce_coeff_shares: Option>, - /// Delegation status. - pub delegation_status: Option, - /// Decryption result. - pub result: Option>, -} - -/// Signing session state. -#[derive(Debug, PartialEq)] -pub enum SessionState { - /// Consensus is establishing. - ConsensusEstablishing, - /// Nonces (signature, inversion && zero) are generating. - NoncesGenerating, - /// Waiting for inversed nonce shares. - WaitingForInversedNonceShares, - /// State when signature is computing. - SignatureComputing, -} - -/// Session creation parameters -pub struct SessionParams { - /// Session metadata. - pub meta: SessionMeta, - /// Session access key. - pub access_key: Secret, - /// Key share. - pub key_share: Option, - /// ACL storage. - pub acl_storage: Arc, - /// Cluster - pub cluster: Arc, - /// Session nonce. - pub nonce: u64, -} - -/// Signing consensus transport. -struct SigningConsensusTransport { - /// Session id. - id: SessionId, - /// Session access key. - access_key: Secret, - /// Session-level nonce. - nonce: u64, - /// Selected key version (on master node). - version: Option, - /// Cluster. - cluster: Arc, -} - -/// Signing key generation transport. -struct NonceGenerationTransport< - F: Fn(SessionId, Secret, u64, GenerationMessage) -> EcdsaSigningMessage + Send + Sync, -> { - /// Session id. - id: SessionId, - /// Session access key. - access_key: Secret, - /// Session-level nonce. - nonce: u64, - /// Cluster. - cluster: Arc, - /// Other nodes ids. - other_nodes_ids: BTreeSet, - /// Message mapping function. - map: F, -} - -/// Signing job transport -struct SigningJobTransport { - /// Session id. - id: SessionId, - /// Session access key. - access_key: Secret, - /// Session-level nonce. - nonce: u64, - /// Cluster. - cluster: Arc, -} - -/// Session delegation status. -enum DelegationStatus { - /// Delegated to other node. - DelegatedTo(NodeId), - /// Delegated from other node. - DelegatedFrom(NodeId, u64), -} - -impl SessionImpl { - /// Create new signing session. 
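A compact sketch of how the SessionState phases defined above advance on master and slave nodes. The Finished variant and the advance function are illustrative additions; in the real session the transitions are spread across the message handlers that follow, and completion is recorded in the result field instead:

#[derive(Debug, Clone, Copy, PartialEq)]
enum State {
    ConsensusEstablishing,
    NoncesGenerating,
    WaitingForInversedNonceShares,
    SignatureComputing,
    Finished, // illustrative terminal state
}

fn advance(state: State, is_master: bool) -> State {
    match state {
        // consensus reached => the three nonce generation sub-sessions are started
        State::ConsensusEstablishing => State::NoncesGenerating,
        // nonces generated => the master waits for inversed nonce coefficient shares,
        // while slaves move straight on to answering partial-signature requests
        State::NoncesGenerating if is_master => State::WaitingForInversedNonceShares,
        State::NoncesGenerating => State::SignatureComputing,
        // master combined the shares => partial signatures are requested and combined
        State::WaitingForInversedNonceShares => State::SignatureComputing,
        State::SignatureComputing | State::Finished => State::Finished,
    }
}

fn main() {
    let mut state = State::ConsensusEstablishing;
    while state != State::Finished {
        println!("{:?}", state);
        state = advance(state, true);
    }
}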
- pub fn new(params: SessionParams, requester: Option) -> Result { - debug_assert_eq!( - params.meta.threshold, - params - .key_share - .as_ref() - .map(|ks| ks.threshold) - .unwrap_or_default() - ); - - let consensus_transport = SigningConsensusTransport { - id: params.meta.id.clone(), - access_key: params.access_key.clone(), - nonce: params.nonce, - version: None, - cluster: params.cluster.clone(), - }; - let consensus_session = ConsensusSession::new(ConsensusSessionParams { - // this session requires responses from 2 * t nodes - meta: SessionMeta { - id: params.meta.id, - master_node_id: params.meta.master_node_id, - self_node_id: params.meta.self_node_id, - threshold: params.meta.threshold * 2, - configured_nodes_count: params.meta.configured_nodes_count, - connected_nodes_count: params.meta.connected_nodes_count, - }, - consensus_executor: match requester { - Some(requester) => KeyAccessJob::new_on_master( - params.meta.id.clone(), - params.acl_storage.clone(), - requester, - ), - None => { - KeyAccessJob::new_on_slave(params.meta.id.clone(), params.acl_storage.clone()) - } - }, - consensus_transport: consensus_transport, - })?; - - Ok(SessionImpl { - core: SessionCore { - meta: params.meta, - access_key: params.access_key, - key_share: params.key_share, - cluster: params.cluster, - nonce: params.nonce, - completed: Condvar::new(), - }, - data: Mutex::new(SessionData { - state: SessionState::ConsensusEstablishing, - message_hash: None, - version: None, - consensus_session: consensus_session, - sig_nonce_generation_session: None, - inv_nonce_generation_session: None, - inv_zero_generation_session: None, - inversed_nonce_coeff_shares: None, - delegation_status: None, - result: None, - }), - }) - } - - /// Wait for session completion. - pub fn wait(&self) -> Result { - Self::wait_session(&self.core.completed, &self.data, None, |data| { - data.result.clone() - }) - .expect("wait_session returns Some if called without timeout; qed") - } - - /// Delegate session to other node. - pub fn delegate(&self, master: NodeId, version: H256, message_hash: H256) -> Result<(), Error> { - if self.core.meta.master_node_id != self.core.meta.self_node_id { - return Err(Error::InvalidStateForRequest); - } - - let mut data = self.data.lock(); - if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization - || data.delegation_status.is_some() - { - return Err(Error::InvalidStateForRequest); - } - - data.consensus_session - .consensus_job_mut() - .executor_mut() - .set_has_key_share(false); - self.core.cluster.send(&master, Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionDelegation(EcdsaSigningSessionDelegation { - session: self.core.meta.id.clone().into(), - sub_session: self.core.access_key.clone().into(), - session_nonce: self.core.nonce, - requester: data.consensus_session.consensus_job().executor().requester() - .expect("requester is passed to master node on creation; session can be delegated from master node only; qed") - .clone().into(), - version: version.into(), - message_hash: message_hash.into(), - })))?; - data.delegation_status = Some(DelegationStatus::DelegatedTo(master)); - Ok(()) - } - - /// Initialize signing session on master node. 
- pub fn initialize(&self, version: H256, message_hash: H256) -> Result<(), Error> { - debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id); - - // check if version exists - let key_version = match self.core.key_share.as_ref() { - None => return Err(Error::InvalidMessage), - Some(key_share) => key_share.version(&version)?, - }; - - // select nodes to participate in consensus etablish session - let mut data = self.data.lock(); - let non_isolated_nodes = self.core.cluster.nodes(); - let mut consensus_nodes: BTreeSet<_> = key_version - .id_numbers - .keys() - .filter(|n| non_isolated_nodes.contains(*n)) - .cloned() - .chain(::std::iter::once(self.core.meta.self_node_id.clone())) - .collect(); - if let Some(&DelegationStatus::DelegatedFrom(delegation_master, _)) = - data.delegation_status.as_ref() - { - consensus_nodes.remove(&delegation_master); - } - - // start consensus establish sesssion - data.consensus_session - .consensus_job_mut() - .transport_mut() - .version = Some(version.clone()); - data.version = Some(version.clone()); - data.message_hash = Some(message_hash); - data.consensus_session.initialize(consensus_nodes)?; - - // consensus established => threshold is 0 => we can generate signature on this node - if data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished { - data.result = Some(sign(&key_version.secret_share, &message_hash).map_err(Into::into)); - self.core.completed.notify_all(); - } - - Ok(()) - } - - /// Process signing message. - pub fn process_message( - &self, - sender: &NodeId, - message: &EcdsaSigningMessage, - ) -> Result<(), Error> { - if self.core.nonce != message.session_nonce() { - return Err(Error::ReplayProtection); - } - - match message { - &EcdsaSigningMessage::EcdsaSigningConsensusMessage(ref message) => { - self.on_consensus_message(sender, message) - } - &EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage(ref message) => { - self.on_signature_nonce_generation_message(sender, message) - } - &EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage(ref message) => { - self.on_inversion_nonce_generation_message(sender, message) - } - &EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage(ref message) => { - self.on_inversion_zero_generation_message(sender, message) - } - &EcdsaSigningMessage::EcdsaSigningInversedNonceCoeffShare(ref message) => { - self.on_inversed_nonce_coeff_share(sender, message) - } - &EcdsaSigningMessage::EcdsaRequestPartialSignature(ref message) => { - self.on_partial_signature_requested(sender, message) - } - &EcdsaSigningMessage::EcdsaPartialSignature(ref message) => { - self.on_partial_signature(sender, message) - } - &EcdsaSigningMessage::EcdsaSigningSessionError(ref message) => { - self.process_node_error(Some(&sender), message.error.clone()) - } - &EcdsaSigningMessage::EcdsaSigningSessionCompleted(ref message) => { - self.on_session_completed(sender, message) - } - &EcdsaSigningMessage::EcdsaSigningSessionDelegation(ref message) => { - self.on_session_delegated(sender, message) - } - &EcdsaSigningMessage::EcdsaSigningSessionDelegationCompleted(ref message) => { - self.on_session_delegation_completed(sender, message) - } - } - } - - /// When session is delegated to this node. 
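process_message above rejects any message whose session-level nonce differs from the nonce fixed when the session was created, and only then dispatches to a handler. A minimal sketch of that replay guard, with illustrative types:

#[derive(Debug, PartialEq)]
enum HandleError {
    ReplayProtection,
}

struct Envelope {
    session_nonce: u64,
    payload: &'static str,
}

// Reject any message whose nonce does not match the nonce fixed at session
// creation, then dispatch; the real process_message does the same before its
// match on the EcdsaSigningMessage variants.
fn handle(expected_nonce: u64, msg: &Envelope) -> Result<&'static str, HandleError> {
    if msg.session_nonce != expected_nonce {
        return Err(HandleError::ReplayProtection);
    }
    Ok(msg.payload)
}

fn main() {
    let fresh = Envelope { session_nonce: 1, payload: "consensus message" };
    let stale = Envelope { session_nonce: 9, payload: "replayed message" };
    assert_eq!(handle(1, &fresh), Ok("consensus message"));
    assert_eq!(handle(1, &stale), Err(HandleError::ReplayProtection));
}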
- pub fn on_session_delegated( - &self, - sender: &NodeId, - message: &EcdsaSigningSessionDelegation, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - - { - let mut data = self.data.lock(); - if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization - || data.delegation_status.is_some() - { - return Err(Error::InvalidStateForRequest); - } - - data.consensus_session - .consensus_job_mut() - .executor_mut() - .set_requester(message.requester.clone().into()); - data.delegation_status = Some(DelegationStatus::DelegatedFrom( - sender.clone(), - message.session_nonce, - )); - } - - self.initialize( - message.version.clone().into(), - message.message_hash.clone().into(), - ) - } - - /// When delegated session is completed on other node. - pub fn on_session_delegation_completed( - &self, - sender: &NodeId, - message: &EcdsaSigningSessionDelegationCompleted, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - - if self.core.meta.master_node_id != self.core.meta.self_node_id { - return Err(Error::InvalidStateForRequest); - } - - let mut data = self.data.lock(); - match data.delegation_status.as_ref() { - Some(&DelegationStatus::DelegatedTo(ref node)) if node == sender => (), - _ => return Err(Error::InvalidMessage), - } - - Self::set_signing_result(&self.core, &mut *data, Ok(message.signature.clone().into())); - - Ok(()) - } - - /// When consensus-related message is received. - pub fn on_consensus_message( - &self, - sender: &NodeId, - message: &EcdsaSigningConsensusMessage, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); - - let mut data = self.data.lock(); - let is_establishing_consensus = - data.consensus_session.state() == ConsensusSessionState::EstablishingConsensus; - - if let &ConsensusMessage::InitializeConsensusSession(ref msg) = &message.message { - let version = msg.version.clone().into(); - let has_key_share = self - .core - .key_share - .as_ref() - .map(|ks| ks.version(&version).is_ok()) - .unwrap_or(false); - data.consensus_session - .consensus_job_mut() - .executor_mut() - .set_has_key_share(has_key_share); - data.version = Some(version); - } - data.consensus_session - .on_consensus_message(&sender, &message.message)?; - - let is_consensus_established = - data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished; - if self.core.meta.self_node_id != self.core.meta.master_node_id - || !is_establishing_consensus - || !is_consensus_established - { - return Ok(()); - } - - let key_share = - self.core.key_share.as_ref().expect( - "this is master node; master node is selected so that it has key version; qed", - ); - let key_version = key_share.version(data.version.as_ref().expect( - "this is master node; master node is selected so that it has key version; qed", - ))?; - - let consensus_group = data.consensus_session.select_consensus_group()?.clone(); - let mut other_consensus_group_nodes = consensus_group.clone(); - other_consensus_group_nodes.remove(&self.core.meta.self_node_id); - let consensus_group_map: BTreeMap<_, _> = consensus_group - .iter() - .map(|n| (n.clone(), key_version.id_numbers[n].clone())) - .collect(); - - // start generation of signature nonce - let sig_nonce_generation_session = 
Self::start_generation_session( - &self.core, - &other_consensus_group_nodes, - |s, k, n, m| { - EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage( - EcdsaSignatureNonceGenerationMessage { - session: s.into(), - sub_session: k.into(), - session_nonce: n, - message: m, - }, - ) - }, - ); - sig_nonce_generation_session.initialize( - Default::default(), - Default::default(), - false, - key_share.threshold, - consensus_group_map.clone().into(), - )?; - data.sig_nonce_generation_session = Some(sig_nonce_generation_session); - - // start generation of inversed nonce computation session - let inv_nonce_generation_session = Self::start_generation_session( - &self.core, - &other_consensus_group_nodes, - move |s, k, n, m| { - EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage( - EcdsaInversionNonceGenerationMessage { - session: s.into(), - sub_session: k.into(), - session_nonce: n, - message: m, - }, - ) - }, - ); - inv_nonce_generation_session.initialize( - Default::default(), - Default::default(), - false, - key_share.threshold, - consensus_group_map.clone().into(), - )?; - data.inv_nonce_generation_session = Some(inv_nonce_generation_session); - - // start generation of zero-secret shares for inversed nonce computation session - let inv_zero_generation_session = Self::start_generation_session( - &self.core, - &other_consensus_group_nodes, - move |s, k, n, m| { - EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage( - EcdsaInversionZeroGenerationMessage { - session: s.into(), - sub_session: k.into(), - session_nonce: n, - message: m, - }, - ) - }, - ); - inv_zero_generation_session.initialize( - Default::default(), - Default::default(), - true, - key_share.threshold * 2, - consensus_group_map.clone().into(), - )?; - data.inv_zero_generation_session = Some(inv_zero_generation_session); - - data.state = SessionState::NoncesGenerating; - - Ok(()) - } - - /// When signature nonce generation message is received. 
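The three generation sub-sessions started above use thresholds t, t and 2 * t. A plausible reading of the 2 * t choice, consistent with the 2 * t < N warning at the top of this file: the product of the two degree-t nonce polynomials has degree 2t, so the masking zero-sharing must also have degree 2t and needs 2t + 1 openers. A toy check with plain integer polynomials (illustrative only):

// Multiply two polynomials given as low-to-high coefficient vectors.
fn mul(a: &[i64], b: &[i64]) -> Vec<i64> {
    let mut out = vec![0i64; a.len() + b.len() - 1];
    for (i, x) in a.iter().enumerate() {
        for (j, y) in b.iter().enumerate() {
            out[i + j] += x * y;
        }
    }
    out
}

fn add(a: &[i64], b: &[i64]) -> Vec<i64> {
    (0..a.len().max(b.len()))
        .map(|i| a.get(i).unwrap_or(&0) + b.get(i).unwrap_or(&0))
        .collect()
}

fn main() {
    let t = 2;
    let k_poly = [5i64, 3, 1]; // degree t sharing of the signature nonce k = 5
    let b_poly = [7i64, 2, 4]; // degree t sharing of the blinding nonce b = 7
    let zero_poly = [0i64, 9, 8, 6, 2]; // degree 2t sharing of zero (constant term 0)

    let masked = add(&mul(&k_poly, &b_poly), &zero_poly);
    assert_eq!(masked.len() - 1, 2 * t); // degree 2t, so 2t + 1 nodes must open it
    assert_eq!(masked[0], 5 * 7); // the constant term is still k * b
}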
- pub fn on_signature_nonce_generation_message( - &self, - sender: &NodeId, - message: &EcdsaSignatureNonceGenerationMessage, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); - - let mut data = self.data.lock(); - - if let &GenerationMessage::InitializeSession(ref message) = &message.message { - if &self.core.meta.master_node_id != sender { - match data.delegation_status.as_ref() { - Some(&DelegationStatus::DelegatedTo(s)) if s == *sender => (), - _ => return Err(Error::InvalidMessage), - } - } - - let consensus_group: BTreeSet = - message.nodes.keys().cloned().map(Into::into).collect(); - let mut other_consensus_group_nodes = consensus_group.clone(); - other_consensus_group_nodes.remove(&self.core.meta.self_node_id); - - data.sig_nonce_generation_session = Some(Self::start_generation_session( - &self.core, - &other_consensus_group_nodes, - |s, k, n, m| { - EcdsaSigningMessage::EcdsaSignatureNonceGenerationMessage( - EcdsaSignatureNonceGenerationMessage { - session: s.into(), - sub_session: k.into(), - session_nonce: n, - message: m, - }, - ) - }, - )); - - data.state = SessionState::NoncesGenerating; - } - - { - let generation_session = data - .sig_nonce_generation_session - .as_ref() - .ok_or(Error::InvalidStateForRequest)?; - let is_key_generating = generation_session.state() != GenerationSessionState::Finished; - generation_session.process_message(sender, &message.message)?; - - let is_key_generated = generation_session.state() == GenerationSessionState::Finished; - if !is_key_generating || !is_key_generated { - return Ok(()); - } - } - - if !Self::check_nonces_generated(&*data) { - return Ok(()); - } - - Self::send_inversed_nonce_coeff_share(&self.core, &mut *data)?; - data.state = if self.core.meta.master_node_id != self.core.meta.self_node_id { - SessionState::SignatureComputing - } else { - SessionState::WaitingForInversedNonceShares - }; - - Ok(()) - } - - /// When inversion nonce generation message is received. 
- pub fn on_inversion_nonce_generation_message( - &self, - sender: &NodeId, - message: &EcdsaInversionNonceGenerationMessage, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); - - let mut data = self.data.lock(); - - if let &GenerationMessage::InitializeSession(ref message) = &message.message { - if &self.core.meta.master_node_id != sender { - match data.delegation_status.as_ref() { - Some(&DelegationStatus::DelegatedTo(s)) if s == *sender => (), - _ => return Err(Error::InvalidMessage), - } - } - - let consensus_group: BTreeSet = - message.nodes.keys().cloned().map(Into::into).collect(); - let mut other_consensus_group_nodes = consensus_group.clone(); - other_consensus_group_nodes.remove(&self.core.meta.self_node_id); - - data.inv_nonce_generation_session = Some(Self::start_generation_session( - &self.core, - &other_consensus_group_nodes, - |s, k, n, m| { - EcdsaSigningMessage::EcdsaInversionNonceGenerationMessage( - EcdsaInversionNonceGenerationMessage { - session: s.into(), - sub_session: k.into(), - session_nonce: n, - message: m, - }, - ) - }, - )); - - data.state = SessionState::NoncesGenerating; - } - - { - let generation_session = data - .inv_nonce_generation_session - .as_ref() - .ok_or(Error::InvalidStateForRequest)?; - let is_key_generating = generation_session.state() != GenerationSessionState::Finished; - generation_session.process_message(sender, &message.message)?; - - let is_key_generated = generation_session.state() == GenerationSessionState::Finished; - if !is_key_generating || !is_key_generated { - return Ok(()); - } - } - - if !Self::check_nonces_generated(&*data) { - return Ok(()); - } - - Self::send_inversed_nonce_coeff_share(&self.core, &mut *data)?; - data.state = if self.core.meta.master_node_id != self.core.meta.self_node_id { - SessionState::SignatureComputing - } else { - SessionState::WaitingForInversedNonceShares - }; - - Ok(()) - } - - /// When inversion zero generation message is received. 
- pub fn on_inversion_zero_generation_message( - &self, - sender: &NodeId, - message: &EcdsaInversionZeroGenerationMessage, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); - - let mut data = self.data.lock(); - - if let &GenerationMessage::InitializeSession(ref message) = &message.message { - if &self.core.meta.master_node_id != sender { - match data.delegation_status.as_ref() { - Some(&DelegationStatus::DelegatedTo(s)) if s == *sender => (), - _ => return Err(Error::InvalidMessage), - } - } - - let consensus_group: BTreeSet = - message.nodes.keys().cloned().map(Into::into).collect(); - let mut other_consensus_group_nodes = consensus_group.clone(); - other_consensus_group_nodes.remove(&self.core.meta.self_node_id); - - data.inv_zero_generation_session = Some(Self::start_generation_session( - &self.core, - &other_consensus_group_nodes, - |s, k, n, m| { - EcdsaSigningMessage::EcdsaInversionZeroGenerationMessage( - EcdsaInversionZeroGenerationMessage { - session: s.into(), - sub_session: k.into(), - session_nonce: n, - message: m, - }, - ) - }, - )); - - data.state = SessionState::NoncesGenerating; - } - - { - let generation_session = data - .inv_zero_generation_session - .as_ref() - .ok_or(Error::InvalidStateForRequest)?; - let is_key_generating = generation_session.state() != GenerationSessionState::Finished; - generation_session.process_message(sender, &message.message)?; - - let is_key_generated = generation_session.state() == GenerationSessionState::Finished; - if !is_key_generating || !is_key_generated { - return Ok(()); - } - } - - if !Self::check_nonces_generated(&*data) { - return Ok(()); - } - - Self::send_inversed_nonce_coeff_share(&self.core, &mut *data)?; - data.state = if self.core.meta.master_node_id != self.core.meta.self_node_id { - SessionState::SignatureComputing - } else { - SessionState::WaitingForInversedNonceShares - }; - - Ok(()) - } - - /// When inversed nonce share is received. - pub fn on_inversed_nonce_coeff_share( - &self, - sender: &NodeId, - message: &EcdsaSigningInversedNonceCoeffShare, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); - - let mut data = self.data.lock(); - - if self.core.meta.self_node_id != self.core.meta.master_node_id { - return Err(Error::InvalidMessage); - } - match data.state { - SessionState::WaitingForInversedNonceShares => (), - SessionState::NoncesGenerating => return Err(Error::TooEarlyForRequest), - _ => return Err(Error::InvalidStateForRequest), - } - - let inversed_nonce_coeff = { - let consensus_group = data.consensus_session.select_consensus_group()?.clone(); - { - let inversed_nonce_coeff_shares = data.inversed_nonce_coeff_shares.as_mut() - .expect("we are in WaitingForInversedNonceShares state; inversed_nonce_coeff_shares are filled before this state; qed"); - match inversed_nonce_coeff_shares.entry(sender.clone()) { - Entry::Occupied(_) => return Err(Error::InvalidStateForRequest), - Entry::Vacant(entry) => { - entry.insert(message.inversed_nonce_coeff_share.clone().into()); - } - } - - if consensus_group - .iter() - .any(|n| !inversed_nonce_coeff_shares.contains_key(n)) - { - return Ok(()); - } - } - - Self::compute_inversed_nonce_coeff(&self.core, &*data)? 
- }; - - let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone(); - let message_hash = data.message_hash - .expect("we are on master node; on master node message_hash is filled in initialize(); on_generation_message follows initialize; qed"); - - let nonce_exists_proof = "nonce is generated before signature is computed; we are in SignatureComputing state; qed"; - let sig_nonce_public = data - .sig_nonce_generation_session - .as_ref() - .expect(nonce_exists_proof) - .joint_public_and_secret() - .expect(nonce_exists_proof)? - .0; - let inv_nonce_share = data - .inv_nonce_generation_session - .as_ref() - .expect(nonce_exists_proof) - .joint_public_and_secret() - .expect(nonce_exists_proof)? - .2; - - self.core.disseminate_jobs( - &mut data.consensus_session, - &version, - sig_nonce_public, - inv_nonce_share, - inversed_nonce_coeff, - message_hash, - ) - } - - /// When partial signature is requested. - pub fn on_partial_signature_requested( - &self, - sender: &NodeId, - message: &EcdsaRequestPartialSignature, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); - - let key_share = match self.core.key_share.as_ref() { - None => return Err(Error::InvalidMessage), - Some(key_share) => key_share, - }; - - let mut data = self.data.lock(); - - if sender != &self.core.meta.master_node_id { - return Err(Error::InvalidMessage); - } - if data.state != SessionState::SignatureComputing { - return Err(Error::InvalidStateForRequest); - } - - let nonce_exists_proof = "nonce is generated before signature is computed; we are in SignatureComputing state; qed"; - let sig_nonce_public = data - .sig_nonce_generation_session - .as_ref() - .expect(nonce_exists_proof) - .joint_public_and_secret() - .expect(nonce_exists_proof)? - .0; - let inv_nonce_share = data - .inv_nonce_generation_session - .as_ref() - .expect(nonce_exists_proof) - .joint_public_and_secret() - .expect(nonce_exists_proof)? - .2; - - let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone(); - let key_version = key_share.version(&version)?.hash.clone(); - - let signing_job = EcdsaSigningJob::new_on_slave( - key_share.clone(), - key_version, - sig_nonce_public, - inv_nonce_share, - )?; - let signing_transport = self.core.signing_transport(); - - data.consensus_session - .on_job_request( - sender, - EcdsaPartialSigningRequest { - id: message.request_id.clone().into(), - inversed_nonce_coeff: message.inversed_nonce_coeff.clone().into(), - message_hash: message.message_hash.clone().into(), - }, - signing_job, - signing_transport, - ) - .map(|_| ()) - } - - /// When partial signature is received. 
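Both the inversed-nonce-coefficient handler above and the partial-signature handler below follow the same collect-then-combine shape: store one response per node, reject duplicates, and combine only once every node in the required group has answered. A minimal sketch with illustrative types, where a plain sum stands in for the real combination:

use std::collections::{btree_map::Entry, BTreeMap, BTreeSet};

#[derive(Debug, PartialEq)]
enum CollectError {
    DuplicateResponse,
}

struct Collector {
    required: BTreeSet<u32>,      // nodes the master is still expecting to hear from
    received: BTreeMap<u32, u64>, // node id -> partial value
}

impl Collector {
    fn on_response(&mut self, node: u32, value: u64) -> Result<Option<u64>, CollectError> {
        match self.received.entry(node) {
            Entry::Occupied(_) => return Err(CollectError::DuplicateResponse),
            Entry::Vacant(entry) => {
                entry.insert(value);
            }
        }
        if self.required.iter().any(|n| !self.received.contains_key(n)) {
            return Ok(None); // still waiting, mirrors the early `return Ok(())` above
        }
        Ok(Some(self.received.values().sum())) // all answers are in => combine
    }
}

fn main() {
    let mut collector = Collector {
        required: [1u32, 2, 3].into_iter().collect(),
        received: BTreeMap::new(),
    };
    assert_eq!(collector.on_response(1, 10), Ok(None));
    assert_eq!(collector.on_response(2, 20), Ok(None));
    assert_eq!(collector.on_response(1, 10), Err(CollectError::DuplicateResponse));
    assert_eq!(collector.on_response(3, 30), Ok(Some(60)));
}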
- pub fn on_partial_signature( - &self, - sender: &NodeId, - message: &EcdsaPartialSignature, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); - - let mut data = self.data.lock(); - data.consensus_session.on_job_response( - sender, - EcdsaPartialSigningResponse { - request_id: message.request_id.clone().into(), - partial_signature_s: message.partial_signature_s.clone().into(), - }, - )?; - - if data.consensus_session.state() != ConsensusSessionState::Finished { - return Ok(()); - } - - // send compeltion signal to all nodes, except for rejected nodes - for node in data.consensus_session.consensus_non_rejected_nodes() { - self.core.cluster.send( - &node, - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionCompleted( - EcdsaSigningSessionCompleted { - session: self.core.meta.id.clone().into(), - sub_session: self.core.access_key.clone().into(), - session_nonce: self.core.nonce, - }, - )), - )?; - } - - let result = data.consensus_session.result()?; - Self::set_signing_result(&self.core, &mut *data, Ok(result)); - - Ok(()) - } - - /// When session is completed. - pub fn on_session_completed( - &self, - sender: &NodeId, - message: &EcdsaSigningSessionCompleted, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); - - self.data - .lock() - .consensus_session - .on_session_completed(sender) - } - - /// Process error from the other node. - fn process_node_error(&self, node: Option<&NodeId>, error: Error) -> Result<(), Error> { - let mut data = self.data.lock(); - let is_self_node_error = node - .map(|n| n == &self.core.meta.self_node_id) - .unwrap_or(false); - // error is always fatal if coming from this node - if is_self_node_error { - Self::set_signing_result(&self.core, &mut *data, Err(error.clone())); - return Err(error); - } - - match { - match node { - Some(node) => data.consensus_session.on_node_error(node, error.clone()), - None => data.consensus_session.on_session_timeout(), - } - } { - Ok(false) => Ok(()), - Ok(true) => { - let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone(); - - let message_hash = data.message_hash.as_ref().cloned() - .expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when message_hash.is_some(); qed"); - - let nonce_exists_proof = "on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when nonces generation has completed; qed"; - let sig_nonce_public = data - .sig_nonce_generation_session - .as_ref() - .expect(nonce_exists_proof) - .joint_public_and_secret() - .expect(nonce_exists_proof)? - .0; - let inv_nonce_share = data - .inv_nonce_generation_session - .as_ref() - .expect(nonce_exists_proof) - .joint_public_and_secret() - .expect(nonce_exists_proof)? 
- .2; - - let inversed_nonce_coeff = Self::compute_inversed_nonce_coeff(&self.core, &*data)?; - - let disseminate_result = self.core.disseminate_jobs( - &mut data.consensus_session, - &version, - sig_nonce_public, - inv_nonce_share, - inversed_nonce_coeff, - message_hash, - ); - match disseminate_result { - Ok(()) => Ok(()), - Err(err) => { - warn!( - "{}: ECDSA signing session failed with error: {:?} from {:?}", - &self.core.meta.self_node_id, error, node - ); - Self::set_signing_result(&self.core, &mut *data, Err(err.clone())); - Err(err) - } - } - } - Err(err) => { - warn!( - "{}: ECDSA signing session failed with error: {:?} from {:?}", - &self.core.meta.self_node_id, error, node - ); - Self::set_signing_result(&self.core, &mut *data, Err(err.clone())); - Err(err) - } - } - } - - /// Start generation session. - fn start_generation_session( - core: &SessionCore, - other_consensus_group_nodes: &BTreeSet, - map_message: F, - ) -> GenerationSession - where - F: Fn(SessionId, Secret, u64, GenerationMessage) -> EcdsaSigningMessage - + Send - + Sync - + 'static, - { - GenerationSession::new(GenerationSessionParams { - id: core.meta.id.clone(), - self_node_id: core.meta.self_node_id.clone(), - key_storage: None, - cluster: Arc::new(NonceGenerationTransport { - id: core.meta.id.clone(), - access_key: core.access_key.clone(), - nonce: core.nonce, - cluster: core.cluster.clone(), - other_nodes_ids: other_consensus_group_nodes.clone(), - map: map_message, - }), - nonce: None, - }) - } - - /// Set signing session result. - fn set_signing_result( - core: &SessionCore, - data: &mut SessionData, - result: Result, - ) { - if let Some(DelegationStatus::DelegatedFrom(master, nonce)) = data.delegation_status.take() - { - // error means can't communicate => ignore it - let _ = match result.as_ref() { - Ok(signature) => core.cluster.send( - &master, - Message::EcdsaSigning( - EcdsaSigningMessage::EcdsaSigningSessionDelegationCompleted( - EcdsaSigningSessionDelegationCompleted { - session: core.meta.id.clone().into(), - sub_session: core.access_key.clone().into(), - session_nonce: nonce, - signature: signature.clone().into(), - }, - ), - ), - ), - Err(error) => core.cluster.send( - &master, - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionError( - EcdsaSigningSessionError { - session: core.meta.id.clone().into(), - sub_session: core.access_key.clone().into(), - session_nonce: nonce, - error: error.clone().into(), - }, - )), - ), - }; - } - - data.result = Some(result); - core.completed.notify_all(); - } - - /// Check if all nonces are generated. - fn check_nonces_generated(data: &SessionData) -> bool { - let expect_proof = - "check_nonces_generated is called when som nonce-gen session is completed; - all nonce-gen sessions are created at once; qed"; - let sig_nonce_generation_session = data - .sig_nonce_generation_session - .as_ref() - .expect(expect_proof); - let inv_nonce_generation_session = data - .inv_nonce_generation_session - .as_ref() - .expect(expect_proof); - let inv_zero_generation_session = data - .inv_zero_generation_session - .as_ref() - .expect(expect_proof); - - sig_nonce_generation_session.state() == GenerationSessionState::Finished - && inv_nonce_generation_session.state() == GenerationSessionState::Finished - && inv_zero_generation_session.state() == GenerationSessionState::Finished - } - - /// Broadcast inversed nonce share. 
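The broadcast helper below, together with compute_inversed_nonce_coeff further down, appears to implement the standard blinded-inversion trick: the opened product of the signature nonce k and the blinding nonce b reveals nothing about k on its own, and k^-1 = b * (k * b)^-1. A toy, non-shared version over a small prime field (the real code works on secret shares over the secp256k1 group order):

const P: u128 = 1_000_000_007; // small prime field, stand-in for the real group order

fn mod_pow(mut base: u128, mut exp: u128, modulus: u128) -> u128 {
    let mut acc = 1;
    base %= modulus;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc * base % modulus;
        }
        base = base * base % modulus;
        exp >>= 1;
    }
    acc
}

fn mod_inv(a: u128) -> u128 {
    mod_pow(a % P, P - 2, P) // Fermat inverse, P is prime
}

fn main() {
    let k = 123_456_789u128; // the signature nonce; never opened directly
    let b = 987_654_321u128; // the blinding nonce
    let c = k * b % P; // safe to open: c reveals nothing about k without b
    let k_inv = b * mod_inv(c) % P;
    assert_eq!(k * k_inv % P, 1); // k^-1 recovered without ever exposing k
}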
- fn send_inversed_nonce_coeff_share( - core: &SessionCore, - data: &mut SessionData, - ) -> Result<(), Error> { - let proof = "inversed nonce coeff share is sent after nonces generation is completed; qed"; - - let sig_nonce_generation_session = data.sig_nonce_generation_session.as_ref().expect(proof); - let sig_nonce = sig_nonce_generation_session - .joint_public_and_secret() - .expect(proof) - .expect(proof) - .2; - - let inv_nonce_generation_session = data.inv_nonce_generation_session.as_ref().expect(proof); - let inv_nonce = inv_nonce_generation_session - .joint_public_and_secret() - .expect(proof) - .expect(proof) - .2; - - let inv_zero_generation_session = data.inv_zero_generation_session.as_ref().expect(proof); - let inv_zero = inv_zero_generation_session - .joint_public_and_secret() - .expect(proof) - .expect(proof) - .2; - - let inversed_nonce_coeff_share = - math::compute_ecdsa_inversed_secret_coeff_share(&sig_nonce, &inv_nonce, &inv_zero)?; - if core.meta.self_node_id == core.meta.master_node_id { - let mut inversed_nonce_coeff_shares = BTreeMap::new(); - inversed_nonce_coeff_shares - .insert(core.meta.self_node_id.clone(), inversed_nonce_coeff_share); - data.inversed_nonce_coeff_shares = Some(inversed_nonce_coeff_shares); - Ok(()) - } else { - core.cluster.send( - &core.meta.master_node_id, - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningInversedNonceCoeffShare( - EcdsaSigningInversedNonceCoeffShare { - session: core.meta.id.clone().into(), - sub_session: core.access_key.clone().into(), - session_nonce: core.nonce, - inversed_nonce_coeff_share: inversed_nonce_coeff_share.into(), - }, - )), - ) - } - } - - /// Compute inversed nonce coefficient on master node. - fn compute_inversed_nonce_coeff( - core: &SessionCore, - data: &SessionData, - ) -> Result { - let proof = - "inversed nonce coeff is computed on master node; key version exists on master node"; - let key_share = core.key_share.as_ref().expect(proof); - let key_version = key_share - .version(data.version.as_ref().expect(proof)) - .expect(proof); - - let proof = "inversed nonce coeff is computed after all shares are received; qed"; - let inversed_nonce_coeff_shares = data.inversed_nonce_coeff_shares.as_ref().expect(proof); - - math::compute_ecdsa_inversed_secret_coeff_from_shares( - key_share.threshold, - &inversed_nonce_coeff_shares - .keys() - .map(|n| key_version.id_numbers[n].clone()) - .collect::>(), - &inversed_nonce_coeff_shares - .values() - .cloned() - .collect::>(), - ) - } -} - -impl ClusterSession for SessionImpl { - type Id = SessionIdWithSubSession; - - fn type_name() -> &'static str { - "ecdsa_signing" - } - - fn id(&self) -> SessionIdWithSubSession { - SessionIdWithSubSession::new(self.core.meta.id.clone(), self.core.access_key.clone()) - } - - fn is_finished(&self) -> bool { - let data = self.data.lock(); - data.consensus_session.state() == ConsensusSessionState::Failed - || data.consensus_session.state() == ConsensusSessionState::Finished - || data.result.is_some() - } - - fn on_node_timeout(&self, node: &NodeId) { - // ignore error, only state matters - let _ = self.process_node_error(Some(node), Error::NodeDisconnected); - } - - fn on_session_timeout(&self) { - // ignore error, only state matters - let _ = self.process_node_error(None, Error::NodeDisconnected); - } - - fn on_session_error(&self, node: &NodeId, error: Error) { - let is_fatal = self.process_node_error(Some(node), error.clone()).is_err(); - let is_this_node_error = *node == self.core.meta.self_node_id; - if is_fatal || 
is_this_node_error { - // error in signing session is non-fatal, if occurs on slave node - // => either respond with error - // => or broadcast error - let message = Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningSessionError( - EcdsaSigningSessionError { - session: self.core.meta.id.clone().into(), - sub_session: self.core.access_key.clone().into(), - session_nonce: self.core.nonce, - error: error.clone().into(), - }, - )); - - // do not bother processing send error, as we already processing error - let _ = if self.core.meta.master_node_id == self.core.meta.self_node_id { - self.core.cluster.broadcast(message) - } else { - self.core - .cluster - .send(&self.core.meta.master_node_id, message) - }; - } - } - - fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { - match *message { - Message::EcdsaSigning(ref message) => self.process_message(sender, message), - _ => unreachable!("cluster checks message to be correct before passing; qed"), - } - } -} - -impl NonceGenerationTransport -where - F: Fn(SessionId, Secret, u64, GenerationMessage) -> EcdsaSigningMessage + Send + Sync, -{ - fn map_message(&self, message: Message) -> Result { - match message { - Message::Generation(message) => Ok(Message::EcdsaSigning((self.map)( - self.id.clone(), - self.access_key.clone(), - self.nonce, - message, - ))), - _ => Err(Error::InvalidMessage), - } - } -} - -impl Cluster for NonceGenerationTransport -where - F: Fn(SessionId, Secret, u64, GenerationMessage) -> EcdsaSigningMessage + Send + Sync, -{ - fn broadcast(&self, message: Message) -> Result<(), Error> { - let message = self.map_message(message)?; - for to in &self.other_nodes_ids { - self.cluster.send(to, message.clone())?; - } - Ok(()) - } - - fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> { - debug_assert!(self.other_nodes_ids.contains(to)); - self.cluster.send(to, self.map_message(message)?) 
- } - - fn is_connected(&self, node: &NodeId) -> bool { - self.cluster.is_connected(node) - } - - fn nodes(&self) -> BTreeSet { - self.cluster.nodes() - } - - fn configured_nodes_count(&self) -> usize { - self.cluster.configured_nodes_count() - } - - fn connected_nodes_count(&self) -> usize { - self.cluster.connected_nodes_count() - } -} - -impl SessionCore { - pub fn signing_transport(&self) -> SigningJobTransport { - SigningJobTransport { - id: self.meta.id.clone(), - access_key: self.access_key.clone(), - nonce: self.nonce, - cluster: self.cluster.clone(), - } - } - - pub fn disseminate_jobs( - &self, - consensus_session: &mut SigningConsensusSession, - version: &H256, - nonce_public: Public, - inv_nonce_share: Secret, - inversed_nonce_coeff: Secret, - message_hash: H256, - ) -> Result<(), Error> { - let key_share = match self.key_share.as_ref() { - None => return Err(Error::InvalidMessage), - Some(key_share) => key_share, - }; - - let key_version = key_share.version(version)?.hash.clone(); - let signing_job = EcdsaSigningJob::new_on_master( - key_share.clone(), - key_version, - nonce_public, - inv_nonce_share, - inversed_nonce_coeff, - message_hash, - )?; - consensus_session - .disseminate_jobs(signing_job, self.signing_transport(), false) - .map(|_| ()) - } -} - -impl JobTransport for SigningConsensusTransport { - type PartialJobRequest = Requester; - type PartialJobResponse = bool; - - fn send_partial_request(&self, node: &NodeId, request: Requester) -> Result<(), Error> { - let version = self.version.as_ref() - .expect("send_partial_request is called on initialized master node only; version is filled in before initialization starts on master node; qed"); - self.cluster.send( - node, - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningConsensusMessage( - EcdsaSigningConsensusMessage { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - message: ConsensusMessage::InitializeConsensusSession( - InitializeConsensusSession { - requester: request.into(), - version: version.clone().into(), - }, - ), - }, - )), - ) - } - - fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> { - self.cluster.send( - node, - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaSigningConsensusMessage( - EcdsaSigningConsensusMessage { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - message: ConsensusMessage::ConfirmConsensusInitialization( - ConfirmConsensusInitialization { - is_confirmed: response, - }, - ), - }, - )), - ) - } -} - -impl JobTransport for SigningJobTransport { - type PartialJobRequest = EcdsaPartialSigningRequest; - type PartialJobResponse = EcdsaPartialSigningResponse; - - fn send_partial_request( - &self, - node: &NodeId, - request: EcdsaPartialSigningRequest, - ) -> Result<(), Error> { - self.cluster.send( - node, - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaRequestPartialSignature( - EcdsaRequestPartialSignature { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - request_id: request.id.into(), - inversed_nonce_coeff: request.inversed_nonce_coeff.into(), - message_hash: request.message_hash.into(), - }, - )), - ) - } - - fn send_partial_response( - &self, - node: &NodeId, - response: EcdsaPartialSigningResponse, - ) -> Result<(), Error> { - self.cluster.send( - node, - Message::EcdsaSigning(EcdsaSigningMessage::EcdsaPartialSignature( - EcdsaPartialSignature { - session: 
self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - request_id: response.request_id.into(), - partial_signature_s: response.partial_signature_s.into(), - }, - )), - ) - } -} - -#[cfg(test)] -mod tests { - use ethereum_types::H256; - use ethkey::{self, public_to_address, verify_public, Generator, Public, Random}; - use key_server_cluster::{ - cluster::tests::MessageLoop as ClusterMessageLoop, - generation_session::tests::MessageLoop as GenerationMessageLoop, - signing_session_ecdsa::SessionImpl, Error, KeyStorage, SessionId, - }; - use std::sync::Arc; - - #[derive(Debug)] - pub struct MessageLoop(pub ClusterMessageLoop); - - impl MessageLoop { - pub fn new(num_nodes: usize, threshold: usize) -> Result { - let ml = GenerationMessageLoop::new(num_nodes).init(threshold)?; - ml.0.loop_until(|| ml.0.is_empty()); // complete generation session - - Ok(MessageLoop(ml.0)) - } - - pub fn init_with_version( - self, - key_version: Option, - ) -> Result<(Self, Public, H256), Error> { - let message_hash = H256::random(); - let requester = Random.generate().unwrap(); - let signature = ethkey::sign(requester.secret(), &SessionId::default()).unwrap(); - self.0 - .cluster(0) - .client() - .new_ecdsa_signing_session( - Default::default(), - signature.into(), - key_version, - message_hash, - ) - .map(|_| (self, *requester.public(), message_hash)) - } - - pub fn init(self) -> Result<(Self, Public, H256), Error> { - let key_version = self - .0 - .key_storage(0) - .get(&Default::default()) - .unwrap() - .unwrap() - .versions - .iter() - .last() - .unwrap() - .hash; - self.init_with_version(Some(key_version)) - } - - pub fn init_delegated(self) -> Result<(Self, Public, H256), Error> { - self.0.key_storage(0).remove(&Default::default()).unwrap(); - self.init_with_version(None) - } - - pub fn init_with_isolated(self) -> Result<(Self, Public, H256), Error> { - self.0.isolate(1); - self.init() - } - - pub fn session_at(&self, idx: usize) -> Arc { - self.0.sessions(idx).ecdsa_signing_sessions.first().unwrap() - } - - pub fn ensure_completed(&self) { - self.0.loop_until(|| self.0.is_empty()); - assert!(self.session_at(0).wait().is_ok()); - } - } - - #[test] - fn failed_gen_ecdsa_sign_session_when_threshold_is_too_low() { - let test_cases = [(1, 2), (2, 4), (3, 6), (4, 6)]; - for &(threshold, num_nodes) in &test_cases { - assert_eq!( - MessageLoop::new(num_nodes, threshold) - .unwrap() - .init() - .unwrap_err(), - Error::ConsensusUnreachable - ); - } - } - - #[test] - fn complete_gen_ecdsa_sign_session() { - let test_cases = [(0, 1), (2, 5), (2, 6), (3, 11), (4, 11)]; - for &(threshold, num_nodes) in &test_cases { - let (ml, _, message) = MessageLoop::new(num_nodes, threshold) - .unwrap() - .init() - .unwrap(); - ml.0.loop_until(|| ml.0.is_empty()); - - let signer_public = - ml.0.key_storage(0) - .get(&Default::default()) - .unwrap() - .unwrap() - .public; - let signature = ml.session_at(0).wait().unwrap(); - assert!(verify_public(&signer_public, &signature, &message).unwrap()); - } - } - - #[test] - fn ecdsa_complete_signing_session_with_single_node_failing() { - let (ml, requester, _) = MessageLoop::new(4, 1).unwrap().init().unwrap(); - - // we need at least 3-of-4 nodes to agree to reach consensus - // let's say 1 of 4 nodes disagee - ml.0.acl_storage(1) - .prohibit(public_to_address(&requester), Default::default()); - - // then consensus reachable, but single node will disagree - ml.ensure_completed(); - } - - #[test] - fn 
ecdsa_complete_signing_session_with_acl_check_failed_on_master() { - let (ml, requester, _) = MessageLoop::new(4, 1).unwrap().init().unwrap(); - - // we need at least 3-of-4 nodes to agree to reach consensus - // let's say 1 of 4 nodes (here: master) disagee - ml.0.acl_storage(0) - .prohibit(public_to_address(&requester), Default::default()); - - // then consensus reachable, but single node will disagree - ml.ensure_completed(); - } - - #[test] - fn ecdsa_signing_works_when_delegated_to_other_node() { - MessageLoop::new(4, 1) - .unwrap() - .init_delegated() - .unwrap() - .0 - .ensure_completed(); - } - - #[test] - fn ecdsa_signing_works_when_share_owners_are_isolated() { - MessageLoop::new(6, 2) - .unwrap() - .init_with_isolated() - .unwrap() - .0 - .ensure_completed(); - } -} diff --git a/secret-store/src/key_server_cluster/client_sessions/signing_session_schnorr.rs b/secret-store/src/key_server_cluster/client_sessions/signing_session_schnorr.rs deleted file mode 100644 index e307240d9..000000000 --- a/secret-store/src/key_server_cluster/client_sessions/signing_session_schnorr.rs +++ /dev/null @@ -1,1478 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. - -// OpenEthereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// OpenEthereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with OpenEthereum. If not, see . - -use ethereum_types::H256; -use ethkey::{Public, Secret}; -use key_server_cluster::{ - cluster::Cluster, - cluster_sessions::{ClusterSession, SessionIdWithSubSession}, - generation_session::{ - SessionImpl as GenerationSession, SessionParams as GenerationSessionParams, - SessionState as GenerationSessionState, - }, - jobs::{ - consensus_session::{ConsensusSession, ConsensusSessionParams, ConsensusSessionState}, - job_session::JobTransport, - key_access_job::KeyAccessJob, - signing_job_schnorr::{ - SchnorrPartialSigningRequest, SchnorrPartialSigningResponse, SchnorrSigningJob, - }, - }, - message::{ - ConfirmConsensusInitialization, ConsensusMessage, GenerationMessage, - InitializeConsensusSession, Message, SchnorrPartialSignature, - SchnorrRequestPartialSignature, SchnorrSigningConsensusMessage, - SchnorrSigningGenerationMessage, SchnorrSigningMessage, SchnorrSigningSessionCompleted, - SchnorrSigningSessionDelegation, SchnorrSigningSessionDelegationCompleted, - SchnorrSigningSessionError, - }, - AclStorage, DocumentKeyShare, Error, NodeId, Requester, SessionId, SessionMeta, -}; -use parking_lot::{Condvar, Mutex}; -use std::{collections::BTreeSet, sync::Arc}; - -/// Distributed Schnorr-signing session. -/// Based on "Efficient Multi-Party Digital Signature using Adaptive Secret Sharing for Low-Power Devices in Wireless Network" paper. 
-/// Brief overview: -/// 1) initialization: master node (which has received request for signing the message) requests all other nodes to sign the message -/// 2) ACL check: all nodes which have received the request are querying ACL-contract to check if requestor has access to the private key -/// 3) partial signing: every node which has succussfully checked access for the requestor do a partial signing -/// 4) signing: master node receives all partial signatures of the secret and computes the signature -pub struct SessionImpl { - /// Session core. - core: SessionCore, - /// Session data. - data: Mutex, -} - -/// Immutable session data. -struct SessionCore { - /// Session metadata. - pub meta: SessionMeta, - /// Signing session access key. - pub access_key: Secret, - /// Key share. - pub key_share: Option, - /// Cluster which allows this node to send messages to other nodes in the cluster. - pub cluster: Arc, - /// Session-level nonce. - pub nonce: u64, - /// SessionImpl completion condvar. - pub completed: Condvar, -} - -/// Signing consensus session type. -type SigningConsensusSession = ConsensusSession< - KeyAccessJob, - SigningConsensusTransport, - SchnorrSigningJob, - SigningJobTransport, ->; - -/// Mutable session data. -struct SessionData { - /// Session state. - pub state: SessionState, - /// Message hash. - pub message_hash: Option, - /// Key version to use for decryption. - pub version: Option, - /// Consensus-based signing session. - pub consensus_session: SigningConsensusSession, - /// Session key generation session. - pub generation_session: Option, - /// Delegation status. - pub delegation_status: Option, - /// Decryption result. - pub result: Option>, -} - -/// Signing session state. -#[derive(Debug, PartialEq)] -#[cfg_attr(test, derive(Clone, Copy))] -pub enum SessionState { - /// State when consensus is establishing. - ConsensusEstablishing, - /// State when session key is generating. - SessionKeyGeneration, - /// State when signature is computing. - SignatureComputing, -} - -/// Session creation parameters -pub struct SessionParams { - /// Session metadata. - pub meta: SessionMeta, - /// Session access key. - pub access_key: Secret, - /// Key share. - pub key_share: Option, - /// ACL storage. - pub acl_storage: Arc, - /// Cluster - pub cluster: Arc, - /// Session nonce. - pub nonce: u64, -} - -/// Signing consensus transport. -struct SigningConsensusTransport { - /// Session id. - id: SessionId, - /// Session access key. - access_key: Secret, - /// Session-level nonce. - nonce: u64, - /// Selected key version (on master node). - version: Option, - /// Cluster. - cluster: Arc, -} - -/// Signing key generation transport. -struct SessionKeyGenerationTransport { - /// Session access key. - access_key: Secret, - /// Cluster. - cluster: Arc, - /// Session-level nonce. - nonce: u64, - /// Other nodes ids. - other_nodes_ids: BTreeSet, -} - -/// Signing job transport -struct SigningJobTransport { - /// Session id. - id: SessionId, - /// Session access key. - access_key: Secret, - /// Session-level nonce. - nonce: u64, - /// Cluster. - cluster: Arc, -} - -/// Session delegation status. -enum DelegationStatus { - /// Delegated to other node. - DelegatedTo(NodeId), - /// Delegated from other node. - DelegatedFrom(NodeId, u64), -} - -impl SessionImpl { - /// Create new signing session. 
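Since the result of this session is the pair (`signature_c`, `signature_s`), it may help to see what a Schnorr (c, s) signature and its verification look like in the simplest possible single-signer setting. The sketch below uses a schoolbook multiplicative group and a toy challenge function; it illustrates only one common Schnorr variant and deliberately shares nothing with the project's secp256k1 code, where both the key and the nonce are additionally split into shares across nodes.

fn mod_pow(mut base: u64, mut exp: u64, modulus: u64) -> u64 {
    let mut result = 1u64;
    base %= modulus;
    while exp > 0 {
        if exp & 1 == 1 {
            result = result * base % modulus;
        }
        base = base * base % modulus;
        exp >>= 1;
    }
    result
}

// Toy challenge "hash": NOT cryptographic, it only binds r and the message to c.
fn toy_hash(r: u64, message: u64, q: u64) -> u64 {
    (r.wrapping_mul(31).wrapping_add(message)) % q
}

fn main() {
    // Schoolbook group: p = 607, subgroup of prime order q = 101, generator g = 64.
    let (p, q, g) = (607u64, 101u64, 64u64);

    let x = 57u64; // long-term signing key (held in shares across nodes in the threshold setting)
    let y = mod_pow(g, x, p); // corresponding public key

    let message = 42u64;
    let k = 23u64; // per-signature nonce (generated jointly as the session key in the threshold setting)
    let r = mod_pow(g, k, p);
    let c = toy_hash(r, message, q);
    let s = (k + c * x) % q; // one common Schnorr variant: s = k + c * x (mod q)

    // Verification recomputes r' = g^s * y^(-c) and checks that the challenge matches.
    let r_check = mod_pow(g, s, p) * mod_pow(y, q - c, p) % p;
    assert_eq!(r_check, r);
    assert_eq!(toy_hash(r_check, message, q), c);
    println!("toy signature (c, s) = ({}, {}) verified", c, s);
}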
- pub fn new(params: SessionParams, requester: Option) -> Result { - debug_assert_eq!( - params.meta.threshold, - params - .key_share - .as_ref() - .map(|ks| ks.threshold) - .unwrap_or_default() - ); - - let consensus_transport = SigningConsensusTransport { - id: params.meta.id.clone(), - access_key: params.access_key.clone(), - nonce: params.nonce, - version: None, - cluster: params.cluster.clone(), - }; - let consensus_session = ConsensusSession::new(ConsensusSessionParams { - meta: params.meta.clone(), - consensus_executor: match requester { - Some(requester) => KeyAccessJob::new_on_master( - params.meta.id.clone(), - params.acl_storage.clone(), - requester, - ), - None => { - KeyAccessJob::new_on_slave(params.meta.id.clone(), params.acl_storage.clone()) - } - }, - consensus_transport: consensus_transport, - })?; - - Ok(SessionImpl { - core: SessionCore { - meta: params.meta, - access_key: params.access_key, - key_share: params.key_share, - cluster: params.cluster, - nonce: params.nonce, - completed: Condvar::new(), - }, - data: Mutex::new(SessionData { - state: SessionState::ConsensusEstablishing, - message_hash: None, - version: None, - consensus_session: consensus_session, - generation_session: None, - delegation_status: None, - result: None, - }), - }) - } - - /// Get session state. - #[cfg(test)] - pub fn state(&self) -> SessionState { - self.data.lock().state - } - - /// Wait for session completion. - pub fn wait(&self) -> Result<(Secret, Secret), Error> { - Self::wait_session(&self.core.completed, &self.data, None, |data| { - data.result.clone() - }) - .expect("wait_session returns Some if called without timeout; qed") - } - - /// Delegate session to other node. - pub fn delegate(&self, master: NodeId, version: H256, message_hash: H256) -> Result<(), Error> { - if self.core.meta.master_node_id != self.core.meta.self_node_id { - return Err(Error::InvalidStateForRequest); - } - - let mut data = self.data.lock(); - if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization - || data.delegation_status.is_some() - { - return Err(Error::InvalidStateForRequest); - } - - data.consensus_session - .consensus_job_mut() - .executor_mut() - .set_has_key_share(false); - self.core.cluster.send(&master, Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionDelegation(SchnorrSigningSessionDelegation { - session: self.core.meta.id.clone().into(), - sub_session: self.core.access_key.clone().into(), - session_nonce: self.core.nonce, - requester: data.consensus_session.consensus_job().executor().requester() - .expect("requester is passed to master node on creation; session can be delegated from master node only; qed") - .clone().into(), - version: version.into(), - message_hash: message_hash.into(), - })))?; - data.delegation_status = Some(DelegationStatus::DelegatedTo(master)); - Ok(()) - } - - /// Initialize signing session on master node. 
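The `wait` method above blocks on the `completed` condvar until `set_signing_result` (further below) publishes a result and calls `notify_all`. A stripped-down version of that publish-and-notify pattern, using std primitives instead of parking_lot and a placeholder `u64` payload, might look like this:

use std::sync::{Arc, Condvar, Mutex};
use std::thread;

// Placeholder for a session outcome; the real sessions store richer types.
type ToyResult = Result<u64, String>;

struct Completion {
    result: Mutex<Option<ToyResult>>,
    completed: Condvar,
}

impl Completion {
    fn new() -> Self {
        Completion {
            result: Mutex::new(None),
            completed: Condvar::new(),
        }
    }

    // Publish the outcome and wake every waiter.
    fn set(&self, value: ToyResult) {
        *self.result.lock().unwrap() = Some(value);
        self.completed.notify_all();
    }

    // Block until some thread has published an outcome.
    fn wait(&self) -> ToyResult {
        let mut guard = self.result.lock().unwrap();
        while guard.is_none() {
            guard = self.completed.wait(guard).unwrap();
        }
        (*guard).clone().expect("loop exits only when the result is Some; qed")
    }
}

fn main() {
    let completion = Arc::new(Completion::new());
    let publisher = completion.clone();
    thread::spawn(move || publisher.set(Ok(42)));
    assert_eq!(completion.wait(), Ok(42));
}

Because the predicate is rechecked in a loop while the lock is held, a notification cannot be lost between the check and the wait.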
- pub fn initialize(&self, version: H256, message_hash: H256) -> Result<(), Error> { - debug_assert_eq!(self.core.meta.self_node_id, self.core.meta.master_node_id); - - // check if version exists - let key_version = match self.core.key_share.as_ref() { - None => return Err(Error::InvalidMessage), - Some(key_share) => key_share.version(&version)?, - }; - - let mut data = self.data.lock(); - let non_isolated_nodes = self.core.cluster.nodes(); - let mut consensus_nodes: BTreeSet<_> = key_version - .id_numbers - .keys() - .filter(|n| non_isolated_nodes.contains(*n)) - .cloned() - .chain(::std::iter::once(self.core.meta.self_node_id.clone())) - .collect(); - if let Some(&DelegationStatus::DelegatedFrom(delegation_master, _)) = - data.delegation_status.as_ref() - { - consensus_nodes.remove(&delegation_master); - } - - data.consensus_session - .consensus_job_mut() - .transport_mut() - .version = Some(version.clone()); - data.version = Some(version.clone()); - data.message_hash = Some(message_hash); - data.consensus_session.initialize(consensus_nodes)?; - - if data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished { - let generation_session = GenerationSession::new(GenerationSessionParams { - id: self.core.meta.id.clone(), - self_node_id: self.core.meta.self_node_id.clone(), - key_storage: None, - cluster: Arc::new(SessionKeyGenerationTransport { - access_key: self.core.access_key.clone(), - cluster: self.core.cluster.clone(), - nonce: self.core.nonce, - other_nodes_ids: BTreeSet::new(), - }), - nonce: None, - }); - generation_session.initialize( - Default::default(), - Default::default(), - false, - 0, - vec![self.core.meta.self_node_id.clone()] - .into_iter() - .collect::>() - .into(), - )?; - - debug_assert_eq!(generation_session.state(), GenerationSessionState::Finished); - let joint_public_and_secret = generation_session - .joint_public_and_secret() - .expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")?; - data.generation_session = Some(generation_session); - data.state = SessionState::SignatureComputing; - - self.core.disseminate_jobs( - &mut data.consensus_session, - &version, - joint_public_and_secret.0, - joint_public_and_secret.1, - message_hash, - )?; - - debug_assert!(data.consensus_session.state() == ConsensusSessionState::Finished); - let result = data.consensus_session.result()?; - Self::set_signing_result(&self.core, &mut *data, Ok(result)); - } - - Ok(()) - } - - /// Process signing message. 
- pub fn process_message( - &self, - sender: &NodeId, - message: &SchnorrSigningMessage, - ) -> Result<(), Error> { - if self.core.nonce != message.session_nonce() { - return Err(Error::ReplayProtection); - } - - match message { - &SchnorrSigningMessage::SchnorrSigningConsensusMessage(ref message) => { - self.on_consensus_message(sender, message) - } - &SchnorrSigningMessage::SchnorrSigningGenerationMessage(ref message) => { - self.on_generation_message(sender, message) - } - &SchnorrSigningMessage::SchnorrRequestPartialSignature(ref message) => { - self.on_partial_signature_requested(sender, message) - } - &SchnorrSigningMessage::SchnorrPartialSignature(ref message) => { - self.on_partial_signature(sender, message) - } - &SchnorrSigningMessage::SchnorrSigningSessionError(ref message) => { - self.process_node_error(Some(&sender), message.error.clone()) - } - &SchnorrSigningMessage::SchnorrSigningSessionCompleted(ref message) => { - self.on_session_completed(sender, message) - } - &SchnorrSigningMessage::SchnorrSigningSessionDelegation(ref message) => { - self.on_session_delegated(sender, message) - } - &SchnorrSigningMessage::SchnorrSigningSessionDelegationCompleted(ref message) => { - self.on_session_delegation_completed(sender, message) - } - } - } - - /// When session is delegated to this node. - pub fn on_session_delegated( - &self, - sender: &NodeId, - message: &SchnorrSigningSessionDelegation, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - - { - let mut data = self.data.lock(); - if data.consensus_session.state() != ConsensusSessionState::WaitingForInitialization - || data.delegation_status.is_some() - { - return Err(Error::InvalidStateForRequest); - } - - data.consensus_session - .consensus_job_mut() - .executor_mut() - .set_requester(message.requester.clone().into()); - data.delegation_status = Some(DelegationStatus::DelegatedFrom( - sender.clone(), - message.session_nonce, - )); - } - - self.initialize( - message.version.clone().into(), - message.message_hash.clone().into(), - ) - } - - /// When delegated session is completed on other node. - pub fn on_session_delegation_completed( - &self, - sender: &NodeId, - message: &SchnorrSigningSessionDelegationCompleted, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - - if self.core.meta.master_node_id != self.core.meta.self_node_id { - return Err(Error::InvalidStateForRequest); - } - - let mut data = self.data.lock(); - match data.delegation_status.as_ref() { - Some(&DelegationStatus::DelegatedTo(ref node)) if node == sender => (), - _ => return Err(Error::InvalidMessage), - } - - Self::set_signing_result( - &self.core, - &mut *data, - Ok(( - message.signature_c.clone().into(), - message.signature_s.clone().into(), - )), - ); - - Ok(()) - } - - /// When consensus-related message is received. 
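A small illustration of the replay guard at the top of `process_message` above: every incoming message carries the session-level nonce fixed at session creation, and any mismatch is rejected with `Error::ReplayProtection` before the message is dispatched. The types in this sketch are invented for the example.

#[derive(Debug, PartialEq)]
enum ToyError {
    ReplayProtection,
}

struct ToySession {
    // Fixed when the session is created; every valid message must echo it.
    nonce: u64,
}

struct ToyMessage {
    session_nonce: u64,
    payload: &'static str,
}

impl ToySession {
    fn process_message(&self, message: &ToyMessage) -> Result<(), ToyError> {
        if self.nonce != message.session_nonce {
            return Err(ToyError::ReplayProtection);
        }
        // Dispatch on the concrete message variant would happen here.
        println!("accepted message: {}", message.payload);
        Ok(())
    }
}

fn main() {
    let session = ToySession { nonce: 7 };
    let fresh = ToyMessage { session_nonce: 7, payload: "fresh" };
    let stale = ToyMessage { session_nonce: 3, payload: "stale" };
    assert!(session.process_message(&fresh).is_ok());
    assert_eq!(session.process_message(&stale), Err(ToyError::ReplayProtection));
}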
- pub fn on_consensus_message( - &self, - sender: &NodeId, - message: &SchnorrSigningConsensusMessage, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); - - let mut data = self.data.lock(); - let is_establishing_consensus = - data.consensus_session.state() == ConsensusSessionState::EstablishingConsensus; - - if let &ConsensusMessage::InitializeConsensusSession(ref msg) = &message.message { - let version = msg.version.clone().into(); - let has_key_share = self - .core - .key_share - .as_ref() - .map(|ks| ks.version(&version).is_ok()) - .unwrap_or(false); - data.consensus_session - .consensus_job_mut() - .executor_mut() - .set_has_key_share(has_key_share); - data.version = Some(version); - } - data.consensus_session - .on_consensus_message(&sender, &message.message)?; - - let is_consensus_established = - data.consensus_session.state() == ConsensusSessionState::ConsensusEstablished; - if self.core.meta.self_node_id != self.core.meta.master_node_id - || !is_establishing_consensus - || !is_consensus_established - { - return Ok(()); - } - - let consensus_group = data.consensus_session.select_consensus_group()?.clone(); - let mut other_consensus_group_nodes = consensus_group.clone(); - other_consensus_group_nodes.remove(&self.core.meta.self_node_id); - - let key_share = match self.core.key_share.as_ref() { - None => return Err(Error::InvalidMessage), - Some(key_share) => key_share, - }; - - let generation_session = GenerationSession::new(GenerationSessionParams { - id: self.core.meta.id.clone(), - self_node_id: self.core.meta.self_node_id.clone(), - key_storage: None, - cluster: Arc::new(SessionKeyGenerationTransport { - access_key: self.core.access_key.clone(), - cluster: self.core.cluster.clone(), - nonce: self.core.nonce, - other_nodes_ids: other_consensus_group_nodes, - }), - nonce: None, - }); - - generation_session.initialize( - Default::default(), - Default::default(), - false, - key_share.threshold, - consensus_group.into(), - )?; - data.generation_session = Some(generation_session); - data.state = SessionState::SessionKeyGeneration; - - Ok(()) - } - - /// When session key related message is received. 
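Taken together, `on_consensus_message` above and `on_generation_message` below move the session through the three `SessionState` values in order: consensus establishment, then session-key generation, then signature computing. A toy transition function makes that progression explicit; the event names are simplifications of the real triggers.

#[derive(Debug, Clone, Copy, PartialEq)]
enum ToySessionState {
    ConsensusEstablishing,
    SessionKeyGeneration,
    SignatureComputing,
}

#[derive(Debug)]
enum ToyEvent {
    ConsensusEstablished,
    SessionKeyGenerated,
}

// Advance the session, or reject an event that is invalid in the current state.
fn advance(state: ToySessionState, event: ToyEvent) -> Result<ToySessionState, String> {
    use ToySessionState::*;
    match (state, event) {
        (ConsensusEstablishing, ToyEvent::ConsensusEstablished) => Ok(SessionKeyGeneration),
        (SessionKeyGeneration, ToyEvent::SessionKeyGenerated) => Ok(SignatureComputing),
        (state, event) => Err(format!("{:?} is not expected while {:?}", event, state)),
    }
}

fn main() {
    let mut state = ToySessionState::ConsensusEstablishing;
    state = advance(state, ToyEvent::ConsensusEstablished).unwrap();
    state = advance(state, ToyEvent::SessionKeyGenerated).unwrap();
    assert_eq!(state, ToySessionState::SignatureComputing);
}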
- pub fn on_generation_message( - &self, - sender: &NodeId, - message: &SchnorrSigningGenerationMessage, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); - - let mut data = self.data.lock(); - - if let &GenerationMessage::InitializeSession(ref message) = &message.message { - if &self.core.meta.master_node_id != sender { - match data.delegation_status.as_ref() { - Some(&DelegationStatus::DelegatedTo(s)) if s == *sender => (), - _ => return Err(Error::InvalidMessage), - } - } - - let consensus_group: BTreeSet = - message.nodes.keys().cloned().map(Into::into).collect(); - let mut other_consensus_group_nodes = consensus_group.clone(); - other_consensus_group_nodes.remove(&self.core.meta.self_node_id); - - let generation_session = GenerationSession::new(GenerationSessionParams { - id: self.core.meta.id.clone(), - self_node_id: self.core.meta.self_node_id.clone(), - key_storage: None, - cluster: Arc::new(SessionKeyGenerationTransport { - access_key: self.core.access_key.clone(), - cluster: self.core.cluster.clone(), - nonce: self.core.nonce, - other_nodes_ids: other_consensus_group_nodes, - }), - nonce: None, - }); - data.generation_session = Some(generation_session); - data.state = SessionState::SessionKeyGeneration; - } - - { - let generation_session = data - .generation_session - .as_ref() - .ok_or(Error::InvalidStateForRequest)?; - let is_key_generating = generation_session.state() != GenerationSessionState::Finished; - generation_session.process_message(sender, &message.message)?; - - let is_key_generated = generation_session.state() == GenerationSessionState::Finished; - if !is_key_generating || !is_key_generated { - return Ok(()); - } - } - - data.state = SessionState::SignatureComputing; - if self.core.meta.master_node_id != self.core.meta.self_node_id { - return Ok(()); - } - - let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone(); - let message_hash = data.message_hash - .expect("we are on master node; on master node message_hash is filled in initialize(); on_generation_message follows initialize; qed"); - let joint_public_and_secret = data.generation_session.as_ref() - .expect("session key is generated before signature is computed; we are in SignatureComputing state; qed") - .joint_public_and_secret() - .expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")?; - self.core.disseminate_jobs( - &mut data.consensus_session, - &version, - joint_public_and_secret.0, - joint_public_and_secret.1, - message_hash, - ) - } - - /// When partial signature is requested. 
- pub fn on_partial_signature_requested( - &self, - sender: &NodeId, - message: &SchnorrRequestPartialSignature, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); - - let key_share = match self.core.key_share.as_ref() { - None => return Err(Error::InvalidMessage), - Some(key_share) => key_share, - }; - - let mut data = self.data.lock(); - - if sender != &self.core.meta.master_node_id { - return Err(Error::InvalidMessage); - } - if data.state != SessionState::SignatureComputing { - return Err(Error::InvalidStateForRequest); - } - - let joint_public_and_secret = data.generation_session.as_ref() - .expect("session key is generated before signature is computed; we are in SignatureComputing state; qed") - .joint_public_and_secret() - .expect("session key is generated before signature is computed; we are in SignatureComputing state; qed")?; - let key_version = key_share - .version(data.version.as_ref().ok_or(Error::InvalidMessage)?)? - .hash - .clone(); - let signing_job = SchnorrSigningJob::new_on_slave( - self.core.meta.self_node_id.clone(), - key_share.clone(), - key_version, - joint_public_and_secret.0, - joint_public_and_secret.1, - )?; - let signing_transport = self.core.signing_transport(); - - data.consensus_session - .on_job_request( - sender, - SchnorrPartialSigningRequest { - id: message.request_id.clone().into(), - message_hash: message.message_hash.clone().into(), - other_nodes_ids: message.nodes.iter().cloned().map(Into::into).collect(), - }, - signing_job, - signing_transport, - ) - .map(|_| ()) - } - - /// When partial signature is received. - pub fn on_partial_signature( - &self, - sender: &NodeId, - message: &SchnorrPartialSignature, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); - - let mut data = self.data.lock(); - data.consensus_session.on_job_response( - sender, - SchnorrPartialSigningResponse { - request_id: message.request_id.clone().into(), - partial_signature: message.partial_signature.clone().into(), - }, - )?; - - if data.consensus_session.state() != ConsensusSessionState::Finished { - return Ok(()); - } - - // send compeltion signal to all nodes, except for rejected nodes - for node in data.consensus_session.consensus_non_rejected_nodes() { - self.core.cluster.send( - &node, - Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionCompleted( - SchnorrSigningSessionCompleted { - session: self.core.meta.id.clone().into(), - sub_session: self.core.access_key.clone().into(), - session_nonce: self.core.nonce, - }, - )), - )?; - } - - let result = data.consensus_session.result()?; - Self::set_signing_result(&self.core, &mut *data, Ok(result)); - - Ok(()) - } - - /// When session is completed. - pub fn on_session_completed( - &self, - sender: &NodeId, - message: &SchnorrSigningSessionCompleted, - ) -> Result<(), Error> { - debug_assert!(self.core.meta.id == *message.session); - debug_assert!(self.core.access_key == *message.sub_session); - debug_assert!(sender != &self.core.meta.self_node_id); - - self.data - .lock() - .consensus_session - .on_session_completed(sender) - } - - /// Process error from the other node. 
- fn process_node_error(&self, node: Option<&NodeId>, error: Error) -> Result<(), Error> { - let mut data = self.data.lock(); - let is_self_node_error = node - .map(|n| n == &self.core.meta.self_node_id) - .unwrap_or(false); - // error is always fatal if coming from this node - if is_self_node_error { - Self::set_signing_result(&self.core, &mut *data, Err(error.clone())); - return Err(error); - } - - match { - match node { - Some(node) => data.consensus_session.on_node_error(node, error.clone()), - None => data.consensus_session.on_session_timeout(), - } - } { - Ok(false) => Ok(()), - Ok(true) => { - let version = data.version.as_ref().ok_or(Error::InvalidMessage)?.clone(); - let message_hash = data.message_hash.as_ref().cloned() - .expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when message_hash.is_some(); qed"); - let joint_public_and_secret = data.generation_session.as_ref() - .expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when message_hash.is_some(); qed") - .joint_public_and_secret() - .expect("on_node_error returned true; this means that jobs must be REsent; this means that jobs already have been sent; jobs are sent when message_hash.is_some(); qed")?; - let disseminate_result = self.core.disseminate_jobs( - &mut data.consensus_session, - &version, - joint_public_and_secret.0, - joint_public_and_secret.1, - message_hash, - ); - match disseminate_result { - Ok(()) => Ok(()), - Err(err) => { - warn!( - "{}: signing session failed with error: {:?} from {:?}", - &self.core.meta.self_node_id, error, node - ); - Self::set_signing_result(&self.core, &mut *data, Err(err.clone())); - Err(err) - } - } - } - Err(err) => { - warn!( - "{}: signing session failed with error: {:?} from {:?}", - &self.core.meta.self_node_id, error, node - ); - Self::set_signing_result(&self.core, &mut *data, Err(err.clone())); - Err(err) - } - } - } - - /// Set signing session result. 
- fn set_signing_result( - core: &SessionCore, - data: &mut SessionData, - result: Result<(Secret, Secret), Error>, - ) { - if let Some(DelegationStatus::DelegatedFrom(master, nonce)) = data.delegation_status.take() - { - // error means can't communicate => ignore it - let _ = match result.as_ref() { - Ok(signature) => core.cluster.send( - &master, - Message::SchnorrSigning( - SchnorrSigningMessage::SchnorrSigningSessionDelegationCompleted( - SchnorrSigningSessionDelegationCompleted { - session: core.meta.id.clone().into(), - sub_session: core.access_key.clone().into(), - session_nonce: nonce, - signature_c: signature.0.clone().into(), - signature_s: signature.1.clone().into(), - }, - ), - ), - ), - Err(error) => core.cluster.send( - &master, - Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningSessionError( - SchnorrSigningSessionError { - session: core.meta.id.clone().into(), - sub_session: core.access_key.clone().into(), - session_nonce: nonce, - error: error.clone().into(), - }, - )), - ), - }; - } - - data.result = Some(result); - core.completed.notify_all(); - } -} - -impl ClusterSession for SessionImpl { - type Id = SessionIdWithSubSession; - - fn type_name() -> &'static str { - "signing" - } - - fn id(&self) -> SessionIdWithSubSession { - SessionIdWithSubSession::new(self.core.meta.id.clone(), self.core.access_key.clone()) - } - - fn is_finished(&self) -> bool { - let data = self.data.lock(); - data.consensus_session.state() == ConsensusSessionState::Failed - || data.consensus_session.state() == ConsensusSessionState::Finished - || data.result.is_some() - } - - fn on_node_timeout(&self, node: &NodeId) { - // ignore error, only state matters - let _ = self.process_node_error(Some(node), Error::NodeDisconnected); - } - - fn on_session_timeout(&self) { - // ignore error, only state matters - let _ = self.process_node_error(None, Error::NodeDisconnected); - } - - fn on_session_error(&self, node: &NodeId, error: Error) { - let is_fatal = self.process_node_error(Some(node), error.clone()).is_err(); - let is_this_node_error = *node == self.core.meta.self_node_id; - if is_fatal || is_this_node_error { - // error in signing session is non-fatal, if occurs on slave node - // => either respond with error - // => or broadcast error - let message = Message::SchnorrSigning( - SchnorrSigningMessage::SchnorrSigningSessionError(SchnorrSigningSessionError { - session: self.core.meta.id.clone().into(), - sub_session: self.core.access_key.clone().into(), - session_nonce: self.core.nonce, - error: error.clone().into(), - }), - ); - - // do not bother processing send error, as we already processing error - let _ = if self.core.meta.master_node_id == self.core.meta.self_node_id { - self.core.cluster.broadcast(message) - } else { - self.core - .cluster - .send(&self.core.meta.master_node_id, message) - }; - } - } - - fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error> { - match *message { - Message::SchnorrSigning(ref message) => self.process_message(sender, message), - _ => unreachable!("cluster checks message to be correct before passing; qed"), - } - } -} - -impl SessionKeyGenerationTransport { - fn map_message(&self, message: Message) -> Result { - match message { - Message::Generation(message) => Ok(Message::SchnorrSigning( - SchnorrSigningMessage::SchnorrSigningGenerationMessage( - SchnorrSigningGenerationMessage { - session: message.session_id().clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - message: message, - }, - 
), - )), - _ => Err(Error::InvalidMessage), - } - } -} - -impl Cluster for SessionKeyGenerationTransport { - fn broadcast(&self, message: Message) -> Result<(), Error> { - let message = self.map_message(message)?; - for to in &self.other_nodes_ids { - self.cluster.send(to, message.clone())?; - } - Ok(()) - } - - fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> { - debug_assert!(self.other_nodes_ids.contains(to)); - self.cluster.send(to, self.map_message(message)?) - } - - fn is_connected(&self, node: &NodeId) -> bool { - self.cluster.is_connected(node) - } - - fn nodes(&self) -> BTreeSet { - self.cluster.nodes() - } - - fn configured_nodes_count(&self) -> usize { - self.cluster.configured_nodes_count() - } - - fn connected_nodes_count(&self) -> usize { - self.cluster.connected_nodes_count() - } -} - -impl SessionCore { - pub fn signing_transport(&self) -> SigningJobTransport { - SigningJobTransport { - id: self.meta.id.clone(), - access_key: self.access_key.clone(), - nonce: self.nonce, - cluster: self.cluster.clone(), - } - } - - pub fn disseminate_jobs( - &self, - consensus_session: &mut SigningConsensusSession, - version: &H256, - session_public: Public, - session_secret_share: Secret, - message_hash: H256, - ) -> Result<(), Error> { - let key_share = match self.key_share.as_ref() { - None => return Err(Error::InvalidMessage), - Some(key_share) => key_share, - }; - - let key_version = key_share.version(version)?.hash.clone(); - let signing_job = SchnorrSigningJob::new_on_master( - self.meta.self_node_id.clone(), - key_share.clone(), - key_version, - session_public, - session_secret_share, - message_hash, - )?; - consensus_session - .disseminate_jobs(signing_job, self.signing_transport(), false) - .map(|_| ()) - } -} - -impl JobTransport for SigningConsensusTransport { - type PartialJobRequest = Requester; - type PartialJobResponse = bool; - - fn send_partial_request(&self, node: &NodeId, request: Requester) -> Result<(), Error> { - let version = self.version.as_ref() - .expect("send_partial_request is called on initialized master node only; version is filled in before initialization starts on master node; qed"); - self.cluster.send( - node, - Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningConsensusMessage( - SchnorrSigningConsensusMessage { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - message: ConsensusMessage::InitializeConsensusSession( - InitializeConsensusSession { - requester: request.into(), - version: version.clone().into(), - }, - ), - }, - )), - ) - } - - fn send_partial_response(&self, node: &NodeId, response: bool) -> Result<(), Error> { - self.cluster.send( - node, - Message::SchnorrSigning(SchnorrSigningMessage::SchnorrSigningConsensusMessage( - SchnorrSigningConsensusMessage { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - message: ConsensusMessage::ConfirmConsensusInitialization( - ConfirmConsensusInitialization { - is_confirmed: response, - }, - ), - }, - )), - ) - } -} - -impl JobTransport for SigningJobTransport { - type PartialJobRequest = SchnorrPartialSigningRequest; - type PartialJobResponse = SchnorrPartialSigningResponse; - - fn send_partial_request( - &self, - node: &NodeId, - request: SchnorrPartialSigningRequest, - ) -> Result<(), Error> { - self.cluster.send( - node, - Message::SchnorrSigning(SchnorrSigningMessage::SchnorrRequestPartialSignature( - SchnorrRequestPartialSignature { - session: 
self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - request_id: request.id.into(), - message_hash: request.message_hash.into(), - nodes: request - .other_nodes_ids - .into_iter() - .map(Into::into) - .collect(), - }, - )), - ) - } - - fn send_partial_response( - &self, - node: &NodeId, - response: SchnorrPartialSigningResponse, - ) -> Result<(), Error> { - self.cluster.send( - node, - Message::SchnorrSigning(SchnorrSigningMessage::SchnorrPartialSignature( - SchnorrPartialSignature { - session: self.id.clone().into(), - sub_session: self.access_key.clone().into(), - session_nonce: self.nonce, - request_id: response.request_id.into(), - partial_signature: response.partial_signature.into(), - }, - )), - ) - } -} - -#[cfg(test)] -mod tests { - use acl_storage::DummyAclStorage; - use ethereum_types::{Address, H256}; - use ethkey::{self, public_to_address, Generator, Public, Random, Secret}; - use key_server_cluster::{ - cluster::tests::MessageLoop as ClusterMessageLoop, - generation_session::tests::MessageLoop as GenerationMessageLoop, - math, - message::{ - ConfirmConsensusInitialization, ConfirmInitialization, ConsensusMessage, - GenerationMessage, InitializeSession, SchnorrRequestPartialSignature, - SchnorrSigningConsensusMessage, SchnorrSigningGenerationMessage, SchnorrSigningMessage, - }, - signing_session_schnorr::{SessionImpl, SessionParams, SessionState}, - Error, KeyStorage, Requester, SessionId, SessionMeta, - }; - use std::{collections::BTreeMap, str::FromStr, sync::Arc}; - - #[derive(Debug)] - pub struct MessageLoop(pub ClusterMessageLoop); - - impl MessageLoop { - pub fn new(num_nodes: usize, threshold: usize) -> Result { - let ml = GenerationMessageLoop::new(num_nodes).init(threshold)?; - ml.0.loop_until(|| ml.0.is_empty()); // complete generation session - - Ok(MessageLoop(ml.0)) - } - - pub fn into_session(&self, at_node: usize) -> SessionImpl { - let requester = Some(Requester::Signature( - ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), - )); - SessionImpl::new( - SessionParams { - meta: SessionMeta { - id: SessionId::default(), - self_node_id: self.0.node(at_node), - master_node_id: self.0.node(0), - threshold: self - .0 - .key_storage(at_node) - .get(&Default::default()) - .unwrap() - .unwrap() - .threshold, - configured_nodes_count: self.0.nodes().len(), - connected_nodes_count: self.0.nodes().len(), - }, - access_key: Random.generate().unwrap().secret().clone(), - key_share: self - .0 - .key_storage(at_node) - .get(&Default::default()) - .unwrap(), - acl_storage: Arc::new(DummyAclStorage::default()), - cluster: self.0.cluster(0).view().unwrap(), - nonce: 0, - }, - requester, - ) - .unwrap() - } - - pub fn init_with_version( - self, - key_version: Option, - ) -> Result<(Self, Public, H256), Error> { - let message_hash = H256::random(); - let requester = Random.generate().unwrap(); - let signature = ethkey::sign(requester.secret(), &SessionId::default()).unwrap(); - self.0 - .cluster(0) - .client() - .new_schnorr_signing_session( - Default::default(), - signature.into(), - key_version, - message_hash, - ) - .map(|_| (self, *requester.public(), message_hash)) - } - - pub fn init(self) -> Result<(Self, Public, H256), Error> { - let key_version = self.key_version(); - self.init_with_version(Some(key_version)) - } - - pub fn init_delegated(self) -> Result<(Self, Public, H256), Error> { - self.0.key_storage(0).remove(&Default::default()).unwrap(); - self.init_with_version(None) - } - - pub fn 
init_with_isolated(self) -> Result<(Self, Public, H256), Error> { - self.0.isolate(1); - self.init() - } - - pub fn init_without_share(self) -> Result<(Self, Public, H256), Error> { - let key_version = self.key_version(); - self.0.key_storage(0).remove(&Default::default()).unwrap(); - self.init_with_version(Some(key_version)) - } - - pub fn session_at(&self, idx: usize) -> Arc { - self.0 - .sessions(idx) - .schnorr_signing_sessions - .first() - .unwrap() - } - - pub fn ensure_completed(&self) { - self.0.loop_until(|| self.0.is_empty()); - assert!(self.session_at(0).wait().is_ok()); - } - - pub fn key_version(&self) -> H256 { - self.0 - .key_storage(0) - .get(&Default::default()) - .unwrap() - .unwrap() - .versions - .iter() - .last() - .unwrap() - .hash - } - } - - #[test] - fn schnorr_complete_gen_sign_session() { - let test_cases = [(0, 1), (0, 5), (2, 5), (3, 5)]; - for &(threshold, num_nodes) in &test_cases { - let (ml, _, message) = MessageLoop::new(num_nodes, threshold) - .unwrap() - .init() - .unwrap(); - ml.0.loop_until(|| ml.0.is_empty()); - - let signer_public = - ml.0.key_storage(0) - .get(&Default::default()) - .unwrap() - .unwrap() - .public; - let signature = ml.session_at(0).wait().unwrap(); - assert!(math::verify_schnorr_signature(&signer_public, &signature, &message).unwrap()); - } - } - - #[test] - fn schnorr_constructs_in_cluster_of_single_node() { - MessageLoop::new(1, 0).unwrap().init().unwrap(); - } - - #[test] - fn schnorr_fails_to_initialize_if_does_not_have_a_share() { - assert!(MessageLoop::new(2, 1) - .unwrap() - .init_without_share() - .is_err()); - } - - #[test] - fn schnorr_fails_to_initialize_if_threshold_is_wrong() { - let mut ml = MessageLoop::new(3, 2).unwrap(); - ml.0.exclude(2); - assert_eq!(ml.init().unwrap_err(), Error::ConsensusUnreachable); - } - - #[test] - fn schnorr_fails_to_initialize_when_already_initialized() { - let (ml, _, _) = MessageLoop::new(1, 0).unwrap().init().unwrap(); - assert_eq!( - ml.session_at(0).initialize(ml.key_version(), 777.into()), - Err(Error::InvalidStateForRequest) - ); - } - - #[test] - fn schnorr_does_not_fail_when_consensus_message_received_after_consensus_established() { - let (ml, _, _) = MessageLoop::new(3, 1).unwrap().init().unwrap(); - - // consensus is established - let session = ml.session_at(0); - ml.0.loop_until(|| session.state() == SessionState::SessionKeyGeneration); - - // but 3rd node continues to send its messages - // this should not fail session - let consensus_group = session - .data - .lock() - .consensus_session - .select_consensus_group() - .unwrap() - .clone(); - let mut had_3rd_message = false; - while let Some((from, to, message)) = ml.0.take_message() { - if !consensus_group.contains(&from) { - had_3rd_message = true; - ml.0.process_message(from, to, message); - } - } - assert!(had_3rd_message); - } - - #[test] - fn schnorr_fails_when_consensus_message_is_received_when_not_initialized() { - let ml = MessageLoop::new(3, 1).unwrap(); - let session = ml.into_session(0); - assert_eq!( - session.on_consensus_message( - &ml.0.node(1), - &SchnorrSigningConsensusMessage { - session: SessionId::default().into(), - sub_session: session.core.access_key.clone().into(), - session_nonce: 0, - message: ConsensusMessage::ConfirmConsensusInitialization( - ConfirmConsensusInitialization { is_confirmed: true } - ), - } - ), - Err(Error::InvalidStateForRequest) - ); - } - - #[test] - fn schnorr_fails_when_generation_message_is_received_when_not_initialized() { - let ml = MessageLoop::new(3, 1).unwrap(); - let 
session = ml.into_session(0); - assert_eq!( - session.on_generation_message( - &ml.0.node(1), - &SchnorrSigningGenerationMessage { - session: SessionId::default().into(), - sub_session: session.core.access_key.clone().into(), - session_nonce: 0, - message: GenerationMessage::ConfirmInitialization(ConfirmInitialization { - session: SessionId::default().into(), - session_nonce: 0, - derived_point: Public::default().into(), - }), - } - ), - Err(Error::InvalidStateForRequest) - ); - } - - #[test] - fn schnorr_fails_when_generation_sesson_is_initialized_by_slave_node() { - let (ml, _, _) = MessageLoop::new(3, 1).unwrap().init().unwrap(); - let session = ml.session_at(0); - ml.0.loop_until(|| session.state() == SessionState::SessionKeyGeneration); - - let slave2_id = ml.0.node(2); - let slave1_session = ml.session_at(1); - - assert_eq!( - slave1_session.on_generation_message( - &slave2_id, - &SchnorrSigningGenerationMessage { - session: SessionId::default().into(), - sub_session: session.core.access_key.clone().into(), - session_nonce: 0, - message: GenerationMessage::InitializeSession(InitializeSession { - session: SessionId::default().into(), - session_nonce: 0, - origin: None, - author: Address::default().into(), - nodes: BTreeMap::new(), - is_zero: false, - threshold: 1, - derived_point: Public::default().into(), - }) - } - ), - Err(Error::InvalidMessage) - ); - } - - #[test] - fn schnorr_fails_when_signature_requested_when_not_initialized() { - let ml = MessageLoop::new(3, 1).unwrap(); - let session = ml.into_session(1); - assert_eq!( - session.on_partial_signature_requested( - &ml.0.node(0), - &SchnorrRequestPartialSignature { - session: SessionId::default().into(), - sub_session: session.core.access_key.clone().into(), - session_nonce: 0, - request_id: Secret::from_str( - "0000000000000000000000000000000000000000000000000000000000000001" - ) - .unwrap() - .into(), - message_hash: H256::default().into(), - nodes: Default::default(), - } - ), - Err(Error::InvalidStateForRequest) - ); - } - - #[test] - fn schnorr_fails_when_signature_requested_by_slave_node() { - let ml = MessageLoop::new(3, 1).unwrap(); - let session = ml.into_session(0); - assert_eq!( - session.on_partial_signature_requested( - &ml.0.node(1), - &SchnorrRequestPartialSignature { - session: SessionId::default().into(), - sub_session: session.core.access_key.clone().into(), - session_nonce: 0, - request_id: Secret::from_str( - "0000000000000000000000000000000000000000000000000000000000000001" - ) - .unwrap() - .into(), - message_hash: H256::default().into(), - nodes: Default::default(), - } - ), - Err(Error::InvalidMessage) - ); - } - - #[test] - fn schnorr_failed_signing_session() { - let (ml, requester, _) = MessageLoop::new(3, 1).unwrap().init().unwrap(); - - // we need at least 2-of-3 nodes to agree to reach consensus - // let's say 2 of 3 nodes disagee - ml.0.acl_storage(1) - .prohibit(public_to_address(&requester), SessionId::default()); - ml.0.acl_storage(2) - .prohibit(public_to_address(&requester), SessionId::default()); - - // then consensus is unreachable - ml.0.loop_until(|| ml.0.is_empty()); - assert_eq!( - ml.session_at(0).wait().unwrap_err(), - Error::ConsensusUnreachable - ); - } - - #[test] - fn schnorr_complete_signing_session_with_single_node_failing() { - let (ml, requester, _) = MessageLoop::new(3, 1).unwrap().init().unwrap(); - - // we need at least 2-of-3 nodes to agree to reach consensus - // let's say 1 of 3 nodes disagee - ml.0.acl_storage(1) - .prohibit(public_to_address(&requester), 
SessionId::default()); - - // then consensus reachable, but single node will disagree - ml.ensure_completed(); - } - - #[test] - fn schnorr_complete_signing_session_with_acl_check_failed_on_master() { - let (ml, requester, _) = MessageLoop::new(3, 1).unwrap().init().unwrap(); - - // we need at least 2-of-3 nodes to agree to reach consensus - // let's say 1 of 3 nodes disagee - ml.0.acl_storage(0) - .prohibit(public_to_address(&requester), SessionId::default()); - - // then consensus reachable, but single node will disagree - ml.ensure_completed(); - } - - #[test] - fn schnorr_signing_message_fails_when_nonce_is_wrong() { - let ml = MessageLoop::new(3, 1).unwrap(); - let session = ml.into_session(1); - let msg = SchnorrSigningMessage::SchnorrSigningGenerationMessage( - SchnorrSigningGenerationMessage { - session: SessionId::default().into(), - sub_session: session.core.access_key.clone().into(), - session_nonce: 10, - message: GenerationMessage::ConfirmInitialization(ConfirmInitialization { - session: SessionId::default().into(), - session_nonce: 0, - derived_point: Public::default().into(), - }), - }, - ); - assert_eq!( - session.process_message(&ml.0.node(1), &msg), - Err(Error::ReplayProtection) - ); - } - - #[test] - fn schnorr_signing_works_when_delegated_to_other_node() { - let (ml, _, _) = MessageLoop::new(3, 1).unwrap().init_delegated().unwrap(); - ml.ensure_completed(); - } - - #[test] - fn schnorr_signing_works_when_share_owners_are_isolated() { - let (ml, _, _) = MessageLoop::new(3, 1) - .unwrap() - .init_with_isolated() - .unwrap(); - ml.ensure_completed(); - } -} diff --git a/secret-store/src/key_server_cluster/cluster.rs b/secret-store/src/key_server_cluster/cluster.rs deleted file mode 100644 index 05fa63321..000000000 --- a/secret-store/src/key_server_cluster/cluster.rs +++ /dev/null @@ -1,1805 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. - -// OpenEthereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// OpenEthereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with OpenEthereum. If not, see . 
- -use ethereum_types::{Address, H256}; -use ethkey::{Generator, Public, Random, Signature}; -use key_server_cluster::{ - cluster_connections::{ConnectionManager, ConnectionProvider}, - cluster_connections_net::{ - NetConnectionsContainer, NetConnectionsManager, NetConnectionsManagerConfig, - }, - cluster_message_processor::{MessageProcessor, SessionsMessageProcessor}, - cluster_sessions::{ - create_cluster_view, AdminSession, AdminSessionCreationData, ClusterSession, - ClusterSessions, ClusterSessionsContainer, ClusterSessionsListener, - SessionIdWithSubSession, SERVERS_SET_CHANGE_SESSION_ID, - }, - cluster_sessions_creator::ClusterSessionCreator, - connection_trigger::{ - ConnectionTrigger, ServersSetChangeSessionCreatorConnector, SimpleConnectionTrigger, - }, - connection_trigger_with_migration::ConnectionTriggerWithMigration, - decryption_session::SessionImpl as DecryptionSession, - encryption_session::SessionImpl as EncryptionSession, - generation_session::SessionImpl as GenerationSession, - key_version_negotiation_session::{ - ContinueAction, IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, - SessionImpl as KeyVersionNegotiationSession, - }, - message::Message, - signing_session_ecdsa::SessionImpl as EcdsaSigningSession, - signing_session_schnorr::SessionImpl as SchnorrSigningSession, - AclStorage, Error, KeyServerSet, KeyStorage, NodeId, NodeKeyPair, Requester, SessionId, -}; -use parity_runtime::Executor; -use parking_lot::RwLock; -use std::{ - collections::{BTreeMap, BTreeSet}, - sync::Arc, -}; - -#[cfg(test)] -use key_server_cluster::cluster_connections::tests::{ - new_test_connections, MessagesQueue, TestConnections, -}; - -/// Cluster interface for external clients. -pub trait ClusterClient: Send + Sync { - /// Start new generation session. - fn new_generation_session( - &self, - session_id: SessionId, - origin: Option
, - author: Address, - threshold: usize, - ) -> Result, Error>; - /// Start new encryption session. - fn new_encryption_session( - &self, - session_id: SessionId, - author: Requester, - common_point: Public, - encrypted_point: Public, - ) -> Result, Error>; - /// Start new decryption session. - fn new_decryption_session( - &self, - session_id: SessionId, - origin: Option
, - requester: Requester, - version: Option, - is_shadow_decryption: bool, - is_broadcast_decryption: bool, - ) -> Result, Error>; - /// Start new Schnorr signing session. - fn new_schnorr_signing_session( - &self, - session_id: SessionId, - requester: Requester, - version: Option, - message_hash: H256, - ) -> Result, Error>; - /// Start new ECDSA session. - fn new_ecdsa_signing_session( - &self, - session_id: SessionId, - requester: Requester, - version: Option, - message_hash: H256, - ) -> Result, Error>; - /// Start new key version negotiation session. - fn new_key_version_negotiation_session( - &self, - session_id: SessionId, - ) -> Result>, Error>; - /// Start new servers set change session. - fn new_servers_set_change_session( - &self, - session_id: Option, - migration_id: Option, - new_nodes_set: BTreeSet, - old_set_signature: Signature, - new_set_signature: Signature, - ) -> Result, Error>; - - /// Listen for new generation sessions. - fn add_generation_listener( - &self, - listener: Arc>, - ); - /// Listen for new decryption sessions. - fn add_decryption_listener( - &self, - listener: Arc>, - ); - /// Listen for new key version negotiation sessions. - fn add_key_version_negotiation_listener( - &self, - listener: Arc< - dyn ClusterSessionsListener< - KeyVersionNegotiationSession, - >, - >, - ); - - /// Ask node to make 'faulty' generation sessions. - #[cfg(test)] - fn make_faulty_generation_sessions(&self); - /// Get active generation session with given id. - #[cfg(test)] - fn generation_session(&self, session_id: &SessionId) -> Option>; - #[cfg(test)] - fn is_fully_connected(&self) -> bool; - /// Try connect to disconnected nodes. - #[cfg(test)] - fn connect(&self); -} - -/// Cluster access for single session participant. -pub trait Cluster: Send + Sync { - /// Broadcast message to all other nodes. - fn broadcast(&self, message: Message) -> Result<(), Error>; - /// Send message to given node. - fn send(&self, to: &NodeId, message: Message) -> Result<(), Error>; - /// Is connected to given node? - fn is_connected(&self, node: &NodeId) -> bool; - /// Get a set of connected nodes. - fn nodes(&self) -> BTreeSet; - /// Get total count of configured key server nodes (valid at the time of ClusterView creation). - fn configured_nodes_count(&self) -> usize; - /// Get total count of connected key server nodes (valid at the time of ClusterView creation). - fn connected_nodes_count(&self) -> usize; -} - -/// Cluster initialization parameters. -#[derive(Clone)] -pub struct ClusterConfiguration { - /// KeyPair this node holds. - pub self_key_pair: Arc, - /// Cluster nodes set. - pub key_server_set: Arc, - /// Reference to key storage - pub key_storage: Arc, - /// Reference to ACL storage - pub acl_storage: Arc, - /// Administrator public key. - pub admin_public: Option, - /// Do not remove sessions from container. - pub preserve_sessions: bool, -} - -/// Network cluster implementation. -pub struct ClusterCore { - /// Cluster data. - data: Arc>, -} - -/// Network cluster client interface implementation. -pub struct ClusterClientImpl { - /// Cluster data. - data: Arc>, -} - -/// Network cluster view. It is a communication channel, required in single session. -pub struct ClusterView { - configured_nodes_count: usize, - connected_nodes: BTreeSet, - connections: Arc, - self_key_pair: Arc, -} - -/// Cross-thread shareable cluster data. -pub struct ClusterData { - /// Cluster configuration. - pub config: ClusterConfiguration, - /// KeyPair this node holds. 
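// A hedged usage sketch of the ClusterClient trait defined above (illustrative only; the
// helper name and the 1-of-3 threshold are assumptions, and error handling is elided): a
// caller first runs a generation session for a key id and, once it has completed, asks the
// same cluster for a Schnorr signature over a message hash with that key.
fn generate_then_sign(
    client: &dyn ClusterClient,
    requester: Requester,
    message_hash: H256,
) -> Result<(), Error> {
    let key_id = SessionId::default();
    // 1-of-3 threshold key, generated on behalf of a (here defaulted) author address.
    let _generation = client.new_generation_session(key_id, None, Address::default(), 1)?;
    // In the real flow the caller waits for the generation session to finish before signing.
    let _signing = client.new_schnorr_signing_session(key_id, requester, None, message_hash)?;
    Ok(())
}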
- pub self_key_pair: Arc, - /// Connections data. - pub connections: C, - /// Active sessions data. - pub sessions: Arc, - // Messages processor. - pub message_processor: Arc, - /// Link between servers set chnage session and the connections manager. - pub servers_set_change_creator_connector: Arc, -} - -/// Create new network-backed cluster. -pub fn new_network_cluster( - executor: Executor, - config: ClusterConfiguration, - net_config: NetConnectionsManagerConfig, -) -> Result>, Error> { - let mut nodes = config.key_server_set.snapshot().current_set; - let is_isolated = nodes.remove(config.self_key_pair.public()).is_none(); - let connections_data = Arc::new(RwLock::new(NetConnectionsContainer { - is_isolated, - nodes, - connections: BTreeMap::new(), - })); - - let connection_trigger: Box = - match net_config.auto_migrate_enabled { - false => Box::new(SimpleConnectionTrigger::with_config(&config)), - true if config.admin_public.is_none() => { - Box::new(ConnectionTriggerWithMigration::with_config(&config)) - } - true => return Err(Error::Internal( - "secret store admininstrator public key is specified with auto-migration enabled" - .into(), - )), - }; - - let servers_set_change_creator_connector = - connection_trigger.servers_set_change_creator_connector(); - let sessions = Arc::new(ClusterSessions::new( - &config, - servers_set_change_creator_connector.clone(), - )); - let message_processor = Arc::new(SessionsMessageProcessor::new( - config.self_key_pair.clone(), - servers_set_change_creator_connector.clone(), - sessions.clone(), - connections_data.clone(), - )); - - let connections = NetConnectionsManager::new( - executor, - message_processor.clone(), - connection_trigger, - connections_data, - &config, - net_config, - )?; - connections.start()?; - - ClusterCore::new( - sessions, - message_processor, - connections, - servers_set_change_creator_connector, - config, - ) -} - -/// Create new in-memory backed cluster -#[cfg(test)] -pub fn new_test_cluster( - messages: MessagesQueue, - config: ClusterConfiguration, -) -> Result>>, Error> { - let nodes = config.key_server_set.snapshot().current_set; - let connections = new_test_connections( - messages, - *config.self_key_pair.public(), - nodes.keys().cloned().collect(), - ); - - let connection_trigger = Box::new(SimpleConnectionTrigger::with_config(&config)); - let servers_set_change_creator_connector = - connection_trigger.servers_set_change_creator_connector(); - let mut sessions = ClusterSessions::new(&config, servers_set_change_creator_connector.clone()); - if config.preserve_sessions { - sessions.preserve_sessions(); - } - let sessions = Arc::new(sessions); - - let message_processor = Arc::new(SessionsMessageProcessor::new( - config.self_key_pair.clone(), - servers_set_change_creator_connector.clone(), - sessions.clone(), - connections.provider(), - )); - - ClusterCore::new( - sessions, - message_processor, - connections, - servers_set_change_creator_connector, - config, - ) -} - -impl ClusterCore { - pub fn new( - sessions: Arc, - message_processor: Arc, - connections: C, - servers_set_change_creator_connector: Arc, - config: ClusterConfiguration, - ) -> Result, Error> { - Ok(Arc::new(ClusterCore { - data: Arc::new(ClusterData { - self_key_pair: config.self_key_pair.clone(), - connections, - sessions: sessions.clone(), - config, - message_processor, - servers_set_change_creator_connector, - }), - })) - } - - /// Create new client interface. 
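// A hedged sketch of how a key server node might be wired up through new_network_cluster
// above (illustrative only; the listen address and port are arbitrary, and the cluster
// configuration is assumed to be built by the caller). The returned core is started and
// exposed to callers via the run()/client() methods defined just below.
fn start_network_node(
    executor: Executor,
    config: ClusterConfiguration,
) -> Result<Arc<dyn ClusterClient>, Error> {
    let net_config = NetConnectionsManagerConfig {
        allow_connecting_to_higher_nodes: true,
        listen_address: ("0.0.0.0".into(), 8083),
        auto_migrate_enabled: false,
    };
    let core = new_network_cluster(executor, config, net_config)?;
    core.run()?;
    Ok(core.client())
}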
- pub fn client(&self) -> Arc { - Arc::new(ClusterClientImpl::new(self.data.clone())) - } - - /// Run cluster. - pub fn run(&self) -> Result<(), Error> { - self.data.connections.connect(); - Ok(()) - } - - #[cfg(test)] - pub fn view(&self) -> Result, Error> { - let connections = self.data.connections.provider(); - let mut connected_nodes = connections.connected_nodes()?; - let disconnected_nodes = connections.disconnected_nodes(); - connected_nodes.insert(self.data.self_key_pair.public().clone()); - - let connected_nodes_count = connected_nodes.len(); - let disconnected_nodes_count = disconnected_nodes.len(); - Ok(Arc::new(ClusterView::new( - self.data.self_key_pair.clone(), - connections, - connected_nodes, - connected_nodes_count + disconnected_nodes_count, - ))) - } -} - -impl ClusterView { - pub fn new( - self_key_pair: Arc, - connections: Arc, - nodes: BTreeSet, - configured_nodes_count: usize, - ) -> Self { - ClusterView { - configured_nodes_count: configured_nodes_count, - connected_nodes: nodes, - connections, - self_key_pair, - } - } -} - -impl Cluster for ClusterView { - fn broadcast(&self, message: Message) -> Result<(), Error> { - for node in self - .connected_nodes - .iter() - .filter(|n| *n != self.self_key_pair.public()) - { - trace!(target: "secretstore_net", "{}: sent message {} to {}", self.self_key_pair.public(), message, node); - let connection = self - .connections - .connection(node) - .ok_or(Error::NodeDisconnected)?; - connection.send_message(message.clone()); - } - Ok(()) - } - - fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> { - trace!(target: "secretstore_net", "{}: sent message {} to {}", self.self_key_pair.public(), message, to); - let connection = self - .connections - .connection(to) - .ok_or(Error::NodeDisconnected)?; - connection.send_message(message); - Ok(()) - } - - fn is_connected(&self, node: &NodeId) -> bool { - self.connected_nodes.contains(node) - } - - fn nodes(&self) -> BTreeSet { - self.connected_nodes.clone() - } - - fn configured_nodes_count(&self) -> usize { - self.configured_nodes_count - } - - fn connected_nodes_count(&self) -> usize { - self.connected_nodes.len() - } -} - -impl ClusterClientImpl { - pub fn new(data: Arc>) -> Self { - ClusterClientImpl { data: data } - } - - fn create_key_version_negotiation_session( - &self, - session_id: SessionId, - ) -> Result>, Error> - { - let mut connected_nodes = self.data.connections.provider().connected_nodes()?; - connected_nodes.insert(self.data.self_key_pair.public().clone()); - - let access_key = Random.generate()?.secret().clone(); - let session_id = SessionIdWithSubSession::new(session_id, access_key); - let cluster = create_cluster_view( - self.data.self_key_pair.clone(), - self.data.connections.provider(), - false, - )?; - let session = self.data.sessions.negotiation_sessions.insert( - cluster, - self.data.self_key_pair.public().clone(), - session_id.clone(), - None, - false, - None, - )?; - match session.initialize(connected_nodes) { - Ok(()) => Ok(session), - Err(error) => { - self.data - .sessions - .negotiation_sessions - .remove(&session.id()); - Err(error) - } - } - } -} - -impl ClusterClient for ClusterClientImpl { - fn new_generation_session( - &self, - session_id: SessionId, - origin: Option
, - author: Address, - threshold: usize, - ) -> Result, Error> { - let mut connected_nodes = self.data.connections.provider().connected_nodes()?; - connected_nodes.insert(self.data.self_key_pair.public().clone()); - - let cluster = create_cluster_view( - self.data.self_key_pair.clone(), - self.data.connections.provider(), - true, - )?; - let session = self.data.sessions.generation_sessions.insert( - cluster, - self.data.self_key_pair.public().clone(), - session_id, - None, - false, - None, - )?; - process_initialization_result( - session.initialize(origin, author, false, threshold, connected_nodes.into()), - session, - &self.data.sessions.generation_sessions, - ) - } - - fn new_encryption_session( - &self, - session_id: SessionId, - requester: Requester, - common_point: Public, - encrypted_point: Public, - ) -> Result, Error> { - let mut connected_nodes = self.data.connections.provider().connected_nodes()?; - connected_nodes.insert(self.data.self_key_pair.public().clone()); - - let cluster = create_cluster_view( - self.data.self_key_pair.clone(), - self.data.connections.provider(), - true, - )?; - let session = self.data.sessions.encryption_sessions.insert( - cluster, - self.data.self_key_pair.public().clone(), - session_id, - None, - false, - None, - )?; - process_initialization_result( - session.initialize(requester, common_point, encrypted_point), - session, - &self.data.sessions.encryption_sessions, - ) - } - - fn new_decryption_session( - &self, - session_id: SessionId, - origin: Option
, - requester: Requester, - version: Option, - is_shadow_decryption: bool, - is_broadcast_decryption: bool, - ) -> Result, Error> { - let mut connected_nodes = self.data.connections.provider().connected_nodes()?; - connected_nodes.insert(self.data.self_key_pair.public().clone()); - - let access_key = Random.generate()?.secret().clone(); - let session_id = SessionIdWithSubSession::new(session_id, access_key); - let cluster = create_cluster_view( - self.data.self_key_pair.clone(), - self.data.connections.provider(), - false, - )?; - let session = self.data.sessions.decryption_sessions.insert( - cluster, - self.data.self_key_pair.public().clone(), - session_id.clone(), - None, - false, - Some(requester), - )?; - - let initialization_result = match version { - Some(version) => session.initialize( - origin, - version, - is_shadow_decryption, - is_broadcast_decryption, - ), - None => self - .create_key_version_negotiation_session(session_id.id.clone()) - .map(|version_session| { - version_session.set_continue_action(ContinueAction::Decrypt( - session.clone(), - origin, - is_shadow_decryption, - is_broadcast_decryption, - )); - self.data - .message_processor - .try_continue_session(Some(version_session)); - }), - }; - - process_initialization_result( - initialization_result, - session, - &self.data.sessions.decryption_sessions, - ) - } - - fn new_schnorr_signing_session( - &self, - session_id: SessionId, - requester: Requester, - version: Option, - message_hash: H256, - ) -> Result, Error> { - let mut connected_nodes = self.data.connections.provider().connected_nodes()?; - connected_nodes.insert(self.data.self_key_pair.public().clone()); - - let access_key = Random.generate()?.secret().clone(); - let session_id = SessionIdWithSubSession::new(session_id, access_key); - let cluster = create_cluster_view( - self.data.self_key_pair.clone(), - self.data.connections.provider(), - false, - )?; - let session = self.data.sessions.schnorr_signing_sessions.insert( - cluster, - self.data.self_key_pair.public().clone(), - session_id.clone(), - None, - false, - Some(requester), - )?; - - let initialization_result = match version { - Some(version) => session.initialize(version, message_hash), - None => self - .create_key_version_negotiation_session(session_id.id.clone()) - .map(|version_session| { - version_session.set_continue_action(ContinueAction::SchnorrSign( - session.clone(), - message_hash, - )); - self.data - .message_processor - .try_continue_session(Some(version_session)); - }), - }; - - process_initialization_result( - initialization_result, - session, - &self.data.sessions.schnorr_signing_sessions, - ) - } - - fn new_ecdsa_signing_session( - &self, - session_id: SessionId, - requester: Requester, - version: Option, - message_hash: H256, - ) -> Result, Error> { - let mut connected_nodes = self.data.connections.provider().connected_nodes()?; - connected_nodes.insert(self.data.self_key_pair.public().clone()); - - let access_key = Random.generate()?.secret().clone(); - let session_id = SessionIdWithSubSession::new(session_id, access_key); - let cluster = create_cluster_view( - self.data.self_key_pair.clone(), - self.data.connections.provider(), - false, - )?; - let session = self.data.sessions.ecdsa_signing_sessions.insert( - cluster, - self.data.self_key_pair.public().clone(), - session_id.clone(), - None, - false, - Some(requester), - )?; - - let initialization_result = match version { - Some(version) => session.initialize(version, message_hash), - None => self - 
.create_key_version_negotiation_session(session_id.id.clone()) - .map(|version_session| { - version_session.set_continue_action(ContinueAction::EcdsaSign( - session.clone(), - message_hash, - )); - self.data - .message_processor - .try_continue_session(Some(version_session)); - }), - }; - - process_initialization_result( - initialization_result, - session, - &self.data.sessions.ecdsa_signing_sessions, - ) - } - - fn new_key_version_negotiation_session( - &self, - session_id: SessionId, - ) -> Result>, Error> - { - let session = self.create_key_version_negotiation_session(session_id)?; - Ok(session) - } - - fn new_servers_set_change_session( - &self, - session_id: Option, - migration_id: Option, - new_nodes_set: BTreeSet, - old_set_signature: Signature, - new_set_signature: Signature, - ) -> Result, Error> { - new_servers_set_change_session( - self.data.self_key_pair.clone(), - &self.data.sessions, - self.data.connections.provider(), - self.data.servers_set_change_creator_connector.clone(), - ServersSetChangeParams { - session_id, - migration_id, - new_nodes_set, - old_set_signature, - new_set_signature, - }, - ) - } - - fn add_generation_listener( - &self, - listener: Arc>, - ) { - self.data - .sessions - .generation_sessions - .add_listener(listener); - } - - fn add_decryption_listener( - &self, - listener: Arc>, - ) { - self.data - .sessions - .decryption_sessions - .add_listener(listener); - } - - fn add_key_version_negotiation_listener( - &self, - listener: Arc< - dyn ClusterSessionsListener< - KeyVersionNegotiationSession, - >, - >, - ) { - self.data - .sessions - .negotiation_sessions - .add_listener(listener); - } - - #[cfg(test)] - fn make_faulty_generation_sessions(&self) { - self.data.sessions.make_faulty_generation_sessions(); - } - - #[cfg(test)] - fn generation_session(&self, session_id: &SessionId) -> Option> { - self.data - .sessions - .generation_sessions - .get(session_id, false) - } - - #[cfg(test)] - fn is_fully_connected(&self) -> bool { - self.data - .connections - .provider() - .disconnected_nodes() - .is_empty() - } - - #[cfg(test)] - fn connect(&self) { - self.data.connections.connect() - } -} - -pub struct ServersSetChangeParams { - pub session_id: Option, - pub migration_id: Option, - pub new_nodes_set: BTreeSet, - pub old_set_signature: Signature, - pub new_set_signature: Signature, -} - -pub fn new_servers_set_change_session( - self_key_pair: Arc, - sessions: &ClusterSessions, - connections: Arc, - servers_set_change_creator_connector: Arc, - params: ServersSetChangeParams, -) -> Result, Error> { - let session_id = match params.session_id { - Some(session_id) if session_id == *SERVERS_SET_CHANGE_SESSION_ID => session_id, - Some(_) => return Err(Error::InvalidMessage), - None => *SERVERS_SET_CHANGE_SESSION_ID, - }; - - let cluster = create_cluster_view(self_key_pair.clone(), connections, true)?; - let creation_data = AdminSessionCreationData::ServersSetChange( - params.migration_id, - params.new_nodes_set.clone(), - ); - let session = sessions.admin_sessions.insert( - cluster, - *self_key_pair.public(), - session_id, - None, - true, - Some(creation_data), - )?; - let initialization_result = session - .as_servers_set_change() - .expect("servers set change session is created; qed") - .initialize( - params.new_nodes_set, - params.old_set_signature, - params.new_set_signature, - ); - - if initialization_result.is_ok() { - servers_set_change_creator_connector.set_key_servers_set_change_session(session.clone()); - } - - 
process_initialization_result(initialization_result, session, &sessions.admin_sessions) -} - -fn process_initialization_result( - result: Result<(), Error>, - session: Arc, - sessions: &ClusterSessionsContainer, -) -> Result, Error> -where - S: ClusterSession, - SC: ClusterSessionCreator, -{ - match result { - Ok(()) if session.is_finished() => { - sessions.remove(&session.id()); - Ok(session) - } - Ok(()) => Ok(session), - Err(error) => { - sessions.remove(&session.id()); - Err(error) - } - } -} - -#[cfg(test)] -pub mod tests { - use ethereum_types::{Address, H256}; - use ethkey::{sign, Generator, Public, Random, Signature}; - use key_server_cluster::{ - cluster::{new_test_cluster, Cluster, ClusterClient, ClusterConfiguration, ClusterCore}, - cluster_connections::{ - tests::{MessagesQueue, TestConnections}, - ConnectionManager, - }, - cluster_sessions::{ - AdminSession, ClusterSession, ClusterSessions, ClusterSessionsListener, - }, - decryption_session::SessionImpl as DecryptionSession, - encryption_session::SessionImpl as EncryptionSession, - generation_session::{ - SessionImpl as GenerationSession, SessionState as GenerationSessionState, - }, - key_version_negotiation_session::{ - IsolatedSessionTransport as KeyVersionNegotiationSessionTransport, - SessionImpl as KeyVersionNegotiationSession, - }, - message::Message, - signing_session_ecdsa::SessionImpl as EcdsaSigningSession, - signing_session_schnorr::SessionImpl as SchnorrSigningSession, - DummyAclStorage, DummyKeyStorage, Error, MapKeyServerSet, NodeId, NodeKeyPair, - PlainNodeKeyPair, Requester, SessionId, - }; - use parking_lot::{Mutex, RwLock}; - use std::{ - collections::{BTreeMap, BTreeSet, VecDeque}, - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, - }, - }; - - #[derive(Default)] - pub struct DummyClusterClient { - pub generation_requests_count: AtomicUsize, - } - - #[derive(Debug)] - pub struct DummyCluster { - id: NodeId, - data: RwLock, - } - - #[derive(Debug, Default)] - struct DummyClusterData { - nodes: BTreeSet, - messages: VecDeque<(NodeId, Message)>, - } - - impl ClusterClient for DummyClusterClient { - fn new_generation_session( - &self, - _session_id: SessionId, - _origin: Option
, - _author: Address, - _threshold: usize, - ) -> Result, Error> { - self.generation_requests_count - .fetch_add(1, Ordering::Relaxed); - Err(Error::Internal("test-error".into())) - } - fn new_encryption_session( - &self, - _session_id: SessionId, - _requester: Requester, - _common_point: Public, - _encrypted_point: Public, - ) -> Result, Error> { - unimplemented!("test-only") - } - fn new_decryption_session( - &self, - _session_id: SessionId, - _origin: Option
, - _requester: Requester, - _version: Option, - _is_shadow_decryption: bool, - _is_broadcast_session: bool, - ) -> Result, Error> { - unimplemented!("test-only") - } - fn new_schnorr_signing_session( - &self, - _session_id: SessionId, - _requester: Requester, - _version: Option, - _message_hash: H256, - ) -> Result, Error> { - unimplemented!("test-only") - } - fn new_ecdsa_signing_session( - &self, - _session_id: SessionId, - _requester: Requester, - _version: Option, - _message_hash: H256, - ) -> Result, Error> { - unimplemented!("test-only") - } - - fn new_key_version_negotiation_session( - &self, - _session_id: SessionId, - ) -> Result>, Error> - { - unimplemented!("test-only") - } - fn new_servers_set_change_session( - &self, - _session_id: Option, - _migration_id: Option, - _new_nodes_set: BTreeSet, - _old_set_signature: Signature, - _new_set_signature: Signature, - ) -> Result, Error> { - unimplemented!("test-only") - } - - fn add_generation_listener( - &self, - _listener: Arc>, - ) { - } - fn add_decryption_listener( - &self, - _listener: Arc>, - ) { - } - fn add_key_version_negotiation_listener( - &self, - _listener: Arc< - dyn ClusterSessionsListener< - KeyVersionNegotiationSession, - >, - >, - ) { - } - - fn make_faulty_generation_sessions(&self) { - unimplemented!("test-only") - } - fn generation_session(&self, _session_id: &SessionId) -> Option> { - unimplemented!("test-only") - } - fn is_fully_connected(&self) -> bool { - true - } - fn connect(&self) {} - } - - impl DummyCluster { - pub fn new(id: NodeId) -> Self { - DummyCluster { - id: id, - data: RwLock::new(DummyClusterData::default()), - } - } - - pub fn node(&self) -> NodeId { - self.id.clone() - } - - pub fn add_node(&self, node: NodeId) { - self.data.write().nodes.insert(node); - } - - pub fn add_nodes>(&self, nodes: I) { - self.data.write().nodes.extend(nodes) - } - - pub fn remove_node(&self, node: &NodeId) { - self.data.write().nodes.remove(node); - } - - pub fn take_message(&self) -> Option<(NodeId, Message)> { - self.data.write().messages.pop_front() - } - } - - impl Cluster for DummyCluster { - fn broadcast(&self, message: Message) -> Result<(), Error> { - let mut data = self.data.write(); - let all_nodes: Vec<_> = data - .nodes - .iter() - .cloned() - .filter(|n| n != &self.id) - .collect(); - for node in all_nodes { - data.messages.push_back((node, message.clone())); - } - Ok(()) - } - - fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> { - debug_assert!(&self.id != to); - self.data.write().messages.push_back((to.clone(), message)); - Ok(()) - } - - fn is_connected(&self, node: &NodeId) -> bool { - let data = self.data.read(); - &self.id == node || data.nodes.contains(node) - } - - fn nodes(&self) -> BTreeSet { - self.data.read().nodes.iter().cloned().collect() - } - - fn configured_nodes_count(&self) -> usize { - self.data.read().nodes.len() - } - - fn connected_nodes_count(&self) -> usize { - self.data.read().nodes.len() - } - } - - /// Test message loop. - pub struct MessageLoop { - messages: MessagesQueue, - preserve_sessions: bool, - key_pairs_map: BTreeMap>, - acl_storages_map: BTreeMap>, - key_storages_map: BTreeMap>, - clusters_map: BTreeMap>>>, - } - - impl ::std::fmt::Debug for MessageLoop { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - write!(f, "MessageLoop({})", self.clusters_map.len()) - } - } - - impl MessageLoop { - /// Returns set of all nodes ids. 
- pub fn nodes(&self) -> BTreeSet { - self.clusters_map.keys().cloned().collect() - } - - /// Returns nodes id by its index. - pub fn node(&self, idx: usize) -> NodeId { - *self.clusters_map.keys().nth(idx).unwrap() - } - - /// Returns key pair of the node by its idx. - pub fn node_key_pair(&self, idx: usize) -> &Arc { - self.key_pairs_map.values().nth(idx).unwrap() - } - - /// Get cluster reference by its index. - pub fn cluster(&self, idx: usize) -> &Arc>> { - self.clusters_map.values().nth(idx).unwrap() - } - - /// Get keys storage reference by its index. - pub fn key_storage(&self, idx: usize) -> &Arc { - self.key_storages_map.values().nth(idx).unwrap() - } - - /// Get keys storage reference by node id. - pub fn key_storage_of(&self, node: &NodeId) -> &Arc { - &self.key_storages_map[node] - } - - /// Replace key storage of the node by its id. - pub fn replace_key_storage_of(&mut self, node: &NodeId, key_storage: Arc) { - *self.key_storages_map.get_mut(node).unwrap() = key_storage; - } - - /// Get ACL storage reference by its index. - pub fn acl_storage(&self, idx: usize) -> &Arc { - self.acl_storages_map.values().nth(idx).unwrap() - } - - /// Get sessions container reference by its index. - pub fn sessions(&self, idx: usize) -> &Arc { - &self.cluster(idx).data.sessions - } - - /// Get sessions container reference by node id. - pub fn sessions_of(&self, node: &NodeId) -> &Arc { - &self.clusters_map[node].data.sessions - } - - /// Isolate node from others. - pub fn isolate(&self, idx: usize) { - let node = self.node(idx); - for (i, cluster) in self.clusters_map.values().enumerate() { - if i == idx { - cluster.data.connections.isolate(); - } else { - cluster.data.connections.disconnect(node); - } - } - } - - /// Exclude node from cluster. - pub fn exclude(&mut self, idx: usize) { - let node = self.node(idx); - for (i, cluster) in self.clusters_map.values().enumerate() { - if i != idx { - cluster.data.connections.exclude(node); - } - } - self.key_storages_map.remove(&node); - self.acl_storages_map.remove(&node); - self.key_pairs_map.remove(&node); - self.clusters_map.remove(&node); - } - - /// Include new node to the cluster. - pub fn include(&mut self, node_key_pair: Arc) -> usize { - let key_storage = Arc::new(DummyKeyStorage::default()); - let acl_storage = Arc::new(DummyAclStorage::default()); - let cluster_params = ClusterConfiguration { - self_key_pair: node_key_pair.clone(), - key_server_set: Arc::new(MapKeyServerSet::new( - false, - self.nodes() - .iter() - .chain(::std::iter::once(node_key_pair.public())) - .map(|n| (*n, format!("127.0.0.1:{}", 13).parse().unwrap())) - .collect(), - )), - key_storage: key_storage.clone(), - acl_storage: acl_storage.clone(), - admin_public: None, - preserve_sessions: self.preserve_sessions, - }; - let cluster = new_test_cluster(self.messages.clone(), cluster_params).unwrap(); - - for cluster in self.clusters_map.values() { - cluster - .data - .connections - .include(node_key_pair.public().clone()); - } - self.acl_storages_map - .insert(*node_key_pair.public(), acl_storage); - self.key_storages_map - .insert(*node_key_pair.public(), key_storage); - self.clusters_map.insert(*node_key_pair.public(), cluster); - self.key_pairs_map - .insert(*node_key_pair.public(), node_key_pair.clone()); - self.clusters_map - .keys() - .position(|k| k == node_key_pair.public()) - .unwrap() - } - - /// Is empty message queue? - pub fn is_empty(&self) -> bool { - self.messages.lock().is_empty() - } - - /// Takes next message from the queue. 
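// A hedged example of how a test might use the membership helpers above to rotate a key
// server out of the cluster and admit a fresh one (illustrative only; the helper name is
// hypothetical and error handling is elided).
fn rotate_first_node(ml: &mut MessageLoop) -> usize {
    // Drop node 0 from every other node's connection set and from the loop's own maps...
    ml.exclude(0);
    // ...then include a brand new key server and return its index within the loop.
    let new_node = Arc::new(PlainNodeKeyPair::new(Random.generate().unwrap()));
    ml.include(new_node)
}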
- pub fn take_message(&self) -> Option<(NodeId, NodeId, Message)> { - self.messages.lock().pop_front() - } - - /// Process single message. - pub fn process_message(&self, from: NodeId, to: NodeId, message: Message) { - let cluster_data = &self.clusters_map[&to].data; - let connection = cluster_data - .connections - .provider() - .connection(&from) - .unwrap(); - cluster_data - .message_processor - .process_connection_message(connection, message); - } - - /// Take next message and process it. - pub fn take_and_process_message(&self) -> bool { - let (from, to, message) = match self.take_message() { - Some((from, to, message)) => (from, to, message), - None => return false, - }; - - self.process_message(from, to, message); - true - } - - /// Loops until `predicate` returns `true` or there are no messages in the queue. - pub fn loop_until(&self, predicate: F) - where - F: Fn() -> bool, - { - while !predicate() { - if !self.take_and_process_message() { - panic!("message queue is empty but goal is not achieved"); - } - } - } - } - - pub fn make_clusters(num_nodes: usize) -> MessageLoop { - do_make_clusters(num_nodes, false) - } - - pub fn make_clusters_and_preserve_sessions(num_nodes: usize) -> MessageLoop { - do_make_clusters(num_nodes, true) - } - - fn do_make_clusters(num_nodes: usize, preserve_sessions: bool) -> MessageLoop { - let ports_begin = 0; - let messages = Arc::new(Mutex::new(VecDeque::new())); - let key_pairs: Vec<_> = (0..num_nodes) - .map(|_| Arc::new(PlainNodeKeyPair::new(Random.generate().unwrap()))) - .collect(); - let key_storages: Vec<_> = (0..num_nodes) - .map(|_| Arc::new(DummyKeyStorage::default())) - .collect(); - let acl_storages: Vec<_> = (0..num_nodes) - .map(|_| Arc::new(DummyAclStorage::default())) - .collect(); - let cluster_params: Vec<_> = (0..num_nodes) - .map(|i| ClusterConfiguration { - self_key_pair: key_pairs[i].clone(), - key_server_set: Arc::new(MapKeyServerSet::new( - false, - key_pairs - .iter() - .enumerate() - .map(|(j, kp)| { - ( - *kp.public(), - format!("127.0.0.1:{}", ports_begin + j as u16) - .parse() - .unwrap(), - ) - }) - .collect(), - )), - key_storage: key_storages[i].clone(), - acl_storage: acl_storages[i].clone(), - admin_public: None, - preserve_sessions, - }) - .collect(); - let clusters: Vec<_> = cluster_params - .into_iter() - .map(|params| new_test_cluster(messages.clone(), params).unwrap()) - .collect(); - - let clusters_map = clusters - .iter() - .map(|c| (*c.data.config.self_key_pair.public(), c.clone())) - .collect(); - let key_pairs_map = key_pairs.into_iter().map(|kp| (*kp.public(), kp)).collect(); - let key_storages_map = clusters - .iter() - .zip(key_storages.into_iter()) - .map(|(c, ks)| (*c.data.config.self_key_pair.public(), ks)) - .collect(); - let acl_storages_map = clusters - .iter() - .zip(acl_storages.into_iter()) - .map(|(c, acls)| (*c.data.config.self_key_pair.public(), acls)) - .collect(); - MessageLoop { - preserve_sessions, - messages, - key_pairs_map, - acl_storages_map, - key_storages_map, - clusters_map, - } - } - - #[test] - fn cluster_wont_start_generation_session_if_not_fully_connected() { - let ml = make_clusters(3); - ml.cluster(0) - .data - .connections - .disconnect(*ml.cluster(0).data.self_key_pair.public()); - match ml.cluster(0).client().new_generation_session( - SessionId::default(), - Default::default(), - Default::default(), - 1, - ) { - Err(Error::NodeDisconnected) => (), - Err(e) => panic!("unexpected error {:?}", e), - _ => panic!("unexpected success"), - } - } - - #[test] - fn 
error_in_generation_session_broadcasted_to_all_other_nodes() { - let _ = ::env_logger::try_init(); - let ml = make_clusters(3); - - // ask one of nodes to produce faulty generation sessions - ml.cluster(1).client().make_faulty_generation_sessions(); - - // start && wait for generation session to fail - let session = ml - .cluster(0) - .client() - .new_generation_session( - SessionId::default(), - Default::default(), - Default::default(), - 1, - ) - .unwrap(); - ml.loop_until(|| { - session.joint_public_and_secret().is_some() - && ml - .cluster(0) - .client() - .generation_session(&SessionId::default()) - .is_none() - }); - assert!(session.joint_public_and_secret().unwrap().is_err()); - - // check that faulty session is either removed from all nodes, or nonexistent (already removed) - for i in 1..3 { - if let Some(session) = ml - .cluster(i) - .client() - .generation_session(&SessionId::default()) - { - // wait for both session completion && session removal (session completion event is fired - // before session is removed from its own container by cluster) - ml.loop_until(|| { - session.joint_public_and_secret().is_some() - && ml - .cluster(i) - .client() - .generation_session(&SessionId::default()) - .is_none() - }); - assert!(session.joint_public_and_secret().unwrap().is_err()); - } - } - } - - #[test] - fn generation_session_completion_signalled_if_failed_on_master() { - let _ = ::env_logger::try_init(); - let ml = make_clusters(3); - - // ask one of nodes to produce faulty generation sessions - ml.cluster(0).client().make_faulty_generation_sessions(); - - // start && wait for generation session to fail - let session = ml - .cluster(0) - .client() - .new_generation_session( - SessionId::default(), - Default::default(), - Default::default(), - 1, - ) - .unwrap(); - ml.loop_until(|| { - session.joint_public_and_secret().is_some() - && ml - .cluster(0) - .client() - .generation_session(&SessionId::default()) - .is_none() - }); - assert!(session.joint_public_and_secret().unwrap().is_err()); - - // check that faulty session is either removed from all nodes, or nonexistent (already removed) - for i in 1..3 { - if let Some(session) = ml - .cluster(i) - .client() - .generation_session(&SessionId::default()) - { - let session = session.clone(); - // wait for both session completion && session removal (session completion event is fired - // before session is removed from its own container by cluster) - ml.loop_until(|| { - session.joint_public_and_secret().is_some() - && ml - .cluster(i) - .client() - .generation_session(&SessionId::default()) - .is_none() - }); - assert!(session.joint_public_and_secret().unwrap().is_err()); - } - } - } - - #[test] - fn generation_session_is_removed_when_succeeded() { - let _ = ::env_logger::try_init(); - let ml = make_clusters(3); - - // start && wait for generation session to complete - let session = ml - .cluster(0) - .client() - .new_generation_session( - SessionId::default(), - Default::default(), - Default::default(), - 1, - ) - .unwrap(); - ml.loop_until(|| { - (session.state() == GenerationSessionState::Finished - || session.state() == GenerationSessionState::Failed) - && ml - .cluster(0) - .client() - .generation_session(&SessionId::default()) - .is_none() - }); - assert!(session.joint_public_and_secret().unwrap().is_ok()); - - // check that on non-master nodes session is either: - // already removed - // or it is removed right after completion - for i in 1..3 { - if let Some(session) = ml - .cluster(i) - .client() - 
.generation_session(&SessionId::default()) - { - // run to completion if completion message is still on the way - // AND check that it is actually removed from cluster sessions - ml.loop_until(|| { - (session.state() == GenerationSessionState::Finished - || session.state() == GenerationSessionState::Failed) - && ml - .cluster(i) - .client() - .generation_session(&SessionId::default()) - .is_none() - }); - } - } - } - - #[test] - fn sessions_are_removed_when_initialization_fails() { - let ml = make_clusters(3); - let client = ml.cluster(0).client(); - - // generation session - { - // try to start generation session => fail in initialization - assert_eq!( - client - .new_generation_session(SessionId::default(), None, Default::default(), 100) - .map(|_| ()), - Err(Error::NotEnoughNodesForThreshold) - ); - - // try to start generation session => fails in initialization - assert_eq!( - client - .new_generation_session(SessionId::default(), None, Default::default(), 100) - .map(|_| ()), - Err(Error::NotEnoughNodesForThreshold) - ); - - assert!(ml.cluster(0).data.sessions.generation_sessions.is_empty()); - } - - // decryption session - { - // try to start decryption session => fails in initialization - assert_eq!( - client - .new_decryption_session( - Default::default(), - Default::default(), - Default::default(), - Some(Default::default()), - false, - false - ) - .map(|_| ()), - Err(Error::InvalidMessage) - ); - - // try to start generation session => fails in initialization - assert_eq!( - client - .new_decryption_session( - Default::default(), - Default::default(), - Default::default(), - Some(Default::default()), - false, - false - ) - .map(|_| ()), - Err(Error::InvalidMessage) - ); - - assert!(ml.cluster(0).data.sessions.decryption_sessions.is_empty()); - assert!(ml.cluster(0).data.sessions.negotiation_sessions.is_empty()); - } - } - - #[test] - fn schnorr_signing_session_completes_if_node_does_not_have_a_share() { - let _ = ::env_logger::try_init(); - let ml = make_clusters(3); - - // start && wait for generation session to complete - let session = ml - .cluster(0) - .client() - .new_generation_session( - SessionId::default(), - Default::default(), - Default::default(), - 1, - ) - .unwrap(); - ml.loop_until(|| { - (session.state() == GenerationSessionState::Finished - || session.state() == GenerationSessionState::Failed) - && ml - .cluster(0) - .client() - .generation_session(&SessionId::default()) - .is_none() - }); - assert!(session.joint_public_and_secret().unwrap().is_ok()); - - // now remove share from node2 - assert!((0..3).all(|i| ml.cluster(i).data.sessions.generation_sessions.is_empty())); - ml.cluster(2) - .data - .config - .key_storage - .remove(&Default::default()) - .unwrap(); - - // and try to sign message with generated key - let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); - let session0 = ml - .cluster(0) - .client() - .new_schnorr_signing_session( - Default::default(), - signature.into(), - None, - Default::default(), - ) - .unwrap(); - let session = ml - .cluster(0) - .data - .sessions - .schnorr_signing_sessions - .first() - .unwrap(); - - ml.loop_until(|| { - session.is_finished() - && (0..3).all(|i| { - ml.cluster(i) - .data - .sessions - .schnorr_signing_sessions - .is_empty() - }) - }); - session0.wait().unwrap(); - - // and try to sign message with generated key using node that has no key share - let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); - let session2 = ml - .cluster(2) - 
.client() - .new_schnorr_signing_session( - Default::default(), - signature.into(), - None, - Default::default(), - ) - .unwrap(); - let session = ml - .cluster(2) - .data - .sessions - .schnorr_signing_sessions - .first() - .unwrap(); - - ml.loop_until(|| { - session.is_finished() - && (0..3).all(|i| { - ml.cluster(i) - .data - .sessions - .schnorr_signing_sessions - .is_empty() - }) - }); - session2.wait().unwrap(); - - // now remove share from node1 - ml.cluster(1) - .data - .config - .key_storage - .remove(&Default::default()) - .unwrap(); - - // and try to sign message with generated key - let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); - let session1 = ml - .cluster(0) - .client() - .new_schnorr_signing_session( - Default::default(), - signature.into(), - None, - Default::default(), - ) - .unwrap(); - let session = ml - .cluster(0) - .data - .sessions - .schnorr_signing_sessions - .first() - .unwrap(); - - ml.loop_until(|| session.is_finished()); - session1.wait().unwrap_err(); - } - - #[test] - fn ecdsa_signing_session_completes_if_node_does_not_have_a_share() { - let _ = ::env_logger::try_init(); - let ml = make_clusters(4); - - // start && wait for generation session to complete - let session = ml - .cluster(0) - .client() - .new_generation_session( - SessionId::default(), - Default::default(), - Default::default(), - 1, - ) - .unwrap(); - ml.loop_until(|| { - (session.state() == GenerationSessionState::Finished - || session.state() == GenerationSessionState::Failed) - && ml - .cluster(0) - .client() - .generation_session(&SessionId::default()) - .is_none() - }); - assert!(session.joint_public_and_secret().unwrap().is_ok()); - - // now remove share from node2 - assert!((0..3).all(|i| ml.cluster(i).data.sessions.generation_sessions.is_empty())); - ml.cluster(2) - .data - .config - .key_storage - .remove(&Default::default()) - .unwrap(); - - // and try to sign message with generated key - let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); - let session0 = ml - .cluster(0) - .client() - .new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()) - .unwrap(); - let session = ml - .cluster(0) - .data - .sessions - .ecdsa_signing_sessions - .first() - .unwrap(); - - ml.loop_until(|| { - session.is_finished() - && (0..3).all(|i| { - ml.cluster(i) - .data - .sessions - .ecdsa_signing_sessions - .is_empty() - }) - }); - session0.wait().unwrap(); - - // and try to sign message with generated key using node that has no key share - let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); - let session2 = ml - .cluster(2) - .client() - .new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()) - .unwrap(); - let session = ml - .cluster(2) - .data - .sessions - .ecdsa_signing_sessions - .first() - .unwrap(); - ml.loop_until(|| { - session.is_finished() - && (0..3).all(|i| { - ml.cluster(i) - .data - .sessions - .ecdsa_signing_sessions - .is_empty() - }) - }); - session2.wait().unwrap(); - - // now remove share from node1 - ml.cluster(1) - .data - .config - .key_storage - .remove(&Default::default()) - .unwrap(); - - // and try to sign message with generated key - let signature = sign(Random.generate().unwrap().secret(), &Default::default()).unwrap(); - let session1 = ml - .cluster(0) - .client() - .new_ecdsa_signing_session(Default::default(), signature.into(), None, H256::random()) - .unwrap(); - let session = ml - .cluster(0) - 
.data - .sessions - .ecdsa_signing_sessions - .first() - .unwrap(); - ml.loop_until(|| session.is_finished()); - session1.wait().unwrap_err(); - } -} diff --git a/secret-store/src/key_server_cluster/cluster_connections.rs b/secret-store/src/key_server_cluster/cluster_connections.rs deleted file mode 100644 index 97b7b85fb..000000000 --- a/secret-store/src/key_server_cluster/cluster_connections.rs +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. - -// OpenEthereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// OpenEthereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with OpenEthereum. If not, see . - -use key_server_cluster::{message::Message, Error, NodeId}; -use std::{collections::BTreeSet, sync::Arc}; - -/// Connection to the single node. Provides basic information about connected node and -/// allows sending messages to this node. -pub trait Connection: Send + Sync { - /// Is this inbound connection? This only matters when both nodes are simultaneously establishing - /// two connections to each other. The agreement is that the inbound connection from the node with - /// lower NodeId is used and the other connection is closed. - fn is_inbound(&self) -> bool; - /// Returns id of the connected node. - fn node_id(&self) -> &NodeId; - /// Returns 'address' of the node to use in traces. - fn node_address(&self) -> String; - /// Send message to the connected node. - fn send_message(&self, message: Message); -} - -/// Connections manager. Responsible for keeping us connected to all required nodes. -pub trait ConnectionManager: 'static + Send + Sync { - /// Returns shared reference to connections provider. - fn provider(&self) -> Arc; - /// Try to reach all disconnected nodes immediately. This method is exposed mostly for - /// tests, where all 'nodes' are starting listening for incoming connections first and - /// only after this, they're actually start connecting to each other. - fn connect(&self); -} - -/// Connections provider. Holds all active connections and the set of nodes that we need to -/// connect to. At any moment connection could be lost and the set of connected/disconnected -/// nodes could change (at behalf of the connection manager). -/// Clone operation should be cheap (Arc). -pub trait ConnectionProvider: Send + Sync { - /// Returns the set of currently connected nodes. Error is returned when our node is - /// not a part of the cluster ('isolated' node). - fn connected_nodes(&self) -> Result, Error>; - /// Returns the set of currently disconnected nodes. - fn disconnected_nodes(&self) -> BTreeSet; - /// Returns the reference to the active node connection or None if the node is not connected. 
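// A hedged restatement of the duplicate-connection agreement described in the Connection
// trait docs above: when two key servers dial each other at the same time, only the
// connection initiated by the node with the lower NodeId is kept. The helper name is
// hypothetical and does not exist in this module.
fn keep_new_connection(self_id: &NodeId, peer_id: &NodeId, new_is_inbound: bool) -> bool {
    if new_is_inbound {
        // An inbound connection is kept only when the dialling peer has the lower id.
        peer_id < self_id
    } else {
        // An outbound connection is kept only when we are the lower-id side.
        self_id < peer_id
    }
}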
- fn connection(&self, node: &NodeId) -> Option>; -} - -#[cfg(test)] -pub mod tests { - use super::{Connection, ConnectionManager, ConnectionProvider}; - use key_server_cluster::{message::Message, Error, NodeId}; - use parking_lot::Mutex; - use std::{ - collections::{BTreeSet, VecDeque}, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - }; - - /// Shared messages queue. - pub type MessagesQueue = Arc>>; - - /// Single node connections. - pub struct TestConnections { - node: NodeId, - is_isolated: AtomicBool, - connected_nodes: Mutex>, - disconnected_nodes: Mutex>, - messages: MessagesQueue, - } - - /// Single connection. - pub struct TestConnection { - from: NodeId, - to: NodeId, - messages: MessagesQueue, - } - - impl TestConnections { - pub fn isolate(&self) { - let connected_nodes = - ::std::mem::replace(&mut *self.connected_nodes.lock(), Default::default()); - self.is_isolated.store(true, Ordering::Relaxed); - self.disconnected_nodes.lock().extend(connected_nodes) - } - - pub fn disconnect(&self, node: NodeId) { - self.connected_nodes.lock().remove(&node); - self.disconnected_nodes.lock().insert(node); - } - - pub fn exclude(&self, node: NodeId) { - self.connected_nodes.lock().remove(&node); - self.disconnected_nodes.lock().remove(&node); - } - - pub fn include(&self, node: NodeId) { - self.connected_nodes.lock().insert(node); - } - } - - impl ConnectionManager for Arc { - fn provider(&self) -> Arc { - self.clone() - } - - fn connect(&self) {} - } - - impl ConnectionProvider for TestConnections { - fn connected_nodes(&self) -> Result, Error> { - match self.is_isolated.load(Ordering::Relaxed) { - false => Ok(self.connected_nodes.lock().clone()), - true => Err(Error::NodeDisconnected), - } - } - - fn disconnected_nodes(&self) -> BTreeSet { - self.disconnected_nodes.lock().clone() - } - - fn connection(&self, node: &NodeId) -> Option> { - match self.connected_nodes.lock().contains(node) { - true => Some(Arc::new(TestConnection { - from: self.node, - to: *node, - messages: self.messages.clone(), - })), - false => None, - } - } - } - - impl Connection for TestConnection { - fn is_inbound(&self) -> bool { - false - } - - fn node_id(&self) -> &NodeId { - &self.to - } - - fn node_address(&self) -> String { - format!("{}", self.to) - } - - fn send_message(&self, message: Message) { - self.messages - .lock() - .push_back((self.from, self.to, message)) - } - } - - pub fn new_test_connections( - messages: MessagesQueue, - node: NodeId, - mut nodes: BTreeSet, - ) -> Arc { - let is_isolated = !nodes.remove(&node); - Arc::new(TestConnections { - node, - is_isolated: AtomicBool::new(is_isolated), - connected_nodes: Mutex::new(nodes), - disconnected_nodes: Default::default(), - messages, - }) - } -} diff --git a/secret-store/src/key_server_cluster/cluster_connections_net.rs b/secret-store/src/key_server_cluster/cluster_connections_net.rs deleted file mode 100644 index 7e89a91ec..000000000 --- a/secret-store/src/key_server_cluster/cluster_connections_net.rs +++ /dev/null @@ -1,588 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. - -// OpenEthereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// OpenEthereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with OpenEthereum. If not, see . - -use ethkey::KeyPair; -use futures::{future, Future, Stream}; -use key_server_cluster::{ - cluster_connections::{Connection, ConnectionManager, ConnectionProvider}, - cluster_message_processor::MessageProcessor, - connection_trigger::{ConnectionTrigger, Maintain}, - io::{ - read_encrypted_message, write_encrypted_message, DeadlineStatus, ReadMessage, - SharedTcpStream, WriteMessage, - }, - message::{self, ClusterMessage, Message}, - net::{ - accept_connection as io_accept_connection, connect as io_connect, - Connection as IoConnection, - }, - ClusterConfiguration, Error, NodeId, NodeKeyPair, -}; -use parity_runtime::Executor; -use parking_lot::{Mutex, RwLock}; -use std::{ - collections::{btree_map::Entry, BTreeMap, BTreeSet}, - io, - net::{IpAddr, SocketAddr}, - sync::Arc, - time::{Duration, Instant}, -}; -use tokio::{ - net::{TcpListener, TcpStream}, - timer::{timeout::Error as TimeoutError, Interval}, -}; -use tokio_io::IoFuture; - -/// Empty future. -pub type BoxedEmptyFuture = Box + Send>; - -/// Maintain interval (seconds). Every MAINTAIN_INTERVAL seconds node: -/// 1) checks if connected nodes are responding to KeepAlive messages -/// 2) tries to connect to disconnected nodes -/// 3) checks if enc/dec sessions are time-outed -const MAINTAIN_INTERVAL: u64 = 10; - -/// When no messages have been received from node within KEEP_ALIVE_SEND_INTERVAL seconds, -/// we must send KeepAlive message to the node to check if it still responds to messages. -const KEEP_ALIVE_SEND_INTERVAL: Duration = Duration::from_secs(30); -/// When no messages have been received from node within KEEP_ALIVE_DISCONNECT_INTERVAL seconds, -/// we must treat this node as non-responding && disconnect from it. -const KEEP_ALIVE_DISCONNECT_INTERVAL: Duration = Duration::from_secs(60); - -/// Network connection manager configuration. -pub struct NetConnectionsManagerConfig { - /// Allow connecting to 'higher' nodes. - pub allow_connecting_to_higher_nodes: bool, - /// Interface to listen to. - pub listen_address: (String, u16), - /// True if we should autostart key servers set change session when servers set changes? - /// This will only work when servers set is configured using KeyServerSet contract. - pub auto_migrate_enabled: bool, -} - -/// Network connections manager. -pub struct NetConnectionsManager { - /// Address we're listening for incoming connections. - listen_address: SocketAddr, - /// Shared cluster connections data reference. - data: Arc, -} - -/// Network connections data. Shared among NetConnectionsManager and spawned futures. -struct NetConnectionsData { - /// Allow connecting to 'higher' nodes. - allow_connecting_to_higher_nodes: bool, - /// Reference to tokio task executor. - executor: Executor, - /// Key pair of this node. - self_key_pair: Arc, - /// Network messages processor. - message_processor: Arc, - /// Connections trigger. - trigger: Mutex>, - /// Mutable connection data. - container: Arc>, -} - -/// Network connections container. This is the only mutable data of NetConnectionsManager. -/// The set of nodes is mutated by the connection trigger and the connections set is also -/// mutated by spawned futures. 
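// A hedged sketch of the keep-alive policy implied by the intervals above (hypothetical
// helper, not part of this module): given how long a connection has been silent, either do
// nothing, probe it with a KeepAlive message, or give up and disconnect. The real logic
// lives in net_keep_alive further down.
enum KeepAliveAction {
    Nothing,
    SendKeepAlive,
    Disconnect,
}

fn keep_alive_action(idle: Duration) -> KeepAliveAction {
    if idle > KEEP_ALIVE_DISCONNECT_INTERVAL {
        // Silent for more than a minute: treat the peer as unresponsive.
        KeepAliveAction::Disconnect
    } else if idle > KEEP_ALIVE_SEND_INTERVAL {
        // Silent for more than thirty seconds: ask the peer to prove it is alive.
        KeepAliveAction::SendKeepAlive
    } else {
        KeepAliveAction::Nothing
    }
}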
-pub struct NetConnectionsContainer { - /// Is this node isolated from cluster? - pub is_isolated: bool, - /// Current key servers set. - pub nodes: BTreeMap, - /// Active connections to key servers. - pub connections: BTreeMap>, -} - -/// Network connection to single key server node. -pub struct NetConnection { - executor: Executor, - /// Id of the peer node. - node_id: NodeId, - /// Address of the peer node. - node_address: SocketAddr, - /// Is this inbound (true) or outbound (false) connection? - is_inbound: bool, - /// Key pair that is used to encrypt connection' messages. - key: KeyPair, - /// Last message time. - last_message_time: RwLock, - /// Underlying TCP stream. - stream: SharedTcpStream, -} - -impl NetConnectionsManager { - /// Create new network connections manager. - pub fn new( - executor: Executor, - message_processor: Arc, - trigger: Box, - container: Arc>, - config: &ClusterConfiguration, - net_config: NetConnectionsManagerConfig, - ) -> Result { - let listen_address = - make_socket_address(&net_config.listen_address.0, net_config.listen_address.1)?; - - Ok(NetConnectionsManager { - listen_address, - data: Arc::new(NetConnectionsData { - allow_connecting_to_higher_nodes: net_config.allow_connecting_to_higher_nodes, - executor, - message_processor, - self_key_pair: config.self_key_pair.clone(), - trigger: Mutex::new(trigger), - container, - }), - }) - } - - /// Start listening for connections and schedule connections maintenance. - pub fn start(&self) -> Result<(), Error> { - net_listen(&self.listen_address, self.data.clone())?; - net_schedule_maintain(self.data.clone()); - Ok(()) - } -} - -impl ConnectionManager for NetConnectionsManager { - fn provider(&self) -> Arc { - self.data.container.clone() - } - - fn connect(&self) { - net_connect_disconnected(self.data.clone()); - } -} - -impl ConnectionProvider for RwLock { - fn connected_nodes(&self) -> Result, Error> { - let connections = self.read(); - if connections.is_isolated { - return Err(Error::NodeDisconnected); - } - - Ok(connections.connections.keys().cloned().collect()) - } - - fn disconnected_nodes(&self) -> BTreeSet { - let connections = self.read(); - connections - .nodes - .keys() - .filter(|node_id| !connections.connections.contains_key(node_id)) - .cloned() - .collect() - } - - fn connection(&self, node: &NodeId) -> Option> { - match self.read().connections.get(node).cloned() { - Some(connection) => Some(connection), - None => None, - } - } -} - -impl NetConnection { - /// Create new connection. - pub fn new(executor: Executor, is_inbound: bool, connection: IoConnection) -> NetConnection { - NetConnection { - executor, - node_id: connection.node_id, - node_address: connection.address, - is_inbound: is_inbound, - stream: connection.stream, - key: connection.key, - last_message_time: RwLock::new(Instant::now()), - } - } - - /// Get last message time. - pub fn last_message_time(&self) -> Instant { - *self.last_message_time.read() - } - - /// Update last message time - pub fn set_last_message_time(&self, last_message_time: Instant) { - *self.last_message_time.write() = last_message_time - } - - /// Returns future that sends encrypted message over this connection. - pub fn send_message_future(&self, message: Message) -> WriteMessage { - write_encrypted_message(self.stream.clone(), &self.key, message) - } - - /// Returns future that reads encrypted message from this connection. 
- pub fn read_message_future(&self) -> ReadMessage { - read_encrypted_message(self.stream.clone(), self.key.clone()) - } -} - -impl Connection for NetConnection { - fn is_inbound(&self) -> bool { - self.is_inbound - } - - fn node_id(&self) -> &NodeId { - &self.node_id - } - - fn node_address(&self) -> String { - format!("{}", self.node_address) - } - - fn send_message(&self, message: Message) { - execute( - &self.executor, - self.send_message_future(message).then(|_| Ok(())), - ); - } -} - -impl NetConnectionsData { - /// Executes closure for each active connection. - pub fn active_connections(&self) -> Vec> { - self.container - .read() - .connections - .values() - .cloned() - .collect() - } - - /// Executes closure for each disconnected node. - pub fn disconnected_nodes(&self) -> Vec<(NodeId, SocketAddr)> { - let container = self.container.read(); - container - .nodes - .iter() - .filter(|(node_id, _)| !container.connections.contains_key(node_id)) - .map(|(node_id, addr)| (*node_id, *addr)) - .collect() - } - - /// Try to insert new connection. Returns true if connection has been inserted. - /// Returns false (and ignores connections) if: - /// - we do not expect connection from this node - /// - we are already connected to the node and existing connection 'supersede' - /// new connection by agreement - pub fn insert(&self, connection: Arc) -> bool { - let node = *connection.node_id(); - let mut container = self.container.write(); - if !container.nodes.contains_key(&node) { - trace!(target: "secretstore_net", "{}: ignoring unknown connection from {} at {}", - self.self_key_pair.public(), node, connection.node_address()); - return false; - } - - if container.connections.contains_key(&node) { - // we have already connected to the same node - // the agreement is that node with lower id must establish connection to node with higher id - if (*self.self_key_pair.public() < node && connection.is_inbound()) - || (*self.self_key_pair.public() > node && !connection.is_inbound()) - { - return false; - } - } - - trace!(target: "secretstore_net", - "{}: inserting connection to {} at {}. Connected to {} of {} nodes", - self.self_key_pair.public(), node, connection.node_address(), - container.connections.len() + 1, container.nodes.len()); - container.connections.insert(node, connection); - - true - } - - /// Tries to remove connection. Returns true if connection has been removed. - /// Returns false if we do not know this connection. - pub fn remove(&self, connection: &NetConnection) -> bool { - let node_id = *connection.node_id(); - let is_inbound = connection.is_inbound(); - let mut container = self.container.write(); - if let Entry::Occupied(entry) = container.connections.entry(node_id) { - if entry.get().is_inbound() != is_inbound { - return false; - } - - trace!(target: "secretstore_net", "{}: removing connection to {} at {}", - self.self_key_pair.public(), node_id, entry.get().node_address()); - entry.remove_entry(); - - true - } else { - false - } - } -} - -/// Listen incoming connections. -fn net_listen(listen_address: &SocketAddr, data: Arc) -> Result<(), Error> { - execute( - &data.executor, - net_listen_future(listen_address, data.clone())?, - ); - Ok(()) -} - -/// Listen incoming connections future. -fn net_listen_future( - listen_address: &SocketAddr, - data: Arc, -) -> Result { - Ok(Box::new( - TcpListener::bind(listen_address)? 
- .incoming() - .and_then(move |stream| { - net_accept_connection(data.clone(), stream); - Ok(()) - }) - .for_each(|_| Ok(())) - .then(|_| future::ok(())), - )) -} - -/// Accept incoming connection. -fn net_accept_connection(data: Arc, stream: TcpStream) { - execute( - &data.executor, - net_accept_connection_future(data.clone(), stream), - ); -} - -/// Accept incoming connection future. -fn net_accept_connection_future( - data: Arc, - stream: TcpStream, -) -> BoxedEmptyFuture { - Box::new( - io_accept_connection(stream, data.self_key_pair.clone()) - .then(move |result| net_process_connection_result(data, None, result)) - .then(|_| future::ok(())), - ) -} - -/// Connect to remote node. -fn net_connect(data: Arc, remote: SocketAddr) { - execute(&data.executor, net_connect_future(data.clone(), remote)); -} - -/// Connect to remote node future. -fn net_connect_future(data: Arc, remote: SocketAddr) -> BoxedEmptyFuture { - let disconnected_nodes = data.container.disconnected_nodes(); - Box::new( - io_connect(&remote, data.self_key_pair.clone(), disconnected_nodes) - .then(move |result| net_process_connection_result(data, Some(remote), result)) - .then(|_| future::ok(())), - ) -} - -/// Process network connection result. -fn net_process_connection_result( - data: Arc, - outbound_addr: Option, - result: Result>, TimeoutError>, -) -> IoFuture> { - match result { - Ok(DeadlineStatus::Meet(Ok(connection))) => { - let connection = Arc::new(NetConnection::new( - data.executor.clone(), - outbound_addr.is_none(), - connection, - )); - if data.insert(connection.clone()) { - let maintain_action = data - .trigger - .lock() - .on_connection_established(connection.node_id()); - maintain_connection_trigger(data.clone(), maintain_action); - - return net_process_connection_messages(data, connection); - } - } - Ok(DeadlineStatus::Meet(Err(err))) => { - warn!(target: "secretstore_net", "{}: protocol error '{}' when establishing {} connection{}", - data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" }, - outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default()); - } - Ok(DeadlineStatus::Timeout) => { - warn!(target: "secretstore_net", "{}: timeout when establishing {} connection{}", - data.self_key_pair.public(), if outbound_addr.is_some() { "outbound" } else { "inbound" }, - outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default()); - } - Err(err) => { - warn!(target: "secretstore_net", "{}: network error '{}' when establishing {} connection{}", - data.self_key_pair.public(), err, if outbound_addr.is_some() { "outbound" } else { "inbound" }, - outbound_addr.map(|a| format!(" with {}", a)).unwrap_or_default()); - } - } - - Box::new(future::ok(Ok(()))) -} - -/// Process connection messages. 
-fn net_process_connection_messages( - data: Arc, - connection: Arc, -) -> IoFuture> { - Box::new(connection - .read_message_future() - .then(move |result| - match result { - Ok((_, Ok(message))) => { - connection.set_last_message_time(Instant::now()); - data.message_processor.process_connection_message(connection.clone(), message); - // continue serving connection - let process_messages_future = net_process_connection_messages( - data.clone(), connection).then(|_| Ok(())); - execute(&data.executor, process_messages_future); - Box::new(future::ok(Ok(()))) - }, - Ok((_, Err(err))) => { - warn!(target: "secretstore_net", "{}: protocol error '{}' when reading message from node {}", - data.self_key_pair.public(), err, connection.node_id()); - // continue serving connection - let process_messages_future = net_process_connection_messages( - data.clone(), connection).then(|_| Ok(())); - execute(&data.executor, process_messages_future); - Box::new(future::ok(Err(err))) - }, - Err(err) => { - let node_id = *connection.node_id(); - warn!(target: "secretstore_net", "{}: network error '{}' when reading message from node {}", - data.self_key_pair.public(), err, node_id); - // close connection - if data.remove(&*connection) { - let maintain_action = data.trigger.lock().on_connection_closed(&node_id); - maintain_connection_trigger(data, maintain_action); - } - Box::new(future::err(err)) - }, - } - )) -} - -/// Schedule connections. maintain. -fn net_schedule_maintain(data: Arc) { - let closure_data = data.clone(); - execute( - &data.executor, - Interval::new_interval(Duration::new(MAINTAIN_INTERVAL, 0)) - .and_then(move |_| Ok(net_maintain(closure_data.clone()))) - .for_each(|_| Ok(())) - .then(|_| future::ok(())), - ); -} - -/// Maintain network connections. -fn net_maintain(data: Arc) { - trace!(target: "secretstore_net", "{}: executing maintain procedures", data.self_key_pair.public()); - - update_nodes_set(data.clone()); - data.message_processor.maintain_sessions(); - net_keep_alive(data.clone()); - net_connect_disconnected(data); -} - -/// Send keep alive messages to remote nodes. -fn net_keep_alive(data: Arc) { - let active_connections = data.active_connections(); - for connection in active_connections { - // the last_message_time could change after active_connections() call - // => we always need to call Instant::now() after getting last_message_time - let last_message_time = connection.last_message_time(); - let now = Instant::now(); - let last_message_diff = now - last_message_time; - if last_message_diff > KEEP_ALIVE_DISCONNECT_INTERVAL { - warn!(target: "secretstore_net", "{}: keep alive timeout for node {}", - data.self_key_pair.public(), connection.node_id()); - - let node_id = *connection.node_id(); - if data.remove(&*connection) { - let maintain_action = data.trigger.lock().on_connection_closed(&node_id); - maintain_connection_trigger(data.clone(), maintain_action); - } - data.message_processor.process_disconnect(&node_id); - } else if last_message_diff > KEEP_ALIVE_SEND_INTERVAL { - connection.send_message(Message::Cluster(ClusterMessage::KeepAlive( - message::KeepAlive {}, - ))); - } - } -} - -/// Connect disconnected nodes. -fn net_connect_disconnected(data: Arc) { - let disconnected_nodes = data.disconnected_nodes(); - for (node_id, address) in disconnected_nodes { - if data.allow_connecting_to_higher_nodes || *data.self_key_pair.public() < node_id { - net_connect(data.clone(), address); - } - } -} - -/// Schedule future execution. 
-fn execute<F: Future<Item = (), Error = ()> + Send + 'static>(executor: &Executor, f: F) {
-    if let Err(err) = future::Executor::execute(executor, Box::new(f)) {
-        error!(
-            "Secret store runtime unable to spawn task. Runtime is shutting down. ({:?})",
-            err
-        );
-    }
-}
-
-/// Try to update active nodes set from connection trigger.
-fn update_nodes_set(data: Arc<NetConnectionsData>) {
-    let maintain_action = data.trigger.lock().on_maintain();
-    maintain_connection_trigger(data, maintain_action);
-}
-
-/// Execute maintain procedures of connections trigger.
-fn maintain_connection_trigger(data: Arc<NetConnectionsData>, maintain_action: Option<Maintain>) {
-    if maintain_action == Some(Maintain::SessionAndConnections)
-        || maintain_action == Some(Maintain::Session)
-    {
-        let session_params = data.trigger.lock().maintain_session();
-        if let Some(session_params) = session_params {
-            let session = data
-                .message_processor
-                .start_servers_set_change_session(session_params);
-            match session {
-                Ok(_) => trace!(target: "secretstore_net", "{}: started auto-migrate session",
-                    data.self_key_pair.public()),
-                Err(err) => {
-                    trace!(target: "secretstore_net", "{}: failed to start auto-migrate session with: {}",
-                    data.self_key_pair.public(), err)
-                }
-            }
-        }
-    }
-    if maintain_action == Some(Maintain::SessionAndConnections)
-        || maintain_action == Some(Maintain::Connections)
-    {
-        let mut trigger = data.trigger.lock();
-        let mut data = data.container.write();
-        trigger.maintain_connections(&mut *data);
-    }
-}
-
-/// Compose SocketAddr from configuration's address and port.
-fn make_socket_address(address: &str, port: u16) -> Result<SocketAddr, Error> {
-    let ip_address: IpAddr = address.parse().map_err(|_| Error::InvalidNodeAddress)?;
-    Ok(SocketAddr::new(ip_address, port))
-}
diff --git a/secret-store/src/key_server_cluster/cluster_message_processor.rs b/secret-store/src/key_server_cluster/cluster_message_processor.rs
deleted file mode 100644
index 6536a6e23..000000000
--- a/secret-store/src/key_server_cluster/cluster_message_processor.rs
+++ /dev/null
@@ -1,429 +0,0 @@
-// Copyright 2015-2020 Parity Technologies (UK) Ltd.
-// This file is part of OpenEthereum.
-
-// OpenEthereum is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// OpenEthereum is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with OpenEthereum. If not, see <http://www.gnu.org/licenses/>.
-
-use key_server_cluster::{
-    cluster::{new_servers_set_change_session, ServersSetChangeParams},
-    cluster_connections::{Connection, ConnectionProvider},
-    cluster_sessions::{
-        create_cluster_view, AdminSession, ClusterSession, ClusterSessions,
-        ClusterSessionsContainer,
-    },
-    cluster_sessions_creator::{ClusterSessionCreator, IntoSessionId},
-    connection_trigger::ServersSetChangeSessionCreatorConnector,
-    key_version_negotiation_session::{
-        ContinueAction, IsolatedSessionTransport as KeyVersionNegotiationSessionTransport,
-        SessionImpl as KeyVersionNegotiationSession,
-    },
-    message::{self, ClusterMessage, Message},
-    Error, NodeId, NodeKeyPair,
-};
-use std::sync::Arc;
-
-/// Something that is able to process signals/messages from other nodes.
-pub trait MessageProcessor: Send + Sync { - /// Process disconnect from the remote node. - fn process_disconnect(&self, node: &NodeId); - /// Process single message from the connection. - fn process_connection_message(&self, connection: Arc, message: Message); - - /// Start servers set change session. This is typically used by ConnectionManager when - /// it detects that auto-migration session needs to be started. - fn start_servers_set_change_session( - &self, - params: ServersSetChangeParams, - ) -> Result, Error>; - /// Try to continue session after key version negotiation session is completed. - fn try_continue_session( - &self, - session: Option>>, - ); - /// Maintain active sessions. Typically called by the ConnectionManager at some intervals. - /// Should cancel stalled sessions and send keep-alive messages for sessions that support it. - fn maintain_sessions(&self); -} - -/// Bridge between ConnectionManager and ClusterSessions. -pub struct SessionsMessageProcessor { - self_key_pair: Arc, - servers_set_change_creator_connector: Arc, - sessions: Arc, - connections: Arc, -} - -impl SessionsMessageProcessor { - /// Create new instance of SessionsMessageProcessor. - pub fn new( - self_key_pair: Arc, - servers_set_change_creator_connector: Arc, - sessions: Arc, - connections: Arc, - ) -> Self { - SessionsMessageProcessor { - self_key_pair, - servers_set_change_creator_connector, - sessions, - connections, - } - } - - /// Process single session message from connection. - fn process_message, D>( - &self, - sessions: &ClusterSessionsContainer, - connection: Arc, - mut message: Message, - ) -> Option> - where - Message: IntoSessionId, - { - // get or create new session, if required - let mut sender = *connection.node_id(); - let session = self.prepare_session(sessions, &sender, &message); - // send error if session is not found, or failed to create - let session = match session { - Ok(session) => session, - Err(error) => { - // this is new session => it is not yet in container - warn!(target: "secretstore_net", - "{}: {} session read error '{}' when requested for session from node {}", - self.self_key_pair.public(), S::type_name(), error, sender); - if !message.is_error_message() { - let qed = "session_id only fails for cluster messages; - only session messages are passed to process_message; - qed"; - let session_id = message.into_session_id().expect(qed); - let session_nonce = message.session_nonce().expect(qed); - - connection.send_message(SC::make_error_message( - session_id, - session_nonce, - error, - )); - } - return None; - } - }; - - let session_id = session.id(); - let mut is_queued_message = false; - loop { - let message_result = session.on_message(&sender, &message); - match message_result { - Ok(_) => { - // if session is completed => stop - if session.is_finished() { - info!(target: "secretstore_net", - "{}: {} session completed", self.self_key_pair.public(), S::type_name()); - sessions.remove(&session_id); - return Some(session); - } - - // try to dequeue message - match sessions.dequeue_message(&session_id) { - Some((msg_sender, msg)) => { - is_queued_message = true; - sender = msg_sender; - message = msg; - } - None => return Some(session), - } - } - Err(Error::TooEarlyForRequest) => { - sessions.enqueue_message(&session_id, sender, message, is_queued_message); - return Some(session); - } - Err(err) => { - warn!( - target: "secretstore_net", - "{}: {} session error '{}' when processing message {} from node {}", - self.self_key_pair.public(), - S::type_name(), - err, - 
message, - sender); - session.on_session_error(self.self_key_pair.public(), err); - sessions.remove(&session_id); - return Some(session); - } - } - } - } - - /// Get or insert new session. - fn prepare_session, D>( - &self, - sessions: &ClusterSessionsContainer, - sender: &NodeId, - message: &Message, - ) -> Result, Error> - where - Message: IntoSessionId, - { - fn requires_all_connections(message: &Message) -> bool { - match *message { - Message::Generation(_) => true, - Message::ShareAdd(_) => true, - Message::ServersSetChange(_) => true, - _ => false, - } - } - - // get or create new session, if required - let session_id = message.into_session_id().expect( - "into_session_id fails for cluster messages only; - only session messages are passed to prepare_session; - qed", - ); - let is_initialization_message = message.is_initialization_message(); - let is_delegation_message = message.is_delegation_message(); - match is_initialization_message || is_delegation_message { - false => sessions - .get(&session_id, true) - .ok_or(Error::NoActiveSessionWithId), - true => { - let creation_data = SC::creation_data_from_message(&message)?; - let master = if is_initialization_message { - *sender - } else { - *self.self_key_pair.public() - }; - let cluster = create_cluster_view( - self.self_key_pair.clone(), - self.connections.clone(), - requires_all_connections(&message), - )?; - - let nonce = Some(message.session_nonce().ok_or(Error::InvalidMessage)?); - let exclusive = message.is_exclusive_session_message(); - sessions.insert(cluster, master, session_id, nonce, exclusive, creation_data) - } - } - } - - /// Process single cluster message from the connection. - fn process_cluster_message(&self, connection: Arc, message: ClusterMessage) { - match message { - ClusterMessage::KeepAlive(_) => { - let msg = Message::Cluster(ClusterMessage::KeepAliveResponse( - message::KeepAliveResponse { session_id: None }, - )); - connection.send_message(msg) - } - ClusterMessage::KeepAliveResponse(msg) => { - if let Some(session_id) = msg.session_id { - self.sessions - .on_session_keep_alive(connection.node_id(), session_id.into()); - } - } - _ => { - warn!(target: "secretstore_net", "{}: received unexpected message {} from node {} at {}", - self.self_key_pair.public(), message, connection.node_id(), connection.node_address()) - } - } - } -} - -impl MessageProcessor for SessionsMessageProcessor { - fn process_disconnect(&self, node: &NodeId) { - self.sessions.on_connection_timeout(node); - } - - fn process_connection_message(&self, connection: Arc, message: Message) { - trace!(target: "secretstore_net", "{}: received message {} from {}", - self.self_key_pair.public(), message, connection.node_id()); - - // error is ignored as we only process errors on session level - match message { - Message::Generation(message) => self - .process_message( - &self.sessions.generation_sessions, - connection, - Message::Generation(message), - ) - .map(|_| ()) - .unwrap_or_default(), - Message::Encryption(message) => self - .process_message( - &self.sessions.encryption_sessions, - connection, - Message::Encryption(message), - ) - .map(|_| ()) - .unwrap_or_default(), - Message::Decryption(message) => self - .process_message( - &self.sessions.decryption_sessions, - connection, - Message::Decryption(message), - ) - .map(|_| ()) - .unwrap_or_default(), - Message::SchnorrSigning(message) => self - .process_message( - &self.sessions.schnorr_signing_sessions, - connection, - Message::SchnorrSigning(message), - ) - .map(|_| ()) - 
.unwrap_or_default(), - Message::EcdsaSigning(message) => self - .process_message( - &self.sessions.ecdsa_signing_sessions, - connection, - Message::EcdsaSigning(message), - ) - .map(|_| ()) - .unwrap_or_default(), - Message::ServersSetChange(message) => { - let message = Message::ServersSetChange(message); - let is_initialization_message = message.is_initialization_message(); - let session = - self.process_message(&self.sessions.admin_sessions, connection, message); - if is_initialization_message { - if let Some(session) = session { - self.servers_set_change_creator_connector - .set_key_servers_set_change_session(session.clone()); - } - } - } - Message::KeyVersionNegotiation(message) => { - let session = self.process_message( - &self.sessions.negotiation_sessions, - connection, - Message::KeyVersionNegotiation(message), - ); - self.try_continue_session(session); - } - Message::ShareAdd(message) => self - .process_message( - &self.sessions.admin_sessions, - connection, - Message::ShareAdd(message), - ) - .map(|_| ()) - .unwrap_or_default(), - Message::Cluster(message) => self.process_cluster_message(connection, message), - } - } - - fn try_continue_session( - &self, - session: Option>>, - ) { - if let Some(session) = session { - let meta = session.meta(); - let is_master_node = meta.self_node_id == meta.master_node_id; - if is_master_node && session.is_finished() { - self.sessions.negotiation_sessions.remove(&session.id()); - match session.wait() { - Ok(Some((version, master))) => match session.take_continue_action() { - Some(ContinueAction::Decrypt( - session, - origin, - is_shadow_decryption, - is_broadcast_decryption, - )) => { - let initialization_error = if self.self_key_pair.public() == &master { - session.initialize( - origin, - version, - is_shadow_decryption, - is_broadcast_decryption, - ) - } else { - session.delegate( - master, - origin, - version, - is_shadow_decryption, - is_broadcast_decryption, - ) - }; - - if let Err(error) = initialization_error { - session.on_session_error(&meta.self_node_id, error); - self.sessions.decryption_sessions.remove(&session.id()); - } - } - Some(ContinueAction::SchnorrSign(session, message_hash)) => { - let initialization_error = if self.self_key_pair.public() == &master { - session.initialize(version, message_hash) - } else { - session.delegate(master, version, message_hash) - }; - - if let Err(error) = initialization_error { - session.on_session_error(&meta.self_node_id, error); - self.sessions.schnorr_signing_sessions.remove(&session.id()); - } - } - Some(ContinueAction::EcdsaSign(session, message_hash)) => { - let initialization_error = if self.self_key_pair.public() == &master { - session.initialize(version, message_hash) - } else { - session.delegate(master, version, message_hash) - }; - - if let Err(error) = initialization_error { - session.on_session_error(&meta.self_node_id, error); - self.sessions.ecdsa_signing_sessions.remove(&session.id()); - } - } - None => (), - }, - Ok(None) => unreachable!( - "is_master_node; session is finished; - negotiation version always finished with result on master; - qed" - ), - Err(error) => match session.take_continue_action() { - Some(ContinueAction::Decrypt(session, _, _, _)) => { - session.on_session_error(&meta.self_node_id, error); - self.sessions.decryption_sessions.remove(&session.id()); - } - Some(ContinueAction::SchnorrSign(session, _)) => { - session.on_session_error(&meta.self_node_id, error); - self.sessions.schnorr_signing_sessions.remove(&session.id()); - } - 
Some(ContinueAction::EcdsaSign(session, _)) => { - session.on_session_error(&meta.self_node_id, error); - self.sessions.ecdsa_signing_sessions.remove(&session.id()); - } - None => (), - }, - } - } - } - } - - fn maintain_sessions(&self) { - self.sessions.stop_stalled_sessions(); - self.sessions.sessions_keep_alive(); - } - - fn start_servers_set_change_session( - &self, - params: ServersSetChangeParams, - ) -> Result, Error> { - new_servers_set_change_session( - self.self_key_pair.clone(), - &*self.sessions, - self.connections.clone(), - self.servers_set_change_creator_connector.clone(), - params, - ) - } -} diff --git a/secret-store/src/key_server_cluster/cluster_sessions.rs b/secret-store/src/key_server_cluster/cluster_sessions.rs deleted file mode 100644 index a9a97b805..000000000 --- a/secret-store/src/key_server_cluster/cluster_sessions.rs +++ /dev/null @@ -1,937 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. - -// OpenEthereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// OpenEthereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with OpenEthereum. If not, see . - -use ethereum_types::H256; -use ethkey::Secret; -use key_server_cluster::{ - cluster::{Cluster, ClusterConfiguration, ClusterView}, - cluster_connections::ConnectionProvider, - connection_trigger::ServersSetChangeSessionCreatorConnector, - decryption_session::SessionImpl as DecryptionSessionImpl, - encryption_session::SessionImpl as EncryptionSessionImpl, - generation_session::SessionImpl as GenerationSessionImpl, - key_version_negotiation_session::{ - IsolatedSessionTransport as VersionNegotiationTransport, - SessionImpl as KeyVersionNegotiationSessionImpl, - }, - message::{self, Message}, - servers_set_change_session::SessionImpl as ServersSetChangeSessionImpl, - share_add_session::{ - IsolatedSessionTransport as ShareAddTransport, SessionImpl as ShareAddSessionImpl, - }, - signing_session_ecdsa::SessionImpl as EcdsaSigningSessionImpl, - signing_session_schnorr::SessionImpl as SchnorrSigningSessionImpl, - Error, NodeId, NodeKeyPair, Requester, SessionId, -}; -use parking_lot::{Condvar, Mutex, RwLock}; -use std::{ - collections::{BTreeMap, BTreeSet, VecDeque}, - sync::{atomic::AtomicBool, Arc, Weak}, - time::{Duration, Instant}, -}; - -use key_server_cluster::cluster_sessions_creator::{ - AdminSessionCreator, ClusterSessionCreator, DecryptionSessionCreator, - EcdsaSigningSessionCreator, EncryptionSessionCreator, GenerationSessionCreator, - KeyVersionNegotiationSessionCreator, SchnorrSigningSessionCreator, SessionCreatorCore, -}; - -/// When there are no session-related messages for SESSION_TIMEOUT_INTERVAL seconds, -/// we must treat this session as stalled && finish it with an error. -/// This timeout is for cases when node is responding to KeepAlive messages, but intentionally ignores -/// session messages. -const SESSION_TIMEOUT_INTERVAL: Duration = Duration::from_secs(60); -/// Interval to send session-level KeepAlive-messages. 
-const SESSION_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(30);
-
-lazy_static! {
-    /// Servers set change session id (there could be at most 1 session => hardcoded id).
-    pub static ref SERVERS_SET_CHANGE_SESSION_ID: SessionId = "10b7af423bb551d5dc8645db754163a2145d37d78d468fa7330435ed77064c1c"
-        .parse()
-        .expect("hardcoded id should parse without errors; qed");
-}
-
-/// Session id with sub session.
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct SessionIdWithSubSession {
-    /// Key id.
-    pub id: SessionId,
-    /// Sub session id.
-    pub access_key: Secret,
-}
-
-/// Generic cluster session.
-pub trait ClusterSession {
-    /// Session identifier type.
-    type Id: ::std::fmt::Debug + Ord + Clone;
-
-    /// Session type name.
-    fn type_name() -> &'static str;
-    /// Get session id.
-    fn id(&self) -> Self::Id;
-    /// If session is finished (either with success or not).
-    fn is_finished(&self) -> bool;
-    /// When it takes too much time to complete session.
-    fn on_session_timeout(&self);
-    /// When it takes too much time to receive response from the node.
-    fn on_node_timeout(&self, node_id: &NodeId);
-    /// Process error that has occurred during session + propagate this error to required nodes.
-    fn on_session_error(&self, sender: &NodeId, error: Error);
-    /// Process session message.
-    fn on_message(&self, sender: &NodeId, message: &Message) -> Result<(), Error>;
-
-    /// 'Wait for session completion' helper.
-    fn wait_session<T, U, F: Fn(&U) -> Option<Result<T, Error>>>(
-        completion_event: &Condvar,
-        session_data: &Mutex<U>,
-        timeout: Option<Duration>,
-        result_reader: F,
-    ) -> Option<Result<T, Error>> {
-        let mut locked_data = session_data.lock();
-        match result_reader(&locked_data) {
-            Some(result) => Some(result),
-            None => {
-                match timeout {
-                    None => completion_event.wait(&mut locked_data),
-                    Some(timeout) => {
-                        completion_event.wait_for(&mut locked_data, timeout);
-                    }
-                }
-
-                result_reader(&locked_data)
-            }
-        }
-    }
-}
-
-/// Administrative session.
-pub enum AdminSession {
-    /// Share add session.
-    ShareAdd(ShareAddSessionImpl<ShareAddTransport>),
-    /// Servers set change session.
-    ServersSetChange(ServersSetChangeSessionImpl),
-}
-
-/// Administrative session creation data.
-pub enum AdminSessionCreationData {
-    /// Share add session (key id).
-    ShareAdd(H256),
-    /// Servers set change session (block id, new_server_set).
-    ServersSetChange(Option<H256>, BTreeSet<NodeId>),
-}
-
-/// Active sessions on this cluster.
-pub struct ClusterSessions {
-    /// Key generation sessions.
-    pub generation_sessions:
-        ClusterSessionsContainer<GenerationSessionImpl, GenerationSessionCreator, ()>,
-    /// Encryption sessions.
-    pub encryption_sessions:
-        ClusterSessionsContainer<EncryptionSessionImpl, EncryptionSessionCreator, ()>,
-    /// Decryption sessions.
-    pub decryption_sessions:
-        ClusterSessionsContainer<DecryptionSessionImpl, DecryptionSessionCreator, Requester>,
-    /// Schnorr signing sessions.
-    pub schnorr_signing_sessions: ClusterSessionsContainer<
-        SchnorrSigningSessionImpl,
-        SchnorrSigningSessionCreator,
-        Requester,
-    >,
-    /// ECDSA signing sessions.
-    pub ecdsa_signing_sessions:
-        ClusterSessionsContainer<EcdsaSigningSessionImpl, EcdsaSigningSessionCreator, Requester>,
-    /// Key version negotiation sessions.
-    pub negotiation_sessions: ClusterSessionsContainer<
-        KeyVersionNegotiationSessionImpl<VersionNegotiationTransport>,
-        KeyVersionNegotiationSessionCreator,
-        (),
-    >,
-    /// Administrative sessions.
-    pub admin_sessions:
-        ClusterSessionsContainer<AdminSession, AdminSessionCreator, AdminSessionCreationData>,
-    /// Self node id.
-    self_node_id: NodeId,
-    /// Creator core.
-    creator_core: Arc<SessionCreatorCore>,
-}
-
-/// Active sessions container listener.
-pub trait ClusterSessionsListener<S: ClusterSession>: Send + Sync {
-    /// When new session is inserted to the container.
-    fn on_session_inserted(&self, _session: Arc<S>) {}
-    /// When session is removed from the container.
- fn on_session_removed(&self, _session: Arc) {} -} - -/// Active sessions container. -pub struct ClusterSessionsContainer, D> { - /// Sessions creator. - pub creator: SC, - /// Active sessions. - sessions: RwLock>>, - /// Listeners. Lock order: sessions -> listeners. - listeners: Mutex>>>, - /// Sessions container state. - container_state: Arc>, - /// Do not actually remove sessions. - preserve_sessions: bool, - /// Phantom data. - _pd: ::std::marker::PhantomData, -} - -/// Session and its message queue. -pub struct QueuedSession { - /// Session master. - pub master: NodeId, - /// Cluster view. - pub cluster_view: Arc, - /// Last keep alive time. - pub last_keep_alive_time: Instant, - /// Last received message time. - pub last_message_time: Instant, - /// Generation session. - pub session: Arc, - /// Messages queue. - pub queue: VecDeque<(NodeId, Message)>, -} - -/// Cluster sessions container state. -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum ClusterSessionsContainerState { - /// There's no active sessions => any session can be started. - Idle, - /// There are active sessions => exclusive session can't be started right now. - Active(usize), - /// Exclusive session is active => can't start any other sessions. - Exclusive, -} - -impl ClusterSessions { - /// Create new cluster sessions container. - pub fn new( - config: &ClusterConfiguration, - servers_set_change_session_creator_connector: Arc< - dyn ServersSetChangeSessionCreatorConnector, - >, - ) -> Self { - let container_state = Arc::new(Mutex::new(ClusterSessionsContainerState::Idle)); - let creator_core = Arc::new(SessionCreatorCore::new(config)); - ClusterSessions { - self_node_id: config.self_key_pair.public().clone(), - generation_sessions: ClusterSessionsContainer::new( - GenerationSessionCreator { - core: creator_core.clone(), - make_faulty_generation_sessions: AtomicBool::new(false), - }, - container_state.clone(), - ), - encryption_sessions: ClusterSessionsContainer::new( - EncryptionSessionCreator { - core: creator_core.clone(), - }, - container_state.clone(), - ), - decryption_sessions: ClusterSessionsContainer::new( - DecryptionSessionCreator { - core: creator_core.clone(), - }, - container_state.clone(), - ), - schnorr_signing_sessions: ClusterSessionsContainer::new( - SchnorrSigningSessionCreator { - core: creator_core.clone(), - }, - container_state.clone(), - ), - ecdsa_signing_sessions: ClusterSessionsContainer::new( - EcdsaSigningSessionCreator { - core: creator_core.clone(), - }, - container_state.clone(), - ), - negotiation_sessions: ClusterSessionsContainer::new( - KeyVersionNegotiationSessionCreator { - core: creator_core.clone(), - }, - container_state.clone(), - ), - admin_sessions: ClusterSessionsContainer::new( - AdminSessionCreator { - core: creator_core.clone(), - servers_set_change_session_creator_connector: - servers_set_change_session_creator_connector, - admin_public: config.admin_public.clone(), - }, - container_state, - ), - creator_core: creator_core, - } - } - - #[cfg(test)] - pub fn make_faulty_generation_sessions(&self) { - self.generation_sessions - .creator - .make_faulty_generation_sessions(); - } - - #[cfg(test)] - pub fn preserve_sessions(&mut self) { - self.generation_sessions.preserve_sessions = true; - self.encryption_sessions.preserve_sessions = true; - self.decryption_sessions.preserve_sessions = true; - self.schnorr_signing_sessions.preserve_sessions = true; - self.ecdsa_signing_sessions.preserve_sessions = true; - self.negotiation_sessions.preserve_sessions = true; - 
self.admin_sessions.preserve_sessions = true; - } - - /// Send session-level keep-alive messages. - pub fn sessions_keep_alive(&self) { - self.admin_sessions - .send_keep_alive(&*SERVERS_SET_CHANGE_SESSION_ID, &self.self_node_id); - } - - /// When session-level keep-alive response is received. - pub fn on_session_keep_alive(&self, sender: &NodeId, session_id: SessionId) { - if session_id == *SERVERS_SET_CHANGE_SESSION_ID { - self.admin_sessions.on_keep_alive(&session_id, sender); - } - } - - /// Stop sessions that are stalling. - pub fn stop_stalled_sessions(&self) { - self.generation_sessions.stop_stalled_sessions(); - self.encryption_sessions.stop_stalled_sessions(); - self.decryption_sessions.stop_stalled_sessions(); - self.schnorr_signing_sessions.stop_stalled_sessions(); - self.ecdsa_signing_sessions.stop_stalled_sessions(); - self.negotiation_sessions.stop_stalled_sessions(); - self.admin_sessions.stop_stalled_sessions(); - } - - /// When connection to node is lost. - pub fn on_connection_timeout(&self, node_id: &NodeId) { - self.generation_sessions.on_connection_timeout(node_id); - self.encryption_sessions.on_connection_timeout(node_id); - self.decryption_sessions.on_connection_timeout(node_id); - self.schnorr_signing_sessions.on_connection_timeout(node_id); - self.ecdsa_signing_sessions.on_connection_timeout(node_id); - self.negotiation_sessions.on_connection_timeout(node_id); - self.admin_sessions.on_connection_timeout(node_id); - self.creator_core.on_connection_timeout(node_id); - } -} - -impl ClusterSessionsContainer -where - S: ClusterSession, - SC: ClusterSessionCreator, -{ - pub fn new(creator: SC, container_state: Arc>) -> Self { - ClusterSessionsContainer { - creator: creator, - sessions: RwLock::new(BTreeMap::new()), - listeners: Mutex::new(Vec::new()), - container_state: container_state, - preserve_sessions: false, - _pd: Default::default(), - } - } - - pub fn add_listener(&self, listener: Arc>) { - self.listeners.lock().push(Arc::downgrade(&listener)); - } - - #[cfg(test)] - pub fn is_empty(&self) -> bool { - self.sessions.read().is_empty() - } - - pub fn get(&self, session_id: &S::Id, update_last_message_time: bool) -> Option> { - let mut sessions = self.sessions.write(); - sessions.get_mut(session_id).map(|s| { - if update_last_message_time { - s.last_message_time = Instant::now(); - } - s.session.clone() - }) - } - - #[cfg(test)] - pub fn first(&self) -> Option> { - self.sessions - .read() - .values() - .nth(0) - .map(|s| s.session.clone()) - } - - pub fn insert( - &self, - cluster: Arc, - master: NodeId, - session_id: S::Id, - session_nonce: Option, - is_exclusive_session: bool, - creation_data: Option, - ) -> Result, Error> { - let mut sessions = self.sessions.write(); - if sessions.contains_key(&session_id) { - return Err(Error::DuplicateSessionId); - } - - // create cluster - // let cluster = create_cluster_view(data, requires_all_connections)?; - // create session - let session = self.creator.create( - cluster.clone(), - master.clone(), - session_nonce, - session_id.clone(), - creation_data, - )?; - // check if session can be started - self.container_state - .lock() - .on_session_starting(is_exclusive_session)?; - - // insert session - let queued_session = QueuedSession { - master: master, - cluster_view: cluster, - last_keep_alive_time: Instant::now(), - last_message_time: Instant::now(), - session: session.clone(), - queue: VecDeque::new(), - }; - sessions.insert(session_id, queued_session); - self.notify_listeners(|l| l.on_session_inserted(session.clone())); - 
- Ok(session) - } - - pub fn remove(&self, session_id: &S::Id) { - self.do_remove(session_id, &mut *self.sessions.write()); - } - - pub fn enqueue_message( - &self, - session_id: &S::Id, - sender: NodeId, - message: Message, - is_queued_message: bool, - ) { - self.sessions.write().get_mut(session_id).map(|session| { - if is_queued_message { - session.queue.push_front((sender, message)) - } else { - session.queue.push_back((sender, message)) - } - }); - } - - pub fn dequeue_message(&self, session_id: &S::Id) -> Option<(NodeId, Message)> { - self.sessions - .write() - .get_mut(session_id) - .and_then(|session| session.queue.pop_front()) - } - - pub fn stop_stalled_sessions(&self) { - let mut sessions = self.sessions.write(); - for sid in sessions.keys().cloned().collect::>() { - let remove_session = { - let session = sessions - .get(&sid) - .expect("enumerating only existing sessions; qed"); - if Instant::now() - session.last_message_time > SESSION_TIMEOUT_INTERVAL { - session.session.on_session_timeout(); - session.session.is_finished() - } else { - false - } - }; - - if remove_session { - self.do_remove(&sid, &mut *sessions); - } - } - } - - pub fn on_connection_timeout(&self, node_id: &NodeId) { - let mut sessions = self.sessions.write(); - for sid in sessions.keys().cloned().collect::>() { - let remove_session = { - let session = sessions - .get(&sid) - .expect("enumerating only existing sessions; qed"); - session.session.on_node_timeout(node_id); - session.session.is_finished() - }; - - if remove_session { - self.do_remove(&sid, &mut *sessions); - } - } - } - - fn do_remove(&self, session_id: &S::Id, sessions: &mut BTreeMap>) { - if !self.preserve_sessions { - if let Some(session) = sessions.remove(session_id) { - self.container_state.lock().on_session_completed(); - self.notify_listeners(|l| l.on_session_removed(session.session.clone())); - } - } - } - - fn notify_listeners) -> ()>(&self, callback: F) { - let mut listeners = self.listeners.lock(); - let mut listener_index = 0; - while listener_index < listeners.len() { - match listeners[listener_index].upgrade() { - Some(listener) => { - callback(&*listener); - listener_index += 1; - } - None => { - listeners.swap_remove(listener_index); - } - } - } - } -} - -impl ClusterSessionsContainer -where - S: ClusterSession, - SC: ClusterSessionCreator, - SessionId: From, -{ - pub fn send_keep_alive(&self, session_id: &S::Id, self_node_id: &NodeId) { - if let Some(session) = self.sessions.write().get_mut(session_id) { - let now = Instant::now(); - if self_node_id == &session.master - && now - session.last_keep_alive_time > SESSION_KEEP_ALIVE_INTERVAL - { - session.last_keep_alive_time = now; - // since we send KeepAlive message to prevent nodes from disconnecting - // && worst thing that can happen if node is disconnected is that session is failed - // => ignore error here, because probably this node is not need for the rest of the session at all - let _ = session.cluster_view.broadcast(Message::Cluster( - message::ClusterMessage::KeepAliveResponse(message::KeepAliveResponse { - session_id: Some(session_id.clone().into()), - }), - )); - } - } - } - - pub fn on_keep_alive(&self, session_id: &S::Id, sender: &NodeId) { - if let Some(session) = self.sessions.write().get_mut(session_id) { - let now = Instant::now(); - // we only accept keep alive from master node of ServersSetChange session - if sender == &session.master { - session.last_keep_alive_time = now; - } - } - } -} - -impl ClusterSessionsContainerState { - /// When session is starting. 
- pub fn on_session_starting(&mut self, is_exclusive_session: bool) -> Result<(), Error> { - match *self { - ClusterSessionsContainerState::Idle if is_exclusive_session => { - *self = ClusterSessionsContainerState::Exclusive; - } - ClusterSessionsContainerState::Idle => { - *self = ClusterSessionsContainerState::Active(1); - } - ClusterSessionsContainerState::Active(_) if is_exclusive_session => { - return Err(Error::HasActiveSessions) - } - ClusterSessionsContainerState::Active(sessions_count) => { - *self = ClusterSessionsContainerState::Active(sessions_count + 1); - } - ClusterSessionsContainerState::Exclusive => return Err(Error::ExclusiveSessionActive), - } - Ok(()) - } - - /// When session is completed. - pub fn on_session_completed(&mut self) { - match *self { - ClusterSessionsContainerState::Idle => - unreachable!("idle means that there are no active sessions; on_session_completed is only called once after active session is completed; qed"), - ClusterSessionsContainerState::Active(sessions_count) if sessions_count == 1 => { - *self = ClusterSessionsContainerState::Idle; - }, - ClusterSessionsContainerState::Active(sessions_count) => { - *self = ClusterSessionsContainerState::Active(sessions_count - 1); - } - ClusterSessionsContainerState::Exclusive => { - *self = ClusterSessionsContainerState::Idle; - }, - } - } -} - -impl SessionIdWithSubSession { - /// Create new decryption session Id. - pub fn new(session_id: SessionId, sub_session_id: Secret) -> Self { - SessionIdWithSubSession { - id: session_id, - access_key: sub_session_id, - } - } -} - -impl PartialOrd for SessionIdWithSubSession { - fn partial_cmp(&self, other: &Self) -> Option<::std::cmp::Ordering> { - Some(self.cmp(other)) - } -} - -impl Ord for SessionIdWithSubSessi