Expanse compatibility (#2369)

* Add support for Expanse.

* Fix build.

* Refactor to be able to alter the eth subprotocol name

* Fix JSON.

* Support exp hardfork.

* Fix exp json again.

* Fixed test

* Fix tests.
Gav Wood 2016-09-28 14:21:59 +02:00 committed by GitHub
parent fb92a98451
commit 15a14a5f49
15 changed files with 190 additions and 45 deletions

View File

@ -0,0 +1,69 @@
{
"name": "Expanse",
"forkName": "expanse",
"engine": {
"Ethash": {
"params": {
"gasLimitBoundDivisor": "0x0400",
"minimumDifficulty": "0x020000",
"difficultyBoundDivisor": "0x0800",
"difficultyIncrementDivisor": "60",
"durationLimit": "0x3C",
"blockReward": "0x6f05b59d3b200000",
"registrar" : "0x6c221ca53705f3497ec90ca7b84c59ae7382fc21",
"frontierCompatibilityModeLimit": "0x30d40",
"difficultyHardforkTransition": "0x59d9",
"difficultyHardforkBoundDivisor": "0x0200",
"bombDefuseTransition": "0x30d40"
}
}
},
"params": {
"accountStartNonce": "0x00",
"maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388",
"networkID": "0x1",
"subprotocolName": "exp"
},
"genesis": {
"seal": {
"ethereum": {
"nonce": "0x214652414e4b4f21",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
},
"difficulty": "0x40000000",
"author": "0x93decab0cd745598860f782ac1e8f046cb99e898",
"timestamp": "0x00",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData": "0x4672616e6b6f497346726565646f6d",
"gasLimit": "0x1388"
},
"nodes": [
"enode://7f335a047654f3e70d6f91312a7cf89c39704011f1a584e2698250db3d63817e74b88e26b7854111e16b2c9d0c7173c05419aeee2d0321850227b126d8b1be3f@46.101.156.249:42786",
"enode://df872f81e25f72356152b44cab662caf1f2e57c3a156ecd20e9ac9246272af68a2031b4239a0bc831f2c6ab34733a041464d46b3ea36dce88d6c11714446e06b@178.62.208.109:42786",
"enode://96d3919b903e7f5ad59ac2f73c43be9172d9d27e2771355db03fd194732b795829a31fe2ea6de109d0804786c39a807e155f065b4b94c6fce167becd0ac02383@45.55.22.34:42786",
"enode://5f6c625bf287e3c08aad568de42d868781e961cbda805c8397cfb7be97e229419bef9a5a25a75f97632787106bba8a7caf9060fab3887ad2cfbeb182ab0f433f@46.101.182.53:42786",
"enode://d33a8d4c2c38a08971ed975b750f21d54c927c0bf7415931e214465a8d01651ecffe4401e1db913f398383381413c78105656d665d83f385244ab302d6138414@128.199.183.48:42786",
"enode://df872f81e25f72356152b44cab662caf1f2e57c3a156ecd20e9ac9246272af68a2031b4239a0bc831f2c6ab34733a041464d46b3ea36dce88d6c11714446e06b@178.62.208.109:42786",
"enode://f6f0d6b9b7d02ec9e8e4a16e38675f3621ea5e69860c739a65c1597ca28aefb3cec7a6d84e471ac927d42a1b64c1cbdefad75e7ce8872d57548ddcece20afdd1@159.203.64.95:42786"
],
"accounts": {
"0000000000000000000000000000000000000001": { "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },
"0000000000000000000000000000000000000002": { "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } },
"0000000000000000000000000000000000000003": { "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } },
"0000000000000000000000000000000000000004": { "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } },
"bb94f0ceb32257275b2a7a9c094c13e469b4563e": {
"balance": "10000000000000000000000000"
},
"15656715068ab0dbdf0ab00748a8a19e40f28192": {
"balance": "1000000000000000000000000"
},
"c075fa11f85bda3aaba67106226aaf086ac16f4e": {
"balance": "100000000000000000000000"
},
"93decab0cd745598860f782ac1e8f046cb99e898": {
"balance": "10000000000000000000000"
}
}
}
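
For reference, the notable hex values above decode to: difficultyHardforkTransition 0x59d9 = block 23001, frontierCompatibilityModeLimit and bombDefuseTransition 0x30d40 = block 200000, durationLimit 0x3C = 60 seconds, and blockReward 0x6f05b59d3b200000 = 8 EXP. A minimal sketch, not part of this diff, of loading the spec through the helpers added further down in this commit (ethereum::new_expanse, Spec::subprotocol_name, Spec::network_id), assuming a dependency on the ethcore crate:

// Minimal sketch, not part of this commit: load the bundled Expanse spec and
// inspect the parameters that drive the new subprotocol handling below.
extern crate ethcore;

fn main() {
    let spec = ethcore::ethereum::new_expanse();
    assert_eq!(spec.subprotocol_name(), "exp");
    println!("network id: {}", spec.network_id());
}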

View File

@ -157,6 +157,9 @@
"stateRoot": "0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544"
},
"nodes": [
"enode://cd6611461840543d5b9c56fbf088736154c699c43973b3a1a32390cf27106f87e58a818a606ccb05f3866de95a4fe860786fea71bf891ea95f234480d3022aa3@136.243.154.245:30303",
"enode://bcc7240543fe2cf86f5e9093d05753dd83343f8fda7bf0e833f65985c73afccf8f981301e13ef49c4804491eab043647374df1c4adf85766af88a624ecc3330e@136.243.154.244:30303",
"enode://ed4227681ca8c70beb2277b9e870353a9693f12e7c548c35df6bca6a956934d6f659999c2decb31f75ce217822eefca149ace914f1cbe461ed5a2ebaf9501455@88.212.206.70:30303",
"enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303",
"enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303",
"enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303",

View File

@ -32,6 +32,8 @@ pub struct EthashParams {
pub minimum_difficulty: U256,
/// Difficulty bound divisor.
pub difficulty_bound_divisor: U256,
/// Difficulty increment divisor.
pub difficulty_increment_divisor: u64,
/// Block duration.
pub duration_limit: u64,
/// Block reward.
@ -46,6 +48,12 @@ pub struct EthashParams {
pub dao_hardfork_beneficiary: Address,
/// DAO hard-fork DAO accounts list (L)
pub dao_hardfork_accounts: Vec<Address>,
/// Transition block for a change of difficulty params (currently just bound_divisor).
pub difficulty_hardfork_transition: u64,
/// Difficulty param after the difficulty transition.
pub difficulty_hardfork_bound_divisor: U256,
/// Block on which there is no additional difficulty from the exponential bomb.
pub bomb_defuse_transition: u64,
}
impl From<ethjson::spec::EthashParams> for EthashParams {
@ -54,6 +62,7 @@ impl From<ethjson::spec::EthashParams> for EthashParams {
gas_limit_bound_divisor: p.gas_limit_bound_divisor.into(),
minimum_difficulty: p.minimum_difficulty.into(),
difficulty_bound_divisor: p.difficulty_bound_divisor.into(),
difficulty_increment_divisor: p.difficulty_increment_divisor.map_or(10, Into::into),
duration_limit: p.duration_limit.into(),
block_reward: p.block_reward.into(),
registrar: p.registrar.map_or_else(Address::new, Into::into),
@ -61,6 +70,9 @@ impl From<ethjson::spec::EthashParams> for EthashParams {
dao_hardfork_transition: p.dao_hardfork_transition.map_or(0x7fffffffffffffff, Into::into),
dao_hardfork_beneficiary: p.dao_hardfork_beneficiary.map_or_else(Address::new, Into::into),
dao_hardfork_accounts: p.dao_hardfork_accounts.unwrap_or_else(Vec::new).into_iter().map(Into::into).collect(),
difficulty_hardfork_transition: p.difficulty_hardfork_transition.map_or(0x7fffffffffffffff, Into::into),
difficulty_hardfork_bound_divisor: p.difficulty_hardfork_bound_divisor.map_or(p.difficulty_bound_divisor.into(), Into::into),
bomb_defuse_transition: p.bomb_defuse_transition.map_or(0x7fffffffffffffff, Into::into),
}
}
}
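
The map_or fallbacks above use 0x7fffffffffffffff as an effectively unreachable block number, so a transition omitted from the JSON never activates, and an omitted difficultyHardforkBoundDivisor falls back to the normal bound divisor. A tiny sketch of the pattern, with an Option standing in for the deserialized field:

// Stand-in for one of the Option<Uint> spec fields above (illustrative only).
let bomb_defuse_transition: Option<u64> = None;
// Omitted in the JSON -> defaults to a block number that can never be reached.
let transition = bomb_defuse_transition.map_or(0x7fffffffffffffffu64, |n| n);
assert_eq!(transition, 0x7fffffffffffffff);
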
@ -267,7 +279,11 @@ impl Ethash {
}
let min_difficulty = self.ethash_params.minimum_difficulty;
let difficulty_bound_divisor = self.ethash_params.difficulty_bound_divisor;
let difficulty_hardfork = header.number() >= self.ethash_params.difficulty_hardfork_transition;
let difficulty_bound_divisor = match difficulty_hardfork {
true => self.ethash_params.difficulty_hardfork_bound_divisor,
false => self.ethash_params.difficulty_bound_divisor,
};
let duration_limit = self.ethash_params.duration_limit;
let frontier_limit = self.ethash_params.frontier_compatibility_mode_limit;
@ -281,17 +297,19 @@ impl Ethash {
else {
trace!(target: "ethash", "Calculating difficulty parent.difficulty={}, header.timestamp={}, parent.timestamp={}", parent.difficulty(), header.timestamp(), parent.timestamp());
//block_diff = parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99)
let diff_inc = (header.timestamp() - parent.timestamp()) / 10;
let diff_inc = (header.timestamp() - parent.timestamp()) / self.ethash_params.difficulty_increment_divisor;
if diff_inc <= 1 {
parent.difficulty().clone() + parent.difficulty().clone() / From::from(2048) * From::from(1 - diff_inc)
parent.difficulty().clone() + parent.difficulty().clone() / From::from(difficulty_bound_divisor) * From::from(1 - diff_inc)
} else {
parent.difficulty().clone() - parent.difficulty().clone() / From::from(2048) * From::from(min(diff_inc - 1, 99))
parent.difficulty().clone() - parent.difficulty().clone() / From::from(difficulty_bound_divisor) * From::from(min(diff_inc - 1, 99))
}
};
target = max(min_difficulty, target);
let period = ((parent.number() + 1) / EXP_DIFF_PERIOD) as usize;
if period > 1 {
target = max(min_difficulty, target + (U256::from(1) << (period - 2)));
if header.number() < self.ethash_params.bomb_defuse_transition {
let period = ((parent.number() + 1) / EXP_DIFF_PERIOD) as usize;
if period > 1 {
target = max(min_difficulty, target + (U256::from(1) << (period - 2)));
}
}
target
}
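
Putting the two hunks above together, a rough standalone sketch (not the engine code) of the frontier-style branch shown here, which Expanse uses up to its frontierCompatibilityModeLimit, with the constants from expanse.json and EXP_DIFF_PERIOD assumed to be the usual 100000:

use std::cmp::{max, min};

// Rough sketch with simplified u64 arithmetic, not the Ethash engine itself.
fn expanse_difficulty(parent_diff: u64, parent_ts: u64, header_ts: u64, number: u64) -> u64 {
    let min_difficulty = 0x20000; // 131072
    // Bound divisor switches from 0x800 to 0x200 at the difficulty hardfork (block 23001).
    let bound_divisor = if number >= 0x59d9 { 0x200 } else { 0x800 };
    // Expanse divides the timestamp delta by 60 instead of 10.
    let diff_inc = (header_ts - parent_ts) / 60;
    let mut target = if diff_inc <= 1 {
        parent_diff + parent_diff / bound_divisor * (1 - diff_inc)
    } else {
        parent_diff - parent_diff / bound_divisor * min(diff_inc - 1, 99)
    };
    target = max(min_difficulty, target);
    // The exponential bomb only applies below the defuse block; since the defuse
    // block (200000) coincides with the start of period 2, the bomb term never
    // actually fires on Expanse.
    if number < 0x30d40 {
        let period = number / 100_000; // EXP_DIFF_PERIOD assumed to be 100000
        if period > 1 {
            target = max(min_difficulty, target + (1u64 << (period - 2)));
        }
    }
    target
}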

View File

@ -42,6 +42,9 @@ pub fn new_frontier() -> Spec { load(include_bytes!("../../res/ethereum/frontier
/// Create a new Frontier mainnet chain spec without the DAO hardfork.
pub fn new_classic() -> Spec { load(include_bytes!("../../res/ethereum/classic.json")) }
/// Create a new Expanse mainnet chain spec.
pub fn new_expanse() -> Spec { load(include_bytes!("../../res/ethereum/expanse.json")) }
/// Create a new Frontier chain spec as though it never changes to Homestead.
pub fn new_frontier_test() -> Spec { load(include_bytes!("../../res/ethereum/frontier_test.json")) }

View File

@ -37,6 +37,8 @@ pub struct CommonParams {
pub maximum_extra_data_size: usize,
/// Network id.
pub network_id: U256,
/// Main subprotocol name.
pub subprotocol_name: String,
/// Minimum gas limit.
pub min_gas_limit: U256,
/// Fork block to check.
@ -49,6 +51,7 @@ impl From<ethjson::spec::Params> for CommonParams {
account_start_nonce: p.account_start_nonce.into(),
maximum_extra_data_size: p.maximum_extra_data_size.into(),
network_id: p.network_id.into(),
subprotocol_name: p.subprotocol_name.unwrap_or_else(|| "eth".to_owned()),
min_gas_limit: p.min_gas_limit.into(),
fork_block: if let (Some(n), Some(h)) = (p.fork_block, p.fork_hash) { Some((n.into(), h.into())) } else { None },
}
@ -156,6 +159,9 @@ impl Spec {
/// Get the configured Network ID.
pub fn network_id(&self) -> U256 { self.params.network_id }
/// Get the configured main subprotocol name.
pub fn subprotocol_name(&self) -> String { self.params.subprotocol_name.clone() }
/// Get the configured network fork block.
pub fn fork_block(&self) -> Option<(BlockNumber, H256)> { self.params.fork_block }
@ -169,7 +175,7 @@ impl Spec {
header.set_transactions_root(self.transactions_root.clone());
header.set_uncles_hash(RlpStream::new_list(0).out().sha3());
header.set_extra_data(self.extra_data.clone());
header.set_state_root(self.state_root().clone());
header.set_state_root(self.state_root());
header.set_receipts_root(self.receipts_root.clone());
header.set_log_bloom(H2048::new().clone());
header.set_gas_used(self.gas_used.clone());
@ -184,6 +190,7 @@ impl Spec {
let r = Rlp::new(&seal);
(0..self.seal_fields).map(|i| r.at(i).as_raw().to_vec()).collect()
});
trace!(target: "spec", "Header hash is {}", header.hash());
header
}
@ -229,13 +236,16 @@ impl Spec {
/// Ensure that the given state DB has the trie nodes in place for the genesis state.
pub fn ensure_db_good(&self, db: &mut StateDB) -> Result<bool, Box<TrieError>> {
if !db.as_hashdb().contains(&self.state_root()) {
trace!(target: "spec", "ensure_db_good: Fresh database? Cannot find state root {}", self.state_root());
let mut root = H256::new();
{
let mut t = SecTrieDBMut::new(db.as_hashdb_mut(), &mut root);
for (address, account) in self.genesis_state.get().iter() {
try!(t.insert(&**address, &account.rlp()));
}
}
trace!(target: "spec", "ensure_db_good: Populated sec trie; root is {}", root);
for (address, account) in self.genesis_state.get().iter() {
account.insert_additional(&mut AccountDBMut::new(db.as_hashdb_mut(), address));
}

View File

@ -32,6 +32,9 @@ pub struct EthashParams {
#[serde(rename="difficultyBoundDivisor")]
pub difficulty_bound_divisor: Uint,
/// See main EthashParams docs.
#[serde(rename="difficultyIncrementDivisor")]
pub difficulty_increment_divisor: Option<Uint>,
/// See main EthashParams docs.
#[serde(rename="durationLimit")]
pub duration_limit: Uint,
/// See main EthashParams docs.
@ -39,9 +42,11 @@ pub struct EthashParams {
pub block_reward: Uint,
/// See main EthashParams docs.
pub registrar: Option<Address>,
/// See main EthashParams docs.
#[serde(rename="frontierCompatibilityModeLimit")]
pub frontier_compatibility_mode_limit: Option<Uint>,
/// See main EthashParams docs.
#[serde(rename="daoHardforkTransition")]
pub dao_hardfork_transition: Option<Uint>,
@ -51,6 +56,16 @@ pub struct EthashParams {
/// See main EthashParams docs.
#[serde(rename="daoHardforkAccounts")]
pub dao_hardfork_accounts: Option<Vec<Address>>,
/// See main EthashParams docs.
#[serde(rename="difficultyHardforkTransition")]
pub difficulty_hardfork_transition: Option<Uint>,
/// See main EthashParams docs.
#[serde(rename="difficultyHardforkBoundDivisor")]
pub difficulty_hardfork_bound_divisor: Option<Uint>,
/// See main EthashParams docs.
#[serde(rename="bombDefuseTransition")]
pub bomb_defuse_transition: Option<Uint>,
}
/// Ethash engine deserialization.
@ -99,7 +114,10 @@ mod tests {
"0x7602b46df5390e432ef1c307d4f2c9ff6d65cc97",
"0xbb9bc244d798123fde783fcc1c72d3bb8c189413",
"0x807640a13483f8ac783c557fcdf27be11ea4ac7a"
]
],
"difficultyHardforkTransition": "0x59d9",
"difficultyHardforkBoundDivisor": "0x0200",
"bombDefuseTransition": "0x42"
}
}"#;

View File

@ -31,6 +31,9 @@ pub struct Params {
/// Network id.
#[serde(rename="networkID")]
pub network_id: Uint,
/// Name of the main ("eth") subprotocol.
#[serde(rename="subprotocolName")]
pub subprotocol_name: Option<String>,
/// Minimum gas limit.
#[serde(rename="minGasLimit")]
pub min_gas_limit: Uint,
@ -53,6 +56,7 @@ mod tests {
"frontierCompatibilityModeLimit": "0x118c30",
"maximumExtraDataSize": "0x20",
"networkID" : "0x1",
"subprotocolName" : "exp",
"minGasLimit": "0x1388",
"accountStartNonce": "0x00"
}"#;

View File

@ -29,6 +29,7 @@ pub enum SpecType {
Testnet,
Olympic,
Classic,
Expanse,
Custom(String),
}
@ -47,6 +48,7 @@ impl str::FromStr for SpecType {
"frontier-dogmatic" | "homestead-dogmatic" | "classic" => SpecType::Classic,
"morden" | "testnet" => SpecType::Testnet,
"olympic" => SpecType::Olympic,
"expanse" => SpecType::Expanse,
other => SpecType::Custom(other.into()),
};
Ok(spec)
@ -60,6 +62,7 @@ impl SpecType {
SpecType::Testnet => Ok(ethereum::new_morden()),
SpecType::Olympic => Ok(ethereum::new_olympic()),
SpecType::Classic => Ok(ethereum::new_classic()),
SpecType::Expanse => Ok(ethereum::new_expanse()),
SpecType::Custom(ref filename) => {
let file = try!(fs::File::open(filename).map_err(|_| "Could not load specification file."));
Spec::load(file)
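
A quick sketch (types as declared in this file) of how the new name resolves through the parser above:

// Sketch: the string accepted on the command line maps to the new preset,
// which SpecType::spec() resolves to ethereum::new_expanse() in the match above.
if let Ok(SpecType::Expanse) = "expanse".parse::<SpecType>() {
    // reached for e.g. `parity --chain expanse`
}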

View File

@ -148,6 +148,11 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
Some(id) => id,
None => spec.network_id(),
};
if spec.subprotocol_name().len() != 3 {
warn!("Your chain specification's subprotocol length is not 3. Ignoring.");
} else {
sync_config.subprotocol_name.clone_from_slice(spec.subprotocol_name().as_bytes());
}
sync_config.fork_block = spec.fork_block();
// prepare account provider
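
The name is only copied when it is exactly three bytes because SyncConfig stores it as a fixed [u8; 3] (see the sync changes below); otherwise the "eth" default is kept. An illustrative stand-alone version of that check, not the run.rs code itself:

// Illustrative only: apply a spec's subprotocol name to a fixed three-byte
// buffer, leaving the default untouched if the length is wrong.
fn apply_subprotocol(name: &mut [u8; 3], from_spec: &str) {
    if from_spec.len() != 3 {
        return; // the real code logs a warning and keeps the default
    }
    name.clone_from_slice(from_spec.as_bytes());
}

fn main() {
    let mut name = *b"eth"; // SyncConfig default
    apply_subprotocol(&mut name, "exp");
    assert_eq!(&name, b"exp");
    apply_subprotocol(&mut name, "myproto"); // wrong length, ignored
    assert_eq!(&name, b"exp");
}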

View File

@ -15,6 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc;
use std::str;
use network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId,
NetworkConfiguration as BasicNetworkConfiguration, NonReservedPeerMode, NetworkError};
use util::{U256, H256};
@ -29,9 +30,6 @@ use ipc::{BinaryConvertable, BinaryConvertError, IpcConfig};
use std::str::FromStr;
use parking_lot::RwLock;
/// Ethereum sync protocol
pub const ETH_PROTOCOL: &'static str = "eth";
/// Sync configuration
#[derive(Debug, Clone, Copy)]
pub struct SyncConfig {
@ -39,6 +37,8 @@ pub struct SyncConfig {
pub max_download_ahead_blocks: usize,
/// Network ID
pub network_id: U256,
/// Main "eth" subprotocol name.
pub subprotocol_name: [u8; 3],
/// Fork block to check
pub fork_block: Option<(BlockNumber, H256)>,
}
@ -48,6 +48,7 @@ impl Default for SyncConfig {
SyncConfig {
max_download_ahead_blocks: 20000,
network_id: U256::from(1),
subprotocol_name: *b"eth",
fork_block: None,
}
}
@ -68,6 +69,8 @@ pub struct EthSync {
network: NetworkService,
/// Protocol handler
handler: Arc<SyncProtocolHandler>,
/// The main subprotocol name
subprotocol_name: [u8; 3],
}
impl EthSync {
@ -78,6 +81,7 @@ impl EthSync {
let sync = Arc::new(EthSync{
network: service,
handler: Arc::new(SyncProtocolHandler { sync: RwLock::new(chain_sync), chain: chain, snapshot_service: snapshot_service }),
subprotocol_name: config.subprotocol_name,
});
Ok(sync)
@ -135,7 +139,7 @@ impl ChainNotify for EthSync {
sealed: Vec<H256>,
_duration: u64)
{
self.network.with_context(ETH_PROTOCOL, |context| {
self.network.with_context(self.subprotocol_name, |context| {
let mut sync_io = NetSyncIo::new(context, &*self.handler.chain, &*self.handler.snapshot_service);
self.handler.sync.write().chain_new_blocks(
&mut sync_io,
@ -149,7 +153,7 @@ impl ChainNotify for EthSync {
fn start(&self) {
self.network.start().unwrap_or_else(|e| warn!("Error starting network: {:?}", e));
self.network.register_protocol(self.handler.clone(), ETH_PROTOCOL, &[62u8, 63u8, 64u8])
self.network.register_protocol(self.handler.clone(), self.subprotocol_name, &[62u8, 63u8, 64u8])
.unwrap_or_else(|e| warn!("Error registering ethereum protocol: {:?}", e));
}
@ -204,7 +208,7 @@ impl ManageNetwork for EthSync {
}
fn stop_network(&self) {
self.network.with_context(ETH_PROTOCOL, |context| {
self.network.with_context(self.subprotocol_name, |context| {
let mut sync_io = NetSyncIo::new(context, &*self.handler.chain, &*self.handler.snapshot_service);
self.handler.sync.write().abort(&mut sync_io);
});

View File

@ -17,7 +17,6 @@
use network::{NetworkContext, PeerId, PacketId, NetworkError};
use ethcore::client::BlockChainClient;
use ethcore::snapshot::SnapshotService;
use api::ETH_PROTOCOL;
/// IO interface for the syncing handler.
/// Provides peer connection management and an interface to the blockchain client.
@ -101,7 +100,7 @@ impl<'s, 'h> SyncIo for NetSyncIo<'s, 'h> {
}
fn eth_protocol_version(&self, peer_id: PeerId) -> u8 {
self.network.protocol_version(peer_id, ETH_PROTOCOL).unwrap_or(0)
self.network.protocol_version(peer_id, self.network.subprotocol_name()).unwrap_or(0)
}
}

View File

@ -137,7 +137,7 @@ const SYS_TIMER: usize = LAST_SESSION + 1;
/// Protocol handler level packet id
pub type PacketId = u8;
/// Protocol / handler id
pub type ProtocolId = &'static str;
pub type ProtocolId = [u8; 3];
/// Messages used to communicate with the event loop from other threads.
#[derive(Clone)]
@ -185,7 +185,7 @@ pub struct CapabilityInfo {
impl Encodable for CapabilityInfo {
fn rlp_append(&self, s: &mut RlpStream) {
s.begin_list(2);
s.append(&self.protocol);
s.append(&&self.protocol[..]);
s.append(&self.version);
}
}
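
An illustrative fragment (using the same RlpStream as above) of why the capability name is now appended through the extra borrow: with ProtocolId a [u8; 3], the slice &protocol[..] is appended so the name encodes to the same 3-byte RLP string the old &'static str produced on the wire.

// Illustrative fragment, not from this commit.
let protocol: [u8; 3] = *b"exp";
let mut s = RlpStream::new_list(2);
s.append(&&protocol[..]); // byte-string encoding of the 3-byte name
s.append(&63u8);          // protocol version, as in the struct above
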
@ -284,10 +284,13 @@ impl<'s> NetworkContext<'s> {
}
/// Returns max version for a given protocol.
pub fn protocol_version(&self, peer: PeerId, protocol: &str) -> Option<u8> {
pub fn protocol_version(&self, peer: PeerId, protocol: ProtocolId) -> Option<u8> {
let session = self.resolve_session(peer);
session.and_then(|s| s.lock().capability_version(protocol))
}
/// Returns this object's subprotocol name.
pub fn subprotocol_name(&self) -> ProtocolId { self.protocol }
}
/// Shared host information
@ -801,8 +804,8 @@ impl Host {
}
}
for (p, _) in self.handlers.read().iter() {
if s.have_capability(p) {
ready_data.push(p);
if s.have_capability(*p) {
ready_data.push(*p);
}
}
},
@ -811,7 +814,7 @@ impl Host {
protocol,
packet_id,
}) => {
match self.handlers.read().get(protocol) {
match self.handlers.read().get(&protocol) {
None => { warn!(target: "network", "No handler found for protocol: {:?}", protocol) },
Some(_) => packet_data.push((protocol, packet_id, data)),
}
@ -826,13 +829,13 @@ impl Host {
}
let handlers = self.handlers.read();
for p in ready_data {
let h = handlers.get(p).unwrap().clone();
let h = handlers.get(&p).unwrap().clone();
self.stats.inc_sessions();
let reserved = self.reserved_nodes.read();
h.connected(&NetworkContext::new(io, p, session.clone(), self.sessions.clone(), &reserved), &token);
}
for (p, packet_id, data) in packet_data {
let h = handlers.get(p).unwrap().clone();
let h = handlers.get(&p).unwrap().clone();
let reserved = self.reserved_nodes.read();
h.read(&NetworkContext::new(io, p, session.clone(), self.sessions.clone(), &reserved), &token, packet_id, &data[1..]);
}
@ -857,8 +860,8 @@ impl Host {
if s.is_ready() {
self.num_sessions.fetch_sub(1, AtomicOrdering::SeqCst);
for (p, _) in self.handlers.read().iter() {
if s.have_capability(p) {
to_disconnect.push(p);
if s.have_capability(*p) {
to_disconnect.push(*p);
}
}
}
@ -874,7 +877,7 @@ impl Host {
}
}
for p in to_disconnect {
let h = self.handlers.read().get(p).unwrap().clone();
let h = self.handlers.read().get(&p).unwrap().clone();
let reserved = self.reserved_nodes.read();
h.disconnected(&NetworkContext::new(io, p, expired_session.clone(), self.sessions.clone(), &reserved), &token);
}
@ -980,7 +983,7 @@ impl IoHandler<NetworkIoMessage> for Host {
self.nodes.write().clear_useless();
},
_ => match self.timers.read().get(&token).cloned() {
Some(timer) => match self.handlers.read().get(timer.protocol).cloned() {
Some(timer) => match self.handlers.read().get(&timer.protocol).cloned() {
None => { warn!(target: "network", "No handler found for protocol: {:?}", timer.protocol) },
Some(h) => {
let reserved = self.reserved_nodes.read();
@ -1004,11 +1007,11 @@ impl IoHandler<NetworkIoMessage> for Host {
} => {
let h = handler.clone();
let reserved = self.reserved_nodes.read();
h.initialize(&NetworkContext::new(io, protocol, None, self.sessions.clone(), &reserved));
self.handlers.write().insert(protocol, h);
h.initialize(&NetworkContext::new(io, *protocol, None, self.sessions.clone(), &reserved));
self.handlers.write().insert(*protocol, h);
let mut info = self.info.write();
for v in versions {
info.capabilities.push(CapabilityInfo { protocol: protocol, version: *v, packet_count:0 });
info.capabilities.push(CapabilityInfo { protocol: *protocol, version: *v, packet_count:0 });
}
},
NetworkIoMessage::AddTimer {
@ -1023,7 +1026,7 @@ impl IoHandler<NetworkIoMessage> for Host {
*counter += 1;
handler_token
};
self.timers.write().insert(handler_token, ProtocolTimer { protocol: protocol, token: *token });
self.timers.write().insert(handler_token, ProtocolTimer { protocol: *protocol, token: *token });
io.register_timer(handler_token, *delay).unwrap_or_else(|e| debug!("Error registering timer {}: {:?}", token, e));
},
NetworkIoMessage::Disconnect(ref peer) => {

View File

@ -45,7 +45,7 @@
//!
//! fn main () {
//! let mut service = NetworkService::new(NetworkConfiguration::new_local()).expect("Error creating network service");
//! service.register_protocol(Arc::new(MyHandler), "myproto", &[1u8]);
//! service.register_protocol(Arc::new(MyHandler), *b"myp", &[1u8]);
//! service.start().expect("Error starting service");
//!
//! // Wait for quit condition

View File

@ -14,8 +14,8 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::{str, io};
use std::net::SocketAddr;
use std::io;
use std::sync::*;
use mio::*;
use mio::tcp::*;
@ -63,7 +63,7 @@ pub enum SessionData {
/// Packet data
data: Vec<u8>,
/// Packet protocol ID
protocol: &'static str,
protocol: [u8; 3],
/// Zero based packet ID
packet_id: u8,
},
@ -89,15 +89,21 @@ pub struct SessionInfo {
#[derive(Debug, PartialEq, Eq)]
pub struct PeerCapabilityInfo {
pub protocol: String,
pub protocol: ProtocolId,
pub version: u8,
}
impl Decodable for PeerCapabilityInfo {
fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
let c = decoder.as_rlp();
let p: Vec<u8> = try!(c.val_at(0));
if p.len() != 3 {
return Err(DecoderError::Custom("Invalid subprotocol string length. Should be 3"));
}
let mut p2: ProtocolId = [0u8; 3];
p2.clone_from_slice(&p);
Ok(PeerCapabilityInfo {
protocol: try!(c.val_at(0)),
protocol: p2,
version: try!(c.val_at(1))
})
}
@ -105,7 +111,7 @@ impl Decodable for PeerCapabilityInfo {
#[derive(Debug)]
struct SessionCapabilityInfo {
pub protocol: &'static str,
pub protocol: [u8; 3],
pub version: u8,
pub packet_count: u8,
pub id_offset: u8,
@ -239,12 +245,12 @@ impl Session {
}
/// Checks if peer supports given capability
pub fn have_capability(&self, protocol: &str) -> bool {
pub fn have_capability(&self, protocol: [u8; 3]) -> bool {
self.info.capabilities.iter().any(|c| c.protocol == protocol)
}
/// Gets the highest supported version of the given capability, if the peer supports it
pub fn capability_version(&self, protocol: &str) -> Option<u8> {
pub fn capability_version(&self, protocol: [u8; 3]) -> Option<u8> {
self.info.capabilities.iter().filter_map(|c| if c.protocol == protocol { Some(c.version) } else { None }).max()
}
@ -270,10 +276,10 @@ impl Session {
}
/// Send a protocol packet to peer.
pub fn send_packet<Message>(&mut self, io: &IoContext<Message>, protocol: &str, packet_id: u8, data: &[u8]) -> Result<(), NetworkError>
pub fn send_packet<Message>(&mut self, io: &IoContext<Message>, protocol: [u8; 3], packet_id: u8, data: &[u8]) -> Result<(), NetworkError>
where Message: Send + Sync + Clone {
if self.info.capabilities.is_empty() || !self.had_hello {
debug!(target: "network", "Sending to unconfirmed session {}, protocol: {}, packet: {}", self.token(), protocol, packet_id);
debug!(target: "network", "Sending to unconfirmed session {}, protocol: {}, packet: {}", self.token(), str::from_utf8(&protocol[..]).unwrap_or("??"), packet_id);
return Err(From::from(NetworkError::BadProtocol));
}
if self.expired() {

View File

@ -41,7 +41,7 @@ impl TestProtocol {
/// Creates and registers the protocol with the network service
pub fn register(service: &mut NetworkService, drop_session: bool) -> Arc<TestProtocol> {
let handler = Arc::new(TestProtocol::new(drop_session));
service.register_protocol(handler.clone(), "test", &[42u8, 43u8]).expect("Error registering test protocol handler");
service.register_protocol(handler.clone(), *b"tst", &[42u8, 43u8]).expect("Error registering test protocol handler");
handler
}
@ -93,7 +93,7 @@ impl NetworkProtocolHandler for TestProtocol {
fn net_service() {
let service = NetworkService::new(NetworkConfiguration::new_local()).expect("Error creating network service");
service.start().unwrap();
service.register_protocol(Arc::new(TestProtocol::new(false)), "myproto", &[1u8]).unwrap();
service.register_protocol(Arc::new(TestProtocol::new(false)), *b"myp", &[1u8]).unwrap();
}
#[test]