Compare commits
15 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
0a5818ebf1
|
||
|
|
5d2d73fa64
|
||
|
|
dd1879bb91
|
||
|
|
e264ed5c37
|
||
|
|
d472bd4f7c
|
||
|
|
465d692956
|
||
|
|
81c1207828
|
||
|
|
f33ba13d74
|
||
|
|
5459d4c3f8
|
||
|
|
3e05717395
|
||
|
|
54d10ee40b
|
||
|
|
9cffdc5867
|
||
|
|
4f96be2024
|
||
|
|
32e1bc6aa5
|
||
|
|
387014f77b
|
19
CHANGELOG
19
CHANGELOG
@@ -1,3 +1,22 @@
|
||||
- 0.2.12
|
||||
* Breaking upgrade of chainlib.
|
||||
* Implement generic block and tx.
|
||||
- 0.2.11
|
||||
* Upgrade shep to handle exception in filestore list
|
||||
- 0.2.10
|
||||
* Upgrade shep to guarantee state lock atomicity
|
||||
- 0.2.9
|
||||
* Minimize instantiations of adapters in filter execution
|
||||
- 0.2.8
|
||||
* Upgrade chainsyncer
|
||||
- 0.2.7
|
||||
* Upgrade chainlib
|
||||
- 0.2.6
|
||||
* Deps upgrade
|
||||
- 0.2.5
|
||||
* Deps upgrade
|
||||
- 0.2.4
|
||||
* Allow omission of state store sync in queue store backend
|
||||
- 0.2.2
|
||||
* Fix missing symbol crashes related to race conditions
|
||||
- 0.2.1
|
||||
|
||||
@@ -13,13 +13,13 @@ logg = logging.getLogger(__name__)
|
||||
|
||||
class ChaindAdapter:
|
||||
|
||||
def __init__(self, chain_spec, state_store, index_store, counter_store, cache_adapter, dispatcher, cache=None, pending_retry_threshold=0, error_retry_threshold=0):
|
||||
def __init__(self, chain_spec, state_store, index_store, counter_store, cache_adapter, dispatcher, cache=None, pending_retry_threshold=0, error_retry_threshold=0, store_sync=True):
|
||||
self.cache_adapter = cache_adapter
|
||||
self.dispatcher = dispatcher
|
||||
store_lock = StoreLock()
|
||||
while True:
|
||||
try:
|
||||
self.store = QueueStore(chain_spec, state_store, index_store, counter_store, cache=cache)
|
||||
self.store = QueueStore(chain_spec, state_store, index_store, counter_store, cache=cache, sync=store_sync)
|
||||
break
|
||||
except FileNotFoundError as e:
|
||||
logg.debug('queuestore instantiation failed, possible race condition (will try again): {}'.format(e))
|
||||
|
||||
@@ -26,13 +26,13 @@ logg = logging.getLogger(__name__)
|
||||
|
||||
class ChaindFsAdapter(ChaindAdapter):
|
||||
|
||||
def __init__(self, chain_spec, path, cache_adapter, dispatcher, cache=None, pending_retry_threshold=0, error_retry_threshold=0, digest_bytes=32, event_callback=None):
|
||||
def __init__(self, chain_spec, path, cache_adapter, dispatcher, cache=None, pending_retry_threshold=0, error_retry_threshold=0, digest_bytes=32, event_callback=None, store_sync=True):
|
||||
factory = SimpleFileStoreFactory(path, use_lock=True).add
|
||||
state_store = Status(factory, allow_invalid=True, event_callback=event_callback)
|
||||
index_path = os.path.join(path, 'tx')
|
||||
index_store = IndexStore(index_path, digest_bytes=digest_bytes)
|
||||
counter_store = CounterStore(path)
|
||||
super(ChaindFsAdapter, self).__init__(chain_spec, state_store, index_store, counter_store, cache_adapter, dispatcher, cache=cache, pending_retry_threshold=pending_retry_threshold, error_retry_threshold=error_retry_threshold)
|
||||
super(ChaindFsAdapter, self).__init__(chain_spec, state_store, index_store, counter_store, cache_adapter, dispatcher, cache=cache, pending_retry_threshold=pending_retry_threshold, error_retry_threshold=error_retry_threshold, store_sync=store_sync)
|
||||
|
||||
|
||||
def put(self, signed_tx):
|
||||
@@ -51,11 +51,11 @@ class ChaindFsAdapter(ChaindAdapter):
|
||||
logg.error('I am just a simple syncer and do not know how to handle the state which the tx {} is in: {}'.format(tx_hash, e))
|
||||
return None
|
||||
except FileNotFoundError as e:
|
||||
logg.debug('queuestore get {} failed, possible race condition (will try again): {}'.format(tx_hash, e))
|
||||
logg.debug('queuestore get (file missing) {} failed, possible race condition (will try again): {}'.format(tx_hash, e))
|
||||
store_lock.again()
|
||||
continue
|
||||
except StateLockedKey as e:
|
||||
logg.debug('queuestore get {} failed, possible race condition (will try again): {}'.format(tx_hash, e))
|
||||
logg.debug('queuestore get (statelock) {} failed, possible race condition (will try again): {}'.format(tx_hash, e))
|
||||
store_lock.again()
|
||||
continue
|
||||
|
||||
@@ -90,12 +90,19 @@ class ChaindFsAdapter(ChaindAdapter):
|
||||
def succeed(self, block, tx):
|
||||
if self.store.is_reserved(tx.hash):
|
||||
raise QueueLockError(tx.hash)
|
||||
|
||||
return self.store.final(tx.hash, block, tx, error=False)
|
||||
r = self.store.final(tx.hash, block, tx, error=False)
|
||||
(k, v) = self.store.get(tx.hash)
|
||||
self.store.purge(k)
|
||||
return r
|
||||
|
||||
|
||||
def fail(self, block, tx):
|
||||
return self.store.final(tx.hash, block, tx, error=True)
|
||||
if self.store.is_reserved(tx.hash):
|
||||
raise QueueLockError(tx.hash)
|
||||
r = self.store.final(tx.hash, block, tx, error=True)
|
||||
(k, v) = self.store.get(tx.hash)
|
||||
self.store.purge(k)
|
||||
return r
|
||||
|
||||
|
||||
def sendfail(self):
|
||||
@@ -128,7 +135,8 @@ class ChaindFsAdapter(ChaindAdapter):
|
||||
r = None
|
||||
try:
|
||||
r = self.dispatcher.send(tx_wire)
|
||||
except RPCException:
|
||||
except RPCException as e:
|
||||
logg.error('dispatch send failed for {}: {}'.format(tx_hash, e))
|
||||
self.store.fail(tx_hash)
|
||||
return False
|
||||
|
||||
|
||||
33
chaind/dispatch.py
Normal file
33
chaind/dispatch.py
Normal file
@@ -0,0 +1,33 @@
|
||||
# standard imports
|
||||
import logging
|
||||
|
||||
# local imports
|
||||
from chaind.adapters.fs import ChaindFsAdapter
|
||||
from chaind.eth.cache import EthCacheTx
|
||||
|
||||
logg = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DispatchProcessor:
    """Sends pending queue transactions to the network via a dispatcher.

    A fresh ChaindFsAdapter is created on every process() call so that the
    filesystem queue state is re-read each round rather than cached.
    """

    def __init__(self, chain_spec, queue_dir, dispatcher):
        """
        :param chain_spec: chain spec identifying the network (opaque here,
            passed through to the adapter and used in log output)
        :param queue_dir: filesystem path of the transaction queue store
        :param dispatcher: object implementing send(), used by the adapter
            to submit transactions
        """
        self.dispatcher = dispatcher
        # BUG FIX: the original assignment had a trailing comma
        # ("self.chain_spec = chain_spec,"), which bound a one-element
        # tuple instead of the value; that tuple then leaked into the
        # adapter constructor and the log message below.
        self.chain_spec = chain_spec
        self.queue_dir = queue_dir


    def process(self, rpc, limit=50):
        """Dispatch up to ``limit`` upcoming transactions from the queue.

        :param rpc: RPC connection (currently unused here; the dispatcher
            holds its own connection) — kept for interface compatibility
        :param limit: maximum number of candidates to fetch from the queue
        :return: number of transactions successfully dispatched
        :rtype: int
        """
        adapter = ChaindFsAdapter(
                self.chain_spec,
                self.queue_dir,
                EthCacheTx,
                self.dispatcher,
                )

        upcoming = adapter.upcoming(limit=limit)
        logg.info('processor has {} candidates for {}, processing with limit {}'.format(len(upcoming), self.chain_spec, limit))
        i = 0
        for tx_hash in upcoming:
            # dispatch() returns a falsy value on send failure; only
            # successful sends are counted.
            if adapter.dispatch(tx_hash):
                i += 1
        return i
|
||||
@@ -26,27 +26,51 @@ class StateFilter(SyncFilter):
|
||||
self.adapter_path = adapter_path
|
||||
self.tx_adapter = tx_adapter
|
||||
self.throttler = throttler
|
||||
self.last_block_height = 0
|
||||
self.adapter = None
|
||||
self.store_lock = None
|
||||
|
||||
|
||||
def __get_adapter(self, block, force_reload=False):
|
||||
if self.store_lock == None:
|
||||
self.store_lock = StoreLock()
|
||||
|
||||
reload = False
|
||||
if block.number != self.last_block_height:
|
||||
reload = True
|
||||
elif self.adapter == None:
|
||||
reload = True
|
||||
elif force_reload:
|
||||
reload = True
|
||||
|
||||
self.last_block_height = block.number
|
||||
|
||||
if reload:
|
||||
while True:
|
||||
logg.info('reloading adapter')
|
||||
try:
|
||||
self.adapter = ChaindFsAdapter(
|
||||
self.chain_spec,
|
||||
self.adapter_path,
|
||||
self.tx_adapter,
|
||||
None,
|
||||
)
|
||||
break
|
||||
except BackendError as e:
|
||||
logg.error('adapter instantiation failed: {}, one more try'.format(e))
|
||||
self.store_lock.again()
|
||||
continue
|
||||
|
||||
return self.adapter
|
||||
|
||||
|
||||
def filter(self, conn, block, tx, session=None):
|
||||
cache_tx = None
|
||||
store_lock = StoreLock()
|
||||
queue_adapter = None
|
||||
queue_adapter = self.__get_adapter(block)
|
||||
|
||||
self.store_lock.reset()
|
||||
|
||||
while True:
|
||||
try:
|
||||
queue_adapter = ChaindFsAdapter(
|
||||
self.chain_spec,
|
||||
self.adapter_path,
|
||||
self.tx_adapter,
|
||||
None,
|
||||
)
|
||||
except BackendError as e:
|
||||
logg.error('adapter instantiation failed: {}, one more try'.format(e))
|
||||
store_lock.again()
|
||||
continue
|
||||
|
||||
store_lock.reset()
|
||||
|
||||
try:
|
||||
cache_tx = queue_adapter.get(tx.hash)
|
||||
break
|
||||
@@ -54,15 +78,16 @@ class StateFilter(SyncFilter):
|
||||
logg.debug('skipping not local transaction {}'.format(tx.hash))
|
||||
return False
|
||||
except BackendError as e:
|
||||
logg.error('adapter instantiation failed: {}, one more try'.format(e))
|
||||
queue_adapter = None
|
||||
store_lock.again()
|
||||
logg.error('adapter get failed: {}, one more try'.format(e))
|
||||
self.store_lock.again()
|
||||
queue_adapter = self.__get_adapter(block, force_reload=True)
|
||||
continue
|
||||
|
||||
if cache_tx == None:
|
||||
raise NotLocalTxError(tx.hash)
|
||||
|
||||
store_lock = StoreLock()
|
||||
self.store_lock.reset()
|
||||
|
||||
queue_lock = StoreLock(error=QueueLockError)
|
||||
while True:
|
||||
try:
|
||||
@@ -76,18 +101,21 @@ class StateFilter(SyncFilter):
|
||||
queue_lock.again()
|
||||
except FileNotFoundError as e:
|
||||
logg.debug('queue item {} not found, possible race condition, will retry: {}'.format(tx.hash, e))
|
||||
store_lock.again()
|
||||
self.store_lock.again()
|
||||
queue_adapter = self.__get_adapter(block, force_reload=True)
|
||||
continue
|
||||
except NotLocalTxError as e:
|
||||
logg.debug('queue item {} not found, possible race condition, will retry: {}'.format(tx.hash, e))
|
||||
store_lock.again()
|
||||
self.store_lock.again()
|
||||
queue_adapter = self.__get_adapter(block, force_reload=True)
|
||||
continue
|
||||
except StateLockedKey as e:
|
||||
logg.debug('queue item {} not found, possible race condition, will retry: {}'.format(tx.hash, e))
|
||||
store_lock.again()
|
||||
self.store_lock.again()
|
||||
queue_adapter = self.__get_adapter(block, force_reload=True)
|
||||
continue
|
||||
|
||||
logg.info('filter registered {} for {} in {}'.format(tx.status.name, tx.hash, block))
|
||||
logg.info('filter registered {} for {} in {}'.format(tx.status_name, tx.hash, block))
|
||||
|
||||
if self.throttler != None:
|
||||
self.throttler.dec(tx.hash)
|
||||
|
||||
@@ -5,7 +5,7 @@ import time
|
||||
from .error import BackendError
|
||||
|
||||
BASE_DELAY = 0.01
|
||||
BASE_DELAY_LIMIT = 3.0
|
||||
BASE_DELAY_LIMIT = 10.0
|
||||
|
||||
|
||||
class StoreLock:
|
||||
|
||||
@@ -22,7 +22,7 @@ logg = logging.getLogger(__name__)
|
||||
|
||||
class SessionController:
|
||||
|
||||
def __init__(self, config, adapter, processor):
|
||||
def __init__(self, config, processor):
|
||||
self.dead = False
|
||||
os.makedirs(os.path.dirname(config.get('SESSION_SOCKET_PATH')), exist_ok=True)
|
||||
try:
|
||||
@@ -37,7 +37,6 @@ class SessionController:
|
||||
self.srv.settimeout(float(config.get('SESSION_DISPATCH_DELAY')))
|
||||
self.processor = processor
|
||||
self.chain_spec = config.get('CHAIN_SPEC')
|
||||
self.adapter = adapter
|
||||
|
||||
|
||||
def shutdown(self, signo, frame):
|
||||
@@ -65,7 +64,7 @@ class SessionController:
|
||||
r = None
|
||||
while True:
|
||||
try:
|
||||
r = self.processor(self.chain_spec, self.adapter, conn)
|
||||
r = self.processor(conn)
|
||||
break
|
||||
except BackendError as e:
|
||||
state_lock.again(e)
|
||||
|
||||
@@ -4,6 +4,7 @@ import os
|
||||
import uuid
|
||||
|
||||
# external imports
|
||||
from chainlib.settings import ChainSettings
|
||||
from chainsyncer.settings import ChainsyncerSettings
|
||||
from chainqueue.settings import ChainqueueSettings
|
||||
|
||||
@@ -106,18 +107,39 @@ class ChaindSettings(ChainsyncerSettings, ChainqueueSettings):
|
||||
raise ValueError('at least one backend must be set')
|
||||
|
||||
|
||||
def process_chaind_queue(self, config):
|
||||
if config.get('QUEUE_STATE_PATH') == None:
|
||||
queue_state_dir = self.dir_for('queue')
|
||||
config.add(queue_state_dir, 'QUEUE_STATE_PATH', False)
|
||||
logg.debug('setting queue state path {}'.format(queue_state_dir))
|
||||
|
||||
self.process_queue_tx(config)
|
||||
self.process_queue_paths(config)
|
||||
if config.get('QUEUE_BACKEND') == 'fs':
|
||||
self.process_queue_backend_fs(config)
|
||||
self.process_queue_backend(config)
|
||||
self.process_queue_store(config)
|
||||
|
||||
|
||||
def process(self, config):
|
||||
super(ChaindSettings, self).process(config)
|
||||
if self.include_sync:
|
||||
self.process_sync(config)
|
||||
self.process_sync_backend(config)
|
||||
#super(ChaindSettings, self).process(config)
|
||||
self.process_common(config)
|
||||
|
||||
if self.include_queue:
|
||||
self.process_queue_backend(config)
|
||||
self.process_dispatch(config)
|
||||
self.process_token(config)
|
||||
if self.include_sync:
|
||||
self.process_sync_backend(config)
|
||||
|
||||
self.process_backend(config)
|
||||
self.process_session(config)
|
||||
|
||||
if self.include_sync:
|
||||
self.process_sync(config)
|
||||
if self.include_queue:
|
||||
self.process_chaind_queue(config)
|
||||
self.process_dispatch(config)
|
||||
self.process_token(config)
|
||||
|
||||
self.process_socket(config)
|
||||
|
||||
|
||||
|
||||
@@ -7,6 +7,11 @@ from chainqueue.cache import CacheTokenTx
|
||||
from chainlib.status import Status as TxStatus
|
||||
from chainlib.chain import ChainSpec
|
||||
from chainlib.error import RPCException
|
||||
from chainlib.tx import (
|
||||
Tx,
|
||||
TxResult,
|
||||
)
|
||||
from chainlib.block import Block
|
||||
|
||||
|
||||
class MockCacheAdapter(CacheTokenTx):
|
||||
@@ -29,15 +34,22 @@ class MockDispatcher:
|
||||
|
||||
|
||||
def send(self, v):
|
||||
import sys
|
||||
sys.stderr.write('susu v {} {}\n'.format(v, self.fails))
|
||||
if v in self.fails:
|
||||
raise RPCException('{} is in fails'.format(v))
|
||||
pass
|
||||
|
||||
|
||||
class MockTx:
|
||||
class MockTx(Tx):
|
||||
|
||||
def __init__(self, tx_hash, status=TxStatus.SUCCESS):
|
||||
self.hash = tx_hash
|
||||
self.status = status
|
||||
result = TxResult()
|
||||
result.status = status
|
||||
super(MockTx, self).__init__(result=result)
|
||||
self.set_hash(tx_hash)
|
||||
|
||||
|
||||
class MockBlock(Block):
|
||||
|
||||
def __init__(self, number):
|
||||
super(MockBlock, self).__init__()
|
||||
self.number = number
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
chainlib~=0.1.1
|
||||
chainqueue~=0.1.8
|
||||
chainsyncer~=0.4.3
|
||||
chainlib~=0.2.0
|
||||
chainqueue~=0.1.16
|
||||
chainsyncer~=0.4.9
|
||||
confini~=0.6.0
|
||||
funga~=0.5.2
|
||||
pyxdg~=0.26
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[metadata]
|
||||
name = chaind
|
||||
version = 0.2.2
|
||||
version = 0.2.12
|
||||
description = Base package for chain queue service
|
||||
author = Louis Holbrook
|
||||
author_email = dev@holbrook.no
|
||||
|
||||
@@ -14,6 +14,7 @@ from chaind.filter import StateFilter
|
||||
# test imports
|
||||
from chaind.unittest.common import (
|
||||
MockTx,
|
||||
MockBlock,
|
||||
MockCacheAdapter,
|
||||
MockDispatcher,
|
||||
)
|
||||
@@ -76,7 +77,8 @@ class TestChaindFs(TestChaindFsBase):
|
||||
|
||||
fltr = StateFilter(self.chain_spec, self.path, MockCacheAdapter)
|
||||
tx = MockTx(hsh)
|
||||
fltr.filter(None, None, tx)
|
||||
block = MockBlock(42)
|
||||
fltr.filter(None, block, tx)
|
||||
|
||||
|
||||
def test_fs_filter_fail(self):
|
||||
@@ -87,7 +89,8 @@ class TestChaindFs(TestChaindFsBase):
|
||||
|
||||
fltr = StateFilter(self.chain_spec, self.path, MockCacheAdapter)
|
||||
tx = MockTx(hsh, TxStatus.ERROR)
|
||||
fltr.filter(None, None, tx)
|
||||
block = MockBlock(42)
|
||||
fltr.filter(None, block, tx)
|
||||
|
||||
|
||||
def test_upcoming(self):
|
||||
|
||||
Reference in New Issue
Block a user