42 Commits

Author  SHA1        Message                                                          Date
lash    443e64f3e9  Add chain spec parts to session paths                            2022-05-15 11:32:42 +00:00
lash    bb5e0b359f  Bump deps                                                        2022-05-14 16:24:03 +00:00
lash    8bc49e7408  Bump minor version                                               2022-05-14 16:22:40 +00:00
lash    c83aade144  Add data config path module                                      2022-05-14 12:40:21 +00:00
lash    63a1d07a28  Update dir arg destinations in config handle                     2022-05-14 12:39:53 +00:00
lash    6da2a1ced9  Update config processing                                         2022-05-14 12:24:24 +00:00
lash    9f2a791b1f  Update state config names, remove dead code                      2022-05-13 13:47:11 +00:00
lash    ba894044d8  Remove redundant chaind init file                                2022-05-13 10:35:21 +00:00
lash    e5a18d1abf  Make flag and arg preparations stateless                         2022-05-13 10:32:56 +00:00
lash    139e2772af  Implement settings processing on chainlib 0.3.0 structure        2022-05-13 09:40:18 +00:00
lash    0a5818ebf1  Update settings for queue and syncer                             2022-05-10 06:13:56 +00:00
lash    5d2d73fa64  Upgrade chainqueue, chainsyncer                                  2022-05-09 19:44:28 +00:00
lash    dd1879bb91  Implement mocks on generic tx, block                             2022-05-09 19:25:31 +00:00
lash    e264ed5c37  Upgrade deps, to avoid shep sync on persist set                  2022-05-05 17:08:09 +00:00
lash    d472bd4f7c  Upgrade deps, handle filestore list exception in shep            2022-05-05 15:41:30 +00:00
lash    465d692956  Upgrade deps                                                     2022-05-05 15:02:21 +00:00
lash    81c1207828  Recreate adapter only per block                                  2022-05-05 12:09:07 +00:00
lash    f33ba13d74  Upgrade chainsyncer                                              2022-05-05 08:00:14 +00:00
lash    5459d4c3f8  Upgrade deps                                                     2022-05-04 18:22:29 +00:00
lash    3e05717395  Add error line for rpc fails in dispatch                         2022-05-04 06:44:40 +00:00
lash    54d10ee40b  Upgrade chainqueue                                               2022-05-04 05:43:39 +00:00
lash    9cffdc5867  Factor out dispatch processor from chain implementation          2022-05-04 05:38:01 +00:00
lash    4f96be2024  Upgrade chainqueue                                               2022-05-03 17:22:59 +00:00
lash    32e1bc6aa5  Correct purge call, add missing lock module                      2022-05-02 20:10:30 +00:00
lash    387014f77b  Purge items from memory state on final                           2022-05-02 20:05:41 +00:00
lash    c3a592c0f6  Implement shep store lock                                        2022-05-02 09:59:13 +00:00
lash    5d61506133  WIP whackamole race condition problems                           2022-05-01 07:55:51 +00:00
lash    5102b4ac6e  Fix crashes related to race condition hits                       2022-05-01 07:31:18 +00:00
lash    9b98703f24  Receive race handling from chainqueue, rehabilitate tests        2022-05-01 06:58:52 +00:00
lash    4be5325df2  Renew backend every filter pass, allow race in backend           2022-04-30 18:32:10 +00:00
lash    96bdca20cc  Filter out final states from upcoming in network exceptions      2022-04-30 16:44:37 +00:00
lash    14fce6f63f  Upgrade chainsyncer                                              2022-04-30 07:51:59 +00:00
lash    9ed3bad0c4  Add upcoming throttling, tests                                   2022-04-30 05:45:02 +00:00
lash    e87ec0cd4c  Upgrade chainqueue, chainsyncer                                  2022-04-29 06:26:43 +00:00
lash    b52d69c3f9  Correct chainlib version                                         2022-04-28 12:44:59 +00:00
lash    1d2656aaed  Add settings module                                              2022-04-28 12:41:08 +00:00
lash    b198e3f6b1  Upgrade chainsyncer, chainqueue                                  2022-04-27 06:35:18 +00:00
lash    a55591f243  Include cli module in setup                                      2022-04-27 06:31:30 +00:00
lash    800b70680b  Improve data dir generation, improve socket settings handling    2022-04-27 06:29:46 +00:00
lash    e54f03a8f6  Add chainqueue settings                                          2022-04-26 21:28:31 +00:00
lash    18d10dc8b7  Allow selective sync and or queue in settings                    2022-04-26 19:41:29 +00:00
lash    7e78fd0da2  Implement cli processing, settings renderer                      2022-04-26 17:25:37 +00:00
21 changed files with 614 additions and 157 deletions


@@ -1,3 +1,33 @@
+- 0.3.0
+	* Implement on chainlib 0.3.0
+- 0.2.12
+	* Breaking upgrade of chainlib.
+	* Implement generic block and tx.
+- 0.2.11
+	* Upgrade shep to handle exception in filestore list
+- 0.2.10
+	* Upgrade shep to guarantee state lock atomicity
+- 0.2.9
+	* Minimize instantiations of adapters in filter execution
+- 0.2.8
+	* Upgrade chainsyncer
+- 0.2.7
+	* Upgrade chainlib
+- 0.2.6
+	* Deps upgrade
+- 0.2.5
+	* Deps upgrade
+- 0.2.4
+	* Allow omission of state store sync in queue store backend
+- 0.2.2
+	* Fix missing symbol crashes related to race conditions
+- 0.2.1
+	* Receive removed race checks from chainqueue
+- 0.2.0
+	* primitive race condition handling between fs access of sync and queue
+	* re-enable throttling based on in-flight transaction count
+- 0.1.2
+	* add settings object
 - 0.1.0
 	* consume non chain-specific code
 - 0.0.1


@@ -1,10 +1,27 @@
+# standard imports
+import logging
+import time
+
 # external imports
 from chainqueue import Store as QueueStore

+# local imports
+from chaind.lock import StoreLock
+
+logg = logging.getLogger(__name__)
+

 class ChaindAdapter:

-    def __init__(self, chain_spec, state_store, index_store, counter_store, cache_adapter, dispatcher, cache=None, pending_retry_threshold=0, error_retry_threshold=0):
+    def __init__(self, chain_spec, state_store, index_store, counter_store, cache_adapter, dispatcher, cache=None, pending_retry_threshold=0, error_retry_threshold=0, store_sync=True):
         self.cache_adapter = cache_adapter
         self.dispatcher = dispatcher
-        self.store = QueueStore(chain_spec, state_store, index_store, counter_store, cache=cache)
+        store_lock = StoreLock()
+        while True:
+            try:
+                self.store = QueueStore(chain_spec, state_store, index_store, counter_store, cache=cache, sync=store_sync)
+                break
+            except FileNotFoundError as e:
+                logg.debug('queuestore instantiation failed, possible race condition (will try again): {}'.format(e))
+                store_lock.again()
+                continue


@@ -1,5 +1,7 @@
 # standard imports
 import logging
+import os
+import time

 # external imports
 from chainlib.error import RPCException
@@ -10,36 +12,67 @@ from chainqueue.store.fs import (
     CounterStore,
     )
 from shep.store.file import SimpleFileStoreFactory
+from shep.error import (
+    StateInvalid,
+    StateLockedKey,
+)

 # local imports
 from .base import ChaindAdapter
+from chaind.error import QueueLockError
+from chaind.lock import StoreLock

 logg = logging.getLogger(__name__)


 class ChaindFsAdapter(ChaindAdapter):

-    def __init__(self, chain_spec, path, cache_adapter, dispatcher, cache=None, pending_retry_threshold=0, error_retry_threshold=0, digest_bytes=32):
-        factory = SimpleFileStoreFactory(path).add
-        state_store = Status(factory)
-        index_store = IndexStore(path, digest_bytes=digest_bytes)
+    def __init__(self, chain_spec, path, cache_adapter, dispatcher, cache=None, pending_retry_threshold=0, error_retry_threshold=0, digest_bytes=32, event_callback=None, store_sync=True):
+        factory = SimpleFileStoreFactory(path, use_lock=True).add
+        state_store = Status(factory, allow_invalid=True, event_callback=event_callback)
+        index_path = os.path.join(path, 'tx')
+        index_store = IndexStore(index_path, digest_bytes=digest_bytes)
         counter_store = CounterStore(path)
-        super(ChaindFsAdapter, self).__init__(chain_spec, state_store, index_store, counter_store, cache_adapter, dispatcher, cache=cache, pending_retry_threshold=pending_retry_threshold, error_retry_threshold=error_retry_threshold)
+        super(ChaindFsAdapter, self).__init__(chain_spec, state_store, index_store, counter_store, cache_adapter, dispatcher, cache=cache, pending_retry_threshold=pending_retry_threshold, error_retry_threshold=error_retry_threshold, store_sync=store_sync)

     def put(self, signed_tx):
-        #cache_tx = self.deserialize(signed_tx)
         (s, tx_hash,) = self.store.put(signed_tx, cache_adapter=self.cache_adapter)
         return tx_hash

     def get(self, tx_hash):
-        v = self.store.get(tx_hash)
+        v = None
+        store_lock = StoreLock()
+        while True:
+            try:
+                v = self.store.get(tx_hash)
+                break
+            except StateInvalid as e:
+                logg.error('I am just a simple syncer and do not know how to handle the state which the tx {} is in: {}'.format(tx_hash, e))
+                return None
+            except FileNotFoundError as e:
+                logg.debug('queuestore get (file missing) {} failed, possible race condition (will try again): {}'.format(tx_hash, e))
+                store_lock.again()
+                continue
+            except StateLockedKey as e:
+                logg.debug('queuestore get (statelock) {} failed, possible race condition (will try again): {}'.format(tx_hash, e))
+                store_lock.again()
+                continue
         return v[1]

-    def upcoming(self):
-        return self.store.upcoming()
+    def upcoming(self, limit=0):
+        real_limit = 0
+        in_flight = []
+        if limit > 0:
+            in_flight = self.store.by_state(state=self.store.IN_NETWORK, not_state=self.store.FINAL)
+            real_limit = limit - len(in_flight)
+            if real_limit <= 0:
+                return []
+        r = self.store.upcoming(limit=real_limit)
+        logg.info('upcoming returning {} upcoming from limit {} less {} active in-flight txs'.format(len(r), limit, len(in_flight)))
+        return r

     def pending(self):
@@ -50,12 +83,30 @@ class ChaindFsAdapter(ChaindAdapter):
         return self.store.deferred()

+    def failed(self):
+        return self.store.failed()
+
     def succeed(self, block, tx):
-        return self.store.final(tx.hash, block, tx, error=False)
+        if self.store.is_reserved(tx.hash):
+            raise QueueLockError(tx.hash)
+        r = self.store.final(tx.hash, block, tx, error=False)
+        (k, v) = self.store.get(tx.hash)
+        self.store.purge(k)
+        return r

     def fail(self, block, tx):
-        return self.store.final(tx.hash, block, tx, error=True)
+        if self.store.is_reserved(tx.hash):
+            raise QueueLockError(tx.hash)
+        r = self.store.final(tx.hash, block, tx, error=True)
+        (k, v) = self.store.get(tx.hash)
+        self.store.purge(k)
+        return r
+    def sendfail(self, tx_hash):
+        return self.store.fail(tx_hash)
     def enqueue(self, tx_hash):

@@ -63,15 +114,44 @@ class ChaindFsAdapter(ChaindAdapter):

     def dispatch(self, tx_hash):
-        entry = self.store.send_start(tx_hash)
+        entry = None
+        store_lock = StoreLock()
+        while True:
+            try:
+                entry = self.store.send_start(tx_hash)
+                break
+            except FileNotFoundError as e:
+                logg.debug('dispatch failed to find {} in backend, will try again: {}'.format(tx_hash, e))
+                store_lock.again()
+                continue
+            except StateLockedKey as e:
+                logg.debug('dispatch failed to find {} in backend, will try again: {}'.format(tx_hash, e))
+                store_lock.again()
+                continue
+
         tx_wire = entry.serialize()
         r = None
         try:
             r = self.dispatcher.send(tx_wire)
-        except RPCException:
+        except RPCException as e:
+            logg.error('dispatch send failed for {}: {}'.format(tx_hash, e))
             self.store.fail(tx_hash)
             return False

-        self.store.send_end(tx_hash)
+        store_lock = StoreLock()
+        while True:
+            try:
+                self.store.send_end(tx_hash)
+                break
+            except FileNotFoundError as e:
+                logg.debug('dispatch failed to find {} in backend, will try again: {}'.format(tx_hash, e))
+                store_lock.again(e)
+                continue
+            except StateLockedKey as e:
+                logg.debug('dispatch failed to find {} in backend, will try again: {}'.format(tx_hash, e))
+                store_lock.again(e)
+                continue
         return True

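The in-flight accounting that upcoming() performs above, restated as a standalone sketch; the list arguments stand in for the shep state store queries and the names are illustrative, not part of chaind.

# Standalone sketch of the throttling arithmetic in ChaindFsAdapter.upcoming().
# The queue and the in-network set are faked with plain lists; only the
# arithmetic mirrors the method above.
def upcoming_sketch(queued, in_flight, limit=0):
    if limit > 0:
        # txs already sent to the network but not yet finalized count
        # against the caller's limit
        real_limit = limit - len(in_flight)
        if real_limit <= 0:
            return []
        return queued[:real_limit]
    return queued

# limit 5 with 2 txs still in flight releases only 3 new candidates
print(upcoming_sketch(['a', 'b', 'c', 'd', 'e'], ['x', 'y'], limit=5))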
chaind/cli/arg.py Normal file (+19)

@@ -0,0 +1,19 @@
+def apply_flag(flag):
+    flag.add('session')
+    flag.add('dispatch')
+    flag.add('socket')
+    flag.add('socket_client')
+    flag.add('token')
+    flag.alias('chaind_base', 'session')
+    flag.alias('chaind_socket_client', 'session', 'socket', 'socket_client')
+    return flag
+
+
+def apply_arg(arg):
+    arg.add_long('session-id', 'session', help='Session to store state and data under')
+    arg.add_long('socket-path', 'socket', help='UNIX socket path')
+    arg.add_long('send-socket', 'socket_client', typ=bool, help='Send to UNIX socket')
+    arg.add_long('token-module', 'token', help='Python module path to resolve tokens from identifiers')
+    return arg

chaind/cli/base.py Normal file (+13)

@@ -0,0 +1,13 @@
+# standard imports
+import enum
+
+
+class ChaindFlag(enum.IntEnum):
+    SESSION = 1
+    DISPATCH = 2
+    SOCKET = 16
+    SOCKET_CLIENT = 32
+    TOKEN = 256
+
+argflag_local_base = ChaindFlag.SESSION
+argflag_local_socket_client = ChaindFlag.SESSION | ChaindFlag.SOCKET | ChaindFlag.SOCKET_CLIENT

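The flag values are plain bitmasks, so composites such as argflag_local_socket_client can be tested bit by bit. A minimal, self-contained demonstration (it duplicates the enum above rather than importing chaind.cli):

import enum

class ChaindFlag(enum.IntEnum):
    SESSION = 1
    DISPATCH = 2
    SOCKET = 16
    SOCKET_CLIENT = 32
    TOKEN = 256

argflag = ChaindFlag.SESSION | ChaindFlag.SOCKET | ChaindFlag.SOCKET_CLIENT
assert argflag & ChaindFlag.SOCKET        # bit is set on the composite
assert not argflag & ChaindFlag.TOKEN     # bit is clear
print(hex(argflag))                       # 0x31 == 1 | 16 | 32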
chaind/cli/config.py Normal file (+19)

@@ -0,0 +1,19 @@
+def process_config(config, arg, args, flags):
+    args_override = {}
+
+    if arg.match('session', flags):
+        args_override['SESSION_ID'] = getattr(args, 'session_id')
+        args_override['SESSION_RUNTIME_DIR'] = getattr(args, 'runtime_path')
+        args_override['SESSION_DATA_DIR'] = getattr(args, 'state_path')
+
+    if arg.match('socket', flags):
+        args_override['SESSION_SOCKET_PATH'] = getattr(args, 'socket')
+
+    if arg.match('token', flags):
+        args_override['TOKEN_MODULE'] = getattr(args, 'token_module')
+
+    config.dict_override(args_override, 'local cli args')
+
+    if arg.match('socket_client', flags):
+        config.add(getattr(args, 'send_socket'), '_SOCKET_SEND', False)
+
+    return config

chaind/data/__init__.py Normal file (+6)

@@ -0,0 +1,6 @@
+# standard imports
+import os
+
+data_dir = os.path.realpath(os.path.dirname(__file__))
+config_dir = os.path.join(data_dir, 'config')


@@ -1,15 +1,5 @@
 [session]
 socket_path =
-runtime_dir =
+runtime_path =
 id =
-data_dir =
+data_path =
-
-[database]
-engine =
-name = chaind
-driver =
-user =
-password =
-host =
-port =
-debug = 0


@@ -0,0 +1,2 @@
+[token]
+module =

chaind/dispatch.py Normal file (+33)

@@ -0,0 +1,33 @@
+# standard imports
+import logging
+
+# local imports
+from chaind.adapters.fs import ChaindFsAdapter
+from chaind.eth.cache import EthCacheTx
+
+logg = logging.getLogger(__name__)
+
+
+class DispatchProcessor:
+
+    def __init__(self, chain_spec, queue_dir, dispatcher):
+        self.dispatcher = dispatcher
+        self.chain_spec = chain_spec
+        self.queue_dir = queue_dir
+
+
+    def process(self, rpc, limit=50):
+        adapter = ChaindFsAdapter(
+            self.chain_spec,
+            self.queue_dir,
+            EthCacheTx,
+            self.dispatcher,
+            )
+        upcoming = adapter.upcoming(limit=limit)
+        logg.info('processor has {} candidates for {}, processing with limit {}'.format(len(upcoming), self.chain_spec, limit))
+        i = 0
+        for tx_hash in upcoming:
+            if adapter.dispatch(tx_hash):
+                i += 1
+
+        return i

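A hedged wiring sketch for DispatchProcessor. The queue directory, the chain spec values and NoopDispatcher below are placeholder assumptions; a real deployment hands in a chainlib RPC dispatcher, and process() returns the number of transactions dispatched in the pass.

from chainlib.chain import ChainSpec
from chaind.dispatch import DispatchProcessor

class NoopDispatcher:
    # stand-in for a real RPC dispatcher; send() would submit tx_wire on-chain
    def send(self, tx_wire):
        pass

chain_spec = ChainSpec('evm', 'ethereum', 1)   # placeholder chain spec
processor = DispatchProcessor(chain_spec, '/var/lib/chaind/queue', NoopDispatcher())
sent = processor.process(None, limit=50)       # the rpc argument is unused by the code shown above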

@@ -16,3 +16,11 @@ class ClientBlockError(BlockingIOError):

 class ClientInputError(ValueError):
     pass
+
+
+class QueueLockError(Exception):
+    pass
+
+
+class BackendError(Exception):
+    pass


@@ -1,30 +1,122 @@
 # standard imports
 import logging
+import time

 # external imports
 from chainlib.status import Status as TxStatus
 from chainsyncer.filter import SyncFilter
 from chainqueue.error import NotLocalTxError
+from chaind.adapters.fs import ChaindFsAdapter
+from shep.error import StateLockedKey

+# local imports
+from .error import (
+    QueueLockError,
+    BackendError,
+)
+from chaind.lock import StoreLock

 logg = logging.getLogger(__name__)


 class StateFilter(SyncFilter):

-    def __init__(self, adapter, throttler=None):
-        self.adapter = adapter
+    def __init__(self, chain_spec, adapter_path, tx_adapter, throttler=None):
+        self.chain_spec = chain_spec
+        self.adapter_path = adapter_path
+        self.tx_adapter = tx_adapter
         self.throttler = throttler
+        self.last_block_height = 0
+        self.adapter = None
+        self.store_lock = None


+    def __get_adapter(self, block, force_reload=False):
+        if self.store_lock == None:
+            self.store_lock = StoreLock()
+
+        reload = False
+        if block.number != self.last_block_height:
+            reload = True
+        elif self.adapter == None:
+            reload = True
+        elif force_reload:
+            reload = True
+
+        self.last_block_height = block.number
+        if reload:
+            while True:
+                logg.info('reloading adapter')
+                try:
+                    self.adapter = ChaindFsAdapter(
+                        self.chain_spec,
+                        self.adapter_path,
+                        self.tx_adapter,
+                        None,
+                        )
+                    break
+                except BackendError as e:
+                    logg.error('adapter instantiation failed: {}, one more try'.format(e))
+                    self.store_lock.again()
+                    continue
+
+        return self.adapter


     def filter(self, conn, block, tx, session=None):
-        try:
-            cache_tx = self.adapter.get(tx.hash)
-        except NotLocalTxError:
-            logg.debug('skipping not local transaction {}'.format(tx.hash))
-            return False
-
-        if tx.status == TxStatus.SUCCESS:
-            self.adapter.succeed(block, tx)
-        else:
-            self.adapter.fail(block, tx)
+        cache_tx = None
+        queue_adapter = self.__get_adapter(block)
+
+        self.store_lock.reset()
+        while True:
+            try:
+                cache_tx = queue_adapter.get(tx.hash)
+                break
+            except NotLocalTxError:
+                logg.debug('skipping not local transaction {}'.format(tx.hash))
+                return False
+            except BackendError as e:
+                logg.error('adapter get failed: {}, one more try'.format(e))
+                self.store_lock.again()
+                queue_adapter = self.__get_adapter(block, force_reload=True)
+                continue
+
+        if cache_tx == None:
+            raise NotLocalTxError(tx.hash)
+
+        self.store_lock.reset()
+        queue_lock = StoreLock(error=QueueLockError)
+        while True:
+            try:
+                if tx.status == TxStatus.SUCCESS:
+                    queue_adapter.succeed(block, tx)
+                else:
+                    queue_adapter.fail(block, tx)
+                break
+            except QueueLockError as e:
+                logg.debug('queue item {} is blocked, will retry: {}'.format(tx.hash, e))
+                queue_lock.again()
+            except FileNotFoundError as e:
+                logg.debug('queue item {} not found, possible race condition, will retry: {}'.format(tx.hash, e))
+                self.store_lock.again()
+                queue_adapter = self.__get_adapter(block, force_reload=True)
+                continue
+            except NotLocalTxError as e:
+                logg.debug('queue item {} not found, possible race condition, will retry: {}'.format(tx.hash, e))
+                self.store_lock.again()
+                queue_adapter = self.__get_adapter(block, force_reload=True)
+                continue
+            except StateLockedKey as e:
+                logg.debug('queue item {} not found, possible race condition, will retry: {}'.format(tx.hash, e))
+                self.store_lock.again()
+                queue_adapter = self.__get_adapter(block, force_reload=True)
+                continue
+
+        logg.info('filter registered {} for {} in {}'.format(tx.status_name, tx.hash, block))

         if self.throttler != None:
             self.throttler.dec(tx.hash)

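Usage of the reworked StateFilter, mirroring the unit tests at the bottom of this changeset. chain_spec, queue_dir and tx_hash are assumed fixtures; MockCacheAdapter, MockTx and MockBlock are the test doubles defined in chaind.unittest.common further down.

from chaind.filter import StateFilter

# the filter now builds its own fs adapter from a path, refreshed per block
fltr = StateFilter(chain_spec, queue_dir, MockCacheAdapter)
tx = MockTx(tx_hash)            # a tx previously put() into the queue
block = MockBlock(42)
fltr.filter(None, block, tx)    # records final state and purges the entry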
chaind/lock.py Normal file (+34)

@@ -0,0 +1,34 @@
+# standard imports
+import time
+
+# local imports
+from .error import BackendError
+
+BASE_DELAY = 0.01
+BASE_DELAY_LIMIT = 10.0
+
+
+class StoreLock:
+
+    def __init__(self, delay=BASE_DELAY, delay_limit=BASE_DELAY_LIMIT, error=BackendError, description=None):
+        self.base_delay = delay
+        self.delay = delay
+        self.delay_limit = delay_limit
+        self.error = error
+        self.description = description
+
+
+    def again(self, e=None):
+        if self.delay > self.delay_limit:
+            err = None
+            if e != None:
+                err = str(e)
+            else:
+                err = self.description
+            raise self.error(err)
+        time.sleep(self.delay)
+        self.delay *= 2
+
+
+    def reset(self):
+        self.delay = self.base_delay

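The retry cadence implied by these defaults: again() sleeps and doubles the delay on each call, and raises the configured error once the delay exceeds 10 s, i.e. on the 11th consecutive call after roughly 10 s of cumulative sleep (0.01 + 0.02 + ... + 5.12 = 10.23). A small demonstration:

from chaind.lock import StoreLock
from chaind.error import BackendError

lock = StoreLock()
attempts = 0
try:
    while True:
        attempts += 1
        lock.again()    # sleeps 0.01, 0.02, 0.04, ... seconds, then raises
except BackendError:
    print('gave up after {} calls'.format(attempts))   # prints 11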

@@ -8,19 +8,21 @@ import stat
 from hexathon import strip_0x

 # local imports
-from chaind.error import (
+from .error import (
     NothingToDoError,
     ClientGoneError,
     ClientBlockError,
     ClientInputError,
     )
+from .lock import StoreLock
+from .error import BackendError

 logg = logging.getLogger(__name__)

 class SessionController:

-    def __init__(self, config, adapter, processor):
+    def __init__(self, config, processor):
         self.dead = False
         os.makedirs(os.path.dirname(config.get('SESSION_SOCKET_PATH')), exist_ok=True)
         try:

@@ -35,7 +37,6 @@ class SessionController:
         self.srv.settimeout(float(config.get('SESSION_DISPATCH_DELAY')))
         self.processor = processor
         self.chain_spec = config.get('CHAIN_SPEC')
-        self.adapter = adapter

     def shutdown(self, signo, frame):

@@ -59,7 +60,16 @@ class SessionController:
     def process(self, conn):
-        r = self.processor(self.chain_spec, self.adapter, conn)
+        state_lock = StoreLock()
+        r = None
+        while True:
+            try:
+                r = self.processor(conn)
+                break
+            except BackendError as e:
+                state_lock.again(e)
+                continue
         if r > 0:
             self.srv.settimeout(0.1)
         else:

@@ -104,7 +114,6 @@ class SessionController:
             logg.error('invalid input "{}"'.format(data_in_str))
             raise ClientInputError()

-        logg.info('recv {} bytes'.format(len(data)))
         return (srvs, data,)

@@ -117,3 +126,4 @@ class SessionController:
             logg.debug('{} bytes sent'.format(len(v)))
         except BrokenPipeError:
             logg.debug('they just hung up. how rude.')
+        srvs.close()

chaind/settings.py Normal file (+126)

@@ -0,0 +1,126 @@
+# standard imports
+import logging
+import os
+import uuid
+
+# external imports
+from chainlib.settings import ChainSettings
+from chainqueue.settings import *
+
+logg = logging.getLogger(__name__)
+
+
+class ChaindSettings(ChainSettings):
+
+    def __init__(settings, include_sync=False, include_queue=False):
+        super(ChaindSettings, settings).__init__()
+        settings.include_sync = include_sync
+        settings.include_queue = include_queue
+
+
+    def dir_for(self, k):
+        return os.path.join(self.o['SESSION_PATH'], k)
+
+
+def process_session(settings, config):
+    session_id = config.get('SESSION_ID')
+    base_dir = os.getcwd()
+    data_dir = config.get('SESSION_DATA_PATH')
+    if data_dir == None:
+        data_dir = os.path.join(base_dir, '.chaind', 'chaind', settings.get('CHAIND_BACKEND'))
+    data_engine_dir = os.path.join(data_dir, config.get('CHAIND_ENGINE'))
+    os.makedirs(data_engine_dir, exist_ok=True)
+
+    # check if existing session
+    if session_id == None:
+        fp = os.path.join(data_engine_dir, 'default')
+        try:
+            os.stat(fp)
+            fp = os.path.realpath(fp)
+        except FileNotFoundError:
+            fp = None
+        if fp != None:
+            session_id = os.path.basename(fp)
+
+    make_default = False
+    if session_id == None:
+        session_id = str(uuid.uuid4())
+        make_default = True
+
+    chain_spec = settings.get('CHAIN_SPEC')
+    network_id_str = str(chain_spec.network_id())
+
+    # create the session persistent dir
+    session_path = os.path.join(
+        data_engine_dir,
+        chain_spec.arch(),
+        chain_spec.fork(),
+        network_id_str,
+        session_id,
+        )
+    if make_default:
+        fp = os.path.join(data_engine_dir, 'default')
+        os.symlink(session_path, fp)
+
+    data_path = session_path
+    os.makedirs(data_path, exist_ok=True)
+
+    # create volatile dir
+    uid = os.getuid()
+    runtime_path = config.get('SESSION_RUNTIME_PATH')
+    if runtime_path == None:
+        runtime_path = os.path.join('/run', 'user', str(uid), 'chaind', settings.get('CHAIND_BACKEND'))
+    runtime_path = os.path.join(
+        runtime_path,
+        config.get('CHAIND_ENGINE'),
+        chain_spec.arch(),
+        chain_spec.fork(),
+        str(chain_spec.network_id()),
+        session_id,
+        )
+    os.makedirs(runtime_path, exist_ok=True)
+
+    settings.set('SESSION_RUNTIME_PATH', runtime_path)
+    settings.set('SESSION_PATH', session_path)
+    settings.set('SESSION_DATA_PATH', data_path)
+    settings.set('SESSION_ID', session_id)
+
+    return settings
+
+
+def process_socket(settings, config):
+    socket_path = config.get('SESSION_SOCKET_PATH')
+    if socket_path == None:
+        socket_path = os.path.join(settings.get('SESSION_RUNTIME_PATH'), 'chaind.sock')
+    settings.set('SESSION_SOCKET_PATH', socket_path)
+    return settings
+
+
+def process_dispatch(settings, config):
+    settings.set('SESSION_DISPATCH_DELAY', 0.01)
+    return settings
+
+
+def process_token(settings, config):
+    settings.set('TOKEN_MODULE', config.get('TOKEN_MODULE'))
+    return settings
+
+
+def process_backend(settings, config):
+    settings.set('CHAIND_BACKEND', config.get('STATE_BACKEND'))
+    return settings
+
+
+def process_queue(settings, config):
+    if config.get('STATE_PATH') == None:
+        queue_state_dir = settings.dir_for('queue')
+        config.add(queue_state_dir, 'STATE_PATH', False)
+        logg.debug('setting queue state path {}'.format(queue_state_dir))
+
+    settings = process_queue_tx(settings, config)
+    settings = process_queue_paths(settings, config)
+    if config.get('STATE_BACKEND') == 'fs':
+        settings = process_queue_backend_fs(settings, config)
+    settings = process_queue_store(settings, config)
+
+    return settings

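For orientation, the directory layout produced by process_session() above, sketched with placeholder values ('fs' backend, 'eth' engine, and the foo:bar:42 chain spec used by the unit tests):

import os
import uuid

backend = 'fs'                               # settings CHAIND_BACKEND (assumed)
engine = 'eth'                               # config CHAIND_ENGINE (assumed)
arch, fork, network_id = 'foo', 'bar', '42'  # from ChainSpec('foo', 'bar', 42, 'baz')
session_id = str(uuid.uuid4())

data_engine_dir = os.path.join(os.getcwd(), '.chaind', 'chaind', backend, engine)
session_path = os.path.join(data_engine_dir, arch, fork, network_id, session_id)
print(session_path)
# .../.chaind/chaind/fs/eth/foo/bar/42/<uuid>; on first run a 'default'
# symlink in data_engine_dir is pointed at the newly created session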

@@ -1,5 +1,4 @@
 # standard imports
-import unittest
 import hashlib
 import tempfile

@@ -8,9 +7,11 @@ from chainqueue.cache import CacheTokenTx
 from chainlib.status import Status as TxStatus
 from chainlib.chain import ChainSpec
 from chainlib.error import RPCException
+from chainlib.tx import (
+    Tx,
+    TxResult,
+)
+from chainlib.block import Block

-# local imports
-from chaind.adapters.fs import ChaindFsAdapter

 class MockCacheAdapter(CacheTokenTx):

@@ -33,22 +34,22 @@ class MockDispatcher:

     def send(self, v):
-        if v not in self.fails:
+        if v in self.fails:
             raise RPCException('{} is in fails'.format(v))
         pass


-class MockTx:
+class MockTx(Tx):

     def __init__(self, tx_hash, status=TxStatus.SUCCESS):
-        self.hash = tx_hash
-        self.status = status
+        result = TxResult()
+        result.status = status
+        super(MockTx, self).__init__(result=result)
+        self.set_hash(tx_hash)


-class TestChaindFsBase(unittest.TestCase):
-
-    def setUp(self):
-        self.chain_spec = ChainSpec('foo', 'bar', 42, 'baz')
-        self.path = tempfile.mkdtemp()
-        self.adapter = ChaindFsAdapter(self.chain_spec, self.path, self.cache_adapter, self.dispatcher)
+class MockBlock(Block):
+
+    def __init__(self, number):
+        super(MockBlock, self).__init__()
+        self.number = number

chaind/unittest/fs.py Normal file (+31)

@@ -0,0 +1,31 @@
+# standard imports
+import unittest
+import tempfile
+import logging
+
+# external imports
+from chainlib.chain import ChainSpec
+
+# local imports
+from chaind.adapters.fs import ChaindFsAdapter
+
+logging.STATETRACE = 5
+logg = logging.getLogger(__name__)
+logg.setLevel(logging.STATETRACE)
+
+
+class TestChaindFsBase(unittest.TestCase):
+
+    def setUp(self):
+        self.chain_spec = ChainSpec('foo', 'bar', 42, 'baz')
+        self.path = tempfile.mkdtemp()
+        self.adapter = ChaindFsAdapter(self.chain_spec, self.path, self.cache_adapter, self.dispatcher, event_callback=self.log_state)
+
+
+    def log_state(self, k, from_state, to_state):
+        logg.log(logging.STATETRACE, 'state change {}: {} -> {}'.format(
+            k,
+            from_state,
+            to_state,
+            )
+        )


@@ -1,6 +1,6 @@
-chainlib~=0.1.0
-chainqueue~=0.1.1
-chainsyncer~=0.3.5
-confini~=0.6.0
+chainlib~=0.3.0
+chainqueue~=0.2.0
+chainsyncer~=0.5.0
+confini~=0.6.1
 funga~=0.5.2
 pyxdg~=0.26


@@ -1,80 +0,0 @@
-#!/usr/bin/python
-import os
-import argparse
-import logging
-
-# external imports
-import alembic
-from alembic.config import Config as AlembicConfig
-import confini
-import chainqueue.db
-import chainsyncer.db
-
-# local imports
-from chaind import Environment
-
-logging.basicConfig(level=logging.WARNING)
-logg = logging.getLogger()
-
-rootdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-dbdir = os.path.join(rootdir, 'chaind', 'db')
-configdir = os.path.join(rootdir, 'chaind', 'data', 'config')
-default_migrations_dir = os.path.join(dbdir, 'migrations')
-
-env = Environment(env=os.environ)
-
-argparser = argparse.ArgumentParser()
-argparser.add_argument('-c', type=str, default=env.config_dir, help='config directory')
-argparser.add_argument('--env-prefix', default=os.environ.get('CONFINI_ENV_PREFIX'), dest='env_prefix', type=str, help='environment prefix for variables to overwrite configuration')
-argparser.add_argument('--data-dir', dest='data_dir', type=str, help='data directory')
-argparser.add_argument('--migrations-dir', dest='migrations_dir', default=default_migrations_dir, type=str, help='path to alembic migrations directory')
-argparser.add_argument('--reset', action='store_true', help='reset exsting database')
-argparser.add_argument('-v', action='store_true', help='be verbose')
-argparser.add_argument('-vv', action='store_true', help='be more verbose')
-args = argparser.parse_args()
-
-if args.vv:
-    logging.getLogger().setLevel(logging.DEBUG)
-elif args.v:
-    logging.getLogger().setLevel(logging.INFO)
-
-# process config
-logg.debug('loading config from {}'.format(args.c))
-config = confini.Config(configdir, args.env_prefix, override_dirs=[args.c])
-config.process()
-args_override = {
-    'SESSION_DATA_DIR': getattr(args, 'data_dir'),
-    }
-config.dict_override(args_override, 'cli args')
-
-if config.get('DATABASE_ENGINE') == 'sqlite':
-    config.add(os.path.join(config.get('SESSION_DATA_DIR'), config.get('DATABASE_NAME') + '.sqlite'), 'DATABASE_NAME', True)
-
-config.censor('PASSWORD', 'DATABASE')
-logg.debug('config loaded:\n{}'.format(config))
-
-config.add(os.path.join(args.migrations_dir, config.get('DATABASE_ENGINE')), '_MIGRATIONS_DIR', True)
-if not os.path.isdir(config.get('_MIGRATIONS_DIR')):
-    logg.debug('migrations dir for engine {} not found, reverting to default'.format(config.get('DATABASE_ENGINE')))
-    config.add(os.path.join(args.migrations_dir, 'default'), '_MIGRATIONS_DIR', True)
-
-os.makedirs(config.get('SESSION_DATA_DIR'), exist_ok=True)
-
-dsn = chainqueue.db.dsn_from_config(config)
-
-def main():
-    logg.info('using migrations dir {}'.format(config.get('_MIGRATIONS_DIR')))
-    logg.info('using db {}'.format(dsn))
-    ac = AlembicConfig(os.path.join(config.get('_MIGRATIONS_DIR'), 'alembic.ini'))
-    ac.set_main_option('sqlalchemy.url', dsn)
-    ac.set_main_option('script_location', config.get('_MIGRATIONS_DIR'))
-
-    if args.reset:
-        logg.debug('reset is set, purging existing content')
-        alembic.command.downgrade(ac, 'base')
-
-    alembic.command.upgrade(ac, 'head')
-
-if __name__ == '__main__':
-    main()


@@ -1,7 +1,7 @@
 [metadata]
 name = chaind
-version = 0.1.0
+version = 0.3.0
-description = Base package for chain queue servicek
+description = Base package for chain queue service
 author = Louis Holbrook
 author_email = dev@holbrook.no
 url = https://gitlab.com/chaintool/chaind

@@ -23,14 +23,15 @@ licence_files =
 	LICENSE

 [options]
-python_requires = >= 3.6
+python_requires = >= 3.7
 include_package_data = True
 packages =
 	chaind
-#	chaind.sql
 #	chaind.runnable
 	chaind.adapters
 	chaind.unittest
+	chaind.data
+	chaind.cli

 #[options.entry_points]
 #console_scripts =


@@ -14,16 +14,16 @@ from chaind.filter import StateFilter

 # test imports
 from chaind.unittest.common import (
     MockTx,
+    MockBlock,
     MockCacheAdapter,
-    TestChaindFsBase,
+    MockDispatcher,
     )
+from chaind.unittest.fs import TestChaindFsBase

 logging.basicConfig(level=logging.DEBUG)
 logg = logging.getLogger()


 class TestChaindFs(TestChaindFsBase):

     def setUp(self):

@@ -43,12 +43,15 @@ class TestChaindFs(TestChaindFsBase):
         self.assertEqual(data, v)

-    def test_fs_defer(self):
+    def test_fs_fail(self):
         data = os.urandom(128).hex()
         hsh = self.adapter.put(data)
-        self.dispatcher.add_fail(hsh)
-        self.adapter.dispatch(hsh)
-        txs = self.adapter.deferred()
+        self.dispatcher.add_fail(data)
+        r = self.adapter.dispatch(hsh)
+        self.assertFalse(r)
+
+        txs = self.adapter.failed()
         self.assertEqual(len(txs), 1)

@@ -72,9 +75,10 @@ class TestChaindFs(TestChaindFsBase):
         data = os.urandom(128).hex()
         hsh = self.adapter.put(data)

-        fltr = StateFilter(self.adapter)
+        fltr = StateFilter(self.chain_spec, self.path, MockCacheAdapter)
         tx = MockTx(hsh)
-        fltr.filter(None, None, tx)
+        block = MockBlock(42)
+        fltr.filter(None, block, tx)

     def test_fs_filter_fail(self):

@@ -83,9 +87,30 @@ class TestChaindFs(TestChaindFsBase):
         data = os.urandom(128).hex()
         hsh = self.adapter.put(data)

-        fltr = StateFilter(self.adapter)
+        fltr = StateFilter(self.chain_spec, self.path, MockCacheAdapter)
         tx = MockTx(hsh, TxStatus.ERROR)
-        fltr.filter(None, None, tx)
+        block = MockBlock(42)
+        fltr.filter(None, block, tx)

+    def test_upcoming(self):
+        drv = QueueDriver(self.adapter)
+
+        txs = []
+        for i in range(10):
+            data = os.urandom(128).hex()
+            hsh = self.adapter.put(data)
+            txs.append(hsh)
+            self.adapter.enqueue(hsh)
+
+        r = self.adapter.upcoming(limit=5)
+        self.assertEqual(len(r), 5)
+
+        r = self.adapter.dispatch(txs[0])
+        self.assertTrue(r)
+
+        r = self.adapter.upcoming(limit=5)
+        self.assertEqual(len(r), 4)


 if __name__ == '__main__':