Compare commits

29 Commits

lash/api-c ... revert-289
| Author | SHA1 | Date |
|---|---|---|
| | 6e42e34528 | |
| | 28936a58fe | |
| | 4b8096ff49 | |
| | c9eb3e32da | |
| | 1be5a92f44 | |
| | 8855ccd3d2 | |
| | 09dfdbb38a | |
| | 1abb642361 | |
| | 93bcbd7d51 | |
| | 818899670a | |
| | 1882910a8e | |
| | 3cc909c936 | |
| | 60b6e1abdb | |
| | 9c7e72f71c | |
| | e3acc1757a | |
| | 8250b15d32 | |
| | 31d7cf5789 | |
| | 2544c159c2 | |
| | 7691d9a127 | |
| | a2a3634683 | |
| | fe0835a4e7 | |
| | d8f51c5bdd | |
| | 13fb67d2d8 | |
| | 8f1afa094d | |
| | 1d9f134125 | |
| | b6a4bab1c8 | |
| | 805fc56c7b | |
| | e3d39a2144 | |
| | 90176f2806 | |
@@ -10,6 +10,7 @@ include:
 #- local: 'apps/data-seeding/.gitlab-ci.yml'
 
 stages:
+  - version
   - build
   - test
   - deploy
@@ -20,9 +21,39 @@ variables:
   DOCKER_BUILDKIT: "1"
   COMPOSE_DOCKER_CLI_BUILD: "1"
   CI_DEBUG_TRACE: "true"
+  SEMVERBOT_VERSION: "0.2.0"
 
-before_script:
-  - docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY
+#before_script:
+# - docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY
+
+version:
+  #image: python:3.7-stretch
+  image: registry.gitlab.com/grassrootseconomics/cic-base-images/ci-version:b01318ae
+  stage: version
+  script:
+    - mkdir -p ~/.ssh && chmod 700 ~/.ssh
+    - ssh-keyscan gitlab.com >> ~/.ssh/known_hosts && chmod 644 ~/.ssh/known_hosts
+    - eval $(ssh-agent -s)
+    - ssh-add <(echo "$SSH_PRIVATE_KEY")
+    - git remote set-url origin git@gitlab.com:grassrootseconomics/cic-internal-integration.git
+    - export TAG=$(sbot predict version -m auto)
+    - |
+      if [[ -z $TAG ]]
+      then
+        echo "tag could not be set $@"
+        exit 1
+      fi
+    - echo $TAG > version
+    - git tag -a v$TAG -m "ci tagged"
+    - git push origin v$TAG
+  artifacts:
+    paths:
+      - version
+  rules:
+    - if: $CI_COMMIT_REF_PROTECTED == "true"
+      when: always
+    - if: $CI_COMMIT_REF_NAME == "master"
+      when: always
 
 # runs on protected branches and pushes to repo
 build-push:
@@ -30,12 +61,17 @@ build-push:
   tags:
     - integration
   #script:
   # - TAG=$CI_COMMIT_REF_SLUG-$CI_COMMIT_SHORT_SHA sh ./scripts/build-push.sh
+  before_script:
+    - docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY
   script:
-    - TAG=latest sh ./scripts/build-push.sh
+    - TAG=latest ./scripts/build-push.sh
+    - TAG=$(cat ./version) ./scripts/build-push.sh
   rules:
     - if: $CI_COMMIT_REF_PROTECTED == "true"
       when: always
+    - if: $CI_COMMIT_REF_NAME == "master"
+      when: always
 
 deploy-dev:
   stage: deploy
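The new `version` job writes the predicted tag into a `version` artifact, and `build-push` reads it back with `TAG=$(cat ./version)`. A minimal sketch of a `scripts/build-push.sh` that consumes the variable this way — the script itself is not part of this compare, so the registry path and app list below are assumptions:

```sh
#!/bin/sh
# Hypothetical sketch of scripts/build-push.sh; the real script is not shown in this diff.
# The CI job invokes it as: TAG=latest ./scripts/build-push.sh  and  TAG=$(cat ./version) ./scripts/build-push.sh
set -e

if [ -z "$TAG" ]; then
  echo "TAG must be set (e.g. TAG=\$(cat ./version))" >&2
  exit 1
fi

# Registry path and app list are assumptions for illustration only.
REGISTRY="${CI_REGISTRY_IMAGE:-registry.gitlab.com/grassrootseconomics/cic-internal-integration}"

for app in cic-cache cic-eth; do
  docker build -t "$REGISTRY/$app:$TAG" "apps/$app"
  docker push "$REGISTRY/$app:$TAG"
done
```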
16  .semverbot.toml  Normal file

@@ -0,0 +1,16 @@
[git]

[git.config]
email = "semverbot@grassroots.org"
name = "semvervot"

[git.tags]
prefix = "v"

[semver]
mode = "git-commit"

[semver.detection]
patch = ["fix", "[fix]", "patch", "[patch]"]
minor = ["minor", "[minor]", "feat", "[feat]", "release", "[release]", "bump", "[bump]"]
major = ["BREAKING CHANGE"]
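With these detection keywords, `sbot predict version -m auto` (as called by the new `version` CI job) derives the next bump from the latest commit message and the last `v`-prefixed tag. A rough, assumed illustration of the expected behaviour:

```sh
# Assuming the most recent tag is v0.3.1 (tag value and exact output are assumptions):
git commit --allow-empty -m "fix: correct docker login in CI"   # matches a patch keyword
sbot predict version -m auto                                    # expected: 0.3.2

git commit --allow-empty -m "feat: add semver tagging job"      # matches a minor keyword
sbot predict version -m auto                                    # expected: 0.4.0
```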
661  LICENSE  Normal file

@@ -0,0 +1,661 @@
                    GNU AFFERO GENERAL PUBLIC LICENSE
                       Version 3, 19 November 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

 [Unmodified AGPL-3.0 text: Preamble, Terms and Conditions sections 0-17, and "How to Apply These Terms to Your New Programs", followed by the project notice below.]

    cic-internal-integration
    Copyright (C) 2021 Grassroots Economics

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU Affero General Public License as published
    by the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU Affero General Public License for more details.

    You should have received a copy of the GNU Affero General Public License
    along with this program.  If not, see <https://www.gnu.org/licenses/>.
@@ -4,6 +4,7 @@
 
 This repo uses docker-compose and docker buildkit. Set the following environment variables to get started:
 
 ```
 export COMPOSE_DOCKER_CLI_BUILD=1
 export DOCKER_BUILDKIT=1
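With those variables exported, subsequent image builds go through BuildKit. A short example session (the service set depends on the repo's docker-compose.yml and is assumed here):

```sh
export COMPOSE_DOCKER_CLI_BUILD=1
export DOCKER_BUILDKIT=1
docker-compose build      # build all services with BuildKit
docker-compose up -d      # assumed typical next step: bring the stack up in the background
```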
3  apps/cic-base-os/aux/wait-for-it/.gitignore  vendored  Normal file

@@ -0,0 +1,3 @@
**/*.pyc
.pydevproject
/vendor/
7  apps/cic-base-os/aux/wait-for-it/.travis.yml  Normal file

@@ -0,0 +1,7 @@
language: python
python:
    - "2.7"

script:
    - python test/wait-for-it.py
20  apps/cic-base-os/aux/wait-for-it/LICENSE  Normal file

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2016 Giles Hall

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
75  apps/cic-base-os/aux/wait-for-it/README.md  Normal file

@@ -0,0 +1,75 @@
# wait-for-it

`wait-for-it.sh` is a pure bash script that will wait on the availability of a
host and TCP port. It is useful for synchronizing the spin-up of
interdependent services, such as linked docker containers. Since it is a pure
bash script, it does not have any external dependencies.

## Usage

```text
wait-for-it.sh host:port [-s] [-t timeout] [-- command args]
-h HOST | --host=HOST       Host or IP under test
-p PORT | --port=PORT       TCP port under test
                            Alternatively, you specify the host and port as host:port
-s | --strict               Only execute subcommand if the test succeeds
-q | --quiet                Don't output any status messages
-t TIMEOUT | --timeout=TIMEOUT
                            Timeout in seconds, zero for no timeout
-- COMMAND ARGS             Execute command with args after the test finishes
```

## Examples

For example, let's test to see if we can access port 80 on `www.google.com`,
and if it is available, echo the message `google is up`.

```text
$ ./wait-for-it.sh www.google.com:80 -- echo "google is up"
wait-for-it.sh: waiting 15 seconds for www.google.com:80
wait-for-it.sh: www.google.com:80 is available after 0 seconds
google is up
```

You can set your own timeout with the `-t` or `--timeout=` option. Setting
the timeout value to 0 will disable the timeout:

```text
$ ./wait-for-it.sh -t 0 www.google.com:80 -- echo "google is up"
wait-for-it.sh: waiting for www.google.com:80 without a timeout
wait-for-it.sh: www.google.com:80 is available after 0 seconds
google is up
```

The subcommand will be executed regardless if the service is up or not. If you
wish to execute the subcommand only if the service is up, add the `--strict`
argument. In this example, we will test port 81 on `www.google.com` which will
fail:

```text
$ ./wait-for-it.sh www.google.com:81 --timeout=1 --strict -- echo "google is up"
wait-for-it.sh: waiting 1 seconds for www.google.com:81
wait-for-it.sh: timeout occurred after waiting 1 seconds for www.google.com:81
wait-for-it.sh: strict mode, refusing to execute subprocess
```

If you don't want to execute a subcommand, leave off the `--` argument. This
way, you can test the exit condition of `wait-for-it.sh` in your own scripts,
and determine how to proceed:

```text
$ ./wait-for-it.sh www.google.com:80
wait-for-it.sh: waiting 15 seconds for www.google.com:80
wait-for-it.sh: www.google.com:80 is available after 0 seconds
$ echo $?
0
$ ./wait-for-it.sh www.google.com:81
wait-for-it.sh: waiting 15 seconds for www.google.com:81
wait-for-it.sh: timeout occurred after waiting 15 seconds for www.google.com:81
$ echo $?
124
```

## Community

*Debian*: There is a [Debian package](https://tracker.debian.org/pkg/wait-for-it).
182  apps/cic-base-os/aux/wait-for-it/wait-for-it.sh  Executable file

@@ -0,0 +1,182 @@
#!/usr/bin/env bash
# Use this script to test if a given TCP host/port are available

WAITFORIT_cmdname=${0##*/}

echoerr() { if [[ $WAITFORIT_QUIET -ne 1 ]]; then echo "$@" 1>&2; fi }

usage()
{
    cat << USAGE >&2
Usage:
    $WAITFORIT_cmdname host:port [-s] [-t timeout] [-- command args]
    -h HOST | --host=HOST       Host or IP under test
    -p PORT | --port=PORT       TCP port under test
                                Alternatively, you specify the host and port as host:port
    -s | --strict               Only execute subcommand if the test succeeds
    -q | --quiet                Don't output any status messages
    -t TIMEOUT | --timeout=TIMEOUT
                                Timeout in seconds, zero for no timeout
    -- COMMAND ARGS             Execute command with args after the test finishes
USAGE
    exit 1
}

wait_for()
{
    if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then
        echoerr "$WAITFORIT_cmdname: waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT"
    else
        echoerr "$WAITFORIT_cmdname: waiting for $WAITFORIT_HOST:$WAITFORIT_PORT without a timeout"
    fi
    WAITFORIT_start_ts=$(date +%s)
    while :
    do
        if [[ $WAITFORIT_ISBUSY -eq 1 ]]; then
            nc -z $WAITFORIT_HOST $WAITFORIT_PORT
            WAITFORIT_result=$?
        else
            (echo -n > /dev/tcp/$WAITFORIT_HOST/$WAITFORIT_PORT) >/dev/null 2>&1
            WAITFORIT_result=$?
        fi
        if [[ $WAITFORIT_result -eq 0 ]]; then
            WAITFORIT_end_ts=$(date +%s)
            echoerr "$WAITFORIT_cmdname: $WAITFORIT_HOST:$WAITFORIT_PORT is available after $((WAITFORIT_end_ts - WAITFORIT_start_ts)) seconds"
            break
        fi
        sleep 1
    done
    return $WAITFORIT_result
}

wait_for_wrapper()
{
    # In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692
    if [[ $WAITFORIT_QUIET -eq 1 ]]; then
        timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --quiet --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT &
    else
        timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT &
    fi
    WAITFORIT_PID=$!
    trap "kill -INT -$WAITFORIT_PID" INT
    wait $WAITFORIT_PID
    WAITFORIT_RESULT=$?
    if [[ $WAITFORIT_RESULT -ne 0 ]]; then
        echoerr "$WAITFORIT_cmdname: timeout occurred after waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT"
    fi
    return $WAITFORIT_RESULT
}

# process arguments
while [[ $# -gt 0 ]]
do
    case "$1" in
        *:* )
        WAITFORIT_hostport=(${1//:/ })
        WAITFORIT_HOST=${WAITFORIT_hostport[0]}
        WAITFORIT_PORT=${WAITFORIT_hostport[1]}
        shift 1
        ;;
        --child)
        WAITFORIT_CHILD=1
        shift 1
        ;;
        -q | --quiet)
        WAITFORIT_QUIET=1
        shift 1
        ;;
        -s | --strict)
        WAITFORIT_STRICT=1
        shift 1
        ;;
        -h)
        WAITFORIT_HOST="$2"
        if [[ $WAITFORIT_HOST == "" ]]; then break; fi
        shift 2
        ;;
        --host=*)
        WAITFORIT_HOST="${1#*=}"
        shift 1
        ;;
        -p)
        WAITFORIT_PORT="$2"
        if [[ $WAITFORIT_PORT == "" ]]; then break; fi
        shift 2
        ;;
        --port=*)
        WAITFORIT_PORT="${1#*=}"
        shift 1
        ;;
        -t)
        WAITFORIT_TIMEOUT="$2"
        if [[ $WAITFORIT_TIMEOUT == "" ]]; then break; fi
        shift 2
        ;;
        --timeout=*)
        WAITFORIT_TIMEOUT="${1#*=}"
        shift 1
        ;;
        --)
        shift
        WAITFORIT_CLI=("$@")
        break
        ;;
        --help)
        usage
        ;;
        *)
        echoerr "Unknown argument: $1"
        usage
        ;;
    esac
done

if [[ "$WAITFORIT_HOST" == "" || "$WAITFORIT_PORT" == "" ]]; then
    echoerr "Error: you need to provide a host and port to test."
    usage
fi

WAITFORIT_TIMEOUT=${WAITFORIT_TIMEOUT:-15}
WAITFORIT_STRICT=${WAITFORIT_STRICT:-0}
WAITFORIT_CHILD=${WAITFORIT_CHILD:-0}
WAITFORIT_QUIET=${WAITFORIT_QUIET:-0}

# Check to see if timeout is from busybox?
WAITFORIT_TIMEOUT_PATH=$(type -p timeout)
WAITFORIT_TIMEOUT_PATH=$(realpath $WAITFORIT_TIMEOUT_PATH 2>/dev/null || readlink -f $WAITFORIT_TIMEOUT_PATH)

WAITFORIT_BUSYTIMEFLAG=""
if [[ $WAITFORIT_TIMEOUT_PATH =~ "busybox" ]]; then
    WAITFORIT_ISBUSY=1
    # Check if busybox timeout uses -t flag
    # (recent Alpine versions don't support -t anymore)
    if timeout &>/dev/stdout | grep -q -e '-t '; then
        WAITFORIT_BUSYTIMEFLAG="-t"
    fi
else
    WAITFORIT_ISBUSY=0
fi

if [[ $WAITFORIT_CHILD -gt 0 ]]; then
    wait_for
    WAITFORIT_RESULT=$?
    exit $WAITFORIT_RESULT
else
    if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then
        wait_for_wrapper
        WAITFORIT_RESULT=$?
    else
        wait_for
        WAITFORIT_RESULT=$?
    fi
fi

if [[ $WAITFORIT_CLI != "" ]]; then
    if [[ $WAITFORIT_RESULT -ne 0 && $WAITFORIT_STRICT -eq 1 ]]; then
        echoerr "$WAITFORIT_cmdname: strict mode, refusing to execute subprocess"
        exit $WAITFORIT_RESULT
    fi
    exec "${WAITFORIT_CLI[@]}"
else
    exit $WAITFORIT_RESULT
fi
apps/cic-cache/aux/wait-for-it/.gitignore (new file, vendored, 3 lines)
@@ -0,0 +1,3 @@
**/*.pyc
.pydevproject
/vendor/
apps/cic-cache/aux/wait-for-it/.travis.yml (new file, 7 lines)
@@ -0,0 +1,7 @@
language: python
python:
    - "2.7"

script:
    - python test/wait-for-it.py
apps/cic-cache/aux/wait-for-it/LICENSE (new file, 20 lines)
@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2016 Giles Hall

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
apps/cic-cache/aux/wait-for-it/README.md (new file, 75 lines)
@@ -0,0 +1,75 @@
# wait-for-it

`wait-for-it.sh` is a pure bash script that will wait on the availability of a
host and TCP port. It is useful for synchronizing the spin-up of
interdependent services, such as linked docker containers. Since it is a pure
bash script, it does not have any external dependencies.

## Usage

```text
wait-for-it.sh host:port [-s] [-t timeout] [-- command args]
-h HOST | --host=HOST       Host or IP under test
-p PORT | --port=PORT       TCP port under test
                            Alternatively, you specify the host and port as host:port
-s | --strict               Only execute subcommand if the test succeeds
-q | --quiet                Don't output any status messages
-t TIMEOUT | --timeout=TIMEOUT
                            Timeout in seconds, zero for no timeout
-- COMMAND ARGS             Execute command with args after the test finishes
```

## Examples

For example, let's test to see if we can access port 80 on `www.google.com`,
and if it is available, echo the message `google is up`.

```text
$ ./wait-for-it.sh www.google.com:80 -- echo "google is up"
wait-for-it.sh: waiting 15 seconds for www.google.com:80
wait-for-it.sh: www.google.com:80 is available after 0 seconds
google is up
```

You can set your own timeout with the `-t` or `--timeout=` option. Setting
the timeout value to 0 will disable the timeout:

```text
$ ./wait-for-it.sh -t 0 www.google.com:80 -- echo "google is up"
wait-for-it.sh: waiting for www.google.com:80 without a timeout
wait-for-it.sh: www.google.com:80 is available after 0 seconds
google is up
```

The subcommand will be executed regardless if the service is up or not. If you
wish to execute the subcommand only if the service is up, add the `--strict`
argument. In this example, we will test port 81 on `www.google.com` which will
fail:

```text
$ ./wait-for-it.sh www.google.com:81 --timeout=1 --strict -- echo "google is up"
wait-for-it.sh: waiting 1 seconds for www.google.com:81
wait-for-it.sh: timeout occurred after waiting 1 seconds for www.google.com:81
wait-for-it.sh: strict mode, refusing to execute subprocess
```

If you don't want to execute a subcommand, leave off the `--` argument. This
way, you can test the exit condition of `wait-for-it.sh` in your own scripts,
and determine how to proceed:

```text
$ ./wait-for-it.sh www.google.com:80
wait-for-it.sh: waiting 15 seconds for www.google.com:80
wait-for-it.sh: www.google.com:80 is available after 0 seconds
$ echo $?
0
$ ./wait-for-it.sh www.google.com:81
wait-for-it.sh: waiting 15 seconds for www.google.com:81
wait-for-it.sh: timeout occurred after waiting 15 seconds for www.google.com:81
$ echo $?
124
```

## Community

*Debian*: There is a [Debian package](https://tracker.debian.org/pkg/wait-for-it).
apps/cic-cache/aux/wait-for-it/wait-for-it.sh (new executable file, 182 lines)
@@ -0,0 +1,182 @@
#!/usr/bin/env bash
# Use this script to test if a given TCP host/port are available

WAITFORIT_cmdname=${0##*/}

echoerr() { if [[ $WAITFORIT_QUIET -ne 1 ]]; then echo "$@" 1>&2; fi }

usage()
{
    cat << USAGE >&2
Usage:
    $WAITFORIT_cmdname host:port [-s] [-t timeout] [-- command args]
    -h HOST | --host=HOST       Host or IP under test
    -p PORT | --port=PORT       TCP port under test
                                Alternatively, you specify the host and port as host:port
    -s | --strict               Only execute subcommand if the test succeeds
    -q | --quiet                Don't output any status messages
    -t TIMEOUT | --timeout=TIMEOUT
                                Timeout in seconds, zero for no timeout
    -- COMMAND ARGS             Execute command with args after the test finishes
USAGE
    exit 1
}

wait_for()
{
    if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then
        echoerr "$WAITFORIT_cmdname: waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT"
    else
        echoerr "$WAITFORIT_cmdname: waiting for $WAITFORIT_HOST:$WAITFORIT_PORT without a timeout"
    fi
    WAITFORIT_start_ts=$(date +%s)
    while :
    do
        if [[ $WAITFORIT_ISBUSY -eq 1 ]]; then
            nc -z $WAITFORIT_HOST $WAITFORIT_PORT
            WAITFORIT_result=$?
        else
            (echo -n > /dev/tcp/$WAITFORIT_HOST/$WAITFORIT_PORT) >/dev/null 2>&1
            WAITFORIT_result=$?
        fi
        if [[ $WAITFORIT_result -eq 0 ]]; then
            WAITFORIT_end_ts=$(date +%s)
            echoerr "$WAITFORIT_cmdname: $WAITFORIT_HOST:$WAITFORIT_PORT is available after $((WAITFORIT_end_ts - WAITFORIT_start_ts)) seconds"
            break
        fi
        sleep 1
    done
    return $WAITFORIT_result
}

wait_for_wrapper()
{
    # In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692
    if [[ $WAITFORIT_QUIET -eq 1 ]]; then
        timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --quiet --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT &
    else
        timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT &
    fi
    WAITFORIT_PID=$!
    trap "kill -INT -$WAITFORIT_PID" INT
    wait $WAITFORIT_PID
    WAITFORIT_RESULT=$?
    if [[ $WAITFORIT_RESULT -ne 0 ]]; then
        echoerr "$WAITFORIT_cmdname: timeout occurred after waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT"
    fi
    return $WAITFORIT_RESULT
}

# process arguments
while [[ $# -gt 0 ]]
do
    case "$1" in
        *:* )
        WAITFORIT_hostport=(${1//:/ })
        WAITFORIT_HOST=${WAITFORIT_hostport[0]}
        WAITFORIT_PORT=${WAITFORIT_hostport[1]}
        shift 1
        ;;
        --child)
        WAITFORIT_CHILD=1
        shift 1
        ;;
        -q | --quiet)
        WAITFORIT_QUIET=1
        shift 1
        ;;
        -s | --strict)
        WAITFORIT_STRICT=1
        shift 1
        ;;
        -h)
        WAITFORIT_HOST="$2"
        if [[ $WAITFORIT_HOST == "" ]]; then break; fi
        shift 2
        ;;
        --host=*)
        WAITFORIT_HOST="${1#*=}"
        shift 1
        ;;
        -p)
        WAITFORIT_PORT="$2"
        if [[ $WAITFORIT_PORT == "" ]]; then break; fi
        shift 2
        ;;
        --port=*)
        WAITFORIT_PORT="${1#*=}"
        shift 1
        ;;
        -t)
        WAITFORIT_TIMEOUT="$2"
        if [[ $WAITFORIT_TIMEOUT == "" ]]; then break; fi
        shift 2
        ;;
        --timeout=*)
        WAITFORIT_TIMEOUT="${1#*=}"
        shift 1
        ;;
        --)
        shift
        WAITFORIT_CLI=("$@")
        break
        ;;
        --help)
        usage
        ;;
        *)
        echoerr "Unknown argument: $1"
        usage
        ;;
    esac
done

if [[ "$WAITFORIT_HOST" == "" || "$WAITFORIT_PORT" == "" ]]; then
    echoerr "Error: you need to provide a host and port to test."
    usage
fi

WAITFORIT_TIMEOUT=${WAITFORIT_TIMEOUT:-15}
WAITFORIT_STRICT=${WAITFORIT_STRICT:-0}
WAITFORIT_CHILD=${WAITFORIT_CHILD:-0}
WAITFORIT_QUIET=${WAITFORIT_QUIET:-0}

# Check to see if timeout is from busybox?
WAITFORIT_TIMEOUT_PATH=$(type -p timeout)
WAITFORIT_TIMEOUT_PATH=$(realpath $WAITFORIT_TIMEOUT_PATH 2>/dev/null || readlink -f $WAITFORIT_TIMEOUT_PATH)

WAITFORIT_BUSYTIMEFLAG=""
if [[ $WAITFORIT_TIMEOUT_PATH =~ "busybox" ]]; then
    WAITFORIT_ISBUSY=1
    # Check if busybox timeout uses -t flag
    # (recent Alpine versions don't support -t anymore)
    if timeout &>/dev/stdout | grep -q -e '-t '; then
        WAITFORIT_BUSYTIMEFLAG="-t"
    fi
else
    WAITFORIT_ISBUSY=0
fi

if [[ $WAITFORIT_CHILD -gt 0 ]]; then
    wait_for
    WAITFORIT_RESULT=$?
    exit $WAITFORIT_RESULT
else
    if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then
        wait_for_wrapper
        WAITFORIT_RESULT=$?
    else
        wait_for
        WAITFORIT_RESULT=$?
    fi
fi

if [[ $WAITFORIT_CLI != "" ]]; then
    if [[ $WAITFORIT_RESULT -ne 0 && $WAITFORIT_STRICT -eq 1 ]]; then
        echoerr "$WAITFORIT_cmdname: strict mode, refusing to execute subprocess"
        exit $WAITFORIT_RESULT
    fi
    exec "${WAITFORIT_CLI[@]}"
else
    exit $WAITFORIT_RESULT
fi
@@ -8,6 +8,7 @@ import base64
 import confini
 
 # local imports
+import cic_cache.cli
 from cic_cache.db import dsn_from_config
 from cic_cache.db.models.base import SessionBase
 from cic_cache.runnable.daemons.query import (
@@ -23,26 +24,17 @@ rootdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 dbdir = os.path.join(rootdir, 'cic_cache', 'db')
 migrationsdir = os.path.join(dbdir, 'migrations')
 
-config_dir = os.path.join('/usr/local/etc/cic-cache')
-argparser = argparse.ArgumentParser()
-argparser.add_argument('-c', type=str, default=config_dir, help='config file')
-argparser.add_argument('--env-prefix', default=os.environ.get('CONFINI_ENV_PREFIX'), dest='env_prefix', type=str, help='environment prefix for variables to overwrite configuration')
-argparser.add_argument('-v', action='store_true', help='be verbose')
-argparser.add_argument('-vv', action='store_true', help='be more verbose')
+# process args
+arg_flags = cic_cache.cli.argflag_std_base
+local_arg_flags = cic_cache.cli.argflag_local_task
+argparser = cic_cache.cli.ArgumentParser(arg_flags)
+argparser.process_local_flags(local_arg_flags)
 args = argparser.parse_args()
 
-if args.vv:
-    logging.getLogger().setLevel(logging.DEBUG)
-elif args.v:
-    logging.getLogger().setLevel(logging.INFO)
-
-config = confini.Config(args.c, args.env_prefix)
-config.process()
-config.censor('PASSWORD', 'DATABASE')
-config.censor('PASSWORD', 'SSL')
-logg.debug('config:\n{}'.format(config))
+# process config
+config = cic_cache.cli.Config.from_args(args, arg_flags, local_arg_flags)
 
 # connect to database
 dsn = dsn_from_config(config)
 SessionBase.connect(dsn, config.true('DATABASE_DEBUG'))
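The cic-cache daemons in this compare drop their hand-rolled argparse and confini wiring in favour of the shared `cic_cache.cli` helpers. A minimal sketch of the resulting bootstrap, assembled only from names that appear in the hunks above and below (the logging setup and everything else is an assumption, not a verbatim copy of either daemon):

```python
# Minimal sketch of the new cic-cache daemon bootstrap, assuming the
# cic_cache.cli names used in this compare; not verbatim project code.
import logging

import cic_cache.cli
from cic_cache.db import dsn_from_config
from cic_cache.db.models.base import SessionBase

logg = logging.getLogger()

# standard flags plus the task-daemon specific ones
arg_flags = cic_cache.cli.argflag_std_base
local_arg_flags = cic_cache.cli.argflag_local_task

argparser = cic_cache.cli.ArgumentParser(arg_flags)
argparser.process_local_flags(local_arg_flags)
args = argparser.parse_args()

# Config.from_args merges CLI arguments with packaged ini defaults and
# environment overrides, replacing the old manual confini.Config() calls.
config = cic_cache.cli.Config.from_args(args, arg_flags, local_arg_flags)

# database connection exactly as in the server daemon above
dsn = dsn_from_config(config)
SessionBase.connect(dsn, config.true('DATABASE_DEBUG'))
logg.debug('connected using config:\n{}'.format(config))
```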
@@ -9,6 +9,7 @@ import celery
 import confini
 
 # local imports
+import cic_cache.cli
 from cic_cache.db import dsn_from_config
 from cic_cache.db.models.base import SessionBase
 from cic_cache.tasks.tx import *
@@ -16,35 +17,20 @@ from cic_cache.tasks.tx import *
 logging.basicConfig(level=logging.WARNING)
 logg = logging.getLogger()
 
-config_dir = os.path.join('/usr/local/etc/cic-cache')
-argparser = argparse.ArgumentParser()
-argparser.add_argument('-c', type=str, default=config_dir, help='config file')
-argparser.add_argument('-q', type=str, default='cic-cache', help='queue name for worker tasks')
-argparser.add_argument('--env-prefix', default=os.environ.get('CONFINI_ENV_PREFIX'), dest='env_prefix', type=str, help='environment prefix for variables to overwrite configuration')
-argparser.add_argument('-v', action='store_true', help='be verbose')
-argparser.add_argument('-vv', action='store_true', help='be more verbose')
+# process args
+arg_flags = cic_cache.cli.argflag_std_base
+local_arg_flags = cic_cache.cli.argflag_local_task
+argparser = cic_cache.cli.ArgumentParser(arg_flags)
+argparser.process_local_flags(local_arg_flags)
 args = argparser.parse_args()
 
-if args.vv:
-    logging.getLogger().setLevel(logging.DEBUG)
-elif args.v:
-    logging.getLogger().setLevel(logging.INFO)
-
-config = confini.Config(args.c, args.env_prefix)
-config.process()
+# process config
+config = cic_cache.cli.Config.from_args(args, arg_flags, local_arg_flags)
 
 # connect to database
 dsn = dsn_from_config(config)
 SessionBase.connect(dsn)
 
-# verify database connection with minimal sanity query
-#session = SessionBase.create_session()
-#session.execute('select version_num from alembic_version')
-#session.close()
-
 # set up celery
 current_app = celery.Celery(__name__)
@@ -87,9 +73,9 @@ def main():
     elif args.v:
         argv.append('--loglevel=INFO')
     argv.append('-Q')
-    argv.append(args.q)
+    argv.append(config.get('CELERY_QUEUE'))
     argv.append('-n')
-    argv.append(args.q)
+    argv.append(config.get('CELERY_QUEUE'))
 
     current_app.worker_main(argv)
@@ -40,7 +40,7 @@ logging.basicConfig(level=logging.WARNING)
 logg = logging.getLogger()
 
 # process args
-arg_flags = cic_cache.cli.argflag_std_read
+arg_flags = cic_cache.cli.argflag_std_base
 local_arg_flags = cic_cache.cli.argflag_local_sync
 argparser = cic_cache.cli.ArgumentParser(arg_flags)
 argparser.process_local_flags(local_arg_flags)
@@ -1,19 +1,17 @@
-# syntax = docker/dockerfile:1.2
-FROM registry.gitlab.com/grassrootseconomics/cic-base-images:python-3.8.6-dev-55da5f4e as dev
+ARG DOCKER_REGISTRY="registry.gitlab.com/grassrootseconomics"
+FROM $DOCKER_REGISTRY/cic-base-images:python-3.8.6-dev-e8eb2ee2
-# RUN pip install $pip_extra_index_url_flag cic-base[full_graph]==0.1.2b9
 
 COPY requirements.txt .
-#RUN pip install $pip_extra_index_url_flag -r test_requirements.txt
-#RUN pip install $pip_extra_index_url_flag .
-#RUN pip install .[server]
 
-ARG EXTRA_INDEX_URL="https://pip.grassrootseconomics.net:8433"
-ARG GITLAB_PYTHON_REGISTRY="https://gitlab.com/api/v4/projects/27624814/packages/pypi/simple"
+ARG EXTRA_PIP_INDEX_URL="https://pip.grassrootseconomics.net:8433"
 ARG EXTRA_PIP_ARGS=""
+ARG PIP_INDEX_URL="https://pypi.org/simple"
 
 RUN --mount=type=cache,mode=0755,target=/root/.cache/pip \
-    pip install --index-url https://pypi.org/simple \
-    --extra-index-url $GITLAB_PYTHON_REGISTRY --extra-index-url $EXTRA_INDEX_URL $EXTRA_PIP_ARGS \
+    pip install --index-url $PIP_INDEX_URL \
+    --pre \
+    --extra-index-url $EXTRA_PIP_INDEX_URL $EXTRA_PIP_ARGS \
     -r requirements.txt
 
 COPY . .
@@ -23,10 +21,10 @@ RUN python setup.py install
 # ini files in config directory defines the configurable parameters for the application
 # they can all be overridden by environment variables
 # to generate a list of environment variables from configuration, use: confini-dump -z <dir> (executable provided by confini package)
-COPY config/ /usr/local/etc/cic-cache/
+#COPY config/ /usr/local/etc/cic-cache/
 
 # for db migrations
-RUN git clone https://github.com/vishnubob/wait-for-it.git /usr/local/bin/wait-for-it/
+COPY ./aux/wait-for-it/wait-for-it.sh ./
 COPY cic_cache/db/migrations/ /usr/local/share/cic-cache/alembic/
 
 COPY /docker/start_tracker.sh ./start_tracker.sh
@@ -17,11 +17,12 @@ logg = logging.getLogger()
 rootdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 dbdir = os.path.join(rootdir, 'cic_cache', 'db')
 migrationsdir = os.path.join(dbdir, 'migrations')
+configdir = os.path.join(rootdir, 'cic_cache', 'data', 'config')
 
-config_dir = os.path.join('/usr/local/etc/cic-cache')
+#config_dir = os.path.join('/usr/local/etc/cic-cache')
 
 argparser = argparse.ArgumentParser()
-argparser.add_argument('-c', type=str, default=config_dir, help='config file')
+argparser.add_argument('-c', type=str, help='config file')
 argparser.add_argument('--env-prefix', default=os.environ.get('CONFINI_ENV_PREFIX'), dest='env_prefix', type=str, help='environment prefix for variables to overwrite configuration')
 argparser.add_argument('--migrations-dir', dest='migrations_dir', default=migrationsdir, type=str, help='path to alembic migrations directory')
 argparser.add_argument('--reset', action='store_true', help='downgrade before upgrading')
@@ -35,7 +36,7 @@ if args.vv:
 elif args.v:
     logging.getLogger().setLevel(logging.INFO)
 
-config = confini.Config(args.c, args.env_prefix)
+config = confini.Config(configdir, args.env_prefix)
 config.process()
 config.censor('PASSWORD', 'DATABASE')
 config.censor('PASSWORD', 'SSL')
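With this change the migration script stops defaulting to `/usr/local/etc/cic-cache` and reads its defaults from configuration shipped inside the package. A small sketch of that lookup (paths are taken from the hunk above; the environment-prefix override is standard confini behaviour and is an assumption here, not something this diff shows):

```python
# Sketch: where the cic-cache migration script now finds its configuration.
import os
import confini

rootdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
configdir = os.path.join(rootdir, 'cic_cache', 'data', 'config')

# packaged ini defaults, still overridable through environment variables
config = confini.Config(configdir, os.environ.get('CONFINI_ENV_PREFIX'))
config.process()
config.censor('PASSWORD', 'DATABASE')
print(config.true('DATABASE_DEBUG'))
```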
@@ -1,4 +1,5 @@
 celery==4.4.7
-erc20-demurrage-token~=0.0.3a1
-cic-eth-registry>=0.6.1a2,<0.7.0
-cic-eth[services]~=0.12.4a8
+erc20-demurrage-token~=0.0.5a3
+cic-eth-registry~=0.6.1a6
+chainlib~=0.0.9rc1
+cic_eth~=0.12.4a11
@@ -1,6 +1,6 @@
 [metadata]
 name = cic-eth-aux-erc20-demurrage-token
-version = 0.0.2a6
+version = 0.0.2a7
 description = cic-eth tasks supporting erc20 demurrage token
 author = Louis Holbrook
 author_email = dev@holbrook.no
@@ -1,5 +1,5 @@
 SQLAlchemy==1.3.20
-cic-eth-registry>=0.6.1a5,<0.7.0
+cic-eth-registry>=0.6.1a6,<0.7.0
 hexathon~=0.0.1a8
 chainqueue>=0.0.4a6,<0.1.0
 eth-erc20>=0.1.2a2,<0.2.0
@@ -683,3 +683,4 @@ class Api(ApiBase):
 
         t = self.callback_success.apply_async([r])
         return t
+
@@ -10,7 +10,6 @@ import datetime
 
 # external imports
 import celery
-from cic_eth_registry import CICRegistry
 from chainlib.chain import ChainSpec
 from chainlib.eth.tx import unpack
 from chainlib.connection import RPCConnection
@@ -76,7 +76,7 @@ arg_flags = cic_eth.cli.argflag_std_read
 local_arg_flags = cic_eth.cli.argflag_local_task
 argparser = cic_eth.cli.ArgumentParser(arg_flags)
 argparser.process_local_flags(local_arg_flags)
-argparser.add_argument('--default-token-symbol', dest='default_token_symbol', type=str, help='Symbol of default token to use')
+#argparser.add_argument('--default-token-symbol', dest='default_token_symbol', type=str, help='Symbol of default token to use')
 argparser.add_argument('--trace-queue-status', default=None, dest='trace_queue_status', action='store_true', help='set to perist all queue entry status changes to storage')
 argparser.add_argument('--aux-all', action='store_true', help='include tasks from all submodules from the aux module path')
 argparser.add_argument('--aux', action='append', type=str, default=[], help='add single submodule from the aux module path')
@@ -84,7 +84,7 @@ args = argparser.parse_args()
 
 # process config
 extra_args = {
-    'default_token_symbol': 'CIC_DEFAULT_TOKEN_SYMBOL',
+    # 'default_token_symbol': 'CIC_DEFAULT_TOKEN_SYMBOL',
     'aux_all': None,
     'aux': None,
     'trace_queue_status': 'TASKS_TRACE_QUEUE_STATUS',
@@ -187,6 +187,17 @@ elif len(args.aux) > 0:
         logg.info('aux module {} found in path {}'.format(v, aux_dir))
         aux.append(v)
 
+default_token_symbol = config.get('CIC_DEFAULT_TOKEN_SYMBOL')
+defaullt_token_address = None
+if default_token_symbol:
+    default_token_address = registry.by_name(default_token_symbol)
+else:
+    default_token_address = registry.by_name('DefaultToken')
+    c = ERC20Token(chain_spec, conn, default_token_address)
+    default_token_symbol = c.symbol
+logg.info('found default token {} address {}'.format(default_token_symbol, default_token_address))
+config.add(default_token_symbol, 'CIC_DEFAULT_TOKEN_SYMBOL', exists_ok=True)
+
 for v in aux:
     mname = 'cic_eth_aux.' + v
     mod = importlib.import_module(mname)
@@ -204,8 +215,8 @@ def main():
     argv.append('-n')
     argv.append(config.get('CELERY_QUEUE'))
 
-    BaseTask.default_token_symbol = config.get('CIC_DEFAULT_TOKEN_SYMBOL')
-    BaseTask.default_token_address = registry.by_name(BaseTask.default_token_symbol)
+    BaseTask.default_token_symbol = default_token_symbol
+    BaseTask.default_token_address = default_token_address
     default_token = ERC20Token(chain_spec, conn, add_0x(BaseTask.default_token_address))
     default_token.load(conn)
     BaseTask.default_token_decimals = default_token.decimals
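The block added at startup above replaces the removed `--default-token-symbol` flag. A condensed sketch of the resolution order it implements (names follow the diff; `registry`, `conn` and `chain_spec` are objects already initialised elsewhere in the module, and the helper function itself is illustrative, not part of the code):

```python
# Sketch: default token resolution now performed at cic-eth tasker startup.
def resolve_default_token(config, registry, chain_spec, conn):
    # 1. an explicit CIC_DEFAULT_TOKEN_SYMBOL wins
    symbol = config.get('CIC_DEFAULT_TOKEN_SYMBOL')
    if symbol:
        address = registry.by_name(symbol)
    else:
        # 2. otherwise fall back to the registry entry 'DefaultToken'
        #    and read the symbol from the ERC20 contract itself
        address = registry.by_name('DefaultToken')
        token = ERC20Token(chain_spec, conn, address)  # ERC20Token as imported by the module above
        symbol = token.symbol
    return symbol, address
```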
@@ -10,7 +10,7 @@ version = (
     0,
     12,
     4,
-    'alpha.11',
+    'alpha.14',
     )
 
 version_object = semver.VersionInfo(
@@ -1,4 +1,3 @@
-@node cic-eth-accounts
 @section Accounts
 
 Accounts are private keys in the signer component keyed by "addresses," a one-way transformation of a public key. Data can be signed by using the account as identifier for corresponding RPC requests.
@@ -1,4 +1,4 @@
-@node cic-eth system maintenance
+@anchor{cic-eth-appendix-system-maintenance}
 @appendix Admin API
 
 The admin API is still in an early stage of refinement. User friendliness can be considerably improved.
@@ -33,7 +33,7 @@ Get the current state of a lock
 
 @appendixsection tag_account
 
-Associate an identifier with an account address (@xref{cic-eth system accounts})
+Associate an identifier with an account address (@xref{cic-eth-system-accounts})
 
 @appendixsection have_account
 
@@ -14,5 +14,6 @@ Released 2021 under GPL3
 @c
 @contents
 
-@include index.texi
+@include content.texi
+@include appendix.texi
 

apps/cic-eth/doc/texinfo/appendix.texi (new file, 3 lines)
@@ -0,0 +1,3 @@
@include admin.texi
@include chains.texi
@include transfertypes.texi

@@ -1,4 +1,4 @@
-@node cic-eth Appendix Task chains
+@anchor{cic-eth-appendix-task-chains}
 @appendix Task chains
 
 TBC - explain here how to generate these chain diagrams
@@ -1,4 +1,3 @@
-@node cic-eth configuration
 @section Configuration
 
 Configuration parameters are grouped by configuration filename.
@@ -1,6 +1,6 @@
+@node cic-eth
 @top cic-eth
 
-@include intro.texi
 @include dependencies.texi
 @include configuration.texi
 @include system.texi
@@ -9,6 +9,3 @@
 @include incoming.texi
 @include services.texi
 @include tools.texi
-@include admin.texi
-@include chains.texi
-@include transfertypes.texi
@@ -1,4 +1,3 @@
-@node cic-eth-dependencies
 @section Dependencies
 
 This application is written in Python 3.8. It is tightly coupled with @code{python-celery}, which provides the task worker ecosystem. It also uses @code{SQLAlchemy} which provides useful abstractions for persistent storage though SQL, and @code{alembic} for database schema migrations.
@@ -1,4 +1,4 @@
-@node cic-eth-incoming
+@anchor{cic-eth-incoming}
 @section Incoming transactions
 
 All transactions in mined blocks will be passed to a selection of plugin filters to the @code{chainsyncer} component. Each of these filters are individual python module files in @code{cic_eth.runnable.daemons.filters}. This section describes their function.
@@ -1,9 +1,8 @@
-@node cic-eth-interacting
 @section Interacting with the system
 
-The API to the @var{cic-eth} component is a proxy for executing @emph{chains of Celery tasks}. The tasks that compose individual chains are documented in @ref{cic-eth Appendix Task chains,the Task Chain appendix}, which also describes a CLI tool that can generate graph representationso of them.
+The API to the @var{cic-eth} component is a proxy for executing @emph{chains of Celery tasks}. The tasks that compose individual chains are documented in @ref{cic-eth-appendix-task-chains,the Task Chain appendix}, which also describes a CLI tool that can generate graph representationso of them.
 
-There are two API classes, @var{Api} and @var{AdminApi}. The former is described later in this section, the latter described in @ref{cic-eth system maintenance,the Admin API appendix}.
+There are two API classes, @var{Api} and @var{AdminApi}. The former is described later in this section, the latter described in @ref{cic-eth-appendix-system-maintenance,the Admin API appendix}.
 
 
 @subsection Interface
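As a companion to the section above, a minimal sketch of driving the @var{Api} proxy from Python. The constructor call mirrors the updated test later in this compare (`Api(str(default_chain_spec), queue=None, callback_param='foo')`); the import path, chain spec string and the method invoked are assumptions for illustration only:

```python
# Sketch only: Api dispatches a chain of Celery tasks and returns the
# handle of the final task in the chain.
from cic_eth.api import Api  # assumed import path

chain_spec = 'evm:byzantium:8996:bloxberg'  # assumed chain spec string

# queue selects the Celery queue the chain is published to;
# callback_param is handed to the registered callback task.
api = Api(chain_spec, queue='cic-eth', callback_param='foo')

t = api.create_account(register=True)  # assumed method and signature
print(t.get())
```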
@@ -1,4 +1,3 @@
-@node cic-eth-outgoing
 @section Outgoing transactions
 
 @strong{Important! A pre-requisite for proper functioning of the component is that no other agent is sending transactions to the network for any of the keys in the keystore.}
@@ -1,4 +1,3 @@
-@node cic-eth-services
 @section Services
 
 There are four daemons that together orchestrate all of the aforementioned recipes. This section will provide a high level description of them.
@@ -1,10 +1,10 @@
-@node cic-eth system accounts
 @section System initialization
 
 When the system starts for the first time, it is locked for any state change request other than account creation@footnote{Specifically, the @code{INIT}, @code{SEND} and @code{QUEUE} lock bits are set.}. These locks should be @emph{reset} once system initialization has been completed. Currently, system initialization only involves creating and tagging required system accounts, as specified below.
 
 See @ref{cic-eth-locking,Locking} and @ref{cic-eth-tools-ctrl,ctrl in Tools} for details on locking.
 
+@anchor{cic-eth-system-accounts}
 @subsection System accounts
 
 Certain accounts in the system have special roles. These are defined by @emph{tagging} certain accounts addresses with well-known identifiers.
@@ -1,4 +1,3 @@
-@node cic-eth-tools
 @section Tools
 
 A collection of CLI tools have been provided to help with diagnostics and other administrative tasks. These use the same configuration infrastructure as the daemons.
@@ -37,7 +36,7 @@ Execute a token transfer on behalf of a custodial account.
 
 @subsection tag (cic-eth-tag)
 
-Associate an account address with a string identifier. @xref{cic-eth system accounts}
+Associate an account address with a string identifier. @xref{cic-eth-system-accounts}
 
 
 @anchor{cic-eth-tools-ctrl}
@@ -1,4 +1,3 @@
-@node cic-eth Appendix Transaction types
 @appendix Transfer types
 
 @table @var
@@ -1,46 +1,32 @@
-# syntax = docker/dockerfile:1.2
-FROM registry.gitlab.com/grassrootseconomics/cic-base-images:python-3.8.6-dev-55da5f4e as dev
+ARG DOCKER_REGISTRY="registry.gitlab.com/grassrootseconomics"
+FROM $DOCKER_REGISTRY/cic-base-images:python-3.8.6-dev-e8eb2ee2
 
 # Copy just the requirements and install....this _might_ give docker a hint on caching but we
 # do load these all into setup.py later
 # TODO can we take all the requirements out of setup.py and just do a pip install -r requirements.txt && python setup.py
 #COPY cic-eth/requirements.txt .
 
-ARG EXTRA_INDEX_URL="https://pip.grassrootseconomics.net:8433"
-ARG GITLAB_PYTHON_REGISTRY="https://gitlab.com/api/v4/projects/27624814/packages/pypi/simple"
+ARG EXTRA_PIP_INDEX_URL=https://pip.grassrootseconomics.net:8433
 ARG EXTRA_PIP_ARGS=""
-#RUN --mount=type=cache,mode=0755,target=/root/.cache/pip \
-#    pip install --index-url https://pypi.org/simple \
-#    --force-reinstall \
-#    --extra-index-url $GITLAB_PYTHON_REGISTRY --extra-index-url $EXTRA_INDEX_URL \
-#    -r requirements.txt
+ARG PIP_INDEX_URL=https://pypi.org/simple
 
 RUN --mount=type=cache,mode=0755,target=/root/.cache/pip \
-    pip install --index-url https://pypi.org/simple \
-    --extra-index-url $GITLAB_PYTHON_REGISTRY \
-    --extra-index-url $EXTRA_INDEX_URL \
-    $EXTRA_PIP_ARGS \
-    cic-eth-aux-erc20-demurrage-token~=0.0.2a6
+    pip install --index-url $PIP_INDEX_URL \
+    --pre \
+    --extra-index-url $EXTRA_PIP_INDEX_URL $EXTRA_PIP_ARGS \
+    cic-eth-aux-erc20-demurrage-token~=0.0.2a7
 
 
 COPY *requirements.txt ./
 RUN --mount=type=cache,mode=0755,target=/root/.cache/pip \
-    pip install --index-url https://pypi.org/simple \
-    --extra-index-url $GITLAB_PYTHON_REGISTRY \
-    --extra-index-url $EXTRA_INDEX_URL \
-    $EXTRA_PIP_ARGS \
+    pip install --index-url $PIP_INDEX_URL \
+    --pre \
+    --extra-index-url $EXTRA_PIP_INDEX_URL $EXTRA_PIP_ARGS \
     -r requirements.txt \
     -r services_requirements.txt \
    -r admin_requirements.txt
 
-# always install the latest signer
-RUN --mount=type=cache,mode=0755,target=/root/.cache/pip \
-    pip install --index-url https://pypi.org/simple \
-    --extra-index-url $GITLAB_PYTHON_REGISTRY \
-    --extra-index-url $EXTRA_INDEX_URL \
-    $EXTRA_PIP_ARGS \
-    crypto-dev-signer
-
 COPY . .
 RUN python setup.py install
 
@@ -53,7 +39,7 @@ RUN chmod 755 *.sh
 # # ini files in config directory defines the configurable parameters for the application
 # # they can all be overridden by environment variables
 # # to generate a list of environment variables from configuration, use: confini-dump -z <dir> (executable provided by confini package)
-COPY config/ /usr/local/etc/cic-eth/
+#COPY config/ /usr/local/etc/cic-eth/
 COPY cic_eth/db/migrations/ /usr/local/share/cic-eth/alembic/
 COPY crypto_dev_signer_config/ /usr/local/etc/crypto-dev-signer/
 
@@ -2,5 +2,6 @@
 
 set -e
 >&2 echo executing database migration
-python scripts/migrate.py -c /usr/local/etc/cic-eth --migrations-dir /usr/local/share/cic-eth/alembic -vv
+#python scripts/migrate.py -c /usr/local/etc/cic-eth --migrations-dir /usr/local/share/cic-eth/alembic -vv
+python scripts/migrate.py --migrations-dir /usr/local/share/cic-eth/alembic -vv
 set +e
@@ -1,4 +1,4 @@
 celery==4.4.7
-chainlib-eth>=0.0.9rc4,<0.1.0
+chainlib-eth>=0.0.10a16,<0.1.0
 semver==2.13.0
 crypto-dev-signer>=0.4.15rc2,<0.5.0
@@ -1,5 +1,5 @@
-chainqueue>=0.0.5a1,<0.1.0
-chainsyncer[sql]>=0.0.6a3,<0.1.0
+chainqueue>=0.0.6a1,<0.1.0
+chainsyncer[sql]>=0.0.7a3,<0.1.0
 alembic==1.4.2
 confini>=0.3.6rc4,<0.5.0
 redis==3.5.3
@@ -8,7 +8,7 @@ pycryptodome==3.10.1
 liveness~=0.0.1a7
 eth-address-index>=0.2.4a1,<0.3.0
 eth-accounts-index>=0.1.2a3,<0.2.0
-cic-eth-registry>=0.6.1a5,<0.7.0
+cic-eth-registry>=0.6.1a6,<0.7.0
 erc20-faucet>=0.3.2a2,<0.4.0
 erc20-transfer-authorization>=0.3.5a2,<0.4.0
 sarafu-faucet>=0.0.7a2,<0.1.0
@@ -1,6 +1,7 @@
 [metadata]
 name = cic-eth
-version = attr: cic_eth.version.__version_string__
+#version = attr: cic_eth.version.__version_string__
+version = 0.12.4a13
 description = CIC Network Ethereum interaction
 author = Louis Holbrook
 author_email = dev@holbrook.no
@@ -110,7 +110,7 @@ def test_tokens_noproof(
         custodial_roles,
         foo_token_declaration,
         bar_token_declaration,
-        celery_worker,
+        celery_session_worker,
         ):
 
     api = Api(str(default_chain_spec), queue=None, callback_param='foo')
@@ -1,6 +1,6 @@
 crypto-dev-signer>=0.4.15rc2,<=0.4.15
-chainqueue>=0.0.5a1,<0.1.0
-cic-eth-registry>=0.6.1a5,<0.7.0
+chainqueue>=0.0.5a3,<0.1.0
+cic-eth-registry>=0.6.1a6,<0.7.0
 redis==3.5.3
 hexathon~=0.0.1a8
 pycryptodome==3.10.1
@@ -1,19 +1,23 @@
-# syntax = docker/dockerfile:1.2
-FROM node:15.3.0-alpine3.10
+#FROM node:15.3.0-alpine3.10
+FROM node:lts-alpine3.14
 
 WORKDIR /root
 
 RUN apk add --no-cache postgresql bash
 
+ARG NPM_REPOSITORY=${NPM_REPOSITORY:-https://registry.npmjs.org}
+RUN npm config set snyk=false
+#RUN npm config set registry={NPM_REPOSITORY}
+RUN npm config set registry=${NPM_REPOSITORY}
+
 # copy the dependencies
-COPY package.json package-lock.json .
+COPY package.json package-lock.json ./
 RUN --mount=type=cache,mode=0755,target=/root/.npm \
     npm set cache /root/.npm && \
-    npm ci
+    npm cache verify && \
+    npm ci --verbose
 
-COPY webpack.config.js .
-COPY tsconfig.json .
+COPY webpack.config.js ./
+COPY tsconfig.json ./
 ## required to build the cic-client-meta module
 COPY . .
 COPY tests/*.asc /root/pgp/
apps/cic-meta/package-lock.json (generated, 5650 lines changed): file diff suppressed because it is too large.
@@ -1,8 +1,9 @@
 create table if not exists store (
     id serial primary key not null,
-    owner_fingerprint text not null,
+    owner_fingerprint text default null,
     hash char(64) not null unique,
-    content text not null
+    content text not null,
+    mime_type text
 );
 
 create index if not exists idx_fp on store ((lower(owner_fingerprint)));
@@ -1,9 +1,10 @@
 create table if not exists store (
     /*id serial primary key not null,*/
     id integer primary key autoincrement,
-    owner_fingerprint text not null,
+    owner_fingerprint text default null,
     hash char(64) not null unique,
-    content text not null
+    content text not null,
+    mime_type text
 );
 
 create index if not exists idx_fp on store ((lower(owner_fingerprint)));
@@ -1,12 +1,13 @@
 import * as Automerge from 'automerge';
 import * as pgp from 'openpgp';
+import * as crypto from 'crypto';
 
-import { Envelope, Syncable } from '@cicnet/crdt-meta';
+import { Envelope, Syncable, bytesToHex } from '@cicnet/crdt-meta';
 
 
 function handleNoMergeGet(db, digest, keystore) {
-  const sql = "SELECT content FROM store WHERE hash = '" + digest + "'";
-  return new Promise<string|boolean>((whohoo, doh) => {
+  const sql = "SELECT owner_fingerprint, content, mime_type FROM store WHERE hash = '" + digest + "'";
+  return new Promise<any>((whohoo, doh) => {
    db.query(sql, (e, rs) => {
      if (e !== null && e !== undefined) {
        doh(e);
@@ -16,16 +17,36 @@ function handleNoMergeGet(db, digest, keystore) {
        return;
      }
 
+      const immutable = rs.rows[0]['owner_fingerprint'] == undefined;
+      let mimeType;
+      if (immutable) {
+        if (rs.rows[0]['mime_type'] === undefined) {
+          mimeType = 'application/octet-stream';
+        } else {
+          mimeType = rs.rows[0]['mime_type'];
+        }
+      } else {
+        mimeType = 'application/json';
+      }
+
      const cipherText = rs.rows[0]['content'];
      pgp.message.readArmored(cipherText).then((m) => {
        const opts = {
          message: m,
          privateKeys: [keystore.getPrivateKey()],
+          format: 'binary',
        };
        pgp.decrypt(opts).then((plainText) => {
-          const o = Syncable.fromJSON(plainText.data);
-          const r = JSON.stringify(o.m['data']);
-          whohoo(r);
+          let r;
+          if (immutable) {
+            r = plainText.data;
+          } else {
+            mimeType = 'application/json';
+            const d = new TextDecoder().decode(plainText.data);
+            const o = Syncable.fromJSON(d);
+            r = JSON.stringify(o.m['data']);
+          }
+          whohoo([r, mimeType]);
        }).catch((e) => {
          console.error('decrypt', e);
          doh(e);
@@ -57,6 +78,7 @@ function handleServerMergePost(data, db, digest, keystore, signer) {
      } else {
        e = Envelope.fromJSON(v);
        s = e.unwrap();
+        console.debug('s', s, o)
        s.replace(o, 'server merge');
        e.set(s);
        s.onwrap = (e) => {
@@ -139,7 +161,13 @@ function handleClientMergeGet(db, digest, keystore) {
          privateKeys: [keystore.getPrivateKey()],
        };
        pgp.decrypt(opts).then((plainText) => {
-          const o = Syncable.fromJSON(plainText.data);
+          let d;
+          if (typeof(plainText.data) == 'string') {
+            d = plainText.data;
+          } else {
+            d = new TextDecoder().decode(plainText.data);
+          }
+          const o = Syncable.fromJSON(d);
          const e = new Envelope(o);
          whohoo(e.toJSON());
        }).catch((e) => {
@@ -201,10 +229,65 @@ function handleClientMergePut(data, db, digest, keystore, signer) {
    });
 }
 
 
+function handleImmutablePost(data, db, digest, keystore, contentType) {
+  return new Promise<Array<string|boolean>>((whohoo, doh) => {
+    let data_binary = data;
+    const h = crypto.createHash('sha256');
+    h.update(data_binary);
+    const z = h.digest();
+    const r = bytesToHex(z);
+
+    if (digest) {
+      if (r != digest) {
+        doh('hash mismatch: ' + r + ' != ' + digest);
+        return;
+      }
+    } else {
+      digest = r;
+      console.debug('calculated digest ' + digest);
+    }
+
+    handleNoMergeGet(db, digest, keystore).then((haveDigest) => {
+      if (haveDigest !== false) {
+        whohoo([false, digest]);
+        return;
+      }
+      let message;
+      if (typeof(data) == 'string') {
+        data_binary = new TextEncoder().encode(data);
+        message = pgp.message.fromText(data);
+      } else {
+        message = pgp.message.fromBinary(data);
+      }
+
+      const opts = {
+        message: message,
+        publicKeys: keystore.getEncryptKeys(),
+      };
+      pgp.encrypt(opts).then((cipherText) => {
+        const sql = "INSERT INTO store (hash, content, mime_type) VALUES ('" + digest + "', '" + cipherText.data + "', '" + contentType + "') ON CONFLICT (hash) DO UPDATE SET content = EXCLUDED.content;";
+        db.query(sql, (e, rs) => {
+          if (e !== null && e !== undefined) {
+            doh(e);
+            return;
+          }
+          whohoo([true, digest]);
+        });
+      }).catch((e) => {
+        doh(e);
+      });
+    }).catch((e) => {
+      doh(e);
+    });
+  });
+}
+
 export {
   handleClientMergePut,
   handleClientMergeGet,
   handleServerMergePost,
   handleServerMergePut,
   handleNoMergeGet,
+  handleImmutablePost,
 };
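Together with the request routing changes below, `handleImmutablePost` adds a content-addressed storage mode next to the automerge ones: a POST to the server root with the `x-cic-automerge: immutable` header stores the body under its sha256 digest and answers 201 when the record is new. A rough client-side sketch (server address and payload are assumptions; the header, digest and status semantics are taken from the diffs in this compare):

```python
# Sketch: exercising the new immutable mode of the cic-meta server.
import hashlib
import requests

base_url = 'http://localhost:8000'  # assumed cic-meta server address
payload = b'arbitrary immutable content'

resp = requests.post(
    base_url + '/',
    data=payload,
    headers={
        'x-cic-automerge': 'immutable',
        'Content-Type': 'application/octet-stream',
    },
)
digest = resp.text
assert resp.status_code in (200, 201)  # 201 only when newly created
assert digest == hashlib.sha256(payload).hexdigest()

# reading it back goes through the plain (no-merge) GET path
fetched = requests.get(base_url + '/' + digest)
print(fetched.headers.get('Content-Type'), len(fetched.content))
```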
|
|||||||
@@ -118,37 +118,71 @@ async function processRequest(req, res) {
 		return;
 	}
 
+	let mod = req.method.toLowerCase() + ":automerge:";
+	let modDetail = undefined;
+	let immutablePost = false;
 	try {
 		digest = parseDigest(req.url);
 	} catch(e) {
-		console.error('digest error: ' + e)
-		res.writeHead(400, {"Content-Type": "text/plain"});
-		res.end();
-		return;
+		if (req.url == '/') {
+			immutablePost = true;
+			modDetail = 'immutable';
+		} else {
+			console.error('url is not empty (' + req.url + ') and not valid digest error: ' + e)
+			res.writeHead(400, {"Content-Type": "text/plain"});
+			res.end();
+			return;
+		}
 	}
 
-	const mergeHeader = req.headers['x-cic-automerge'];
-	let mod = req.method.toLowerCase() + ":automerge:";
-	switch (mergeHeader) {
-		case "client":
-			mod += "client"; // client handles merges
-			break;
-		case "server":
-			mod += "server"; // server handles merges
-			break;
-		default:
-			mod += "none"; // merged object only (get only)
-	}
+	if (modDetail === undefined) {
+		const mergeHeader = req.headers['x-cic-automerge'];
+		switch (mergeHeader) {
+			case "client":
+				if (immutablePost) {
+					res.writeHead(400, 'Valid digest missing', {"Content-Type": "text/plain"});
+					res.end();
+					return;
+				}
+				modDetail = "client"; // client handles merges
+				break;
+			case "server":
+				if (immutablePost) {
+					res.writeHead(400, 'Valid digest missing', {"Content-Type": "text/plain"});
+					res.end();
+					return;
+				}
+				modDetail = "server"; // server handles merges
+				break;
+			case "immutable":
+				modDetail = "immutable"; // no merging, literal immutable content with content-addressing
+				break;
+			default:
+				modDetail = "none"; // merged object only (get only)
+		}
+	}
+	mod += modDetail;
 
-	let data = '';
+	// handle bigger chunks of data
+	let data;
 	req.on('data', (d) => {
-		data += d;
+		if (data === undefined) {
+			data = d;
+		} else {
+			data += d;
+		}
 	});
-	req.on('end', async () => {
-		console.debug('mode', mod);
-		let content = '';
+	req.on('end', async (d) => {
+		let inputContentType = req.headers['content-type'];
+		let debugString = 'executing mode ' + mod ;
+		if (data !== undefined) {
+			debugString += ' for content type ' + inputContentType + ' length ' + data.length;
+		}
+		console.debug(debugString);
+		let content;
 		let contentType = 'application/json';
-		console.debug('handling data', data);
+		let statusCode = 200;
 		let r:any = undefined;
 		try {
 			switch (mod) {
@@ -159,6 +193,7 @@ async function processRequest(req, res) {
 					res.end();
 					return;
 				}
+				content = '';
 				break;
 
 			case 'get:automerge:client':
@@ -176,6 +211,7 @@ async function processRequest(req, res) {
 					res.end();
 					return;
 				}
+				content = '';
 				break;
 			//case 'get:automerge:server':
 			//	content = await handlers.handleServerMergeGet(db, digest, keystore);
@@ -183,12 +219,24 @@ async function processRequest(req, res) {
 
 			case 'get:automerge:none':
 				r = await handlers.handleNoMergeGet(db, digest, keystore);
-				if (r == false) {
+				if (r === false) {
 					res.writeHead(404, {"Content-Type": "text/plain"});
 					res.end();
 					return;
 				}
-				content = r;
+				content = r[0];
+				contentType = r[1];
+				break;
+
+			case 'post:automerge:immutable':
+				if (inputContentType === undefined) {
+					inputContentType = 'application/octet-stream';
+				}
+				r = await handlers.handleImmutablePost(data, db, digest, keystore, inputContentType);
+				if (r[0]) {
+					statusCode = 201;
+				}
+				content = r[1];
 				break;
 
 			default:
@@ -204,14 +252,21 @@ async function processRequest(req, res) {
 		}
 
 		if (content === undefined) {
-			console.error('empty content', data);
+			console.error('empty content', mod, digest, data);
 			res.writeHead(404, {"Content-Type": "text/plain"});
 			res.end();
 			return;
 		}
 
-		const responseContentLength = (new TextEncoder().encode(content)).length;
-		res.writeHead(200, {
+		//let responseContentLength;
+		//if (typeof(content) == 'string') {
+		//	(new TextEncoder().encode(content)).length;
+		//}
+		const responseContentLength = content.length;
+		//if (responseContentLength === undefined) {
+		//	responseContentLength = 0;
+		//}
+		res.writeHead(statusCode, {
 			"Access-Control-Allow-Origin": "*",
 			"Content-Type": contentType,
 			"Content-Length": responseContentLength,
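Usage sketch (not part of the diff): with the routing above, a POST to the root path in immutable mode returns the digest of the stored content, 201 when a new record is created and 200 when the same content already exists. The snippet below is a rough Python client illustration; the host and port are placeholders, not values taken from this change set.

import requests  # assumed local test instance; adjust the base URL to your deployment

base = 'http://localhost:8000'

# store immutable content; the response body is the content digest
resp = requests.post(
    base + '/',
    data=b'foo',
    headers={'x-cic-automerge': 'immutable', 'Content-Type': 'text/plain'},
)
digest = resp.text
print(resp.status_code, digest)

# fetch the stored record back by its digest (plain GET, merged-object mode)
print(requests.get(f'{base}/{digest}').text)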
@@ -7,6 +7,8 @@ import * as handlers from '../scripts/server/handlers';
 import { Envelope, Syncable, ArgPair, PGPKeyStore, PGPSigner, KeyStore, Signer } from '@cicnet/crdt-meta';
 import { SqliteAdapter } from '../src/db';
 
+const hashOfFoo = '2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae';
+
 function createKeystore() {
 	const pksa = fs.readFileSync(__dirname + '/privatekeys.asc', 'utf-8');
 	const pubksa = fs.readFileSync(__dirname + '/publickeys.asc', 'utf-8');
@@ -44,11 +46,13 @@ function createDatabase(sqlite_file:string):Promise<any> {
 //			doh(e);
 //			return;
 //		}
+		// get this from real sql files sources
 		const sql = `CREATE TABLE store (
 			id integer primary key autoincrement,
-			owner_fingerprint text not null,
+			owner_fingerprint text default null,
 			hash char(64) not null unique,
-			content text not null
+			content text not null,
+			mime_type text default null
 		);
 		`
 
@@ -111,15 +115,18 @@ describe('server', async () => {
 		let j = env.toJSON();
 		const content = await handlers.handleClientMergePut(j, db, digest, keystore, signer);
 		assert(content); // true-ish
+		console.debug('content', content);
 
 		let v = await handlers.handleNoMergeGet(db, digest, keystore);
-		if (v === undefined) {
+		if (v === false) {
 			db.close();
 			assert.fail('');
 		}
+		db.close();
+		return;
 
 		v = await handlers.handleClientMergeGet(db, digest, keystore);
-		if (v === undefined) {
+		if (v === false) {
 			db.close();
 			assert.fail('');
 		}
@@ -187,7 +194,7 @@ describe('server', async () => {
 		j = await handlers.handleNoMergeGet(db, digest, keystore);
 		assert(v); // true-ish
 
-		let o = JSON.parse(j);
+		let o = JSON.parse(j[0]);
 		o.bar = 'xyzzy';
 		j = JSON.stringify(o);
 
@@ -212,82 +219,39 @@ describe('server', async () => {
 
 		j = await handlers.handleNoMergeGet(db, digest, keystore);
 		assert(j); // true-ish
-		o = JSON.parse(j);
+		o = JSON.parse(j[0]);
 		console.log(o);
 
 		db.close();
 	});
 
-	await it('server_merge', async () => {
-		const keystore = await createKeystore();
-		const signer = new PGPSigner(keystore);
-
-		const db = await createDatabase(__dirname + '/db.three.sqlite');
-
-		const digest = 'deadbeef';
-		let s = new Syncable(digest, {
-			bar: 'baz',
-		});
-		let env = await wrap(s, signer)
-		let j:any = env.toJSON();
-
-		let v = await handlers.handleClientMergePut(j, db, digest, keystore, signer);
-		assert(v); // true-ish
-
-		j = await handlers.handleNoMergeGet(db, digest, keystore);
-		assert(v); // true-ish
-
-		let o = JSON.parse(j);
-		o.bar = 'xyzzy';
-		j = JSON.stringify(o);
-
-		let signMaterial = await handlers.handleServerMergePost(j, db, digest, keystore, signer);
-		assert(signMaterial)
-
-		env = Envelope.fromJSON(signMaterial);
-
-		console.log('envvvv', env);
-
-		const signedData = await signData(env.o['digest'], keystore);
-		console.log('signed', signedData);
-
-		o = {
-			'm': env,
-			's': signedData,
-		}
-		j = JSON.stringify(o);
-		console.log(j);
-
-		v = await handlers.handleServerMergePut(j, db, digest, keystore, signer);
-		assert(v);
-
-		j = await handlers.handleNoMergeGet(db, digest, keystore);
-		assert(j); // true-ish
-		o = JSON.parse(j);
-		console.log(o);
-
-		db.close();
-	});
-
-
-
-	// await it('server_merge_empty', async () => {
+	// await it('server_merge', async () => {
 	//	const keystore = await createKeystore();
 	//	const signer = new PGPSigner(keystore);
 	//
 	//	const db = await createDatabase(__dirname + '/db.three.sqlite');
 	//
-	//	const digest = '0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef';
-	//	let o:any = {
-	//		foo: 'bar',
-	//		xyzzy: 42,
-	//	}
-	//	let j:any = JSON.stringify(o);
+	//	const digest = 'deadbeef';
+	//	let s = new Syncable(digest, {
+	//		bar: 'baz',
+	//	});
+	//	let env = await wrap(s, signer)
+	//	let j:any = env.toJSON();
+	//
+	//	let v = await handlers.handleClientMergePut(j, db, digest, keystore, signer);
+	//	assert(v); // true-ish
+	//
+	//	j = await handlers.handleNoMergeGet(db, digest, keystore);
+	//	assert(v); // true-ish
+	//
+	//	let o = JSON.parse(j);
+	//	o.bar = 'xyzzy';
+	//	j = JSON.stringify(o);
 	//
 	//	let signMaterial = await handlers.handleServerMergePost(j, db, digest, keystore, signer);
 	//	assert(signMaterial)
 	//
-	//	const env = Envelope.fromJSON(signMaterial);
+	//	env = Envelope.fromJSON(signMaterial);
 	//
 	//	console.log('envvvv', env);
 	//
@@ -301,7 +265,7 @@ describe('server', async () => {
 	//	j = JSON.stringify(o);
 	//	console.log(j);
 	//
-	//	let v = await handlers.handleServerMergePut(j, db, digest, keystore, signer);
+	//	v = await handlers.handleServerMergePut(j, db, digest, keystore, signer);
 	//	assert(v);
 	//
 	//	j = await handlers.handleNoMergeGet(db, digest, keystore);
@@ -311,5 +275,88 @@ describe('server', async () => {
 	//
 	//	db.close();
 	// });
+	//
+
+	await it('server_merge_empty', async () => {
+		const keystore = await createKeystore();
+		const signer = new PGPSigner(keystore);
+
+		const db = await createDatabase(__dirname + '/db.three.sqlite');
+
+		const digest = '0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef';
+		let o:any = {
+			foo: 'bar',
+			xyzzy: 42,
+		}
+		let j:any = JSON.stringify(o);
+
+		let signMaterial = await handlers.handleServerMergePost(j, db, digest, keystore, signer);
+		assert(signMaterial)
+
+		const env = Envelope.fromJSON(signMaterial);
+
+		console.log('envvvv', env);
+
+		const signedData = await signData(env.o['digest'], keystore);
+		console.log('signed', signedData);
+
+		o = {
+			'm': env,
+			's': signedData,
+		}
+		j = JSON.stringify(o);
+		console.log(j);
+
+		let v = await handlers.handleServerMergePut(j, db, digest, keystore, signer);
+		assert(v);
+
+		j = await handlers.handleNoMergeGet(db, digest, keystore);
+		assert(j); // true-ish
+		o = JSON.parse(j[0]);
+		console.log(o);
+
+		db.close();
+	});
+
+	await it('immutable_nodigest', async() => {
+		const keystore = await createKeystore();
+		const db = await createDatabase(__dirname + '/db.three.sqlite');
+
+		const s:string = 'foo';
+		let r;
+		r = await handlers.handleImmutablePost(s, db, undefined, keystore, 'text/plain');
+		assert(r[0]);
+		assert(hashOfFoo == r[1]);
+
+		r = await handlers.handleImmutablePost(s, db, undefined, keystore, 'text/plain');
+		assert(!r[0]);
+		assert(hashOfFoo == r[1]);
+
+		const b:Uint8Array = new TextEncoder().encode(s);
+		r = await handlers.handleImmutablePost(b, db, undefined, keystore, 'text/plain');
+		assert(!r[0]);
+		assert(hashOfFoo == r[1]);
+	});
+
+	await it('immutable_digest', async() => {
+		const keystore = await createKeystore();
+		const db = await createDatabase(__dirname + '/db.three.sqlite');
+
+		const s:string = 'foo';
+		const b:Uint8Array = new TextEncoder().encode(s);
+		let r;
+		r = await handlers.handleImmutablePost(b, db, hashOfFoo, keystore, 'application/octet-stream');
+		assert(r[0]);
+		assert(hashOfFoo == r[1]);
+
+		r = await handlers.handleImmutablePost(b, db, hashOfFoo, keystore, 'application/octet-stream');
+		assert(!r[0]);
+		assert(hashOfFoo == r[1]);
+
+		r = await handlers.handleImmutablePost(s, db, hashOfFoo, keystore, 'text/plain');
+		assert(!r[0]);
+		assert(hashOfFoo == r[1]);
+	});
 });
 
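Reading aid (not part of the diff): the new tests above rely on handleImmutablePost returning a two-element result of [created, digest], where the first store of a given content reports created and repeats do not, and the digest is always the SHA-256 of the content. A rough behavioural model in Python, using an in-memory dict in place of the SQL store purely for illustration:

import hashlib

_store = {}

def handle_immutable_post(data, digest=None, content_type='application/octet-stream'):
    # mirrors only the test expectations above, not the real TypeScript handler
    if isinstance(data, str):
        data = data.encode()
    calculated = hashlib.sha256(data).hexdigest()
    if digest is not None and digest != calculated:
        raise ValueError('hash mismatch')
    created = calculated not in _store
    _store[calculated] = (data, content_type)
    return created, calculated

created, digest = handle_immutable_post('foo', content_type='text/plain')
assert created and digest.startswith('2c26b46b')
created, _ = handle_immutable_post('foo', content_type='text/plain')
assert not created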
@@ -9,7 +9,7 @@ import semver
 
 logg = logging.getLogger()
 
-version = (0, 4, 0, 'alpha.10')
+version = (0, 4, 0, 'alpha.11')
 
 version_object = semver.VersionInfo(
     major=version[0],
@@ -1,22 +1,28 @@
-# syntax = docker/dockerfile:1.2
-FROM registry.gitlab.com/grassrootseconomics/cic-base-images:python-3.8.6-dev-55da5f4e as dev
+ARG DOCKER_REGISTRY="registry.gitlab.com/grassrootseconomics"
+
+FROM $DOCKER_REGISTRY/cic-base-images:python-3.8.6-dev-e8eb2ee2
 
 #RUN pip install $pip_extra_index_url_flag cic-base[full_graph]==0.1.2a62
+RUN apt-get install libffi-dev -y
+
+
+ARG EXTRA_PIP_INDEX_URL=https://pip.grassrootseconomics.net:8433
+ARG EXTRA_PIP_ARGS=""
+ARG PIP_INDEX_URL=https://pypi.org/simple
 
-ARG EXTRA_INDEX_URL="https://pip.grassrootseconomics.net:8433"
-ARG GITLAB_PYTHON_REGISTRY="https://gitlab.com/api/v4/projects/27624814/packages/pypi/simple"
 COPY requirements.txt .
 
 RUN --mount=type=cache,mode=0755,target=/root/.cache/pip \
-	pip install --index-url https://pypi.org/simple \
-	--extra-index-url $GITLAB_PYTHON_REGISTRY --extra-index-url $EXTRA_INDEX_URL \
+	pip install --index-url $PIP_INDEX_URL \
+	--pre \
+	--extra-index-url $EXTRA_PIP_INDEX_URL $EXTRA_PIP_ARGS \
 	-r requirements.txt
 
 COPY . .
 RUN python setup.py install
 
-COPY docker/*.sh .
-RUN chmod +x *.sh
+COPY docker/*.sh ./
+RUN chmod +x /root/*.sh
 
 # ini files in config directory defines the configurable parameters for the application
 # they can all be overridden by environment variables
@@ -1,7 +1,8 @@
-confini~=0.4.1a1
+confini>=0.3.6rc4,<0.5.0
 africastalking==1.2.3
 SQLAlchemy==1.3.20
 alembic==1.4.2
 psycopg2==2.8.6
 celery==4.4.7
 redis==3.5.3
+semver==2.13.0
apps/cic-signer/Dockerfile (new file, 22 lines)
@@ -0,0 +1,22 @@
+ARG DOCKER_REGISTRY=registry.gitlab.com/grassrootseconomics
+
+FROM $DOCKER_REGISTRY/cic-base-images:python-3.8.6-dev-e8eb2ee2 as dev
+
+WORKDIR /root
+
+RUN apt-get install libffi-dev -y
+
+COPY requirements.txt .
+
+ARG EXTRA_PIP_INDEX_URL="https://pip.grassrootseconomics.net:8433"
+ARG EXTRA_PIP_ARGS=""
+ARG PIP_INDEX_URL="https://pypi.org/simple"
+RUN --mount=type=cache,mode=0755,target=/root/.cache/pip \
+	pip install --index-url $PIP_INDEX_URL \
+	--pre \
+	--extra-index-url $EXTRA_PIP_INDEX_URL $EXTRA_PIP_ARGS \
+	-r requirements.txt
+
+COPY . .
+
+#RUN chmod +x *.sh

apps/cic-signer/requirements.txt (new file, 1 line)
@@ -0,0 +1 @@
+funga-eth[sql]>=0.5.1a1,<0.6.0
@@ -7,6 +7,7 @@ from typing import Optional
 # third-party imports
 from cic_eth.api import Api
 from cic_eth_aux.erc20_demurrage_token.api import Api as DemurrageApi
+from cic_types.condiments import MetadataPointer
 
 # local imports
 from cic_ussd.account.transaction import from_wei
@@ -102,7 +103,7 @@ def get_cached_available_balance(blockchain_address: str) -> float:
     :rtype: float
     """
     identifier = bytes.fromhex(blockchain_address)
-    key = cache_data_key(identifier, salt=':cic.balances')
+    key = cache_data_key(identifier, salt=MetadataPointer.BALANCES)
     cached_balances = get_cached_data(key=key)
     if cached_balances:
         return calculate_available_balance(json.loads(cached_balances))
@@ -117,5 +118,5 @@ def get_cached_adjusted_balance(identifier: bytes):
     :return:
     :rtype:
     """
-    key = cache_data_key(identifier, ':cic.adjusted_balance')
+    key = cache_data_key(identifier, MetadataPointer.BALANCES_ADJUSTED)
     return get_cached_data(key)
@@ -7,6 +7,7 @@ from typing import Optional
 import celery
 from chainlib.hash import strip_0x
 from cic_eth.api import Api
+from cic_types.condiments import MetadataPointer
 
 # local import
 from cic_ussd.account.chain import Chain
@@ -53,7 +54,7 @@ def get_cached_statement(blockchain_address: str) -> bytes:
     :rtype: str
     """
     identifier = bytes.fromhex(strip_0x(blockchain_address))
-    key = cache_data_key(identifier=identifier, salt=':cic.statement')
+    key = cache_data_key(identifier=identifier, salt=MetadataPointer.STATEMENT)
     return get_cached_data(key=key)
 
 
@@ -5,6 +5,7 @@ from typing import Dict, Optional
 
 # external imports
 from cic_eth.api import Api
+from cic_types.condiments import MetadataPointer
 
 # local imports
 from cic_ussd.account.chain import Chain
@@ -23,7 +24,7 @@ def get_cached_default_token(chain_str: str) -> Optional[str]:
     :rtype:
     """
     logg.debug(f'Retrieving default token from cache for chain: {chain_str}')
-    key = cache_data_key(identifier=chain_str.encode('utf-8'), salt=':cic.default_token_data')
+    key = cache_data_key(identifier=chain_str.encode('utf-8'), salt=MetadataPointer.TOKEN_DEFAULT)
     return get_cached_data(key=key)
 
 
@@ -2,7 +2,8 @@
 import hashlib
 import logging
 
-# third-party imports
+# external imports
+from cic_types.condiments import MetadataPointer
 from redis import Redis
 
 logg = logging.getLogger()
@@ -38,7 +39,7 @@ def get_cached_data(key: str):
     return cache.get(name=key)
 
 
-def cache_data_key(identifier: bytes, salt: str):
+def cache_data_key(identifier: bytes, salt: MetadataPointer):
     """
     :param identifier:
     :type identifier:
@@ -49,5 +50,5 @@ def cache_data_key(identifier: bytes, salt: str):
     """
     hash_object = hashlib.new("sha256")
     hash_object.update(identifier)
-    hash_object.update(salt.encode(encoding="utf-8"))
+    hash_object.update(salt.value.encode(encoding="utf-8"))
     return hash_object.digest().hex()
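Reading aid (not part of the diff): the cache key above is just sha256(identifier || salt), with the salt now taken from the .value of a MetadataPointer enum member instead of a bare string. A minimal sketch of the same idea; the enum member and its string value here are illustrative stand-ins, the real members live in cic_types.condiments:

import hashlib
from enum import Enum

class MetadataPointer(Enum):
    # illustrative value only
    BALANCES = ':cic.balances'

def cache_data_key(identifier: bytes, salt: MetadataPointer) -> str:
    # sha256 over the raw identifier followed by the salt's string value
    h = hashlib.new('sha256')
    h.update(identifier)
    h.update(salt.value.encode('utf-8'))
    return h.digest().hex()

# usage: derive a balances cache key for a hypothetical identifier
print(cache_data_key(bytes.fromhex('deadbeef'), MetadataPointer.BALANCES))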
@@ -3,6 +3,7 @@ import json
 
 # external imports
 from cic_eth.api import Api
+from cic_types.condiments import MetadataPointer
 
 # local imports
 from cic_ussd.account.metadata import get_cached_preferred_language, parse_account_metadata
@@ -109,7 +110,7 @@ class Account(SessionBase):
         :rtype: str
         """
         identifier = bytes.fromhex(self.blockchain_address)
-        key = cache_data_key(identifier, ':cic.person')
+        key = cache_data_key(identifier, MetadataPointer.PERSON)
         account_metadata = get_cached_data(key)
         if not account_metadata:
             return self.phone_number
@@ -3,7 +3,6 @@
 # external imports
 
 # local imports
-from .base import Metadata
 from .custom import CustomMetadata
 from .person import PersonMetadata
 from .phone import PhonePointerMetadata
@@ -1,99 +1,30 @@
 # standard imports
-import json
 import logging
-import os
-from typing import Dict, Union
 
-# third-part imports
-from cic_types.models.person import generate_metadata_pointer, Person
+# external imports
+from cic_types.condiments import MetadataPointer
+from cic_types.ext.metadata import MetadataRequestsHandler
+from cic_types.processor import generate_metadata_pointer
 
 # local imports
 from cic_ussd.cache import cache_data, get_cached_data
-from cic_ussd.http.requests import error_handler, make_request
-from cic_ussd.metadata.signer import Signer
 
 logg = logging.getLogger(__file__)
 
 
-class Metadata:
-    """
-    :cvar base_url: The base url or the metadata server.
-    :type base_url: str
-    """
-
-    base_url = None
-
-
-class MetadataRequestsHandler(Metadata):
-
-    def __init__(self, cic_type: str, identifier: bytes, engine: str = 'pgp'):
-        """"""
-        self.cic_type = cic_type
-        self.engine = engine
-        self.headers = {
-            'X-CIC-AUTOMERGE': 'server',
-            'Content-Type': 'application/json'
-        }
-        self.identifier = identifier
-        self.metadata_pointer = generate_metadata_pointer(
-            identifier=self.identifier,
-            cic_type=self.cic_type
-        )
-        if self.base_url:
-            self.url = os.path.join(self.base_url, self.metadata_pointer)
-
-    def create(self, data: Union[Dict, str]):
-        """"""
-        data = json.dumps(data).encode('utf-8')
-        result = make_request(method='POST', url=self.url, data=data, headers=self.headers)
-
-        error_handler(result=result)
-        metadata = result.json()
-        return self.edit(data=metadata)
-
-    def edit(self, data: Union[Dict, str]):
-        """"""
-        cic_meta_signer = Signer()
-        signature = cic_meta_signer.sign_digest(data=data)
-        algorithm = cic_meta_signer.get_operational_key().get('algo')
-        formatted_data = {
-            'm': json.dumps(data),
-            's': {
-                'engine': self.engine,
-                'algo': algorithm,
-                'data': signature,
-                'digest': data.get('digest'),
-            }
-        }
-        formatted_data = json.dumps(formatted_data)
-        result = make_request(method='PUT', url=self.url, data=formatted_data, headers=self.headers)
-        logg.info(f'signed metadata submission status: {result.status_code}.')
-        error_handler(result=result)
-        try:
-            decoded_identifier = self.identifier.decode("utf-8")
-        except UnicodeDecodeError:
-            decoded_identifier = self.identifier.hex()
-        logg.info(f'identifier: {decoded_identifier}. metadata pointer: {self.metadata_pointer} set to: {data}.')
-        return result
-
-    def query(self):
-        """"""
-        result = make_request(method='GET', url=self.url)
-        error_handler(result=result)
-        result_data = result.json()
-        if not isinstance(result_data, dict):
-            raise ValueError(f'Invalid result data object: {result_data}.')
-        if result.status_code == 200:
-            if self.cic_type == ':cic.person':
-                person = Person()
-                person_data = person.deserialize(person_data=result_data)
-                serialized_person_data = person_data.serialize()
-                data = json.dumps(serialized_person_data)
-            else:
-                data = json.dumps(result_data)
-            cache_data(key=self.metadata_pointer, data=data)
-            logg.debug(f'caching: {data} with key: {self.metadata_pointer}')
-        return result_data
+class UssdMetadataHandler(MetadataRequestsHandler):
+    def __init__(self, cic_type: MetadataPointer, identifier: bytes):
+        super().__init__(cic_type, identifier)
+
+    def cache_metadata(self, data: str):
+        """
+        :param data:
+        :type data:
+        :return:
+        :rtype:
+        """
+        cache_data(self.metadata_pointer, data)
+        logg.debug(f'caching: {data} with key: {self.metadata_pointer}')
 
     def get_cached_metadata(self):
         """"""
@@ -1,12 +1,13 @@
 # standard imports
 
 # external imports
+from cic_types.condiments import MetadataPointer
 
 # local imports
-from .base import MetadataRequestsHandler
+from .base import UssdMetadataHandler
 
 
-class CustomMetadata(MetadataRequestsHandler):
+class CustomMetadata(UssdMetadataHandler):
 
     def __init__(self, identifier: bytes):
-        super().__init__(cic_type=':cic.custom', identifier=identifier)
+        super().__init__(cic_type=MetadataPointer.CUSTOM, identifier=identifier)
@@ -1,12 +1,13 @@
 # standard imports
 
 # external imports
+from cic_types.condiments import MetadataPointer
 
 # local imports
-from .base import MetadataRequestsHandler
+from .base import UssdMetadataHandler
 
 
-class PersonMetadata(MetadataRequestsHandler):
+class PersonMetadata(UssdMetadataHandler):
 
     def __init__(self, identifier: bytes):
-        super().__init__(cic_type=':cic.person', identifier=identifier)
+        super().__init__(cic_type=MetadataPointer.PERSON, identifier=identifier)
@@ -2,12 +2,13 @@
 import logging
 
 # external imports
+from cic_types.condiments import MetadataPointer
 
 # local imports
-from .base import MetadataRequestsHandler
+from .base import UssdMetadataHandler
 
 
-class PhonePointerMetadata(MetadataRequestsHandler):
+class PhonePointerMetadata(UssdMetadataHandler):
 
     def __init__(self, identifier: bytes):
-        super().__init__(cic_type=':cic.phone', identifier=identifier)
+        super().__init__(cic_type=MetadataPointer.PHONE, identifier=identifier)
@@ -1,13 +1,13 @@
 # standard imports
 
 # external imports
-import celery
+from cic_types.condiments import MetadataPointer
 
 # local imports
-from .base import MetadataRequestsHandler
+from .base import UssdMetadataHandler
 
 
-class PreferencesMetadata(MetadataRequestsHandler):
+class PreferencesMetadata(UssdMetadataHandler):
 
     def __init__(self, identifier: bytes):
-        super().__init__(cic_type=':cic.preferences', identifier=identifier)
+        super().__init__(cic_type=MetadataPointer.PREFERENCES, identifier=identifier)
@@ -1,60 +0,0 @@
-# standard imports
-import json
-import logging
-from typing import Optional
-from urllib.request import Request, urlopen
-
-# third-party imports
-import gnupg
-
-# local imports
-
-logg = logging.getLogger()
-
-
-class Signer:
-    """
-    :cvar gpg_path:
-    :type gpg_path:
-    :cvar gpg_passphrase:
-    :type gpg_passphrase:
-    :cvar key_file_path:
-    :type key_file_path:
-
-    """
-    gpg_path: str = None
-    gpg_passphrase: str = None
-    key_file_path: str = None
-
-    def __init__(self):
-        self.gpg = gnupg.GPG(gnupghome=self.gpg_path)
-
-        with open(self.key_file_path, 'r') as key_file:
-            self.key_data = key_file.read()
-
-    def get_operational_key(self):
-        """
-        :return:
-        :rtype:
-        """
-        # import key data into keyring
-        self.gpg.import_keys(key_data=self.key_data)
-        gpg_keys = self.gpg.list_keys()
-        key_algorithm = gpg_keys[0].get('algo')
-        key_id = gpg_keys[0].get("keyid")
-        logg.debug(f'using signing key: {key_id}, algorithm: {key_algorithm}')
-        return gpg_keys[0]
-
-    def sign_digest(self, data: dict):
-        """
-        :param data:
-        :type data:
-        :return:
-        :rtype:
-        """
-        digest = data['digest']
-        key_id = self.get_operational_key().get('keyid')
-        signature = self.gpg.sign(digest, passphrase=self.gpg_passphrase, keyid=key_id)
-        return str(signature)
-
-
@@ -5,6 +5,7 @@ from datetime import datetime, timedelta
 
 # external imports
 import i18n.config
+from cic_types.condiments import MetadataPointer
 
 # local imports
 from cic_ussd.account.balance import (calculate_available_balance,
@@ -163,7 +164,7 @@ class MenuProcessor:
         token_symbol = get_default_token_symbol()
         blockchain_address = self.account.blockchain_address
         balances = get_balances(blockchain_address, chain_str, token_symbol, False)[0]
-        key = cache_data_key(self.identifier, ':cic.balances')
+        key = cache_data_key(self.identifier, MetadataPointer.BALANCES)
         cache_data(key, json.dumps(balances))
         available_balance = calculate_available_balance(balances)
         now = datetime.now()
@@ -173,7 +174,7 @@ class MenuProcessor:
         else:
             timestamp = int((now - timedelta(30)).timestamp())
         adjusted_balance = get_adjusted_balance(to_wei(int(available_balance)), chain_str, timestamp, token_symbol)
-        key = cache_data_key(self.identifier, ':cic.adjusted_balance')
+        key = cache_data_key(self.identifier, MetadataPointer.BALANCES_ADJUSTED)
         cache_data(key, json.dumps(adjusted_balance))
 
         query_statement(blockchain_address)
@@ -10,14 +10,14 @@ import i18n
 import redis
 from chainlib.chain import ChainSpec
 from confini import Config
+from cic_types.ext.metadata import Metadata
+from cic_types.ext.metadata.signer import Signer
 
 # local imports
 from cic_ussd.account.chain import Chain
 from cic_ussd.cache import Cache
 from cic_ussd.db import dsn_from_config
 from cic_ussd.db.models.base import SessionBase
-from cic_ussd.metadata.signer import Signer
-from cic_ussd.metadata.base import Metadata
 from cic_ussd.phone_number import Support
 from cic_ussd.session.ussd_session import UssdSession as InMemoryUssdSession
 from cic_ussd.validator import validate_presence
@@ -87,11 +87,8 @@ Signer.key_file_path = key_file_path
 i18n.load_path.append(config.get('LOCALE_PATH'))
 i18n.set('fallback', config.get('LOCALE_FALLBACK'))
 
-chain_spec = ChainSpec(
-    common_name=config.get('CIC_COMMON_NAME'),
-    engine=config.get('CIC_ENGINE'),
-    network_id=config.get('CIC_NETWORK_ID')
-)
+chain_spec = ChainSpec.from_chain_str(config.get('CHAIN_SPEC'))
 
 Chain.spec = chain_spec
 Support.phone_number = config.get('OFFICE_SUPPORT_PHONE')
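Side note (not part of the diff): the daemons now build the chain spec from a single colon-separated CHAIN_SPEC string instead of three separate CIC_* config values; the real parsing is done by chainlib.chain.ChainSpec.from_chain_str. The toy parser below only illustrates the shape of such a string, using the 'evm:foo:1:bar' value from apps/cic-ussd/config/test/chain.ini; the field names are guesses for illustration, not chainlib's actual attribute names.

from dataclasses import dataclass

@dataclass
class ToyChainSpec:
    engine: str
    fork: str
    network_id: int
    common_name: str

    @classmethod
    def from_chain_str(cls, chain_str: str) -> 'ToyChainSpec':
        # split a four-part spec string such as 'evm:foo:1:bar'
        engine, fork, network_id, common_name = chain_str.split(':')
        return cls(engine, fork, int(network_id), common_name)

print(ToyChainSpec.from_chain_str('evm:foo:1:bar'))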
@@ -12,6 +12,9 @@ import i18n
 import redis
 from chainlib.chain import ChainSpec
 from confini import Config
+from cic_types.condiments import MetadataPointer
+from cic_types.ext.metadata import Metadata
+from cic_types.ext.metadata.signer import Signer
 
 # local imports
 from cic_ussd.account.chain import Chain
@@ -25,8 +28,6 @@ from cic_ussd.files.local_files import create_local_file_data_stores, json_file_
 from cic_ussd.http.requests import get_request_endpoint, get_request_method
 from cic_ussd.http.responses import with_content_headers
 from cic_ussd.menu.ussd_menu import UssdMenu
-from cic_ussd.metadata.base import Metadata
-from cic_ussd.metadata.signer import Signer
 from cic_ussd.phone_number import process_phone_number, Support, E164Format
 from cic_ussd.processor.ussd import handle_menu_operations
 from cic_ussd.runnable.server_base import exportable_parser, logg
@@ -96,11 +97,7 @@ celery.Celery(backend=config.get('CELERY_RESULT_URL'), broker=config.get('CELERY
 states = json_file_parser(filepath=config.get('MACHINE_STATES'))
 transitions = json_file_parser(filepath=config.get('MACHINE_TRANSITIONS'))
 
-chain_spec = ChainSpec(
-    common_name=config.get('CIC_COMMON_NAME'),
-    engine=config.get('CIC_ENGINE'),
-    network_id=config.get('CIC_NETWORK_ID')
-)
+chain_spec = ChainSpec.from_chain_str(config.get('CHAIN_SPEC'))
 
 Chain.spec = chain_spec
 UssdStateMachine.states = states
@@ -113,7 +110,7 @@ default_token_data = query_default_token(chain_str)
 
 # cache default token for re-usability
 if default_token_data:
-    cache_key = cache_data_key(chain_str.encode('utf-8'), ':cic.default_token_data')
+    cache_key = cache_data_key(chain_str.encode('utf-8'), MetadataPointer.TOKEN_DEFAULT)
     cache_data(key=cache_key, data=json.dumps(default_token_data))
 else:
     raise InitializationError(f'Default token data for: {chain_str} not found.')
@@ -3,8 +3,10 @@ import json
 import logging
 from datetime import timedelta
 
-# third-party imports
+# external imports
 import celery
+from cic_types.condiments import MetadataPointer
 
 
 # local imports
 from cic_ussd.account.balance import get_balances, calculate_available_balance
@@ -87,7 +89,7 @@ def balances_callback(result: list, param: str, status_code: int):
 
     balances = result[0]
     identifier = bytes.fromhex(param)
-    key = cache_data_key(identifier, ':cic.balances')
+    key = cache_data_key(identifier, MetadataPointer.BALANCES)
     cache_data(key, json.dumps(balances))
 
 
@@ -1,15 +1,17 @@
 # standard imports
+import json
 import logging
 
 # third-party imports
 import celery
+from cic_types.models.person import Person
 
 # local imports
 from cic_ussd.metadata import CustomMetadata, PersonMetadata, PhonePointerMetadata, PreferencesMetadata
 from cic_ussd.tasks.base import CriticalMetadataTask
 
 celery_app = celery.current_app
-logg = logging.getLogger().getChild(__name__)
+logg = logging.getLogger(__file__)
 
 
 @celery_app.task
@@ -22,7 +24,13 @@ def query_person_metadata(blockchain_address: str):
     """
     identifier = bytes.fromhex(blockchain_address)
     person_metadata_client = PersonMetadata(identifier=identifier)
-    person_metadata_client.query()
+    response = person_metadata_client.query()
+    data = response.json()
+    person = Person()
+    person_data = person.deserialize(person_data=data)
+    serialized_person_data = person_data.serialize()
+    data = json.dumps(serialized_person_data)
+    person_metadata_client.cache_metadata(data=data)
 
 
 @celery_app.task
@@ -76,6 +84,9 @@ def query_preferences_metadata(blockchain_address: str):
     :type blockchain_address: str | Ox-hex
     """
     identifier = bytes.fromhex(blockchain_address)
-    logg.debug(f'Retrieving preferences metadata for address: {blockchain_address}.')
-    person_metadata_client = PreferencesMetadata(identifier=identifier)
-    return person_metadata_client.query()
+    logg.debug(f'retrieving preferences metadata for address: {blockchain_address}.')
+    preferences_metadata_client = PreferencesMetadata(identifier=identifier)
+    response = preferences_metadata_client.query()
+    data = json.dumps(response.json())
+    preferences_metadata_client.cache_metadata(data)
+    return data
@@ -2,9 +2,10 @@
 import json
 import logging
 
-# third-party imports
+# external imports
 import celery
 import i18n
+from cic_types.condiments import MetadataPointer
 
 # local imports
 from cic_ussd.account.metadata import get_cached_preferred_language
@@ -49,7 +50,7 @@ def cache_statement(parsed_transaction: dict, querying_party: str):
     statement_transactions.append(parsed_transaction)
     data = json.dumps(statement_transactions)
     identifier = bytes.fromhex(querying_party)
-    key = cache_data_key(identifier, ':cic.statement')
+    key = cache_data_key(identifier, MetadataPointer.STATEMENT)
     cache_data(key, data)
 
 
@@ -1,7 +1,7 @@
 # standard imports
 import semver
 
-version = (0, 3, 1, 'alpha.5')
+version = (0, 3, 1, 'alpha.6')
 
 version_object = semver.VersionInfo(
     major=version[0],
apps/cic-ussd/config/chain.ini (new file, 2 lines)
@@ -0,0 +1,2 @@
+[chain]
+spec =

@@ -1,5 +1,2 @@
 [cic]
-engine = evm
-common_name = bloxberg
-network_id = 8996
 meta_url = http://localhost:63380

apps/cic-ussd/config/test/chain.ini (new file, 2 lines)
@@ -0,0 +1,2 @@
+[chain]
+spec = 'evm:foo:1:bar'

@@ -1,5 +1,2 @@
 [cic]
-engine = evm
-common_name = bloxberg
-network_id = 8996
 meta_url = http://test-meta.io
@@ -1,5 +1,7 @@
-# syntax = docker/dockerfile:1.2
-FROM registry.gitlab.com/grassrootseconomics/cic-base-images:python-3.8.6-dev-55da5f4e as dev
+ARG DOCKER_REGISTRY="registry.gitlab.com/grassrootseconomics"
+
+FROM $DOCKER_REGISTRY/cic-base-images:python-3.8.6-dev-e8eb2ee2
 
 RUN apt-get install -y redis-server
 # create secrets directory
 RUN mkdir -vp pgp/keys
@@ -8,31 +10,34 @@ RUN mkdir -vp pgp/keys
 RUN mkdir -vp cic-ussd
 RUN mkdir -vp data
 
-ARG EXTRA_INDEX_URL="https://pip.grassrootseconomics.net:8433"
-ARG GITLAB_PYTHON_REGISTRY="https://gitlab.com/api/v4/projects/27624814/packages/pypi/simple"
+ARG EXTRA_PIP_INDEX_URL=https://pip.grassrootseconomics.net:8433
+ARG EXTRA_PIP_ARGS=""
+ARG PIP_INDEX_URL=https://pypi.org/simple
+
 RUN --mount=type=cache,mode=0755,target=/root/.cache/pip \
-	pip install --index-url https://pypi.org/simple \
-	--extra-index-url $GITLAB_PYTHON_REGISTRY \
-	--extra-index-url $EXTRA_INDEX_URL \
-	cic-eth-aux-erc20-demurrage-token~=0.0.2a6
+	pip install --index-url $PIP_INDEX_URL \
+	--pre \
+	--extra-index-url $EXTRA_PIP_INDEX_URL $EXTRA_PIP_ARGS \
+	cic-eth-aux-erc20-demurrage-token~=0.0.2a7
 
-COPY requirements.txt .
+COPY *requirements.txt ./
 RUN --mount=type=cache,mode=0755,target=/root/.cache/pip \
-	pip install --index-url https://pypi.org/simple \
-	--extra-index-url $GITLAB_PYTHON_REGISTRY --extra-index-url $EXTRA_INDEX_URL \
-	-r requirements.txt
+	pip install --index-url $PIP_INDEX_URL \
+	--pre \
+	--extra-index-url $EXTRA_PIP_INDEX_URL $EXTRA_PIP_ARGS \
+	-r requirements.txt
 
 COPY . .
 RUN python setup.py install
 
 COPY cic_ussd/db/ussd_menu.json data/
 
-COPY docker/*.sh .
+COPY docker/*.sh ./
 RUN chmod +x /root/*.sh
 
-# copy config and migration files to definitive file so they can be referenced in path definitions for running scripts
+## copy config and migration files to definitive file so they can be referenced in path definitions for running scripts
 COPY config/ /usr/local/etc/cic-ussd/
 COPY cic_ussd/db/migrations/ /usr/local/share/cic-ussd/alembic
 
@@ -4,10 +4,10 @@ billiard==3.6.4.0
 bcrypt==3.2.0
 celery==4.4.7
 cffi==1.14.6
-cic-eth[services]~=0.12.4a7
-cic-notify~=0.4.0a10
-cic-types~=0.1.0a15
-confini>=0.4.1a1,<0.5.0
+cic-eth~=0.12.5a1
+cic-notify~=0.4.0a11
+cic-types~=0.2.1a2
+confini>=0.3.6rc4,<0.5.0
 phonenumbers==8.12.12
 psycopg2==2.8.6
 python-i18n[YAML]==0.3.9
@@ -1,3 +1,4 @@
+cic-eth[services]~=0.12.4a13
 Faker==8.1.2
 faker-e164==0.1.0
 pytest==6.2.4
@@ -4,8 +4,7 @@ import time
 
 # external imports
 import pytest
-import requests_mock
-from chainlib.hash import strip_0x
+from cic_types.condiments import MetadataPointer
 
 # local imports
 from cic_ussd.account.statement import (filter_statement_transactions,
@@ -48,7 +47,7 @@ def test_generate(activated_account,
     generate(querying_party, None, sender_transaction)
     time.sleep(2)
     identifier = bytes.fromhex(activated_account.blockchain_address)
-    key = cache_data_key(identifier, ':cic.statement')
+    key = cache_data_key(identifier, MetadataPointer.STATEMENT)
     statement = get_cached_data(key)
     statement = json.loads(statement)
     assert len(statement) == 1
@@ -5,24 +5,25 @@ import os
 # external imports
 import requests_mock
 from chainlib.hash import strip_0x
+from cic_types.condiments import MetadataPointer
 from cic_types.processor import generate_metadata_pointer
 
 # local imports
-from cic_ussd.metadata.base import MetadataRequestsHandler
+from cic_ussd.metadata.base import UssdMetadataHandler
 
 
 # external imports
 
 
-def test_metadata_requests_handler(activated_account,
+def test_ussd_metadata_handler(activated_account,
                                    init_cache,
                                    load_config,
                                    person_metadata,
                                    setup_metadata_request_handler,
                                    setup_metadata_signer):
     identifier = bytes.fromhex(strip_0x(activated_account.blockchain_address))
-    cic_type = ':cic.person'
-    metadata_client = MetadataRequestsHandler(cic_type, identifier)
+    cic_type = MetadataPointer.PERSON
+    metadata_client = UssdMetadataHandler(cic_type, identifier)
     assert metadata_client.cic_type == cic_type
     assert metadata_client.engine == 'pgp'
     assert metadata_client.identifier == identifier
@@ -38,7 +39,5 @@ def test_metadata_requests_handler(activated_account,
     assert result.status_code == 200
     person_metadata.pop('digest')
     request_mocker.register_uri('GET', metadata_client.url, status_code=200, reason='OK', json=person_metadata)
-    result = metadata_client.query()
+    result = metadata_client.query().json()
     assert result == person_metadata
-    cached_metadata = metadata_client.get_cached_metadata()
-    assert json.loads(cached_metadata) == person_metadata
@@ -1,7 +1,7 @@
 # standard imports
 import os
 # external imports
-from chainlib.hash import strip_0x
+from cic_types.condiments import MetadataPointer
 from cic_types.processor import generate_metadata_pointer
 
 # local imports
@@ -11,8 +11,8 @@ from cic_ussd.metadata import CustomMetadata
 
 
 def test_custom_metadata(activated_account, load_config, setup_metadata_request_handler, setup_metadata_signer):
-    cic_type = ':cic.custom'
-    identifier = bytes.fromhex(strip_0x(activated_account.blockchain_address))
+    cic_type = MetadataPointer.CUSTOM
+    identifier = bytes.fromhex(activated_account.blockchain_address)
     custom_metadata_client = CustomMetadata(identifier)
     assert custom_metadata_client.cic_type == cic_type
     assert custom_metadata_client.engine == 'pgp'
@@ -1,7 +1,7 @@
 # standard imports
 import os
 # external imports
-from chainlib.hash import strip_0x
+from cic_types.condiments import MetadataPointer
 from cic_types.processor import generate_metadata_pointer
 
 # local imports
@@ -11,8 +11,8 @@ from cic_ussd.metadata import PersonMetadata
 
 
 def test_person_metadata(activated_account, load_config, setup_metadata_request_handler, setup_metadata_signer):
-    cic_type = ':cic.person'
-    identifier = bytes.fromhex(strip_0x(activated_account.blockchain_address))
+    cic_type = MetadataPointer.PERSON
+    identifier = bytes.fromhex(activated_account.blockchain_address)
     person_metadata_client = PersonMetadata(identifier)
     assert person_metadata_client.cic_type == cic_type
     assert person_metadata_client.engine == 'pgp'
@@ -1,7 +1,7 @@
 # standard imports
 import os
 # external imports
-from chainlib.hash import strip_0x
+from cic_types.condiments import MetadataPointer
 from cic_types.processor import generate_metadata_pointer

 # local imports
@@ -12,8 +12,8 @@ from cic_ussd.metadata import PhonePointerMetadata


 def test_phone_pointer_metadata(activated_account, load_config, setup_metadata_request_handler, setup_metadata_signer):
-    cic_type = ':cic.phone'
+    cic_type = MetadataPointer.PHONE
-    identifier = bytes.fromhex(strip_0x(activated_account.blockchain_address))
+    identifier = bytes.fromhex(activated_account.blockchain_address)
     phone_pointer_metadata = PhonePointerMetadata(identifier)
     assert phone_pointer_metadata.cic_type == cic_type
     assert phone_pointer_metadata.engine == 'pgp'
@@ -1,7 +1,7 @@
 # standard imports
 import os
 # external imports
-from chainlib.hash import strip_0x
+from cic_types.condiments import MetadataPointer
 from cic_types.processor import generate_metadata_pointer

 # local imports
@@ -11,8 +11,8 @@ from cic_ussd.metadata import PreferencesMetadata


 def test_preferences_metadata(activated_account, load_config, setup_metadata_request_handler, setup_metadata_signer):
-    cic_type = ':cic.preferences'
+    cic_type = MetadataPointer.PREFERENCES
-    identifier = bytes.fromhex(strip_0x(activated_account.blockchain_address))
+    identifier = bytes.fromhex(activated_account.blockchain_address)
     preferences_metadata_client = PreferencesMetadata(identifier)
     assert preferences_metadata_client.cic_type == cic_type
     assert preferences_metadata_client.engine == 'pgp'
@@ -1,17 +0,0 @@
-# standard imports
-import shutil
-
-# third-party imports
-
-# local imports
-from cic_ussd.metadata.signer import Signer
-
-
-def test_client(load_config, setup_metadata_signer, person_metadata):
-    signer = Signer()
-    gpg = signer.gpg
-    assert signer.key_data is not None
-    gpg.import_keys(key_data=signer.key_data)
-    gpg_keys = gpg.list_keys()
-    assert signer.get_operational_key() == gpg_keys[0]
-    shutil.rmtree(Signer.gpg_path)
@@ -3,7 +3,7 @@ import json
 import datetime

 # external imports
-from chainlib.hash import strip_0x
+from cic_types.condiments import MetadataPointer

 # local imports
 from cic_ussd.account.balance import get_cached_available_balance
@@ -58,7 +58,7 @@ def test_menu_processor(activated_account,
                         token_symbol=token_symbol)

     identifier = bytes.fromhex(activated_account.blockchain_address)
-    key = cache_data_key(identifier, ':cic.adjusted_balance')
+    key = cache_data_key(identifier, MetadataPointer.BALANCES_ADJUSTED)
     adjusted_balance = 45931650.64654012
     cache_data(key, json.dumps(adjusted_balance))
     resp = response(activated_account, 'ussd.kenya.account_balances', name, init_database, generic_ussd_session)
@@ -7,6 +7,7 @@ import time
 import i18n
 import requests_mock
 from chainlib.hash import strip_0x
+from cic_types.condiments import MetadataPointer

 # local imports
 from cic_ussd.account.chain import Chain
@@ -45,7 +46,7 @@ def test_handle_menu(activated_account,
     ussd_menu = UssdMenu.find_by_name('initial_language_selection')
     assert menu_resp.get('name') == ussd_menu.get('name')
     identifier = bytes.fromhex(strip_0x(pending_account.blockchain_address))
-    key = cache_data_key(identifier, ':cic.preferences')
+    key = cache_data_key(identifier, MetadataPointer.PREFERENCES)
     cache_data(key, json.dumps(preferences))
     time.sleep(2)
     menu_resp = handle_menu(pending_account, init_database)
@@ -1,20 +1,18 @@
 # standard imports
 import json
-from decimal import Decimal

 # external imports
 import celery
 import pytest
-import requests_mock
 from chainlib.hash import strip_0x
+from cic_types.condiments import MetadataPointer

 # local imports
-from cic_ussd.account.statement import generate, filter_statement_transactions
+from cic_ussd.account.statement import filter_statement_transactions
 from cic_ussd.account.transaction import transaction_actors
 from cic_ussd.cache import cache_data_key, get_cached_data
 from cic_ussd.db.models.account import Account
 from cic_ussd.error import AccountCreationDataNotFound
-from cic_ussd.metadata import PreferencesMetadata


 # test imports
@@ -89,7 +87,7 @@ def test_balances_callback(activated_account, balances, celery_session_worker):
         [balances, activated_account.blockchain_address, status_code])
     s_balances_callback.apply_async().get()
     identifier = bytes.fromhex(strip_0x(activated_account.blockchain_address))
-    key = cache_data_key(identifier, ':cic.balances')
+    key = cache_data_key(identifier, MetadataPointer.BALANCES)
     cached_balances = get_cached_data(key)
     cached_balances = json.loads(cached_balances)
     assert cached_balances == balances[0]
@@ -1,11 +1,11 @@
 # standard imports
 import json
-import os

 # external imports
 import celery
 import requests_mock
 from chainlib.hash import strip_0x
+from cic_types.condiments import MetadataPointer

 # local imports
 from cic_ussd.cache import cache_data_key, get_cached_data
@@ -27,7 +27,7 @@ def test_query_person_metadata(activated_account,
     s_query_person_metadata = celery.signature(
         'cic_ussd.tasks.metadata.query_person_metadata', [activated_account.blockchain_address])
     s_query_person_metadata.apply().get()
-    key = cache_data_key(identifier, ':cic.person')
+    key = cache_data_key(identifier, MetadataPointer.PERSON)
     cached_person_metadata = get_cached_data(key)
     cached_person_metadata = json.loads(cached_person_metadata)
     assert cached_person_metadata == person_metadata
@@ -46,7 +46,7 @@ def test_query_preferences_metadata(activated_account,
     query_preferences_metadata = celery.signature(
         'cic_ussd.tasks.metadata.query_preferences_metadata', [activated_account.blockchain_address])
     query_preferences_metadata.apply().get()
-    key = cache_data_key(identifier, ':cic.preferences')
+    key = cache_data_key(identifier, MetadataPointer.PREFERENCES)
     cached_preferences_metadata = get_cached_data(key)
     cached_preferences_metadata = json.loads(cached_preferences_metadata)
     assert cached_preferences_metadata == preferences
@@ -4,6 +4,7 @@ import json
 # external imports
 import celery
 from chainlib.hash import strip_0x
+from cic_types.condiments import MetadataPointer

 # local imports
 from cic_ussd.account.transaction import transaction_actors
@@ -38,7 +39,7 @@ def test_cache_statement(activated_account,
                          transaction_result):
     recipient_transaction, sender_transaction = transaction_actors(transaction_result)
     identifier = bytes.fromhex(strip_0x(activated_account.blockchain_address))
-    key = cache_data_key(identifier, ':cic.statement')
+    key = cache_data_key(identifier, MetadataPointer.STATEMENT)
     cached_statement = get_cached_data(key)
     assert cached_statement is None
     s_parse_transaction = celery.signature(
@@ -3,6 +3,7 @@ import hashlib
 import json

 # external imports
+from cic_types.condiments import MetadataPointer

 # local imports
 from cic_ussd.cache import cache_data, cache_data_key, get_cached_data
@@ -12,7 +13,7 @@ from cic_ussd.cache import cache_data, cache_data_key, get_cached_data

 def test_cache_data(init_cache):
     identifier = 'some_key'.encode()
-    key = cache_data_key(identifier, ':testing')
+    key = cache_data_key(identifier, MetadataPointer.PERSON)
     assert get_cached_data(key) is None
     cache_data(key, json.dumps('some_value'))
     assert get_cached_data(key) is not None
@@ -20,10 +21,10 @@ def test_cache_data(init_cache):

 def test_cache_data_key():
     identifier = 'some_key'.encode()
-    key = cache_data_key(identifier, ':testing')
+    key = cache_data_key(identifier, MetadataPointer.PERSON)
     hash_object = hashlib.new("sha256")
     hash_object.update(identifier)
-    hash_object.update(':testing'.encode(encoding="utf-8"))
+    hash_object.update(':cic.person'.encode(encoding="utf-8"))
     assert hash_object.digest().hex() == key
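For context, test_cache_data_key above now passes MetadataPointer.PERSON to cache_data_key yet still hashes the literal ':cic.person', which pins down the expected behaviour: the key is a SHA-256 digest over the identifier bytes followed by the pointer's string value. A standalone sketch of that derivation (assuming MetadataPointer.PERSON carries the value ':cic.person'; the real helper lives in cic_ussd.cache and may differ in detail):

    import hashlib

    def sketch_cache_data_key(identifier: bytes, pointer_value: str) -> str:
        # SHA-256 over the identifier bytes, then the pointer's string value, hex-encoded
        hash_object = hashlib.new('sha256')
        hash_object.update(identifier)
        hash_object.update(pointer_value.encode(encoding='utf-8'))
        return hash_object.digest().hex()

    # e.g. sketch_cache_data_key('some_key'.encode(), ':cic.person') reproduces the digest
    # the updated test compares against cache_data_key(identifier, MetadataPointer.PERSON).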
apps/cic-ussd/tests/fixtures/account.py (12 changes, vendored)
@@ -4,7 +4,7 @@ import random

 # external accounts
 import pytest
-from chainlib.hash import strip_0x
+from cic_types.condiments import MetadataPointer

 # local imports
 from cic_ussd.account.chain import Chain
@@ -56,7 +56,7 @@ def cache_account_creation_data(init_cache, account_creation_data):
 def cache_balances(activated_account, balances, init_cache):
     identifier = bytes.fromhex(activated_account.blockchain_address)
     balances = json.dumps(balances[0])
-    key = cache_data_key(identifier, ':cic.balances')
+    key = cache_data_key(identifier, MetadataPointer.BALANCES)
     cache_data(key, balances)


@@ -64,7 +64,7 @@ def cache_balances(activated_account, balances, init_cache):
 def cache_default_token_data(default_token_data, init_cache, load_chain_spec):
     chain_str = Chain.spec.__str__()
     data = json.dumps(default_token_data)
-    key = cache_data_key(chain_str.encode('utf-8'), ':cic.default_token_data')
+    key = cache_data_key(chain_str.encode('utf-8'), MetadataPointer.TOKEN_DEFAULT)
     cache_data(key, data)


@@ -72,7 +72,7 @@ def cache_default_token_data(default_token_data, init_cache, load_chain_spec):
 def cache_person_metadata(activated_account, init_cache, person_metadata):
     identifier = bytes.fromhex(activated_account.blockchain_address)
     person = json.dumps(person_metadata)
-    key = cache_data_key(identifier, ':cic.person')
+    key = cache_data_key(identifier, MetadataPointer.PERSON)
     cache_data(key, person)


@@ -80,7 +80,7 @@ def cache_person_metadata(activated_account, init_cache, person_metadata):
 def cache_preferences(activated_account, init_cache, preferences):
     identifier = bytes.fromhex(activated_account.blockchain_address)
     preferences = json.dumps(preferences)
-    key = cache_data_key(identifier, ':cic.preferences')
+    key = cache_data_key(identifier, MetadataPointer.PREFERENCES)
     cache_data(key, preferences)


@@ -88,7 +88,7 @@ def cache_preferences(activated_account, init_cache, preferences):
 def cache_statement(activated_account, init_cache, statement):
     identifier = bytes.fromhex(activated_account.blockchain_address)
     statement = json.dumps(statement)
-    key = cache_data_key(identifier, ':cic.statement')
+    key = cache_data_key(identifier, MetadataPointer.STATEMENT)
     cache_data(key, statement)
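For context, each fixture above follows the same shape: serialise the payload with json.dumps, derive a key from the account identifier and a MetadataPointer member, and store it with cache_data. A hedged sketch of the read side, mirroring the lookups used in the tests earlier in this diff (the account object and helper imports are assumed to match those tests; this is illustrative, not repository code):

    import json
    from cic_types.condiments import MetadataPointer
    from cic_ussd.cache import cache_data_key, get_cached_data

    def sketch_read_cached_balances(activated_account):
        # rebuild the key exactly as the cache_balances fixture does, then decode the entry
        identifier = bytes.fromhex(activated_account.blockchain_address)
        key = cache_data_key(identifier, MetadataPointer.BALANCES)
        cached_balances = get_cached_data(key)
        return json.loads(cached_balances) if cached_balances else None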
Some files were not shown because too many files have changed in this diff.