mirror of
https://github.com/grassrootseconomics/cic-chain-events.git
synced 2024-11-22 07:46:46 +01:00
docs: fix minor mistakes
This commit is contained in:
parent
7923384328
commit
6df00ddfce
@ -6,7 +6,7 @@
|
|||||||
|
|
||||||
> CIC Chain Events
|
> CIC Chain Events
|
||||||
|
|
||||||
Filters live (and past) transactions on Celo and emits relevant transfer events to a NATS JetStream sink for further processing/indexing.
|
Filters live (and past) transactions on Celo and emits relevant events to a NATS JetStream sink for further processing/indexing.
|
||||||
|
|
||||||
## Prerequisites
|
## Prerequisites
|
||||||
|
|
||||||
@ -29,7 +29,7 @@ The base config is described in `config.toml`. Values can be overriden with env
|
|||||||
|
|
||||||
### 3. Start the service
|
### 3. Start the service
|
||||||
|
|
||||||
#### Compiling binary
|
**Compiling the binary**
|
||||||
|
|
||||||
Run `make build` or download pre-compiled binaries from the [releases](https://github.com/grassrootseconomics/cic-chain-events/releases) page.
|
Run `make build` or download pre-compiled binaries from the [releases](https://github.com/grassrootseconomics/cic-chain-events/releases) page.
|
||||||
|
|
||||||
@ -41,7 +41,7 @@ Optional flags:
|
|||||||
- `-debug` - Enable/disable debug level logs
|
- `-debug` - Enable/disable debug level logs
|
||||||
- `-queries` - `queries.sql` file path
|
- `-queries` - `queries.sql` file path
|
||||||
|
|
||||||
#### Docker
|
**Docker**
|
||||||
|
|
||||||
To pull the pre-built docker image:
|
To pull the pre-built docker image:
|
||||||
|
|
||||||
|
14
config.toml
14
config.toml
@ -1,5 +1,6 @@
|
|||||||
[metrics]
|
[metrics]
|
||||||
# Exposes Prometheus metrics
|
# Exposes Prometheus metrics
|
||||||
|
# /metrics endpoint
|
||||||
go_process = true
|
go_process = true
|
||||||
|
|
||||||
# API server
|
# API server
|
||||||
@ -13,25 +14,24 @@ graphql_endpoint = "https://rpc.celo.grassecon.net/graphql"
|
|||||||
ws_endpoint = "wss://socket.celo.grassecon.net"
|
ws_endpoint = "wss://socket.celo.grassecon.net"
|
||||||
|
|
||||||
[syncer]
|
[syncer]
|
||||||
# Number of goroutines assigned to the worker pool
|
# Number of goroutines assigned to the janitor worker pool
|
||||||
janitor_concurrency = 5
|
janitor_concurrency = 5
|
||||||
# Max idle time after which goroutine is returned back to the pool
|
|
||||||
idle_worker_timeout = 1
|
|
||||||
# Syncer start block
|
# Syncer start block
|
||||||
initial_lower_bound = 17269000
|
initial_lower_bound = 17269000
|
||||||
# Max blocks in worker queue awaiting processing
|
# Max blocks in worker queue awaiting processing
|
||||||
janitor_queue_size = 500
|
janitor_queue_size = 500
|
||||||
# Janitor sweep interval, should take into account concurrency and queue_size
|
# Janitor sweep interval
|
||||||
janitor_sweep_interval = 5
|
janitor_sweep_interval = 5
|
||||||
|
|
||||||
[postgres]
|
[postgres]
|
||||||
|
# Default is the Docker container DSN
|
||||||
dsn = "postgres://postgres:postgres@localhost:5432/cic_chain_events"
|
dsn = "postgres://postgres:postgres@localhost:5432/cic_chain_events"
|
||||||
|
|
||||||
# https://docs.nats.io/
|
# https://docs.nats.io/
|
||||||
[jetstream]
|
[jetstream]
|
||||||
endpoint = "nats://localhost:4222"
|
endpoint = "nats://localhost:4222"
|
||||||
stream_name = "CHAIN"
|
stream_name = "CHAIN"
|
||||||
# Duration JetStream should keep the message before GC
|
# Duration JetStream should keep the message before removing it from the persistent store
|
||||||
persist_duration_hours = 48
|
persist_duration_hours = 48
|
||||||
# Duration to ignore duplicate transactions (e.g. due to restart)
|
# Duration to ignore duplicate transactions (e.g. due to restart)
|
||||||
dedup_duration_hours = 6
|
dedup_duration_hours = 6
|
||||||
@ -41,7 +41,3 @@ stream_subjects = [
|
|||||||
"CHAIN.transferFrom",
|
"CHAIN.transferFrom",
|
||||||
"CHAIN.mintTo"
|
"CHAIN.mintTo"
|
||||||
]
|
]
|
||||||
|
|
||||||
# 77G - snapshot
|
|
||||||
# 111G - decompressed
|
|
||||||
# 112G - latest
|
|
@ -10,15 +10,15 @@ The existing implementation demo's tracking Celo stables transfer events and giv
|
|||||||
|
|
||||||
### Head syncer
|
### Head syncer
|
||||||
|
|
||||||
The head syncer processes newely produced blocks independently by connection to the geth websocket endpoint.
|
The head syncer processes newly produced blocks independently by connecting to the geth websocket endpoint.
|
||||||
|
|
||||||
### Janitor
|
### Janitor
|
||||||
|
|
||||||
The janitor syncer checks for missing (blocks) gaps in the commited block sequence and queues them for processing. It can also function as a historical syncer too process older blocks.
|
The janitor syncer checks for missing block gaps in the committed block sequence and queues them for processing. It can also function as a historical syncer to process older blocks.
|
||||||
|
|
||||||
With the default `config.toml`, The janitor can process around 950-1000 blocks/min.
|
With the default `config.toml`, the janitor can process around 950-1000 blocks/min.
|
||||||
|
|
||||||
_Ordering_
|
**Ordering**
|
||||||
|
|
||||||
Missed/historical blocks are not guaranteed to be processed in order, however a low concurrency setting would somewhat give an "in-order" behaviour (not to be relied upon in any case).
|
Missed/historical blocks are not guaranteed to be processed in order, however a low concurrency setting would somewhat give an "in-order" behaviour (not to be relied upon in any case).
|
||||||
|
|
||||||
@ -28,7 +28,7 @@ The default GraphQL block fetcher is the recommended fetcher. An experimental RP
|
|||||||
|
|
||||||
## Pipeline
|
## Pipeline
|
||||||
|
|
||||||
The pipeline fetches a whole block with its full transaction and receipt objects, executes all loaded filters serially and finally commits the block value to the db. Blocks are processed atomically by the pipeline; a failure in one of the filters will trigger the janitor to re-queue the block and process the block again.
|
The pipeline fetches a whole block with its full transaction and receipt objects, executes all loaded filters serially and finally commits the block number to the db. Blocks are processed atomically by the pipeline; a failure in one of the filters will trigger the janitor to re-queue the block and process the block again.
|
||||||
|
|
||||||
## Store
|
## Store
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user