diff --git a/README.md b/README.md
index 82f0727..0137b30 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,7 @@

 > CIC Chain Events

-Filters live (and past) transactions on Celo and emits relevant transfer events to a NATS JetStream sink for further processing/indexing.
+Filters live (and past) transactions on Celo and emits relevant events to a NATS JetStream sink for further processing/indexing.

 ## Prerequisites

@@ -29,7 +29,7 @@ The base config is described in `config.toml`. Values can be overriden with env

 ### 3. Start the service

-#### Compiling binary
+**Compiling the binary**

 Run `make build` or download pre-compiled binaries from the [releases](https://github.com/grassrootseconomics/cic-chain-events/releases) page.

@@ -41,7 +41,7 @@ Optional flags:
 - `-debug` - Enable/disable debug level logs
 - `-queries` - `queries.sql` file path

-#### Docker
+**Docker**

 To pull the pre-built docker image:

diff --git a/config.toml b/config.toml
index c8dcbf5..caa2e08 100644
--- a/config.toml
+++ b/config.toml
@@ -1,5 +1,6 @@
 [metrics]
 # Exposes Prometheus metrics
+# /metrics endpoint
 go_process = true

 # API server
@@ -13,25 +14,24 @@ graphql_endpoint = "https://rpc.celo.grassecon.net/graphql"
 ws_endpoint = "wss://socket.celo.grassecon.net"

 [syncer]
-# Number of goroutines assigned to the worker pool
+# Number of goroutines assigned to the janitor worker pool
 janitor_concurrency = 5
-# Max idle time after which goroutine is returned back to the pool
-idle_worker_timeout = 1
 # Syncer start block
 initial_lower_bound = 17269000
 # Max blocks in worker queue awaiting processing
 janitor_queue_size = 500
-# Janitor sweep interval, should take into account concurrency and queue_size
+# Janitor sweep interval
 janitor_sweep_interval = 5

 [postgres]
+# Default is the Docker container DSN
 dsn = "postgres://postgres:postgres@localhost:5432/cic_chain_events"

 # https://docs.nats.io/
 [jetstream]
 endpoint = "nats://localhost:4222"
 stream_name = "CHAIN"
-# Duration JetStream should keep the message before GC
+# Duration JetStream should keep the message before removing it from the persistent store
 persist_duration_hours = 48
 # Duration to ignore duplicate transactions (e.g. due to restart)
 dedup_duration_hours = 6
@@ -41,7 +41,3 @@ stream_subjects = [
 "CHAIN.transferFrom",
 "CHAIN.mintTo"
 ]
-
-# 77G - snapshot
-# 111G - decompressed
-# 112G - latest
\ No newline at end of file
diff --git a/docs/functionality.md b/docs/functionality.md
index d01b746..b7f2ad2 100644
--- a/docs/functionality.md
+++ b/docs/functionality.md
@@ -10,15 +10,15 @@ The existing implementation demo's tracking Celo stables transfer events and giv

 ### Head syncer

-The head syncer processes newely produced blocks independently by connection to the geth websocket endpoint.
+The head syncer processes newly produced blocks independently by connecting to the geth websocket endpoint.

 ### Janitor

-The janitor syncer checks for missing (blocks) gaps in the commited block sequence and queues them for processing. It can also function as a historical syncer too process older blocks.
+The janitor syncer checks for gaps (missing blocks) in the committed block sequence and queues them for processing. It can also function as a historical syncer to process older blocks.

 With the default `config.toml`, The janitor can process around 950-1000 blocks/min.

-_Ordering_
+**Ordering**

 Missed/historical blocks are not guaranteed to be processed in order, however a low concurrency setting would somewhat give an "in-order" behaviour (not to be relied upon in any case).
@@ -28,7 +28,7 @@ The default GraphQL block fetcher is the recommended fetcher. An experimental RP

 ## Pipeline

-The pipeline fetches a whole block with its full transaction and receipt objects, executes all loaded filters serially and finally commits the block value to the db. Blocks are processed atomically by the pipeline; a failure in one of the filters will trigger the janitor to re-queue the block and process the block again.
+The pipeline fetches a whole block with its full transaction and receipt objects, executes all loaded filters serially, and finally commits the block number to the db. Blocks are processed atomically by the pipeline; a failure in any filter will trigger the janitor to re-queue the block and process it again.

 ## Store
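
---

Reviewer note: the snippet below is a minimal sketch of what a downstream consumer of the emitted events could look like, given the `[jetstream]` settings in the `config.toml` diff above. It assumes the standard `nats.go` client; the durable name `cic-events-demo` is hypothetical and not part of this repo.

```go
package main

import (
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	// Endpoint from the [jetstream] section of config.toml.
	nc, err := nats.Connect("nats://localhost:4222")
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	js, err := nc.JetStream()
	if err != nil {
		log.Fatal(err)
	}

	// Durable subscription over every subject on the CHAIN stream
	// (e.g. CHAIN.transferFrom, CHAIN.mintTo). "CHAIN.>" is a NATS
	// wildcard matching all subjects under the CHAIN prefix.
	sub, err := js.Subscribe("CHAIN.>", func(m *nats.Msg) {
		log.Printf("subject=%s payload=%s", m.Subject, m.Data)
		// Acknowledge so JetStream does not redeliver this message.
		m.Ack()
	}, nats.Durable("cic-events-demo"), nats.ManualAck())
	if err != nil {
		log.Fatal(err)
	}
	defer sub.Unsubscribe()

	// Block forever; a real consumer would handle shutdown signals here.
	select {}
}
```

With `ManualAck`, any message that is not acknowledged is redelivered by JetStream, so a consumer that crashes mid-handler picks the event up again on restart, complementing the atomic re-queue behaviour described in the Pipeline section.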