cic-chain-events/cmd/main.go
Mohamed Sohail 20fc30c34a
feat: RPC block fetcher (#15)
* feat: init base logic for rpc fetcher

* feat: rpc block fetcher, move filters to internal

* move filters to internal folder
* rpc block fetcher
* add benchmarks:

goos: linux
goarch: amd64
pkg: github.com/grassrootseconomics/cic-chain-events/pkg/fetch
cpu: AMD EPYC Processor
Benchmark_RPC
Benchmark_RPC/RPC_Block_Fetcher_Benchmark
Benchmark_RPC/RPC_Block_Fetcher_Benchmark-4                   25          46000646 ns/op          221697 B/op        844 allocs/op
Benchmark_GraphQL
Benchmark_GraphQL/GraphQL_Block_Fetcher_Benchmark
Benchmark_GraphQL/GraphQL_Block_Fetcher_Benchmark-4           56          21219962 ns/op           56686 B/op         94 allocs/op
PASS
ok      github.com/grassrootseconomics/cic-chain-events/pkg/fetch       2.920s

* inline-docs: Describe RPC fetcher
2023-01-19 11:42:59 +03:00
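
For context on the numbers above: the GraphQL fetcher finishes a block in roughly half the wall-clock time of the JSON-RPC fetcher, using about a quarter of the memory and roughly a tenth of the allocations per operation, and main() below indeed wires the GraphQL fetcher into the pipeline. A minimal sketch of how such a side-by-side benchmark can be structured with testing.B is shown here (placed in a *_test.go file); the Fetcher interface, the stub implementations, the package name, and the block number are placeholders for illustration, not the actual types in pkg/fetch.

package fetchbench

import (
	"context"
	"testing"
)

// Fetcher is a hypothetical stand-in for the block fetcher abstraction in
// pkg/fetch; both the RPC and GraphQL implementations would satisfy it.
type Fetcher interface {
	Block(ctx context.Context, number uint64) error
}

// stubRPCFetcher and stubGraphQLFetcher are no-op placeholders; the real
// benchmarks exercise fetchers talking to a live node.
type stubRPCFetcher struct{}

func (s *stubRPCFetcher) Block(ctx context.Context, number uint64) error { return nil }

type stubGraphQLFetcher struct{}

func (s *stubGraphQLFetcher) Block(ctx context.Context, number uint64) error { return nil }

// benchmarkFetcher runs the shared benchmark loop for a given fetcher.
func benchmarkFetcher(b *testing.B, f Fetcher) {
	ctx := context.Background()
	const blockNumber = 100 // arbitrary block number for illustration

	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if err := f.Block(ctx, blockNumber); err != nil {
			b.Fatal(err)
		}
	}
}

func Benchmark_RPC(b *testing.B) {
	b.Run("RPC_Block_Fetcher_Benchmark", func(b *testing.B) {
		benchmarkFetcher(b, &stubRPCFetcher{})
	})
}

func Benchmark_GraphQL(b *testing.B) {
	b.Run("GraphQL_Block_Fetcher_Benchmark", func(b *testing.B) {
		benchmarkFetcher(b, &stubGraphQLFetcher{})
	})
}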


package main

import (
	"context"
	"flag"
	"os"
	"os/signal"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/grassrootseconomics/cic-chain-events/internal/filter"
	"github.com/grassrootseconomics/cic-chain-events/internal/pipeline"
	"github.com/grassrootseconomics/cic-chain-events/internal/pool"
	"github.com/grassrootseconomics/cic-chain-events/internal/syncer"
	"github.com/knadh/goyesql/v2"
	"github.com/knadh/koanf"
	"github.com/zerodha/logf"
)
var (
	confFlag    string
	debugFlag   bool
	queriesFlag string

	ko *koanf.Koanf
	lo logf.Logger
	q  goyesql.Queries
)

func init() {
	flag.StringVar(&confFlag, "config", "config.toml", "Config file location")
	flag.BoolVar(&debugFlag, "log", true, "Enable debug logging")
	flag.StringVar(&queriesFlag, "queries", "queries.sql", "Queries file location")
	flag.Parse()

	lo = initLogger(debugFlag)
	ko = initConfig(confFlag)
	q = initQueries(queriesFlag)
}
func main() {
	syncerStats := &syncer.Stats{}
	wg := &sync.WaitGroup{}

	apiServer := initApiServer()

	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer stop()

	janitorWorkerPool := pool.NewPool(ctx, pool.Opts{
		Concurrency: ko.MustInt("syncer.janitor_concurrency"),
		QueueSize:   ko.MustInt("syncer.janitor_queue_size"),
	})

	pgStore, err := initPgStore()
	if err != nil {
		lo.Fatal("main: critical error loading pg store", "error", err)
	}

	graphqlFetcher := initFetcher()

	pipeline := pipeline.NewPipeline(pipeline.PipelineOpts{
		BlockFetcher: graphqlFetcher,
		Filters: []filter.Filter{
			initAddressFilter(),
			initDecodeFilter(),
		},
		Logg:  lo,
		Store: pgStore,
	})

	// The head syncer follows the chain head over the websocket endpoint and
	// only ever needs a single worker.
	headSyncerWorker := pool.NewPool(ctx, pool.Opts{
		Concurrency: 1,
		QueueSize:   1,
	})

	headSyncer, err := syncer.NewHeadSyncer(syncer.HeadSyncerOpts{
		Logg:       lo,
		Pipeline:   pipeline,
		Pool:       headSyncerWorker,
		Stats:      syncerStats,
		WsEndpoint: ko.MustString("chain.ws_endpoint"),
	})
	if err != nil {
		lo.Fatal("main: critical error loading head syncer", "error", err)
	}

	// The janitor periodically sweeps for blocks the head syncer missed and
	// replays them through the same pipeline.
	janitor := syncer.NewJanitor(syncer.JanitorOpts{
		BatchSize:     uint64(ko.MustInt64("syncer.janitor_queue_size")),
		Logg:          lo,
		Pipeline:      pipeline,
		Pool:          janitorWorkerPool,
		Stats:         syncerStats,
		Store:         pgStore,
		SweepInterval: time.Second * time.Duration(ko.MustInt64("syncer.janitor_sweep_interval")),
	})

	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := headSyncer.Start(ctx); err != nil {
			lo.Fatal("main: critical error starting head syncer", "error", err)
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := janitor.Start(ctx); err != nil {
			lo.Fatal("main: critical error starting janitor", "error", err)
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		lo.Info("starting API server")
		if err := apiServer.Start(ko.MustString("api.address")); err != nil {
			if strings.Contains(err.Error(), "Server closed") {
				lo.Info("main: shutting down server")
			} else {
				lo.Fatal("main: critical error shutting down server", "err", err)
			}
		}
	}()

	<-ctx.Done()

	// ctx is already done here; use a fresh context so the API server can drain in-flight requests.
	shutdownCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	if err := apiServer.Shutdown(shutdownCtx); err != nil {
		lo.Error("main: could not gracefully shutdown api server", "err", err)
	}

	wg.Wait()
}
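
main.go leans on a handful of init* helpers (initLogger, initConfig, initQueries, initApiServer, initPgStore, initFetcher, and the filter constructors) defined elsewhere in cmd/. Below is a minimal sketch of what the first three could look like, assuming a TOML config loaded through koanf's file provider and a goyesql-parsed SQL file; the real helpers may differ.

package main

import (
	"log"

	"github.com/knadh/goyesql/v2"
	"github.com/knadh/koanf"
	"github.com/knadh/koanf/parsers/toml"
	"github.com/knadh/koanf/providers/file"
	"github.com/zerodha/logf"
)

// initLogger returns a structured logger, bumping verbosity to debug when
// the -log flag is set.
func initLogger(debug bool) logf.Logger {
	opts := logf.Opts{EnableCaller: true}
	if debug {
		opts.Level = logf.DebugLevel
	}
	return logf.New(opts)
}

// initConfig loads the TOML config file referenced by the -config flag.
// main() reads at least these keys: syncer.janitor_concurrency,
// syncer.janitor_queue_size, syncer.janitor_sweep_interval,
// chain.ws_endpoint and api.address.
func initConfig(configFile string) *koanf.Koanf {
	k := koanf.New(".")
	if err := k.Load(file.Provider(configFile), toml.Parser()); err != nil {
		log.Fatalf("could not load config file: %v", err)
	}
	return k
}

// initQueries parses the named SQL queries in the -queries file.
func initQueries(queriesFile string) goyesql.Queries {
	queries, err := goyesql.ParseFile(queriesFile)
	if err != nil {
		log.Fatalf("could not parse queries file: %v", err)
	}
	return queries
}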