cic-chain-events/cmd/main.go

package main

import (
    "context"
    "flag"
    "os"
    "os/signal"
    "strings"
    "sync"
    "syscall"
    "time"

    "github.com/grassrootseconomics/cic-chain-events/internal/filter"
    "github.com/grassrootseconomics/cic-chain-events/internal/pipeline"
    "github.com/grassrootseconomics/cic-chain-events/internal/pool"
    "github.com/grassrootseconomics/cic-chain-events/internal/syncer"
    "github.com/knadh/goyesql/v2"
    "github.com/knadh/koanf"
    "github.com/zerodha/logf"
)

var (
    confFlag    string
    debugFlag   bool
    queriesFlag string

    ko *koanf.Koanf
    lo logf.Logger
    q  goyesql.Queries
)
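
// init parses the command-line flags and bootstraps the logger, configuration
// and SQL queries before main runs.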
func init() {
    flag.StringVar(&confFlag, "config", "config.toml", "Config file location")
    flag.BoolVar(&debugFlag, "log", true, "Enable debug logging")
    flag.StringVar(&queriesFlag, "queries", "queries.sql", "Queries file location")
    flag.Parse()

    lo = initLogger(debugFlag)
    ko = initConfig(confFlag)
    q = initQueries(queriesFlag)
}
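
// main wires up the head syncer, janitor and API server, then blocks until an
// interrupt or SIGTERM triggers a graceful shutdown. The configuration keys
// read below are syncer.janitor_concurrency, syncer.janitor_queue_size,
// syncer.janitor_sweep_interval, chain.ws_endpoint and api.address.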
func main() {
    syncerStats := &syncer.Stats{}
    wg := &sync.WaitGroup{}

    apiServer := initApiServer()

    // Cancel the root context on interrupt or SIGTERM to shut everything down.
    ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
    defer stop()

    janitorWorkerPool := pool.NewPool(ctx, pool.Opts{
        Concurrency: ko.MustInt("syncer.janitor_concurrency"),
        QueueSize:   ko.MustInt("syncer.janitor_queue_size"),
    })

    pgStore, err := initPgStore()
    if err != nil {
        lo.Fatal("main: critical error loading pg store", "error", err)
    }

    jsCtx, err := initJetStream()
    if err != nil {
        lo.Fatal("main: critical error loading jetstream context", "error", err)
    }

    graphqlFetcher := initFetcher()

    // Every block fetched over GraphQL is passed through the filters below.
    pipeline := pipeline.NewPipeline(pipeline.PipelineOpts{
        BlockFetcher: graphqlFetcher,
        Filters: []filter.Filter{
            initAddressFilter(),
            initGasGiftFilter(jsCtx),
            initTransferFilter(jsCtx),
            initRegisterFilter(jsCtx),
        },
        Logg:  lo,
        Store: pgStore,
    })

    // The head syncer processes new blocks one at a time.
    headSyncerWorker := pool.NewPool(ctx, pool.Opts{
        Concurrency: 1,
        QueueSize:   1,
    })

    headSyncer, err := syncer.NewHeadSyncer(syncer.HeadSyncerOpts{
        Logg:       lo,
        Pipeline:   pipeline,
        Pool:       headSyncerWorker,
        Stats:      syncerStats,
        WsEndpoint: ko.MustString("chain.ws_endpoint"),
    })
    if err != nil {
        lo.Fatal("main: critical error loading head syncer", "error", err)
    }

    // The janitor sweeps at a fixed interval to pick up and re-queue blocks
    // the head syncer missed.
    janitor := syncer.NewJanitor(syncer.JanitorOpts{
        BatchSize:     uint64(ko.MustInt64("syncer.janitor_queue_size")),
        Logg:          lo,
        Pipeline:      pipeline,
        Pool:          janitorWorkerPool,
        Stats:         syncerStats,
        Store:         pgStore,
        SweepInterval: time.Second * time.Duration(ko.MustInt64("syncer.janitor_sweep_interval")),
    })

    wg.Add(1)
    go func() {
        defer wg.Done()
        if err := headSyncer.Start(ctx); err != nil {
            lo.Fatal("main: critical error starting head syncer", "error", err)
        }
    }()

    wg.Add(1)
    go func() {
        defer wg.Done()
        if err := janitor.Start(ctx); err != nil {
            lo.Fatal("main: critical error starting janitor", "error", err)
        }
    }()

    wg.Add(1)
    go func() {
        defer wg.Done()
        lo.Info("starting API server")
        if err := apiServer.Start(ko.MustString("api.address")); err != nil {
            if strings.Contains(err.Error(), "Server closed") {
                lo.Info("main: shutting down server")
            } else {
                lo.Fatal("main: critical error shutting down server", "err", err)
            }
        }
    }()

    <-ctx.Done()

    // ctx is already cancelled at this point, so shut the API server down
    // against a fresh timeout context instead of the cancelled one (the 10s
    // window here is an assumed, adjustable value).
    shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    if err := apiServer.Shutdown(shutdownCtx); err != nil {
        lo.Error("main: could not gracefully shutdown api server", "err", err)
    }

    wg.Wait()
}