package main

import (
	"context"
	"flag"
	"os"
	"os/signal"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/grassrootseconomics/cic-chain-events/internal/api"
	"github.com/grassrootseconomics/cic-chain-events/internal/pipeline"
	"github.com/grassrootseconomics/cic-chain-events/internal/syncer"
	"github.com/grassrootseconomics/cic-chain-events/pkg/filter"
	"github.com/knadh/goyesql/v2"
	"github.com/knadh/koanf"
	"github.com/zerodha/logf"
)

var (
	confFlag    string
	debugFlag   bool
	queriesFlag string

	ko *koanf.Koanf
	lo logf.Logger
	q  goyesql.Queries
)
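
// init parses the CLI flags and bootstraps the logger, configuration and SQL
// queries before main runs.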
func init() {
	flag.StringVar(&confFlag, "config", "config.toml", "Config file location")
	flag.BoolVar(&debugFlag, "log", true, "Enable debug logging")
	flag.StringVar(&queriesFlag, "queries", "queries.sql", "Queries file location")
	flag.Parse()

	lo = initLogger(debugFlag)
	ko = initConfig(confFlag)
	q = initQueries(queriesFlag)
}

func main() {
	// p := profiler.New(profiler.Conf{
	// 	DirPath:        "profiles",
	// 	Quiet:          true,
	// 	NoShutdownHook: false,
	// }, profiler.Cpu, profiler.Mem)
	// p.Start()

	syncerStats := &syncer.Stats{}
	wg := &sync.WaitGroup{}
	apiServer := initApiServer()
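
	// The root context is cancelled on SIGINT or SIGTERM and drives the
	// shutdown of every long-running component below.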
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer stop()

	workerPool := initWorkerPool(ctx)

	pgStore, err := initPgStore()
	if err != nil {
		lo.Fatal("main: critical error loading pg store", "error", err)
	}

	graphqlFetcher := initFetcher()
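
	// The pipeline runs every block returned by the GraphQL fetcher through
	// the address and decode filters, backed by the pg store.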
	pipeline := pipeline.NewPipeline(pipeline.PipelineOpts{
		BlockFetcher: graphqlFetcher,
		Filters: []filter.Filter{
			initAddressFilter(),
			initDecodeFilter(),
		},
		Logg:  lo,
		Store: pgStore,
	})
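
	// The head syncer follows new chain heads over the chain.ws_endpoint
	// websocket and pushes them through the pipeline.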
	headSyncer, err := syncer.NewHeadSyncer(syncer.HeadSyncerOpts{
		Logg:       lo,
		Pipeline:   pipeline,
		Pool:       workerPool,
		Stats:      syncerStats,
		WsEndpoint: ko.MustString("chain.ws_endpoint"),
	})
	if err != nil {
		lo.Fatal("main: critical error loading head syncer", "error", err)
	}
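
	// The janitor sweeps every syncer.sweep_interval seconds, working through
	// batches of syncer.batch_size blocks while staying syncer.head_block_lag
	// blocks behind the head.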
	janitor := syncer.NewJanitor(syncer.JanitorOpts{
		BatchSize:     uint64(ko.MustInt64("syncer.batch_size")),
		HeadBlockLag:  uint64(ko.MustInt64("syncer.head_block_lag")),
		Logg:          lo,
		Pipeline:      pipeline,
		Pool:          workerPool,
		Stats:         syncerStats,
		Store:         pgStore,
		SweepInterval: time.Second * time.Duration(ko.MustInt64("syncer.sweep_interval")),
	})
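
	// Expose syncer and worker pool stats on the API server.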
	apiServer.GET("/stats", api.StatsHandler(syncerStats, workerPool))
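
	// Run the head syncer, janitor and API server concurrently; the WaitGroup
	// lets main wait for them once shutdown has been signalled.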
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := headSyncer.Start(ctx); err != nil {
			lo.Fatal("main: critical error starting head syncer", "error", err)
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := janitor.Start(ctx); err != nil {
			lo.Fatal("main: critical error starting janitor", "error", err)
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		lo.Info("starting API server")
		if err := apiServer.Start(ko.MustString("api.address")); err != nil {
			if strings.Contains(err.Error(), "Server closed") {
				lo.Info("main: shutting down server")
			} else {
				lo.Fatal("main: critical error shutting down server", "err", err)
			}
		}
	}()
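
	// Block until an interrupt or termination signal arrives, then stop the
	// worker pool, shut down the API server and wait for the goroutines to exit.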
	<-ctx.Done()

	workerPool.Stop()

	// The signal context is already cancelled by this point, so give the API
	// server a short, fresh deadline to drain in-flight requests.
	shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	if err := apiServer.Shutdown(shutdownCtx); err != nil {
		lo.Error("main: could not gracefully shutdown api server", "err", err)
	}

	wg.Wait()
	// p.Stop()
}