// Package syncer periodically extracts key/value entries from the store
// and publishes them as events.
package syncer
import (
"context"
"log/slog"
"time"
"git.grassecon.net/urdt/ussd-data-connect/internal/pub"
"git.grassecon.net/urdt/ussd-data-connect/internal/store"
"git.grassecon.net/urdt/ussd-data-connect/pkg/data"
"git.grassecon.net/urdt/ussd-data-connect/pkg/event"
"github.com/georgysavva/scany/v2/pgxscan"
"github.com/jackc/pgx/v5"
)
// syncInterval is how often the syncer polls the store for new entries.
const syncInterval = time.Second * 5
type (
KVRow struct {
Key []byte `db:"key"`
Value []byte `db:"value"`
Updated time.Time `db:"updated"`
}
SyncerOpts struct {
Pub pub.Pub
2025-01-02 10:05:09 +01:00
Logg *slog.Logger
Store *store.Store
}
Syncer struct {
interval time.Duration
done chan struct{}
pub pub.Pub
logg *slog.Logger
2025-01-02 10:05:09 +01:00
store *store.Store
}
)
func New(o SyncerOpts) *Syncer {
return &Syncer{
done: make(chan struct{}),
interval: syncInterval,
pub: o.Pub,
2025-01-02 10:05:09 +01:00
logg: o.Logg,
store: o.Store,
}
}
func (s *Syncer) Run() {
ticker := time.NewTicker(s.interval)
s.logg.Info("syncer ticker started")
for {
select {
case <-s.done:
ticker.Stop()
return
case <-ticker.C:
s.logg.Debug("syncer tick")
if err := s.Process(context.Background()); err != nil {
2025-01-02 10:05:09 +01:00
s.logg.Error("failed to process sync tick", "error", err)
}
}
}
}
// Stop signals the Run loop to exit by closing the done channel.
// Stop must be called at most once; a second call would panic on
// closing an already-closed channel.
func (s *Syncer) Stop() {
	close(s.done)
}
func (s *Syncer) Process(ctx context.Context) error {
2025-01-02 10:05:09 +01:00
return s.store.ExecuteTransaction(ctx, func(tx pgx.Tx) error {
rows, err := tx.Query(ctx, s.store.Queries.ExtractEntries)
if err != nil {
return err
}
defer rows.Close()
rs := pgxscan.NewRowScanner(rows)
var batchTimestamp *time.Time
for rows.Next() {
var row KVRow
if err := rs.Scan(&row); err != nil {
return err
}
decodedKeyDataType, sessionID := data.DecodeKey(row.Key)
2025-01-02 10:05:09 +01:00
if _, ok := data.ValidDataTypeLookup[decodedKeyDataType]; ok {
if batchTimestamp == nil {
batchTimestamp = &row.Updated
}
decodedValue := data.DecodeValue(row.Value)
2025-01-02 10:05:09 +01:00
s.logg.Debug("processing row", "batch_timestamp", batchTimestamp, "key_type", decodedKeyDataType, "session_id", sessionID, "value", decodedValue, "timestamp", row.Updated)
if err := s.pub.Send(ctx, event.Event{
Timestamp: row.Updated.Unix(),
Type: decodedKeyDataType,
Value: decodedValue,
}); err != nil {
return err
}
2025-01-02 10:05:09 +01:00
}
}
if err := rows.Err(); err != nil {
return err
}
if batchTimestamp != nil {
_, err = tx.Exec(ctx, s.store.Queries.UpdateCursor, batchTimestamp)
if err != nil {
return err
}
}
return nil
})
}