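// Package syncer implements a ticker-driven loop that periodically
// extracts key/value entries from the store and advances a sync cursor.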
package syncer

import (
	"context"
	"log/slog"
	"time"

	"git.grassecon.net/urdt/ussd-data-connect/internal/store"
	"git.grassecon.net/urdt/ussd-data-connect/pkg/data"
	"github.com/georgysavva/scany/v2/pgxscan"
	"github.com/jackc/pgx/v5"
)

// syncInterval is how often the syncer polls the store for new entries.
const syncInterval = time.Second * 10

type (
	// KVRow is a single key/value entry as returned by the extract query.
	KVRow struct {
		Key     []byte    `db:"key"`
		Value   []byte    `db:"value"`
		Updated time.Time `db:"updated"`
	}

	// SyncerOpts holds the dependencies needed to construct a Syncer.
	SyncerOpts struct {
		Logg  *slog.Logger
		Store *store.Store
	}

	// Syncer periodically extracts key/value entries from the store.
	Syncer struct {
		logg     *slog.Logger
		interval time.Duration
		done     chan struct{}
		store    *store.Store
	}
)

// New returns a Syncer configured with the default sync interval.
func New(o SyncerOpts) *Syncer {
	return &Syncer{
		done:     make(chan struct{}),
		interval: syncInterval,
		logg:     o.Logg,
		store:    o.Store,
	}
}

// Run starts the ticker-driven sync loop and blocks until Stop is called.
// Each tick triggers a single sync pass; errors are logged, not fatal.
func (s *Syncer) Run() {
	ticker := time.NewTicker(s.interval)
	s.logg.Info("syncer ticker started")
	for {
		select {
		case <-s.done:
			ticker.Stop()
			return
		case <-ticker.C:
			s.logg.Debug("syncer tick")
			if err := s.process(context.Background()); err != nil {
				s.logg.Error("failed to process sync tick", "error", err)
			}
		}
	}
}

// Stop signals the sync loop in Run to exit.
func (s *Syncer) Stop() {
	close(s.done)
}

// process runs one sync pass inside a single transaction: it extracts
// pending entries, decodes each key into a data type and session ID,
// and advances the sync cursor to the timestamp of the first row seen.
func (s *Syncer) process(ctx context.Context) error {
	return s.store.ExecuteTransaction(ctx, func(tx pgx.Tx) error {
		rows, err := tx.Query(ctx, s.store.Queries.ExtractEntries)
		if err != nil {
			return err
		}
		defer rows.Close()

		rs := pgxscan.NewRowScanner(rows)
		var batchTimestamp *time.Time
		for rows.Next() {
			var row KVRow
			if err := rs.Scan(&row); err != nil {
				return err
			}
			// The first row's timestamp becomes the cursor for this batch.
			if batchTimestamp == nil {
				batchTimestamp = &row.Updated
			}
			decodedKeyDataType, sessionID := data.DecodeKey(row.Key)
			decodedValue := data.DecodeValue(row.Value)

			// Only rows with a recognized key data type are processed further.
			if _, ok := data.ValidDataTypeLookup[decodedKeyDataType]; ok {
				s.logg.Debug("processing row", "batch_timestamp", batchTimestamp, "key_type", decodedKeyDataType, "session_id", sessionID, "value", decodedValue, "timestamp", row.Updated)
			}
		}
		if err := rows.Err(); err != nil {
			return err
		}

		if batchTimestamp != nil {
			_, err = tx.Exec(ctx, s.store.Queries.UpdateCursor, batchTimestamp)
			if err != nil {
				return err
			}
		}

		return nil
	})
}
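
// Usage sketch: a minimal wiring of the Syncer, assuming a *store.Store
// and *slog.Logger have been constructed elsewhere (names illustrative):
//
//	sync := syncer.New(syncer.SyncerOpts{Logg: logg, Store: st})
//	go sync.Run()
//	// ... on shutdown:
//	sync.Stop()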