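// Package syncer periodically extracts pending key/value entries from the
// store and publishes them as decoded events.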
package syncer

import (
	"context"
	"log/slog"
	"time"

	"git.grassecon.net/urdt/ussd-data-connect/internal/pub"
	"git.grassecon.net/urdt/ussd-data-connect/internal/store"
	"git.grassecon.net/urdt/ussd-data-connect/pkg/data"
	"git.grassecon.net/urdt/ussd-data-connect/pkg/event"
	"github.com/georgysavva/scany/v2/pgxscan"
	"github.com/jackc/pgx/v5"
)
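
// syncInterval is how often the syncer polls the store for pending entries.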
const syncInterval = time.Second * 5

type (
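	// KVRow is a single key/value entry extracted from the store, together
	// with the time it was last updated.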
	KVRow struct {
		Key     []byte    `db:"key"`
		Value   []byte    `db:"value"`
		Updated time.Time `db:"updated"`
	}
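
	// SyncerOpts collects the dependencies needed to construct a Syncer.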
	SyncerOpts struct {
		Pub   pub.Pub
		Logg  *slog.Logger
		Store *store.Store
	}
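
	// Syncer drains the store on a fixed interval and publishes the
	// decoded entries.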
	Syncer struct {
		interval time.Duration
		done     chan struct{}
		pub      pub.Pub
		logg     *slog.Logger
		store    *store.Store
	}
)
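
// New builds a Syncer from the given options, using the default sync
// interval.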
func New(o SyncerOpts) *Syncer {
	return &Syncer{
		done:     make(chan struct{}),
		interval: syncInterval,
		pub:      o.Pub,
		logg:     o.Logg,
		store:    o.Store,
	}
}
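
// Run starts the ticker loop and triggers a sync pass on every tick. It
// blocks until Stop is called.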
func (s *Syncer) Run() {
	ticker := time.NewTicker(s.interval)
	s.logg.Info("syncer ticker started")
	for {
		select {
		case <-s.done:
			ticker.Stop()
			return
		case <-ticker.C:
			s.logg.Debug("syncer tick")
			if err := s.Process(context.Background()); err != nil {
				s.logg.Error("failed to process sync tick", "error", err)
			}
		}
	}
}
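
// Stop terminates the ticker loop started by Run.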
func (s *Syncer) Stop() {
	close(s.done)
}
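
// Process runs a single sync pass inside one transaction: it extracts
// pending entries, publishes every row with a recognized data type as an
// event, and advances the sync cursor to the batch timestamp.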
func (s *Syncer) Process(ctx context.Context) error {
	return s.store.ExecuteTransaction(ctx, func(tx pgx.Tx) error {
		rows, err := tx.Query(ctx, s.store.Queries.ExtractEntries)
		if err != nil {
			return err
		}
		defer rows.Close()

		rs := pgxscan.NewRowScanner(rows)
		var batchTimestamp *time.Time
		for rows.Next() {
			var row KVRow
			if err := rs.Scan(&row); err != nil {
				return err
			}

			decodedKeyDataType, sessionID := data.DecodeKey(row.Key)
			if _, ok := data.ValidDataTypeLookup[decodedKeyDataType]; ok {
				// The first recognized row fixes the timestamp that the
				// cursor is advanced to once the batch has been published.
				if batchTimestamp == nil {
					batchTimestamp = &row.Updated
				}
				decodedValue := data.DecodeValue(row.Value)

				s.logg.Debug("processing row", "batch_timestamp", batchTimestamp, "key_type", decodedKeyDataType, "session_id", sessionID, "value", decodedValue, "timestamp", row.Updated)
				if err := s.pub.Send(ctx, event.Event{
					Timestamp: row.Updated.Unix(),
					Type:      decodedKeyDataType,
					Value:     decodedValue,
				}); err != nil {
					return err
				}
			}
		}
		if err := rows.Err(); err != nil {
			return err
		}

		// Only advance the cursor when at least one entry was processed.
		if batchTimestamp != nil {
			_, err = tx.Exec(ctx, s.store.Queries.UpdateCursor, batchTimestamp)
			if err != nil {
				return err
			}
		}

		return nil
	})
}