fix: mint and burn func signatures, adjustable batch size for pool and backfill buffer

Mohamed Sohail 2024-09-05 14:24:32 +03:00
parent 591518bbb5
commit 22ffc224ca
Signed by: kamikazechaser
GPG Key ID: 7DD45520C01CD85D
7 changed files with 32 additions and 29 deletions

View File

@@ -108,6 +108,7 @@ func main() {
     poolOpts := pool.PoolOpts{
         Logg:        lo,
         WorkerCount: ko.Int("core.pool_size"),
+        BatchSize:   ko.MustInt("core.batch_size"),
         Processor:   blockProcessor,
     }
     if ko.Int("core.pool_size") <= 0 {
@@ -136,9 +137,10 @@ func main() {
     }
     backfill := backfill.New(backfill.BackfillOpts{
-        DB:   db,
-        Logg: lo,
-        Pool: workerPool,
+        BatchSize: ko.MustInt("core.batch_size"),
+        DB:        db,
+        Logg:      lo,
+        Pool:      workerPool,
     })
     apiServer := &http.Server{

View File

@@ -11,6 +11,7 @@ db_type = "bolt"
 # Defaults to (nproc * 3)
 pool_size = 0
 # If you are using an archive node, set this to true
+batch_size = 100
 [redis]
 dsn = "127.0.0.1:6379"
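
The ko.Int / ko.MustInt calls in main.go point to a koanf-style config loader. A minimal sketch of how the new core.batch_size key is read, assuming github.com/knadh/koanf/v2 with its TOML parser (the inline config below only mirrors the [core] section for illustration; it is not the project's actual wiring):

package main

import (
	"fmt"

	"github.com/knadh/koanf/parsers/toml"
	"github.com/knadh/koanf/providers/rawbytes"
	"github.com/knadh/koanf/v2"
)

func main() {
	ko := koanf.New(".")

	// Inline copy of the [core] section; the real service loads its TOML config file.
	cfg := []byte("[core]\npool_size = 0\nbatch_size = 100\n")
	if err := ko.Load(rawbytes.Provider(cfg), toml.Parser()); err != nil {
		panic(err)
	}

	// MustInt panics if the key is missing or zero, so batch_size has to be set.
	fmt.Println(ko.MustInt("core.batch_size")) // 100
}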

View File

@@ -11,34 +11,35 @@ import (
 type (
     BackfillOpts struct {
-        DB   db.DB
-        Logg *slog.Logger
-        Pool *pool.Pool
+        BatchSize int
+        DB        db.DB
+        Logg      *slog.Logger
+        Pool      *pool.Pool
     }
     Backfill struct {
-        db     db.DB
-        logg   *slog.Logger
-        pool   *pool.Pool
-        stopCh chan struct{}
-        ticker *time.Ticker
+        batchSize int
+        db        db.DB
+        logg      *slog.Logger
+        pool      *pool.Pool
+        stopCh    chan struct{}
+        ticker    *time.Ticker
     }
 )
 const (
     idleCheckInterval = 60 * time.Second
     busyCheckInterval = 1 * time.Second
-
-    maxPoolSizePush = 100
 )
 func New(o BackfillOpts) *Backfill {
     return &Backfill{
-        db:     o.DB,
-        logg:   o.Logg,
-        pool:   o.Pool,
-        stopCh: make(chan struct{}),
-        ticker: time.NewTicker(idleCheckInterval),
+        batchSize: o.BatchSize,
+        db:        o.DB,
+        logg:      o.Logg,
+        pool:      o.Pool,
+        stopCh:    make(chan struct{}),
+        ticker:    time.NewTicker(idleCheckInterval),
     }
 }
@@ -90,13 +91,13 @@ func (b *Backfill) Run(skipLatest bool) error {
     if missingBlocksCount > 0 {
         b.logg.Info("found missing blocks", "skip_latest", skipLatest, "missing_blocks_count", missingBlocksCount)
-        buffer := make([]uint, maxPoolSizePush)
+        buffer := make([]uint, b.batchSize)
         j := uint(0)
         pushedCount := 0
         j, buffer = missingBlocks.NextSetMany(j, buffer)
         for ; len(buffer) > 0; j, buffer = missingBlocks.NextSetMany(j, buffer) {
             for k := range buffer {
-                if pushedCount >= maxPoolSizePush {
+                if pushedCount >= b.batchSize {
                     break
                 }
@@ -104,11 +105,11 @@ func (b *Backfill) Run(skipLatest bool) error {
                 b.logg.Debug("pushed block from backfill", "block", buffer[k])
                 pushedCount++
             }
-            j += 1
+            j++
         }
     }
-    if missingBlocksCount > maxPoolSizePush {
+    if missingBlocksCount > uint(b.batchSize) {
         b.ticker.Reset(busyCheckInterval)
     } else {
         b.ticker.Reset(idleCheckInterval)
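
The Run loop above drains the missing-block bitset in chunks of b.batchSize via NextSetMany. A minimal sketch of that iteration pattern, assuming missingBlocks is a github.com/bits-and-blooms/bitset BitSet (the block numbers and batch size below are made up):

package main

import (
	"fmt"

	"github.com/bits-and-blooms/bitset"
)

func main() {
	// Hypothetical set of missing block numbers.
	missing := bitset.New(1024)
	for _, blk := range []uint{5, 42, 99, 512, 900} {
		missing.Set(blk)
	}

	// Buffer sized to the configured batch, as in buffer := make([]uint, b.batchSize).
	batchSize := 3
	buffer := make([]uint, batchSize)

	j := uint(0)
	j, buffer = missing.NextSetMany(j, buffer)
	for ; len(buffer) > 0; j, buffer = missing.NextSetMany(j, buffer) {
		// Each pass yields at most batchSize set bits; the backfill pushes these to the worker pool.
		fmt.Println("batch:", buffer)
		j++
	}
	// Output:
	// batch: [5 42 99]
	// batch: [512 900]
}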

View File

@@ -11,6 +11,7 @@ import (
 type (
     PoolOpts struct {
+        BatchSize   int
         Logg        *slog.Logger
         WorkerCount int
         Processor   *processor.Processor
@@ -23,14 +24,12 @@ type (
     }
 )
-const blocksBuffer = 100
 func New(o PoolOpts) *Pool {
     return &Pool{
         logg:       o.Logg,
         workerPool: pond.New(
             o.WorkerCount,
-            blocksBuffer,
+            o.BatchSize,
             pond.Strategy(pond.Balanced()),
             pond.PanicHandler(panicHandler(o.Logg)),
         ),
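
The second argument to pond.New is the buffered task-queue capacity, previously the hard-coded blocksBuffer and now the configured batch size. A minimal sketch of that behaviour, assuming github.com/alitto/pond v1, which matches the pond.New(workers, capacity, ...) call above (worker count and block numbers are arbitrary):

package main

import (
	"fmt"

	"github.com/alitto/pond"
)

func main() {
	workerCount := 4
	batchSize := 100 // plays the role of core.batch_size / the old blocksBuffer

	// Submit blocks while the batchSize-slot queue is full and all workers are
	// busy; TrySubmit would return false instead of blocking.
	wp := pond.New(
		workerCount,
		batchSize,
		pond.Strategy(pond.Balanced()),
	)
	defer wp.StopAndWait()

	for block := uint64(100); block < 110; block++ {
		b := block
		wp.Submit(func() {
			fmt.Println("processing block", b)
		})
	}
}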

View File

@@ -68,7 +68,7 @@ func (p *Processor) ProcessBlock(ctx context.Context, blockNumber uint64) error
                     Timestamp: block.Time(),
                 },
             ); err != nil && !errors.Is(err, context.Canceled) {
-                return err
+                return fmt.Errorf("route success transaction error: tx %s: %v", receipt.TxHash.Hex(), err)
             }
         }
     }
@@ -100,7 +100,7 @@ func (p *Processor) ProcessBlock(ctx context.Context, blockNumber uint64) error
                     TxHash: receipt.TxHash.Hex(),
                 },
             ); err != nil && !errors.Is(err, context.Canceled) {
-                return err
+                return fmt.Errorf("route revert transaction error: tx %s: %v", receipt.TxHash.Hex(), err)
             }
         }
     }
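
The new messages wrap the underlying error with %v, which flattens the cause into text; %w would additionally keep it matchable with errors.Is / errors.As. A minimal, self-contained sketch of the difference (the sentinel error and tx hash are made up):

package main

import (
	"errors"
	"fmt"
)

var errRouterDown = errors.New("router down") // hypothetical sentinel

func main() {
	txHash := "0xabc123" // placeholder hash

	withV := fmt.Errorf("route success transaction error: tx %s: %v", txHash, errRouterDown)
	withW := fmt.Errorf("route success transaction error: tx %s: %w", txHash, errRouterDown)

	fmt.Println(errors.Is(withV, errRouterDown)) // false: %v drops the error chain
	fmt.Println(errors.Is(withW, errRouterDown)) // true: %w preserves it
}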

View File

@@ -17,7 +17,7 @@ var (
     _ Handler = (*tokenBurnHandler)(nil)

     tokenBurnEvent = w3.MustNewEvent("Burn(address indexed _tokenBurner, uint256 _value)")
-    tokenBurnToSig = w3.MustNewFunc("Burn(uint256)", "bool")
+    tokenBurnToSig = w3.MustNewFunc("burn(uint256)", "bool")
 )

 func (h *tokenBurnHandler) Name() string {

View File

@@ -17,7 +17,7 @@ var (
     _ Handler = (*tokenMintHandler)(nil)

     tokenMintEvent = w3.MustNewEvent("Mint(address indexed _tokenMinter, address indexed _beneficiary, uint256 _value)")
-    tokenMintToSig = w3.MustNewFunc("MintTo(address, uint256)", "bool")
+    tokenMintToSig = w3.MustNewFunc("mintTo(address, uint256)", "bool")
 )

 func (h *tokenMintHandler) Name() string {
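
The casing fix matters because a contract function's 4-byte selector is the keccak256 hash of its canonical signature, so "Burn(uint256)" and "burn(uint256)" select different functions, and w3.MustNewFunc derives its selector from the signature string it is given. A minimal sketch using go-ethereum's crypto package (not necessarily a dependency of this repo) to show the differing selectors:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

// selector returns the 4-byte function selector for a canonical signature string.
func selector(sig string) []byte {
	return crypto.Keccak256([]byte(sig))[:4]
}

func main() {
	// Casing changes the hash, hence the selector, hence which on-chain function is matched.
	for _, sig := range []string{
		"Burn(uint256)", "burn(uint256)",
		"MintTo(address,uint256)", "mintTo(address,uint256)",
	} {
		fmt.Printf("%-24s -> 0x%x\n", sig, selector(sig))
	}
}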