add vars to tune concurrency poller (#2428)

Author: Mohammed Nafees (committed by GitHub)
Date: 2025-10-23 17:36:12 +02:00
Parent: b78c61e797
Commit: cf5c5989ff
5 changed files with 64 additions and 36 deletions


@@ -634,6 +634,8 @@ func createControllerLayer(dc *database.Layer, cf *server.ServerConfigFile, vers
&queueLogger,
cf.Runtime.SingleQueueLimit,
cf.Runtime.SchedulerConcurrencyRateLimit,
+ cf.Runtime.SchedulerConcurrencyPollingMinInterval,
+ cf.Runtime.SchedulerConcurrencyPollingMaxInterval,
)
if err != nil {


@@ -259,6 +259,12 @@ type ConfigFileRuntime struct {
// SchedulerConcurrencyRateLimit is the rate limit for scheduler concurrency strategy execution (per second)
SchedulerConcurrencyRateLimit int `mapstructure:"schedulerConcurrencyRateLimit" json:"schedulerConcurrencyRateLimit,omitempty" default:"20"`
+ // SchedulerConcurrencyPollingMinInterval is the minimum interval for concurrency polling
+ SchedulerConcurrencyPollingMinInterval time.Duration `mapstructure:"schedulerConcurrencyPollingMinInterval" json:"schedulerConcurrencyPollingMinInterval,omitempty" default:"500ms"`
+ // SchedulerConcurrencyPollingMaxInterval is the maximum interval for concurrency polling
+ SchedulerConcurrencyPollingMaxInterval time.Duration `mapstructure:"schedulerConcurrencyPollingMaxInterval" json:"schedulerConcurrencyPollingMaxInterval,omitempty" default:"5s"`
// LogIngestionEnabled controls whether the server enables log ingestion for tasks
LogIngestionEnabled bool `mapstructure:"logIngestionEnabled" json:"logIngestionEnabled,omitempty" default:"true"`
@@ -650,6 +656,8 @@ func BindAllEnv(v *viper.Viper) {
_ = v.BindEnv("runtime.grpcStaticStreamWindowSize", "SERVER_GRPC_STATIC_STREAM_WINDOW_SIZE")
_ = v.BindEnv("runtime.grpcRateLimit", "SERVER_GRPC_RATE_LIMIT")
_ = v.BindEnv("runtime.schedulerConcurrencyRateLimit", "SCHEDULER_CONCURRENCY_RATE_LIMIT")
_ = v.BindEnv("runtime.schedulerConcurrencyPollingMinInterval", "SCHEDULER_CONCURRENCY_POLLING_MIN_INTERVAL")
_ = v.BindEnv("runtime.schedulerConcurrencyPollingMaxInterval", "SCHEDULER_CONCURRENCY_POLLING_MAX_INTERVAL")
_ = v.BindEnv("runtime.shutdownWait", "SERVER_SHUTDOWN_WAIT")
_ = v.BindEnv("servicesString", "SERVER_SERVICES")
_ = v.BindEnv("pausedControllers", "SERVER_PAUSED_CONTROLLERS")


@@ -40,6 +40,10 @@ type ConcurrencyManager struct {
isCleanedUp bool
rateLimiter *rate.Limiter
+ minPollingInterval time.Duration
+ maxPollingInterval time.Duration
}
func newConcurrencyManager(conf *sharedConfig, tenantId pgtype.UUID, strategy *sqlcv1.V1StepConcurrency, resultsCh chan<- *ConcurrencyResults) *ConcurrencyManager {
@@ -56,6 +60,8 @@ func newConcurrencyManager(conf *sharedConfig, tenantId pgtype.UUID, strategy *s
resultsCh: resultsCh,
notifyMu: newMu(conf.l),
rateLimiter: newConcurrencyRateLimiter(conf.schedulerConcurrencyRateLimit),
+ minPollingInterval: conf.schedulerConcurrencyPollingMinInterval,
+ maxPollingInterval: conf.schedulerConcurrencyPollingMaxInterval,
}
ctx, cancel := context.WithCancel(context.Background())
@@ -96,7 +102,10 @@ func (c *ConcurrencyManager) notify(ctx context.Context) {
}
func (c *ConcurrencyManager) loopConcurrency(ctx context.Context) {
- ticker := randomticker.NewRandomTicker(500*time.Millisecond, 5*time.Second)
+ ticker := randomticker.NewRandomTicker(
+ 	c.minPollingInterval,
+ 	c.maxPollingInterval,
+ )
defer ticker.Stop()
for {

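For intuition, a self-contained sketch of what a random-interval ticker with (min, max) bounds typically does: each tick fires after a random delay drawn from that range, which keeps pollers across tenants from synchronizing. This is an assumption about the behavior for illustration only, not the repo's randomticker implementation.

package randomsketch

import (
	"math/rand"
	"time"
)

// RandomTicker delivers ticks on C at random intervals in [min, max].
type RandomTicker struct {
	C    chan time.Time
	stop chan struct{}
	min  time.Duration
	max  time.Duration
}

func NewRandomTicker(min, max time.Duration) *RandomTicker {
	t := &RandomTicker{
		C:    make(chan time.Time, 1),
		stop: make(chan struct{}),
		min:  min,
		max:  max,
	}
	go t.loop()
	return t
}

func (t *RandomTicker) Stop() { close(t.stop) }

func (t *RandomTicker) loop() {
	for {
		// Pick a random delay in [min, max); fall back to min when max <= min.
		d := t.min
		if t.max > t.min {
			d += time.Duration(rand.Int63n(int64(t.max - t.min)))
		}
		select {
		case <-time.After(d):
			select {
			case t.C <- time.Now():
			default: // drop the tick if the consumer is still busy
			}
		case <-t.stop:
			return
		}
	}
}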

@@ -3,6 +3,7 @@ package v1
import (
"context"
"sync"
"time"
"github.com/rs/zerolog"
@@ -19,6 +20,10 @@ type sharedConfig struct {
singleQueueLimit int
schedulerConcurrencyRateLimit int
+ schedulerConcurrencyPollingMinInterval time.Duration
+ schedulerConcurrencyPollingMaxInterval time.Duration
}
// SchedulingPool is responsible for managing a pool of tenantManagers.
@@ -35,17 +40,19 @@ type SchedulingPool struct {
concurrencyResultsCh chan *ConcurrencyResults
}
- func NewSchedulingPool(repo v1.SchedulerRepository, l *zerolog.Logger, singleQueueLimit int, schedulerConcurrencyRateLimit int) (*SchedulingPool, func() error, error) {
+ func NewSchedulingPool(repo v1.SchedulerRepository, l *zerolog.Logger, singleQueueLimit int, schedulerConcurrencyRateLimit int, schedulerConcurrencyPollingMinInterval time.Duration, schedulerConcurrencyPollingMaxInterval time.Duration) (*SchedulingPool, func() error, error) {
resultsCh := make(chan *QueueResults, 1000)
concurrencyResultsCh := make(chan *ConcurrencyResults, 1000)
s := &SchedulingPool{
Extensions: &Extensions{},
cf: &sharedConfig{
- repo: repo,
- l: l,
- singleQueueLimit: singleQueueLimit,
- schedulerConcurrencyRateLimit: schedulerConcurrencyRateLimit,
+ repo: repo,
+ l: l,
+ singleQueueLimit: singleQueueLimit,
+ schedulerConcurrencyRateLimit: schedulerConcurrencyRateLimit,
+ schedulerConcurrencyPollingMinInterval: schedulerConcurrencyPollingMinInterval,
+ schedulerConcurrencyPollingMaxInterval: schedulerConcurrencyPollingMaxInterval,
},
resultsCh: resultsCh,
concurrencyResultsCh: concurrencyResultsCh,
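One possible follow-up at call sites (hypothetical, not part of this commit): sanity-checking that the minimum polling interval is positive and not above the maximum before passing both to NewSchedulingPool, falling back to the documented defaults otherwise. The helper name and fallback behavior below are assumptions for illustration.

package main

import (
	"fmt"
	"time"
)

// clampPollingBounds is a hypothetical helper showing one way a caller might
// validate the new knobs: a non-positive minimum falls back to 500ms, and a
// maximum below the minimum falls back to 5s (or the minimum itself).
func clampPollingBounds(minInterval, maxInterval time.Duration) (time.Duration, time.Duration) {
	if minInterval <= 0 {
		minInterval = 500 * time.Millisecond // documented default
	}
	if maxInterval < minInterval {
		maxInterval = 5 * time.Second // documented default
		if maxInterval < minInterval {
			maxInterval = minInterval
		}
	}
	return minInterval, maxInterval
}

func main() {
	fmt.Println(clampPollingBounds(0, 0))                      // 500ms 5s
	fmt.Println(clampPollingBounds(time.Second, time.Second))  // 1s 1s
}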