Mirror of https://github.com/folbricht/routedns.git, synced 2025-12-30 14:10:03 -06:00
Add async Redis SET on cache miss for reduced latency (#472)
* Add async Redis SET on cache miss for reduced latency
* Some changes to address PR comments
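The change hands the Redis SET on a cache miss to a bounded pool of background goroutines so the DNS response is not held up by the write; when the pool is saturated, the write is skipped and counted. A minimal standalone sketch of that pattern (identifier names here are illustrative, not the routedns ones):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

const writeSemCapacity = 2 // deliberately tiny so the demo shows skipped writes

var (
	writeSem = make(chan struct{}, writeSemCapacity) // semaphore limiting in-flight writes
	skipped  atomic.Int64                            // counts writes dropped because the pool was full
	wg       sync.WaitGroup
)

// asyncWrite runs fn in the background if a semaphore slot is free,
// otherwise it drops the write entirely (best-effort caching).
func asyncWrite(fn func()) {
	select {
	case writeSem <- struct{}{}:
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-writeSem }()
			fn()
		}()
	default:
		skipped.Add(1)
	}
}

func main() {
	for i := 0; i < 10; i++ {
		i := i
		asyncWrite(func() {
			time.Sleep(50 * time.Millisecond) // stands in for a slow Redis SET
			fmt.Println("stored item", i)
		})
	}
	wg.Wait()
	fmt.Println("skipped writes:", skipped.Load())
}
```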
@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"errors"
+	"expvar"
 	"fmt"
 	"strings"
 	"time"
@@ -12,27 +13,51 @@ import (
 	"github.com/redis/go-redis/v9"
 )
 
+const (
+	// redisAsyncWriteSemCapacity limits concurrent background Redis writes.
+	redisAsyncWriteSemCapacity = 256
+)
+
 type redisBackend struct {
 	client        *redis.Client
 	opt           RedisBackendOptions
+	asyncWriteSem chan struct{}
+	asyncSkipped  *expvar.Int
 }
 
 type RedisBackendOptions struct {
 	RedisOptions redis.Options
 	KeyPrefix    string
+	SyncSet      bool // When true, perform Redis SET synchronously. Default is false (async writes).
 }
 
 var _ CacheBackend = (*redisBackend)(nil)
 
 func NewRedisBackend(opt RedisBackendOptions) *redisBackend {
 	b := &redisBackend{
 		client:        redis.NewClient(&opt.RedisOptions),
 		opt:           opt,
+		asyncWriteSem: make(chan struct{}, redisAsyncWriteSemCapacity),
+		asyncSkipped:  getVarInt("cache", "redis", "async-skipped"),
 	}
 	return b
 }
 
 func (b *redisBackend) Store(query *dns.Msg, item *cacheAnswer) {
+	// TTL guard: skip storing if already expired
+	ttl := time.Until(item.Expiry)
+	if ttl <= 0 {
+		return
+	}
+
+	if b.opt.SyncSet {
+		b.storeSync(query, item, ttl)
+	} else {
+		b.storeAsync(query, item, ttl)
+	}
+}
+
+func (b *redisBackend) storeSync(query *dns.Msg, item *cacheAnswer, ttl time.Duration) {
 	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
 	defer cancel()
 	key := b.keyFromQuery(query)
@@ -41,11 +66,25 @@ func (b *redisBackend) Store(query *dns.Msg, item *cacheAnswer) {
 		Log.Error("failed to marshal cache record", "error", err)
 		return
 	}
-	if err := b.client.Set(ctx, key, value, time.Until(item.Expiry)).Err(); err != nil {
+	if err := b.client.Set(ctx, key, value, ttl).Err(); err != nil {
 		Log.Error("failed to write to redis", "error", err)
 	}
 }
 
+func (b *redisBackend) storeAsync(query *dns.Msg, item *cacheAnswer, ttl time.Duration) {
+	// Non-blocking semaphore acquire
+	select {
+	case b.asyncWriteSem <- struct{}{}:
+		go func() {
+			defer func() { <-b.asyncWriteSem }()
+			b.storeSync(query, item, ttl)
+		}()
+	default:
+		// Semaphore full, skip async store (best-effort caching)
+		b.asyncSkipped.Add(1)
+	}
+}
+
 func (b *redisBackend) Lookup(q *dns.Msg) (*dns.Msg, bool, bool) {
 	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
 	defer cancel()
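The asyncSkipped counter incremented above is an expvar.Int. As a general standard-library note (how routedns exposes its metrics is not part of this diff), expvar variables are published as JSON at /debug/vars once an HTTP server is running on http.DefaultServeMux; a minimal sketch with a hypothetical variable name:

```go
package main

import (
	"expvar"
	"log"
	"net/http"
)

func main() {
	// Importing expvar registers a handler at /debug/vars on http.DefaultServeMux.
	skipped := expvar.NewInt("cache.redis.async-skipped") // illustrative metric name
	skipped.Add(1)

	// GET http://localhost:8080/debug/vars now includes
	// "cache.redis.async-skipped": 1 among the published variables.
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}
```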
@@ -91,7 +91,8 @@ type cacheBackend struct {
 	RedisKeyPrefix       string `toml:"redis-key-prefix"`        // Prefix any cache entry
 	RedisMaxRetries      int    `toml:"redis-max-retries"`       // Maximum number of retries before giving up. Default is 3 retries; -1 (not 0) disables retries.
 	RedisMinRetryBackoff int    `toml:"redis-min-retry-backoff"` // Minimum back-off between each retry. Default is 8 milliseconds; -1 disables back-off.
 	RedisMaxRetryBackoff int    `toml:"redis-max-retry-backoff"` // Maximum back-off between each retry. Default is 512 milliseconds; -1 disables back-off.
+	RedisSyncSet         bool   `toml:"redis-sync-set"`          // When true, perform Redis SET synchronously. Default is false (async writes).
 }
 
 type group struct {
@@ -214,7 +215,7 @@ type route struct {
 	Class         string
 	Name          string
 	Source        string
 	ECSSource     string   `toml:"ecs-source"`
 	Weekdays      []string // 'mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'
 	After, Before string   // Hour:Minute in 24h format, for example "14:30"
 	Invert        bool     // Invert the result of the match
@@ -684,6 +684,7 @@ func instantiateGroup(id string, g group, resolvers map[string]rdns.Resolver) er
 				MaxRetryBackoff: maxRetryBackoff,
 			},
 			KeyPrefix: g.Backend.RedisKeyPrefix,
+			SyncSet:   g.Backend.RedisSyncSet,
 		})
 	default:
 		return fmt.Errorf("unsupported cache backend %q", g.Backend.Type)
@@ -393,6 +393,7 @@ The `redis` backend stores cached items in a Redis database. This allows multipl
 - `redis-max-retries` - Maximum number of retries before giving up. Default is 3 retries; -1 (not 0) disables retries.
 - `redis-min-retry-backoff` - Minimum back-off between each retry in milliseconds. Default is 8 milliseconds; -1 disables back-off.
 - `redis-max-retry-backoff` - Maximum back-off between each retry in milliseconds. Default is 512 milliseconds; -1 disables back-off.
+- `redis-sync-set` - When true, performs Redis SET operations synchronously. Default is false (async writes): the response is returned immediately while the cache entry is written in the background. Note that with async writes there is a brief window during which a second identical query may also miss the cache until the background write completes.
 
 #### Examples
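A hedged configuration sketch for the new option (not part of this commit): only the `redis-*` keys documented above come from the change; the group/backend layout, the resolver name, and the `type` keys are assumptions based on routedns's usual TOML config style, and the Redis connection options are omitted.

```toml
[groups.cloudflare-cached]
type      = "cache"
resolvers = ["cloudflare-dot"]   # assumed upstream resolver name

[groups.cloudflare-cached.backend]
type                    = "redis"      # assumed key selecting the redis cache backend
# ... Redis connection options omitted ...
redis-key-prefix        = "routedns:"
redis-max-retries       = 3
redis-min-retry-backoff = 8            # milliseconds
redis-max-retry-backoff = 512          # milliseconds
redis-sync-set          = true         # opt back into synchronous SETs
```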