Start migration from allow_replay to max_replay_count logic

Taras Kushnir
2025-08-23 14:53:32 +03:00
parent 387f59599c
commit 1d62e7f150
14 changed files with 170 additions and 71 deletions
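In short, the boolean allow_replay gives way to an integer max_replay_count: 1 keeps the old single-verification behaviour, while N > 1 lets the same puzzle be verified up to N times before puzzle.VerifiedBeforeError is returned. A minimal sketch of that mapping (maxReplayCountFor is a hypothetical helper, not part of this commit):

// Sketch only: the allow_replay -> max_replay_count translation implied by the diff below.
func maxReplayCountFor(allowReplay bool, configured int32) int32 {
	if !allowReplay || configured < 1 {
		return 1 // old default: a puzzle may be verified exactly once
	}
	return configured // puzzle accepted up to `configured` times
}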

View File

@@ -413,10 +413,8 @@ func (s *Server) Verify(ctx context.Context, data []byte, expectedOwner puzzle.O
return result, nil
}
if (puzzleObject != nil) && (property != nil) && !property.AllowReplay {
if cerr := s.BusinessDB.CachePuzzle(ctx, puzzleObject, tnow); cerr != nil {
slog.ErrorContext(ctx, "Failed to cache puzzle", "puzzleID", puzzleObject.PuzzleID, common.ErrAttr(cerr))
}
if (puzzleObject != nil) && (property != nil) && (property.MaxReplayCount > 0) {
s.BusinessDB.CacheVerifiedPuzzle(ctx, puzzleObject, tnow)
} else if puzzleObject != nil {
slog.Log(ctx, common.LevelTrace, "Skipping caching puzzle", "puzzleID", puzzleObject.PuzzleID)
}
@@ -561,11 +559,6 @@ func (s *Server) verifyPuzzleValid(ctx context.Context, payload *puzzle.VerifyPa
}
}
if s.BusinessDB.CheckPuzzleCached(ctx, p) {
plog.WarnContext(ctx, "Puzzle is already cached")
return p, nil, puzzle.VerifiedBeforeError
}
// the reason we delay accessing DB for API key and not for sitekey is that sitekey comes from a signed puzzle payload
// and API key is a rather random string in HTTP header so has a higher chance of misuse
sitekey := db.UUIDToSiteKey(pgtype.UUID{Valid: true, Bytes: p.PropertyID})
@@ -582,6 +575,16 @@ func (s *Server) verifyPuzzleValid(ctx context.Context, payload *puzzle.VerifyPa
}
}
var maxCount uint32 = 1
if (property != nil) && (property.MaxReplayCount > 0) {
maxCount = uint32(property.MaxReplayCount)
}
if s.BusinessDB.CheckVerifiedPuzzle(ctx, p, maxCount) {
plog.WarnContext(ctx, "Puzzle is already cached", "count", maxCount)
return p, nil, puzzle.VerifiedBeforeError
}
if payload.NeedsExtraSalt() {
if serr := payload.VerifySignature(ctx, s.Salt.Value(), property.Salt); serr != nil {
return p, nil, puzzle.IntegrityError
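Because the CheckVerifiedPuzzle call above runs before the counter is bumped by CacheVerifiedPuzzle in the first hunk, a property with MaxReplayCount = N accepts exactly N verifications of the same puzzle. A worked trace for N = 2 (a sketch in terms of the cached counter, not real code):

// verify #1: counter absent  -> CheckVerifiedPuzzle == false -> OK, counter becomes 1
// verify #2: counter 1 < 2   -> CheckVerifiedPuzzle == false -> OK, counter becomes 2
// verify #3: counter 2 >= 2  -> CheckVerifiedPuzzle == true  -> puzzle.VerifiedBeforeError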

View File

@@ -329,9 +329,26 @@ func TestVerifyPuzzleAllowReplay(t *testing.T) {
if err != nil {
t.Fatal(err)
}
const maxReplayCount = 3
// this should still be cached, so we don't need to actually update the DB
property.AllowReplay = true
property.MaxReplayCount = maxReplayCount
for range maxReplayCount {
resp, err := verifySuite(payload, apiKey)
if err != nil {
t.Fatal(err)
}
if resp.StatusCode != http.StatusOK {
t.Errorf("Unexpected submit status code %d", resp.StatusCode)
}
if err := checkVerifyError(resp, puzzle.VerifyNoError); err != nil {
t.Fatal(err)
}
}
// now it should trigger an error
resp, err := verifySuite(payload, apiKey)
if err != nil {
t.Fatal(err)
@@ -341,13 +358,7 @@ func TestVerifyPuzzleAllowReplay(t *testing.T) {
t.Errorf("Unexpected submit status code %d", resp.StatusCode)
}
// now second time the same
resp, err = verifySuite(payload, apiKey)
if err != nil {
t.Fatal(err)
}
if err := checkVerifyError(resp, puzzle.VerifyNoError); err != nil {
if err := checkVerifyError(resp, puzzle.VerifiedBeforeError); err != nil {
t.Fatal(err)
}
}

View File

@@ -43,6 +43,7 @@ const (
ParamVersion = "version"
ParamPortalSolution = "pc_portal_solution"
ParamTerms = "terms"
ParamMaxReplayCount = "max_replay_count"
)
var (

View File

@@ -45,7 +45,7 @@ type BusinessStore struct {
cacheOnlyImpl *BusinessStoreImpl
Cache common.Cache[CacheKey, any]
// this could have been a bloom/cuckoo filter with expiration, if they existed
puzzleCache common.Cache[uint32, uint32]
puzzleCache *puzzleCache
MaintenanceMode atomic.Bool
}
@@ -53,8 +53,8 @@ type Implementor interface {
Impl() *BusinessStoreImpl
WithTx(ctx context.Context, fn func(*BusinessStoreImpl) error) error
Ping(ctx context.Context) error
CheckPuzzleCached(ctx context.Context, p *puzzle.Puzzle) bool
CachePuzzle(ctx context.Context, p *puzzle.Puzzle, tnow time.Time) error
CheckVerifiedPuzzle(ctx context.Context, p *puzzle.Puzzle, maxCount uint32) bool
CacheVerifiedPuzzle(ctx context.Context, p *puzzle.Puzzle, tnow time.Time)
CacheHitRatio() float64
}
@@ -74,21 +74,12 @@ func NewBusiness(pool *pgxpool.Pool) *BusinessStore {
}
func NewBusinessEx(pool *pgxpool.Pool, cache common.Cache[CacheKey, any]) *BusinessStore {
const maxPuzzleCacheSize = 100_000
var puzzleCache common.Cache[uint32, uint32]
var err error
puzzleCache, err = NewMemoryCache[uint32, uint32]("puzzle", maxPuzzleCacheSize, 0 /*missing value*/, defaultCacheTTL, defaultCacheRefresh, negativeCacheTTL)
if err != nil {
slog.Error("Failed to create puzzle memory cache", common.ErrAttr(err))
puzzleCache = NewStaticCache[uint32, uint32](maxPuzzleCacheSize, 0 /*missing value*/)
}
return &BusinessStore{
Pool: pool,
defaultImpl: &BusinessStoreImpl{cache: cache, querier: dbgen.New(pool)},
cacheOnlyImpl: &BusinessStoreImpl{cache: cache},
Cache: cache,
puzzleCache: puzzleCache,
puzzleCache: newPuzzleCache(puzzle.DefaultValidityPeriod),
}
}
@@ -150,28 +141,28 @@ func (s *BusinessStore) CacheHitRatio() float64 {
return s.Cache.HitRatio()
}
func (s *BusinessStore) CheckPuzzleCached(ctx context.Context, p *puzzle.Puzzle) bool {
func (s *BusinessStore) CheckVerifiedPuzzle(ctx context.Context, p *puzzle.Puzzle, maxCount uint32) bool {
if p == nil || p.IsZero() {
return false
}
// purely theoretically there's still a chance of cache collision, but it's so negligible that it's allowed
// (HashKey() and HashValue() have to match during puzzle.DefaultValidityPeriod on the same server
value, err := s.puzzleCache.Get(ctx, p.HashKey())
return (err == nil) && (p.HashValue() == value)
// (HashKey() has to match during puzzle.DefaultValidityPeriod on the same server)
return !s.puzzleCache.CheckCount(ctx, p.HashKey(), maxCount)
}
func (s *BusinessStore) CachePuzzle(ctx context.Context, p *puzzle.Puzzle, tnow time.Time) error {
func (s *BusinessStore) CacheVerifiedPuzzle(ctx context.Context, p *puzzle.Puzzle, tnow time.Time) {
if p == nil || p.IsZero() {
slog.Log(ctx, common.LevelTrace, "Skipping caching zero puzzle")
return nil
return
}
// this check should have been done earlier in the pipeline; here it is only a safeguard against caching an expired puzzle
if !tnow.Before(p.Expiration) {
slog.WarnContext(ctx, "Skipping caching expired puzzle", "now", tnow, "expiration", p.Expiration)
return nil
return
}
return s.puzzleCache.SetWithTTL(ctx, p.HashKey(), p.HashValue(), p.Expiration.Sub(tnow))
value := s.puzzleCache.Inc(ctx, p.HashKey(), p.Expiration.Sub(tnow))
slog.Log(ctx, common.LevelTrace, "Cached verified puzzle", "times", value)
}

View File

@@ -220,6 +220,7 @@ type Property struct {
AllowSubdomains bool `db:"allow_subdomains" json:"allow_subdomains"`
AllowLocalhost bool `db:"allow_localhost" json:"allow_localhost"`
AllowReplay bool `db:"allow_replay" json:"allow_replay"`
MaxReplayCount int32 `db:"max_replay_count" json:"max_replay_count"`
}
type Subscription struct {

View File

@@ -15,7 +15,7 @@ import (
const createProperty = `-- name: CreateProperty :one
INSERT INTO backend.properties (name, org_id, creator_id, org_owner_id, domain, level, growth)
VALUES ($1, $2, $3, $4, $5, $6, $7)
RETURNING id, name, external_id, org_id, creator_id, org_owner_id, domain, level, salt, growth, created_at, updated_at, deleted_at, validity_interval, allow_subdomains, allow_localhost, allow_replay
RETURNING id, name, external_id, org_id, creator_id, org_owner_id, domain, level, salt, growth, created_at, updated_at, deleted_at, validity_interval, allow_subdomains, allow_localhost, allow_replay, max_replay_count
`
type CreatePropertyParams struct {
@@ -57,6 +57,7 @@ func (q *Queries) CreateProperty(ctx context.Context, arg *CreatePropertyParams)
&i.AllowSubdomains,
&i.AllowLocalhost,
&i.AllowReplay,
&i.MaxReplayCount,
)
return &i, err
}
@@ -71,7 +72,7 @@ func (q *Queries) DeleteProperties(ctx context.Context, dollar_1 []int32) error
}
const getOrgProperties = `-- name: GetOrgProperties :many
SELECT id, name, external_id, org_id, creator_id, org_owner_id, domain, level, salt, growth, created_at, updated_at, deleted_at, validity_interval, allow_subdomains, allow_localhost, allow_replay from backend.properties WHERE org_id = $1 AND deleted_at IS NULL ORDER BY created_at
SELECT id, name, external_id, org_id, creator_id, org_owner_id, domain, level, salt, growth, created_at, updated_at, deleted_at, validity_interval, allow_subdomains, allow_localhost, allow_replay, max_replay_count from backend.properties WHERE org_id = $1 AND deleted_at IS NULL ORDER BY created_at
`
func (q *Queries) GetOrgProperties(ctx context.Context, orgID pgtype.Int4) ([]*Property, error) {
@@ -101,6 +102,7 @@ func (q *Queries) GetOrgProperties(ctx context.Context, orgID pgtype.Int4) ([]*P
&i.AllowSubdomains,
&i.AllowLocalhost,
&i.AllowReplay,
&i.MaxReplayCount,
); err != nil {
return nil, err
}
@@ -113,7 +115,7 @@ func (q *Queries) GetOrgProperties(ctx context.Context, orgID pgtype.Int4) ([]*P
}
const getOrgPropertyByName = `-- name: GetOrgPropertyByName :one
SELECT id, name, external_id, org_id, creator_id, org_owner_id, domain, level, salt, growth, created_at, updated_at, deleted_at, validity_interval, allow_subdomains, allow_localhost, allow_replay from backend.properties WHERE org_id = $1 AND name = $2 AND deleted_at IS NULL
SELECT id, name, external_id, org_id, creator_id, org_owner_id, domain, level, salt, growth, created_at, updated_at, deleted_at, validity_interval, allow_subdomains, allow_localhost, allow_replay, max_replay_count from backend.properties WHERE org_id = $1 AND name = $2 AND deleted_at IS NULL
`
type GetOrgPropertyByNameParams struct {
@@ -142,12 +144,13 @@ func (q *Queries) GetOrgPropertyByName(ctx context.Context, arg *GetOrgPropertyB
&i.AllowSubdomains,
&i.AllowLocalhost,
&i.AllowReplay,
&i.MaxReplayCount,
)
return &i, err
}
const getProperties = `-- name: GetProperties :many
SELECT id, name, external_id, org_id, creator_id, org_owner_id, domain, level, salt, growth, created_at, updated_at, deleted_at, validity_interval, allow_subdomains, allow_localhost, allow_replay FROM backend.properties LIMIT $1
SELECT id, name, external_id, org_id, creator_id, org_owner_id, domain, level, salt, growth, created_at, updated_at, deleted_at, validity_interval, allow_subdomains, allow_localhost, allow_replay, max_replay_count FROM backend.properties LIMIT $1
`
func (q *Queries) GetProperties(ctx context.Context, limit int32) ([]*Property, error) {
@@ -177,6 +180,7 @@ func (q *Queries) GetProperties(ctx context.Context, limit int32) ([]*Property,
&i.AllowSubdomains,
&i.AllowLocalhost,
&i.AllowReplay,
&i.MaxReplayCount,
); err != nil {
return nil, err
}
@@ -189,7 +193,7 @@ func (q *Queries) GetProperties(ctx context.Context, limit int32) ([]*Property,
}
const getPropertiesByExternalID = `-- name: GetPropertiesByExternalID :many
SELECT id, name, external_id, org_id, creator_id, org_owner_id, domain, level, salt, growth, created_at, updated_at, deleted_at, validity_interval, allow_subdomains, allow_localhost, allow_replay from backend.properties WHERE external_id = ANY($1::UUID[])
SELECT id, name, external_id, org_id, creator_id, org_owner_id, domain, level, salt, growth, created_at, updated_at, deleted_at, validity_interval, allow_subdomains, allow_localhost, allow_replay, max_replay_count from backend.properties WHERE external_id = ANY($1::UUID[])
`
func (q *Queries) GetPropertiesByExternalID(ctx context.Context, dollar_1 []pgtype.UUID) ([]*Property, error) {
@@ -219,6 +223,7 @@ func (q *Queries) GetPropertiesByExternalID(ctx context.Context, dollar_1 []pgty
&i.AllowSubdomains,
&i.AllowLocalhost,
&i.AllowReplay,
&i.MaxReplayCount,
); err != nil {
return nil, err
}
@@ -231,7 +236,7 @@ func (q *Queries) GetPropertiesByExternalID(ctx context.Context, dollar_1 []pgty
}
const getPropertiesByID = `-- name: GetPropertiesByID :many
SELECT id, name, external_id, org_id, creator_id, org_owner_id, domain, level, salt, growth, created_at, updated_at, deleted_at, validity_interval, allow_subdomains, allow_localhost, allow_replay from backend.properties WHERE id = ANY($1::INT[])
SELECT id, name, external_id, org_id, creator_id, org_owner_id, domain, level, salt, growth, created_at, updated_at, deleted_at, validity_interval, allow_subdomains, allow_localhost, allow_replay, max_replay_count from backend.properties WHERE id = ANY($1::INT[])
`
func (q *Queries) GetPropertiesByID(ctx context.Context, dollar_1 []int32) ([]*Property, error) {
@@ -261,6 +266,7 @@ func (q *Queries) GetPropertiesByID(ctx context.Context, dollar_1 []int32) ([]*P
&i.AllowSubdomains,
&i.AllowLocalhost,
&i.AllowReplay,
&i.MaxReplayCount,
); err != nil {
return nil, err
}
@@ -273,7 +279,7 @@ func (q *Queries) GetPropertiesByID(ctx context.Context, dollar_1 []int32) ([]*P
}
const getPropertyByExternalID = `-- name: GetPropertyByExternalID :one
SELECT id, name, external_id, org_id, creator_id, org_owner_id, domain, level, salt, growth, created_at, updated_at, deleted_at, validity_interval, allow_subdomains, allow_localhost, allow_replay from backend.properties WHERE external_id = $1
SELECT id, name, external_id, org_id, creator_id, org_owner_id, domain, level, salt, growth, created_at, updated_at, deleted_at, validity_interval, allow_subdomains, allow_localhost, allow_replay, max_replay_count from backend.properties WHERE external_id = $1
`
func (q *Queries) GetPropertyByExternalID(ctx context.Context, externalID pgtype.UUID) (*Property, error) {
@@ -297,12 +303,13 @@ func (q *Queries) GetPropertyByExternalID(ctx context.Context, externalID pgtype
&i.AllowSubdomains,
&i.AllowLocalhost,
&i.AllowReplay,
&i.MaxReplayCount,
)
return &i, err
}
const getPropertyByID = `-- name: GetPropertyByID :one
SELECT id, name, external_id, org_id, creator_id, org_owner_id, domain, level, salt, growth, created_at, updated_at, deleted_at, validity_interval, allow_subdomains, allow_localhost, allow_replay from backend.properties WHERE id = $1
SELECT id, name, external_id, org_id, creator_id, org_owner_id, domain, level, salt, growth, created_at, updated_at, deleted_at, validity_interval, allow_subdomains, allow_localhost, allow_replay, max_replay_count from backend.properties WHERE id = $1
`
func (q *Queries) GetPropertyByID(ctx context.Context, id int32) (*Property, error) {
@@ -326,12 +333,13 @@ func (q *Queries) GetPropertyByID(ctx context.Context, id int32) (*Property, err
&i.AllowSubdomains,
&i.AllowLocalhost,
&i.AllowReplay,
&i.MaxReplayCount,
)
return &i, err
}
const getSoftDeletedProperties = `-- name: GetSoftDeletedProperties :many
SELECT p.id, p.name, p.external_id, p.org_id, p.creator_id, p.org_owner_id, p.domain, p.level, p.salt, p.growth, p.created_at, p.updated_at, p.deleted_at, p.validity_interval, p.allow_subdomains, p.allow_localhost, p.allow_replay
SELECT p.id, p.name, p.external_id, p.org_id, p.creator_id, p.org_owner_id, p.domain, p.level, p.salt, p.growth, p.created_at, p.updated_at, p.deleted_at, p.validity_interval, p.allow_subdomains, p.allow_localhost, p.allow_replay, p.max_replay_count
FROM backend.properties p
JOIN backend.organizations o ON p.org_id = o.id
JOIN backend.users u ON o.user_id = u.id
@@ -378,6 +386,7 @@ func (q *Queries) GetSoftDeletedProperties(ctx context.Context, arg *GetSoftDele
&i.Property.AllowSubdomains,
&i.Property.AllowLocalhost,
&i.Property.AllowReplay,
&i.Property.MaxReplayCount,
); err != nil {
return nil, err
}
@@ -401,7 +410,7 @@ func (q *Queries) GetUserPropertiesCount(ctx context.Context, orgOwnerID pgtype.
}
const softDeleteProperty = `-- name: SoftDeleteProperty :one
UPDATE backend.properties SET deleted_at = NOW(), updated_at = NOW(), name = name || ' deleted_' || substr(md5(random()::text), 1, 8) WHERE id = $1 RETURNING id, name, external_id, org_id, creator_id, org_owner_id, domain, level, salt, growth, created_at, updated_at, deleted_at, validity_interval, allow_subdomains, allow_localhost, allow_replay
UPDATE backend.properties SET deleted_at = NOW(), updated_at = NOW(), name = name || ' deleted_' || substr(md5(random()::text), 1, 8) WHERE id = $1 RETURNING id, name, external_id, org_id, creator_id, org_owner_id, domain, level, salt, growth, created_at, updated_at, deleted_at, validity_interval, allow_subdomains, allow_localhost, allow_replay, max_replay_count
`
func (q *Queries) SoftDeleteProperty(ctx context.Context, id int32) (*Property, error) {
@@ -425,14 +434,15 @@ func (q *Queries) SoftDeleteProperty(ctx context.Context, id int32) (*Property,
&i.AllowSubdomains,
&i.AllowLocalhost,
&i.AllowReplay,
&i.MaxReplayCount,
)
return &i, err
}
const updateProperty = `-- name: UpdateProperty :one
UPDATE backend.properties SET name = $2, level = $3, growth = $4, validity_interval = $5, allow_subdomains = $6, allow_localhost = $7, allow_replay = $8, updated_at = NOW()
UPDATE backend.properties SET name = $2, level = $3, growth = $4, validity_interval = $5, allow_subdomains = $6, allow_localhost = $7, max_replay_count = $8, updated_at = NOW()
WHERE id = $1
RETURNING id, name, external_id, org_id, creator_id, org_owner_id, domain, level, salt, growth, created_at, updated_at, deleted_at, validity_interval, allow_subdomains, allow_localhost, allow_replay
RETURNING id, name, external_id, org_id, creator_id, org_owner_id, domain, level, salt, growth, created_at, updated_at, deleted_at, validity_interval, allow_subdomains, allow_localhost, allow_replay, max_replay_count
`
type UpdatePropertyParams struct {
@@ -443,7 +453,7 @@ type UpdatePropertyParams struct {
ValidityInterval time.Duration `db:"validity_interval" json:"validity_interval"`
AllowSubdomains bool `db:"allow_subdomains" json:"allow_subdomains"`
AllowLocalhost bool `db:"allow_localhost" json:"allow_localhost"`
AllowReplay bool `db:"allow_replay" json:"allow_replay"`
MaxReplayCount int32 `db:"max_replay_count" json:"max_replay_count"`
}
func (q *Queries) UpdateProperty(ctx context.Context, arg *UpdatePropertyParams) (*Property, error) {
@@ -455,7 +465,7 @@ func (q *Queries) UpdateProperty(ctx context.Context, arg *UpdatePropertyParams)
arg.ValidityInterval,
arg.AllowSubdomains,
arg.AllowLocalhost,
arg.AllowReplay,
arg.MaxReplayCount,
)
var i Property
err := row.Scan(
@@ -476,6 +486,7 @@ func (q *Queries) UpdateProperty(ctx context.Context, arg *UpdatePropertyParams)
&i.AllowSubdomains,
&i.AllowLocalhost,
&i.AllowReplay,
&i.MaxReplayCount,
)
return &i, err
}

View File

@@ -0,0 +1 @@
ALTER TABLE backend.properties DROP COLUMN max_replay_count;

View File

@@ -0,0 +1 @@
ALTER TABLE backend.properties ADD COLUMN max_replay_count INTEGER NOT NULL DEFAULT 1;
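Since the new column defaults to 1 and allow_replay stays in place for now, a later step of this migration will presumably need a backfill along these lines (a sketch, not part of this commit; the target value 2 is a placeholder for whatever replay budget existing allow_replay properties should get):

-- Hypothetical backfill for a follow-up migration; not included in this commit.
UPDATE backend.properties SET max_replay_count = 2 WHERE allow_replay;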

pkg/db/puzzlecache.go Normal file
View File

@@ -0,0 +1,51 @@
package db
import (
"context"
"time"
"github.com/maypok86/otter/v2"
)
type puzzleCache struct {
store *otter.Cache[uint64, uint32]
}
func newPuzzleCache(expiryTTL time.Duration) *puzzleCache {
const maxSize = 500_000
const initialSize = 1_000
return &puzzleCache{
store: otter.Must(&otter.Options[uint64, uint32]{
MaximumSize: maxSize,
InitialCapacity: initialSize,
ExpiryCalculator: otter.ExpiryAccessing[uint64, uint32](expiryTTL),
}),
}
}
func (pc *puzzleCache) CheckCount(ctx context.Context, key uint64, maxCount uint32) bool {
if count, ok := pc.store.GetIfPresent(key); ok {
return count < maxCount
}
return true
}
func puzzleCacheRemapInc(oldValue uint32, found bool) (newValue uint32, op otter.ComputeOp) {
if !found {
return 1, otter.WriteOp
}
return oldValue + 1, otter.WriteOp
}
func (pc *puzzleCache) Inc(ctx context.Context, key uint64, ttl time.Duration) uint32 {
value, _ := pc.store.Compute(key, puzzleCacheRemapInc)
if value == 1 {
pc.store.SetExpiresAfter(key, ttl)
}
return value
}
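A short usage sketch of the new counter cache, as a test in the db package might exercise it (the key, TTLs, and loop bound are placeholders; the real key comes from puzzle.HashKey()):

// Sketch: CheckCount allows a key while its counter is below maxCount;
// Inc bumps the counter and applies the explicit TTL on the first increment.
ctx := context.Background()
pc := newPuzzleCache(30 * time.Minute)
key, maxCount := uint64(42), uint32(3)
for range 4 {
	if !pc.CheckCount(ctx, key, maxCount) {
		break // fourth pass: the counter has reached maxCount
	}
	pc.Inc(ctx, key, 5*time.Minute)
}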

View File

@@ -13,7 +13,7 @@ VALUES ($1, $2, $3, $4, $5, $6, $7)
RETURNING *;
-- name: UpdateProperty :one
UPDATE backend.properties SET name = $2, level = $3, growth = $4, validity_interval = $5, allow_subdomains = $6, allow_localhost = $7, allow_replay = $8, updated_at = NOW()
UPDATE backend.properties SET name = $2, level = $3, growth = $4, validity_interval = $5, allow_subdomains = $6, allow_localhost = $7, max_replay_count = $8, updated_at = NOW()
WHERE id = $1
RETURNING *;

View File

@@ -56,6 +56,7 @@ type userProperty struct {
Level int
Growth int
ValidityInterval int
MaxReplayCount int
AllowSubdomains bool
AllowLocalhost bool
AllowReplay bool
@@ -107,7 +108,7 @@ func createDifficultyLevelsRenderContext() difficultyLevelsRenderContext {
}
func propertyToUserProperty(p *dbgen.Property) *userProperty {
return &userProperty{
up := &userProperty{
ID: strconv.Itoa(int(p.ID)),
OrgID: strconv.Itoa(int(p.OrgID.Int32)),
Name: p.Name,
@@ -115,10 +116,13 @@ func propertyToUserProperty(p *dbgen.Property) *userProperty {
Level: int(p.Level.Int16),
Growth: growthLevelToIndex(p.Growth),
ValidityInterval: validityIntervalToIndex(p.ValidityInterval),
AllowReplay: p.AllowReplay,
AllowReplay: (p.MaxReplayCount > 1),
MaxReplayCount: max(1, int(p.MaxReplayCount)),
AllowSubdomains: p.AllowSubdomains,
AllowLocalhost: p.AllowLocalhost,
}
return up
}
func propertiesToUserProperties(ctx context.Context, properties []*dbgen.Property) []*userProperty {
@@ -218,6 +222,23 @@ func validityIntervalFromIndex(ctx context.Context, index string) time.Duration
}
}
func parseMaxReplayCount(ctx context.Context, value string) int32 {
i, err := strconv.Atoi(value)
if err != nil {
slog.ErrorContext(ctx, "Failed to parse max replay count", "value", value, common.ErrAttr(err))
return 1
}
const maxValue = 1_000_000
const minValue = 1
if (i < minValue) || (i > maxValue) {
slog.ErrorContext(ctx, "Invalid value of max replay count", "value", value)
}
// clamp in int before narrowing so that very large inputs cannot wrap around int32
return int32(max(minValue, min(i, maxValue)))
}
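For reference, the clamping this gives (illustrative expectations, not an existing test):

// parseMaxReplayCount(ctx, "3")       -> 3
// parseMaxReplayCount(ctx, "0")       -> 1          (below minimum, clamped and logged)
// parseMaxReplayCount(ctx, "1000001") -> 1_000_000  (above maximum, clamped and logged)
// parseMaxReplayCount(ctx, "abc")     -> 1          (parse error)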
func difficultyLevelFromValue(ctx context.Context, value string) common.DifficultyLevel {
i, err := strconv.Atoi(value)
if err != nil {
@@ -761,16 +782,20 @@ func (s *Server) putProperty(w http.ResponseWriter, r *http.Request) (Model, str
validityInterval := validityIntervalFromIndex(ctx, r.FormValue(common.ParamValidityInterval))
_, allowSubdomains := r.Form[common.ParamAllowSubdomains]
_, allowLocalhost := r.Form[common.ParamAllowLocalhost]
_, allowReplay := r.Form[common.ParamAllowReplay]
var maxReplayCount int32 = 1
if _, allowReplay := r.Form[common.ParamAllowReplay]; allowReplay {
maxReplayCount = parseMaxReplayCount(ctx, r.FormValue(common.ParamMaxReplayCount))
}
if (name != property.Name) ||
(int16(difficulty) != property.Level.Int16) ||
(growth != property.Growth) ||
(validityInterval != property.ValidityInterval) ||
(allowReplay != property.AllowReplay) ||
(maxReplayCount != property.MaxReplayCount) ||
(allowSubdomains != property.AllowSubdomains) ||
(allowLocalhost != property.AllowLocalhost) {
if updatedProperty, err := s.Store.Impl().UpdateProperty(ctx, &dbgen.UpdatePropertyParams{
params := &dbgen.UpdatePropertyParams{
ID: property.ID,
Name: name,
Level: db.Int2(int16(difficulty)),
@@ -778,8 +803,10 @@ func (s *Server) putProperty(w http.ResponseWriter, r *http.Request) (Model, str
ValidityInterval: validityInterval,
AllowSubdomains: allowSubdomains,
AllowLocalhost: allowLocalhost,
AllowReplay: allowReplay,
}); err != nil {
MaxReplayCount: maxReplayCount,
}
if updatedProperty, err := s.Store.Impl().UpdateProperty(ctx, params); err != nil {
renderCtx.ErrorMessage = "Failed to update settings. Please try again."
} else {
slog.DebugContext(ctx, "Edited property", "propID", property.ID, "orgID", org.ID)

View File

@@ -56,6 +56,7 @@ type RenderConstants struct {
AllowReplay string
IgnoreError string
Terms string
MaxReplayCount string
}
func NewRenderConstants() *RenderConstants {
@@ -103,6 +104,7 @@ func NewRenderConstants() *RenderConstants {
AllowReplay: common.ParamAllowReplay,
IgnoreError: common.ParamIgnoreError,
Terms: common.ParamTerms,
MaxReplayCount: common.ParamMaxReplayCount,
}
}

View File

@@ -62,22 +62,17 @@ func (p *Puzzle) Init(validityPeriod time.Duration) error {
return nil
}
func (p *Puzzle) HashKey() uint32 {
hasher := fnv.New32a()
func (p *Puzzle) HashKey() uint64 {
hasher := fnv.New64a()
hasher.Write(p.PropertyID[:])
var pidBytes [8]byte
binary.LittleEndian.PutUint64(pidBytes[:], p.PuzzleID)
hasher.Write(pidBytes[:])
return hasher.Sum32()
}
func (p *Puzzle) HashValue() uint32 {
hasher := fnv.New32a()
hasher.Write(p.UserData[:])
return hasher.Sum32()
return hasher.Sum64()
}
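Inferred from this hunk and the store changes above: the old cache stored a HashKey -> HashValue pair (two independent 32-bit FNV hashes), while the new counter cache needs only a key, so HashValue over UserData is dropped and the remaining key is widened to 64-bit FNV over PropertyID plus PuzzleID. A minimal restatement of how the wider key is consumed (names taken from the earlier hunks):

// The uint64 key from Puzzle.HashKey feeds the otter.Cache[uint64, uint32] counter.
key := p.HashKey() // fnv64a over PropertyID and PuzzleID
alreadyExhausted := !s.puzzleCache.CheckCount(ctx, key, maxCount)
_ = alreadyExhausted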
func NextPuzzleID() uint64 {

View File

@@ -65,7 +65,7 @@
</div>
</div>
<div class="col-span-full">
<div class="col-span-full" x-data="{replayEnabled: {{ $.Params.Property.AllowReplay }}}">
<label for="{{ .Const.ValidityInterval }}" class="pc-internal-form-label tooltip" data-tooltip="Period during which a single captcha puzzle can be verified"> Verification window </label>
<div class="mt-2">
<select name="{{ .Const.ValidityInterval }}" {{ if not .Params.CanEdit }}disabled{{ end }} class="pc-internal-form-select {{ if not .Params.CanEdit }}pc-internal-form-select-disabled{{ end }}">
@@ -81,7 +81,7 @@
<div class="mt-2 flex gap-3">
<div class="flex h-6 shrink-0 items-center">
<div class="group grid size-4 grid-cols-1">
<input id="{{ .Const.AllowReplay }}" aria-describedby="{{ .Const.AllowReplay }}-description" name="{{ .Const.AllowReplay }}" type="checkbox" {{ if $.Params.Property.AllowReplay }}checked{{ end }} class="col-start-1 row-start-1 pc-internal-form-checkbox">
<input id="{{ .Const.AllowReplay }}" x-model="replayEnabled" aria-describedby="{{ .Const.AllowReplay }}-description" name="{{ .Const.AllowReplay }}" type="checkbox" class="col-start-1 row-start-1 pc-internal-form-checkbox">
<svg class="pointer-events-none col-start-1 row-start-1 size-3.5 self-center justify-self-center stroke-white group-has-[:disabled]:stroke-gray-950/25" viewBox="0 0 14 14" fill="none">
<path class="opacity-0 group-has-[:checked]:opacity-100" d="M3 8L6 11L11 3.5" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" />
<path class="opacity-0 group-has-[:indeterminate]:opacity-100" d="M3 7H11" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" />
@@ -97,6 +97,10 @@
{{- end }}
</div>
</div>
<div class="mt-2">
<input type="number" :disabled="!replayEnabled" name="{{ .Const.MaxReplayCount }}" min="1" max="1000000" placeholder="2" value="{{ $.Params.Property.MaxReplayCount }}" class="pc-internal-form-input-base" :class="replayEnabled ? 'pc-form-input-normal' : 'pc-form-input-disabled'" />
</div>
</div>
<div class="col-span-full">