Cache chart stats also at the server level

This commit is contained in:
Taras Kushnir
2025-12-07 20:13:18 +01:00
parent 55377576e2
commit 9151c22370
8 changed files with 52 additions and 13 deletions
+1 -1
View File
@@ -147,7 +147,7 @@ func run(ctx context.Context, cfg common.ConfigStore, stderr io.Writer, listener
defer clickhouse.Close()
businessDB := db.NewBusiness(pool)
timeSeriesDB := db.NewTimeSeries(clickhouse)
timeSeriesDB := db.NewTimeSeries(clickhouse, businessDB.Cache)
puzzleVerifier := api.NewVerifier(cfg, businessDB)
+1 -1
View File
@@ -59,7 +59,7 @@ func TestMain(m *testing.M) {
panic(dberr)
}
timeSeries = db.NewTimeSeries(clickhouse)
timeSeries = db.NewTimeSeries(clickhouse, cache)
var err error
cache, err = db.NewMemoryCache[db.CacheKey, any]("default", 1000, &struct{}{}, 1*time.Minute, 3*time.Minute, 30*time.Second)
+1 -1
View File
@@ -49,7 +49,7 @@ var (
JSONContentHeaders = map[string][]string{
HeaderContentType: []string{ContentTypeJSON},
}
PrivateCacheControl1h = []string{"private, max-age=60"}
PrivateCacheControl1m = []string{"private, max-age=60"}
)
func NoopMiddleware(next http.Handler) http.Handler {
+10
View File
@@ -214,6 +214,8 @@ const (
propertyAuditLogsCacheKeyPrefix
orgAuditLogsCacheKeyPrefix
userPropertiesCountCachePrefix
userAccountStatsCachePrefix
propertyStatsCachePrefix
// Add new fields _above_
CACHE_KEY_PREFIXES_COUNT
)
@@ -248,6 +250,8 @@ func init() {
cachePrefixToStrings[propertyAuditLogsCacheKeyPrefix] = "propAuditLogs/"
cachePrefixToStrings[orgAuditLogsCacheKeyPrefix] = "orgAuditLogs/"
cachePrefixToStrings[userPropertiesCountCachePrefix] = "userPropertiesCount/"
cachePrefixToStrings[userAccountStatsCachePrefix] = "userAccountStats/"
cachePrefixToStrings[propertyStatsCachePrefix] = "propertyStats/"
for i, v := range cachePrefixToStrings {
if len(v) == 0 {
@@ -351,3 +355,9 @@ func orgAuditLogsCacheKey(orgID int32) CacheKey {
func userPropertiesCountCacheKey(userID int32) CacheKey {
return Int32CacheKey(userPropertiesCountCachePrefix, userID)
}
func userAccountStatsCacheKey(userID int32, key string) CacheKey {
return CacheKey{Prefix: userAccountStatsCachePrefix, IntValue: userID, StrValue: key}
}
func propertyStatsCacheKey(propertyID int32, key string) CacheKey {
return CacheKey{Prefix: propertyStatsCachePrefix, IntValue: propertyID, StrValue: key}
}
+35 -6
View File
@@ -29,6 +29,7 @@ const (
type TimeSeriesDB struct {
Clickhouse *sql.DB
Cache common.Cache[CacheKey, any]
statsQueryTemplate *template.Template
maintenanceMode atomic.Bool
}
@@ -44,7 +45,7 @@ func idsToString(ids []int32) string {
return idsStr
}
func NewTimeSeries(clickhouse *sql.DB) *TimeSeriesDB {
func NewTimeSeries(clickhouse *sql.DB, cache common.Cache[CacheKey, any]) *TimeSeriesDB {
// ClickHouse docs:
// The join (a search in the right table) is run before filtering in WHERE and before aggregation.
const statsQuery = `WITH requests AS
@@ -79,6 +80,7 @@ SETTINGS use_query_cache = true, query_cache_nondeterministic_function_handling
return &TimeSeriesDB{
statsQueryTemplate: template.Must(template.New("stats").Parse(statsQuery)),
Clickhouse: clickhouse,
Cache: cache,
}
}
@@ -235,6 +237,14 @@ func (ts *TimeSeriesDB) RetrieveAccountStats(ctx context.Context, userID int32,
return nil, ErrMaintenance
}
fromStr := from.Format(time.DateTime)
cacheKey := userAccountStatsCacheKey(userID, fromStr)
if stats, err := FetchCachedArray[common.TimeCount](ctx, ts.Cache, cacheKey); (err == nil) && (len(stats) > 0) {
slog.DebugContext(ctx, "User account stats were cached", "userID", userID, "key", cacheKey, "count", len(stats))
return stats, nil
}
query := `SELECT timestamp, sum(count) as count
FROM %s FINAL
WHERE user_id = {user_id:UInt32} AND timestamp >= {timestamp:DateTime}
@@ -242,7 +252,7 @@ GROUP BY timestamp
ORDER BY timestamp`
rows, err := ts.Clickhouse.Query(fmt.Sprintf(query, AccessLogTableName1mo),
clickhouse.Named("user_id", strconv.Itoa(int(userID))),
clickhouse.Named("timestamp", from.Format(time.DateTime)))
clickhouse.Named("timestamp", fromStr))
if err != nil {
slog.ErrorContext(ctx, "Failed to execute account stats query", common.ErrAttr(err))
return nil, err
@@ -261,6 +271,8 @@ ORDER BY timestamp`
results = append(results, bc)
}
_ = ts.Cache.Set(ctx, cacheKey, results)
return results, nil
}
@@ -275,34 +287,45 @@ func (ts *TimeSeriesDB) RetrievePropertyStatsByPeriod(ctx context.Context, orgID
var verificationsTable string
var timeFunction string
var interval string
var cacheKey *CacheKey
switch period {
case common.TimePeriodToday:
timeFrom = tnow.AddDate(0, 0, -1)
timeFrom = tnow.AddDate(0, 0, -1).Truncate(1 * time.Hour)
requestsTable = "request_logs_1h"
verificationsTable = "verify_logs_1h"
timeFunction = "toStartOfHour(%s)"
interval = "INTERVAL 1 HOUR"
// in server we only cache the "today" as this is the default chart in the UI
cacheKey = new(CacheKey)
*cacheKey = propertyStatsCacheKey(propertyID, timeFrom.Format(time.DateTime))
case common.TimePeriodWeek:
timeFrom = tnow.AddDate(0, 0, -7)
timeFrom = tnow.AddDate(0, 0, -7).Truncate(6 * time.Hour)
requestsTable = "request_logs_1d"
verificationsTable = "verify_logs_1d"
timeFunction = "toStartOfInterval(%s, INTERVAL 6 HOUR)"
interval = "INTERVAL 6 HOUR"
case common.TimePeriodMonth:
timeFrom = tnow.AddDate(0, -1, 0)
timeFrom = tnow.AddDate(0, -1, 0).Truncate(24 * time.Hour)
requestsTable = "request_logs_1d"
verificationsTable = "verify_logs_1d"
timeFunction = "toStartOfDay(%s)"
interval = "INTERVAL 1 DAY"
case common.TimePeriodYear:
timeFrom = tnow.AddDate(-1, 0, 0)
timeFrom = tnow.AddDate(-1, 0, 0).Truncate(24 * time.Hour)
requestsTable = "request_logs_1d"
verificationsTable = "verify_logs_1d"
timeFunction = "toStartOfMonth(%s)"
interval = "INTERVAL 1 MONTH"
}
if cacheKey != nil {
if stats, err := FetchCachedArray[common.TimePeriodStat](ctx, ts.Cache, *cacheKey); (err == nil) && (len(stats) > 0) {
slog.DebugContext(ctx, "Property stats were cached", "orgID", orgID, "propertyID", propertyID, "key", *cacheKey, "count", len(stats))
return stats, nil
}
}
data := struct {
RequestsTable string
VerifiesTable string
@@ -353,6 +376,12 @@ func (ts *TimeSeriesDB) RetrievePropertyStatsByPeriod(ctx context.Context, orgID
slog.InfoContext(ctx, "Fetched time period stats", "count", len(results), "orgID", orgID, "propID", propertyID,
"from", timeFrom, "period", period)
if cacheKey != nil {
const propertyStatsCacheTTL = 5 * time.Minute
// we have 5 min buffers for updates and we do NOT delete this cache item
ts.Cache.SetWithTTL(ctx, *cacheKey, results, propertyStatsCacheTTL)
}
return results, nil
}
+1 -1
View File
@@ -631,7 +631,7 @@ func (s *Server) getPropertyStats(w http.ResponseWriter, r *http.Request) {
cacheHeaders := map[string][]string{
common.HeaderETag: []string{etag},
common.HeaderCacheControl: common.PrivateCacheControl1h,
common.HeaderCacheControl: common.PrivateCacheControl1m,
}
common.SendJSONResponse(ctx, w, response, cacheHeaders)
+1 -1
View File
@@ -102,7 +102,7 @@ func TestMain(m *testing.M) {
panic(dberr)
}
timeSeries = db.NewTimeSeries(clickhouse)
timeSeries = db.NewTimeSeries(clickhouse, cache)
levels := difficulty.NewLevels(timeSeries, 100, 5*time.Minute)
levels.Init(2*time.Second, 5*time.Minute)
+2 -2
View File
@@ -685,7 +685,7 @@ func (s *Server) getAccountStats(w http.ResponseWriter, r *http.Request) {
return
}
timeFrom := time.Now().UTC().AddDate(-1 /*years*/, 0 /*months*/, 0 /*days*/)
timeFrom := time.Now().UTC().AddDate(-1 /*years*/, 0 /*months*/, 0 /*days*/).Truncate(24 * time.Hour)
etag := common.GenerateETag(strconv.Itoa(int(user.ID)), timeFrom.Format(time.RFC3339))
if etagHeader := r.Header.Get(common.HeaderIfNoneMatch); len(etagHeader) > 0 && (etagHeader == etag) {
w.WriteHeader(http.StatusNotModified)
@@ -724,7 +724,7 @@ func (s *Server) getAccountStats(w http.ResponseWriter, r *http.Request) {
cacheHeaders := map[string][]string{
common.HeaderETag: []string{etag},
common.HeaderCacheControl: common.PrivateCacheControl1h,
common.HeaderCacheControl: common.PrivateCacheControl1m,
}
common.SendJSONResponse(ctx, w, response, cacheHeaders)