Bump reva

This commit is contained in:
André Duffeck
2025-07-15 09:59:45 +02:00
parent e21fe7a4fe
commit 63ab8f789e
49 changed files with 13621 additions and 847 deletions

10
go.mod
View File

@@ -64,7 +64,7 @@ require (
github.com/onsi/gomega v1.37.0
github.com/open-policy-agent/opa v1.6.0
github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250707143759-32eaae12b2ce
github.com/opencloud-eu/reva/v2 v2.34.1-0.20250704134423-74abc5f04717
github.com/opencloud-eu/reva/v2 v2.34.1-0.20250716074813-cfe225225b23
github.com/orcaman/concurrent-map v1.0.0
github.com/pkg/errors v0.9.1
github.com/pkg/xattr v0.4.12
@@ -97,7 +97,7 @@ require (
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0
go.opentelemetry.io/otel/sdk v1.37.0
go.opentelemetry.io/otel/trace v1.37.0
golang.org/x/crypto v0.39.0
golang.org/x/crypto v0.40.0
golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac
golang.org/x/image v0.28.0
golang.org/x/net v0.41.0
@@ -317,9 +317,9 @@ require (
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
github.com/yashtewari/glob-intersection v0.2.0 // indirect
go.etcd.io/etcd/api/v3 v3.6.1 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.6.1 // indirect
go.etcd.io/etcd/client/v3 v3.6.1 // indirect
go.etcd.io/etcd/api/v3 v3.6.2 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.6.2 // indirect
go.etcd.io/etcd/client/v3 v3.6.2 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect

20
go.sum
View File

@@ -868,8 +868,8 @@ github.com/opencloud-eu/go-micro-plugins/v4/store/nats-js-kv v0.0.0-202505121527
github.com/opencloud-eu/go-micro-plugins/v4/store/nats-js-kv v0.0.0-20250512152754-23325793059a/go.mod h1:pjcozWijkNPbEtX5SIQaxEW/h8VAVZYTLx+70bmB3LY=
github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250707143759-32eaae12b2ce h1:tjbIYsW5CFsEbCf5B/KN0Mo1oKU/K+oipgFm2B6wzG4=
github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250707143759-32eaae12b2ce/go.mod h1:pzatilMEHZFT3qV7C/X3MqOa3NlRQuYhlRhZTL+hN6Q=
github.com/opencloud-eu/reva/v2 v2.34.1-0.20250704134423-74abc5f04717 h1:khqL0AenfN0vt6oXgvbqH4UIuJk+2+oxWSJKcal4GYQ=
github.com/opencloud-eu/reva/v2 v2.34.1-0.20250704134423-74abc5f04717/go.mod h1:hSIUWU8JUaX+y0cVjbh6VaW6Mh0uJ/azFPx5cSVhQfc=
github.com/opencloud-eu/reva/v2 v2.34.1-0.20250716074813-cfe225225b23 h1:FY6l12zi57efPXe9kVU1U6FB6HMuAV/t0XJPEU2XVDw=
github.com/opencloud-eu/reva/v2 v2.34.1-0.20250716074813-cfe225225b23/go.mod h1:5Zur6s3GoCbhdU09voU8EO+Ls71NiHgWYmhcvmngjwY=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
@@ -1157,12 +1157,12 @@ github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQ
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I=
go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM=
go.etcd.io/etcd/api/v3 v3.6.1 h1:yJ9WlDih9HT457QPuHt/TH/XtsdN2tubyxyQHSHPsEo=
go.etcd.io/etcd/api/v3 v3.6.1/go.mod h1:lnfuqoGsXMlZdTJlact3IB56o3bWp1DIlXPIGKRArto=
go.etcd.io/etcd/client/pkg/v3 v3.6.1 h1:CxDVv8ggphmamrXM4Of8aCC8QHzDM4tGcVr9p2BSoGk=
go.etcd.io/etcd/client/pkg/v3 v3.6.1/go.mod h1:aTkCp+6ixcVTZmrJGa7/Mc5nMNs59PEgBbq+HCmWyMc=
go.etcd.io/etcd/client/v3 v3.6.1 h1:KelkcizJGsskUXlsxjVrSmINvMMga0VWwFF0tSPGEP0=
go.etcd.io/etcd/client/v3 v3.6.1/go.mod h1:fCbPUdjWNLfx1A6ATo9syUmFVxqHH9bCnPLBZmnLmMY=
go.etcd.io/etcd/api/v3 v3.6.2 h1:25aCkIMjUmiiOtnBIp6PhNj4KdcURuBak0hU2P1fgRc=
go.etcd.io/etcd/api/v3 v3.6.2/go.mod h1:eFhhvfR8Px1P6SEuLT600v+vrhdDTdcfMzmnxVXXSbk=
go.etcd.io/etcd/client/pkg/v3 v3.6.2 h1:zw+HRghi/G8fKpgKdOcEKpnBTE4OO39T6MegA0RopVU=
go.etcd.io/etcd/client/pkg/v3 v3.6.2/go.mod h1:sbdzr2cl3HzVmxNw//PH7aLGVtY4QySjQFuaCgcRFAI=
go.etcd.io/etcd/client/v3 v3.6.2 h1:RgmcLJxkpHqpFvgKNwAQHX3K+wsSARMXKgjmUSpoSKQ=
go.etcd.io/etcd/client/v3 v3.6.2/go.mod h1:PL7e5QMKzjybn0FosgiWvCUDzvdChpo5UgGR4Sk4Gzc=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -1239,8 +1239,8 @@ golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=

View File

@@ -0,0 +1,73 @@
// Copyright 2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package syncx
import "sync"
// Map is a type-safe wrapper around sync.Map.
// It is safe for concurrent use.
// The zero value of Map is an empty map ready to use.
type Map[K comparable, V any] struct {
	m sync.Map
}

// Load returns the value stored for key. The boolean reports whether the
// key was present; when it is false the returned value is V's zero value.
func (m *Map[K, V]) Load(key K) (V, bool) {
	v, ok := m.m.Load(key)
	if !ok {
		var empty V
		return empty, false
	}
	return v.(V), true
}

// Store sets the value for key, replacing any existing value.
func (m *Map[K, V]) Store(key K, value V) {
	m.m.Store(key, value)
}

// Delete removes key and its value; it is a no-op if the key is absent.
func (m *Map[K, V]) Delete(key K) {
	m.m.Delete(key)
}

// Range calls f for each key/value pair present in the map, stopping early
// if f returns false. Consistency guarantees are those of sync.Map.Range.
func (m *Map[K, V]) Range(f func(key K, value V) bool) {
	m.m.Range(func(key, value any) bool {
		return f(key.(K), value.(V))
	})
}

// LoadOrStore returns the existing value for key if present; otherwise it
// stores and returns value. The boolean is true if the value was loaded,
// false if it was stored.
func (m *Map[K, V]) LoadOrStore(key K, value V) (V, bool) {
	v, loaded := m.m.LoadOrStore(key, value)
	return v.(V), loaded
}

// LoadAndDelete removes key, returning its previous value if present.
func (m *Map[K, V]) LoadAndDelete(key K) (V, bool) {
	v, ok := m.m.LoadAndDelete(key)
	if !ok {
		var empty V
		return empty, false
	}
	return v.(V), true
}

// CompareAndSwap swaps the old and new values for key if the value stored
// in the map equals old.
func (m *Map[K, V]) CompareAndSwap(key K, old, new V) bool {
	return m.m.CompareAndSwap(key, old, new)
}

// CompareAndDelete deletes the entry for key if its value equals value.
func (m *Map[K, V]) CompareAndDelete(key K, value V) bool {
	return m.m.CompareAndDelete(key, value)
}

// Swap stores value for key and returns the previous value, if any. The
// boolean reports whether a previous value was present.
//
// Fix: when the key was absent, sync.Map.Swap returns a nil interface and
// the unconditional previous.(V) assertion panicked for any non-interface
// V; return V's zero value and false instead.
func (m *Map[K, V]) Swap(key K, value V) (V, bool) {
	previous, loaded := m.m.Swap(key, value)
	if !loaded {
		var empty V
		return empty, false
	}
	return previous.(V), true
}

1040
vendor/github.com/nats-io/nats.go/jetstream/README.md generated vendored Normal file

File diff suppressed because it is too large Load Diff

158
vendor/github.com/nats-io/nats.go/jetstream/api.go generated vendored Normal file
View File

@@ -0,0 +1,158 @@
// Copyright 2022-2025 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"context"
"encoding/json"
"strings"
)
type (
	// apiResponse is the common envelope shared by all JetStream API
	// replies: a response type identifier plus an optional API-level error.
	apiResponse struct {
		Type  string    `json:"type"`
		Error *APIError `json:"error,omitempty"`
	}

	// apiPaged includes variables used to create paged responses from the JSON API.
	apiPaged struct {
		Total  int `json:"total"`
		Offset int `json:"offset"`
		Limit  int `json:"limit"`
	}
)
// Request API subjects for JetStream. The %s verbs are filled in with
// stream/consumer names (and, where applicable, a filter subject) via
// fmt.Sprintf before the request is sent.
const (
	// DefaultAPIPrefix is the default prefix for the JetStream API.
	DefaultAPIPrefix = "$JS.API."

	// jsDomainT is used to create JetStream API prefix by specifying only Domain
	jsDomainT = "$JS.%s.API."

	// jsExtDomainT is used to create a StreamSource External APIPrefix
	jsExtDomainT = "$JS.%s.API"

	// apiAccountInfo is for obtaining general information about JetStream.
	apiAccountInfo = "INFO"

	// apiConsumerCreateT is used to create consumers.
	// It accepts stream name and consumer name.
	apiConsumerCreateT = "CONSUMER.CREATE.%s.%s"

	// apiConsumerCreateWithFilterSubjectT is used to create consumers.
	// It accepts stream name, consumer name and filter subject.
	apiConsumerCreateWithFilterSubjectT = "CONSUMER.CREATE.%s.%s.%s"

	// apiConsumerInfoT is used to fetch consumer information.
	apiConsumerInfoT = "CONSUMER.INFO.%s.%s"

	// apiRequestNextT is the prefix for the request next message(s) for a consumer in worker/pull mode.
	apiRequestNextT = "CONSUMER.MSG.NEXT.%s.%s"

	// apiConsumerDeleteT is used to delete consumers.
	apiConsumerDeleteT = "CONSUMER.DELETE.%s.%s"

	// apiConsumerPauseT is used to pause a consumer.
	apiConsumerPauseT = "CONSUMER.PAUSE.%s.%s"

	// apiConsumerListT is used to return all detailed consumer information
	apiConsumerListT = "CONSUMER.LIST.%s"

	// apiConsumerNamesT is used to return a list with all consumer names for the stream.
	apiConsumerNamesT = "CONSUMER.NAMES.%s"

	// apiStreams can lookup a stream by subject.
	apiStreams = "STREAM.NAMES"

	// apiStreamCreateT is the endpoint to create new streams.
	apiStreamCreateT = "STREAM.CREATE.%s"

	// apiStreamInfoT is the endpoint to get information on a stream.
	apiStreamInfoT = "STREAM.INFO.%s"

	// apiStreamUpdateT is the endpoint to update existing streams.
	apiStreamUpdateT = "STREAM.UPDATE.%s"

	// apiStreamDeleteT is the endpoint to delete streams.
	apiStreamDeleteT = "STREAM.DELETE.%s"

	// apiStreamPurgeT is the endpoint to purge streams.
	apiStreamPurgeT = "STREAM.PURGE.%s"

	// apiStreamListT is the endpoint that will return all detailed stream information
	apiStreamListT = "STREAM.LIST"

	// apiMsgGetT is the endpoint to get a message.
	apiMsgGetT = "STREAM.MSG.GET.%s"

	// apiDirectMsgGetT is the endpoint to perform a direct get of a message.
	apiDirectMsgGetT = "DIRECT.GET.%s"

	// apiDirectMsgGetLastBySubjectT is the endpoint to perform a direct get of a message by subject.
	apiDirectMsgGetLastBySubjectT = "DIRECT.GET.%s.%s"

	// apiMsgDeleteT is the endpoint to remove a message.
	apiMsgDeleteT = "STREAM.MSG.DELETE.%s"

	// apiConsumerUnpinT is the endpoint to unpin a consumer.
	apiConsumerUnpinT = "CONSUMER.UNPIN.%s.%s"
)
// apiRequestJSON issues a JetStream API request on the given subject and
// unmarshals the JSON reply into resp. The raw JetStream message is
// returned as well so callers can inspect headers or the raw payload.
func (js *jetStream) apiRequestJSON(ctx context.Context, subject string, resp any, data ...[]byte) (*jetStreamMsg, error) {
	msg, err := js.apiRequest(ctx, subject, data...)
	if err != nil {
		return nil, err
	}
	if unmarshalErr := json.Unmarshal(msg.Data(), resp); unmarshalErr != nil {
		return nil, unmarshalErr
	}
	return msg, nil
}
// apiRequest performs a RequestWithContext against the JetStream API,
// invoking the configured client trace callbacks (if any) immediately
// before sending the request and after receiving the response.
func (js *jetStream) apiRequest(ctx context.Context, subj string, data ...[]byte) (*jetStreamMsg, error) {
	subj = js.apiSubject(subj)

	// Only the first payload (if any) is sent; the variadic form exists so
	// callers may omit the body entirely.
	var payload []byte
	if len(data) > 0 {
		payload = data[0]
	}

	if trace := js.opts.clientTrace; trace != nil && trace.RequestSent != nil {
		trace.RequestSent(subj, payload)
	}
	resp, err := js.conn.RequestWithContext(ctx, subj, payload)
	if err != nil {
		return nil, err
	}
	if trace := js.opts.clientTrace; trace != nil && trace.ResponseReceived != nil {
		trace.ResponseReceived(subj, resp.Data, resp.Header)
	}

	return js.toJSMsg(resp), nil
}
// apiSubject prepends the configured API prefix (if any) to subj.
func (js *jetStream) apiSubject(subj string) string {
	prefix := js.opts.apiPrefix
	if prefix == "" {
		return subj
	}
	var sb strings.Builder
	sb.Grow(len(prefix) + len(subj))
	sb.WriteString(prefix)
	sb.WriteString(subj)
	return sb.String()
}

410
vendor/github.com/nats-io/nats.go/jetstream/consumer.go generated vendored Normal file
View File

@@ -0,0 +1,410 @@
// Copyright 2022-2025 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"context"
"crypto/sha256"
"encoding/json"
"fmt"
"strings"
"time"
"github.com/nats-io/nats.go/internal/syncx"
"github.com/nats-io/nuid"
)
type (
	// Consumer contains methods for fetching/processing messages from a stream,
	// as well as fetching consumer info.
	//
	// This package provides two implementations of Consumer interface:
	//
	// - Standard named/ephemeral pull consumers. These consumers are created using
	//   CreateConsumer method on Stream or JetStream interface. They can be
	//   explicitly configured (using [ConsumerConfig]) and managed by the user,
	//   either from this package or externally.
	//
	// - Ordered consumers. These consumers are created using OrderedConsumer
	//   method on Stream or JetStream interface. They are managed by the library
	//   and provide a simple way to consume messages from a stream. Ordered
	//   consumers are ephemeral in-memory pull consumers and are resilient to
	//   deletes and restarts. They provide limited configuration options
	//   using [OrderedConsumerConfig].
	//
	// Consumer provides method for optimized continuous consumption of messages
	// using Consume and Messages methods, as well as simple one-off messages
	// retrieval using Fetch and Next methods.
	Consumer interface {
		// Fetch is used to retrieve up to a provided number of messages from a
		// stream. This method will send a single request and deliver either all
		// requested messages unless time out is met earlier. Fetch timeout
		// defaults to 30 seconds and can be configured using FetchMaxWait
		// option.
		//
		// By default, Fetch uses a 5s idle heartbeat for requests longer than
		// 10 seconds. For shorter requests, the idle heartbeat is disabled.
		// This can be configured using FetchHeartbeat option. If a client does
		// not receive a heartbeat message from a stream for more than 2 times
		// the idle heartbeat setting, Fetch will return [ErrNoHeartbeat].
		//
		// Fetch is non-blocking and returns MessageBatch, exposing a channel
		// for delivered messages.
		//
		// Messages channel is always closed, thus it is safe to range over it
		// without additional checks. After the channel is closed,
		// MessageBatch.Error() should be checked to see if there was an error
		// during message delivery (e.g. missing heartbeat).
		Fetch(batch int, opts ...FetchOpt) (MessageBatch, error)

		// FetchBytes is used to retrieve up to a provided bytes from the
		// stream. This method will send a single request and deliver the
		// provided number of bytes unless time out is met earlier. FetchBytes
		// timeout defaults to 30 seconds and can be configured using
		// FetchMaxWait option.
		//
		// By default, FetchBytes uses a 5s idle heartbeat for requests longer than
		// 10 seconds. For shorter requests, the idle heartbeat is disabled.
		// This can be configured using FetchHeartbeat option. If a client does
		// not receive a heartbeat message from a stream for more than 2 times
		// the idle heartbeat setting, FetchBytes will return ErrNoHeartbeat.
		//
		// FetchBytes is non-blocking and returns MessageBatch, exposing a channel
		// for delivered messages.
		//
		// Messages channel is always closed, thus it is safe to range over it
		// without additional checks. After the channel is closed,
		// MessageBatch.Error() should be checked to see if there was an error
		// during message delivery (e.g. missing heartbeat).
		FetchBytes(maxBytes int, opts ...FetchOpt) (MessageBatch, error)

		// FetchNoWait is used to retrieve up to a provided number of messages
		// from a stream. Unlike Fetch, FetchNoWait will only deliver messages
		// that are currently available in the stream and will not wait for new
		// messages to arrive, even if batch size is not met.
		//
		// FetchNoWait is non-blocking and returns MessageBatch, exposing a
		// channel for delivered messages.
		//
		// Messages channel is always closed, thus it is safe to range over it
		// without additional checks. After the channel is closed,
		// MessageBatch.Error() should be checked to see if there was an error
		// during message delivery (e.g. missing heartbeat).
		FetchNoWait(batch int) (MessageBatch, error)

		// Consume will continuously receive messages and handle them
		// with the provided callback function. Consume can be configured using
		// PullConsumeOpt options:
		//
		// - Error handling and monitoring can be configured using ConsumeErrHandler
		//   option, which provides information about errors encountered during
		//   consumption (both transient and terminal)
		// - Consume can be configured to stop after a certain number of
		//   messages is received using StopAfter option.
		// - Consume can be optimized for throughput or memory usage using
		//   PullExpiry, PullMaxMessages, PullMaxBytes and PullHeartbeat options.
		//   Unless there is a specific use case, these options should not be used.
		//
		// Consume returns a ConsumeContext, which can be used to stop or drain
		// the consumer.
		Consume(handler MessageHandler, opts ...PullConsumeOpt) (ConsumeContext, error)

		// Messages returns MessagesContext, allowing continuously iterating
		// over messages on a stream. Messages can be configured using
		// PullMessagesOpt options:
		//
		// - Messages can be optimized for throughput or memory usage using
		//   PullExpiry, PullMaxMessages, PullMaxBytes and PullHeartbeat options.
		//   Unless there is a specific use case, these options should not be used.
		// - WithMessagesErrOnMissingHeartbeat can be used to enable/disable
		//   erroring out on MessagesContext.Next when a heartbeat is missing.
		//   This option is enabled by default.
		Messages(opts ...PullMessagesOpt) (MessagesContext, error)

		// Next is used to retrieve the next message from the consumer. This
		// method will block until the message is retrieved or timeout is
		// reached.
		Next(opts ...FetchOpt) (Msg, error)

		// Info fetches current ConsumerInfo from the server.
		Info(context.Context) (*ConsumerInfo, error)

		// CachedInfo returns ConsumerInfo currently cached on this consumer.
		// This method does not perform any network requests. The cached
		// ConsumerInfo is updated on every call to Info and Update.
		CachedInfo() *ConsumerInfo
	}

	// createConsumerRequest is the JSON payload sent to the server when
	// creating or updating a consumer.
	createConsumerRequest struct {
		// Stream is the name of the stream the consumer belongs to.
		Stream string `json:"stream_name"`
		// Config holds the requested consumer configuration.
		Config *ConsumerConfig `json:"config"`
		// Action selects create/update semantics (see consumerAction* consts).
		Action string `json:"action"`
	}
)
// Info fetches current ConsumerInfo from the server and refreshes the
// locally cached copy returned by CachedInfo.
func (p *pullConsumer) Info(ctx context.Context) (*ConsumerInfo, error) {
	ctx, cancel := p.js.wrapContextWithoutDeadline(ctx)
	if cancel != nil {
		defer cancel()
	}

	var resp consumerInfoResponse
	subj := fmt.Sprintf(apiConsumerInfoT, p.stream, p.name)
	if _, err := p.js.apiRequestJSON(ctx, subj, &resp); err != nil {
		return nil, err
	}
	if apiErr := resp.Error; apiErr != nil {
		if apiErr.ErrorCode == JSErrCodeConsumerNotFound {
			return nil, ErrConsumerNotFound
		}
		return nil, apiErr
	}
	// A reply carrying neither an error nor consumer info means the
	// consumer does not exist.
	if resp.ConsumerInfo == nil {
		return nil, ErrConsumerNotFound
	}

	p.info = resp.ConsumerInfo
	return resp.ConsumerInfo, nil
}
// CachedInfo returns the ConsumerInfo currently cached on this consumer
// without performing any network requests. The cache is refreshed by every
// call to Info and Update.
func (p *pullConsumer) CachedInfo() *ConsumerInfo {
	return p.info
}
// upsertConsumer creates or updates a consumer on the given stream,
// depending on action (see consumerAction* constants), and returns a
// pullConsumer wrapping the server's reply.
func upsertConsumer(ctx context.Context, js *jetStream, stream string, cfg ConsumerConfig, action string) (Consumer, error) {
	ctx, cancel := js.wrapContextWithoutDeadline(ctx)
	if cancel != nil {
		defer cancel()
	}

	reqJSON, err := json.Marshal(createConsumerRequest{
		Stream: stream,
		Config: &cfg,
		Action: action,
	})
	if err != nil {
		return nil, err
	}

	// Effective consumer name: explicit Name wins, then Durable, otherwise
	// a random name is generated.
	name := cfg.Name
	if name == "" {
		name = cfg.Durable
	}
	if name == "" {
		name = generateConsName()
	}
	if err := validateConsumerName(name); err != nil {
		return nil, err
	}

	// A single filter subject is encoded into the request subject itself;
	// multiple filter subjects travel only in the JSON payload.
	ccSubj := fmt.Sprintf(apiConsumerCreateT, stream, name)
	if cfg.FilterSubject != "" && len(cfg.FilterSubjects) == 0 {
		if err := validateSubject(cfg.FilterSubject); err != nil {
			return nil, err
		}
		ccSubj = fmt.Sprintf(apiConsumerCreateWithFilterSubjectT, stream, name, cfg.FilterSubject)
	}

	var resp consumerInfoResponse
	if _, err := js.apiRequestJSON(ctx, ccSubj, &resp, reqJSON); err != nil {
		return nil, err
	}
	if resp.Error != nil {
		if resp.Error.ErrorCode == JSErrCodeStreamNotFound {
			return nil, ErrStreamNotFound
		}
		return nil, resp.Error
	}

	// check whether multiple filter subjects (if used) are reflected in the
	// returned ConsumerInfo; servers that do not support them drop the field.
	if len(cfg.FilterSubjects) != 0 && len(resp.Config.FilterSubjects) == 0 {
		return nil, ErrConsumerMultipleFilterSubjectsNotSupported
	}

	return &pullConsumer{
		js:      js,
		stream:  stream,
		name:    resp.Name,
		durable: cfg.Durable != "",
		info:    resp.ConsumerInfo,
		subs:    syncx.Map[string, *pullSubscription]{},
	}, nil
}
// Action values carried in createConsumerRequest.Action; the server
// interprets them when handling CONSUMER.CREATE requests.
const (
	consumerActionCreate = "create"
	consumerActionUpdate = "update"
	// An empty action requests create-or-update semantics (presumably the
	// server default — confirm against the JetStream API docs).
	consumerActionCreateOrUpdate = ""
)
// generateConsName produces a random 8-character consumer name by hashing
// a fresh NUID with SHA-256 and mapping the first 8 bytes onto the
// rdigits alphabet.
func generateConsName() string {
	sum := sha256.Sum256([]byte(nuid.Next()))
	out := make([]byte, 8)
	for i, c := range sum[:8] {
		out[i] = rdigits[int(c%base)]
	}
	return string(out)
}
// getConsumer looks up an existing consumer by name on the given stream
// and wraps it in a pullConsumer.
func getConsumer(ctx context.Context, js *jetStream, stream, name string) (Consumer, error) {
	ctx, cancel := js.wrapContextWithoutDeadline(ctx)
	if cancel != nil {
		defer cancel()
	}
	if err := validateConsumerName(name); err != nil {
		return nil, err
	}

	var resp consumerInfoResponse
	subj := fmt.Sprintf(apiConsumerInfoT, stream, name)
	if _, err := js.apiRequestJSON(ctx, subj, &resp); err != nil {
		return nil, err
	}
	if apiErr := resp.Error; apiErr != nil {
		if apiErr.ErrorCode == JSErrCodeConsumerNotFound {
			return nil, ErrConsumerNotFound
		}
		return nil, apiErr
	}
	// A reply carrying neither an error nor consumer info means the
	// consumer does not exist.
	if resp.ConsumerInfo == nil {
		return nil, ErrConsumerNotFound
	}

	return &pullConsumer{
		js:      js,
		stream:  stream,
		name:    name,
		durable: resp.Config.Durable != "",
		info:    resp.ConsumerInfo,
		subs:    syncx.Map[string, *pullSubscription]{},
	}, nil
}
// deleteConsumer removes the named consumer from the given stream.
func deleteConsumer(ctx context.Context, js *jetStream, stream, consumer string) error {
	ctx, cancel := js.wrapContextWithoutDeadline(ctx)
	if cancel != nil {
		defer cancel()
	}
	if err := validateConsumerName(consumer); err != nil {
		return err
	}

	var resp consumerDeleteResponse
	subj := fmt.Sprintf(apiConsumerDeleteT, stream, consumer)
	if _, err := js.apiRequestJSON(ctx, subj, &resp); err != nil {
		return err
	}
	switch {
	case resp.Error == nil:
		return nil
	case resp.Error.ErrorCode == JSErrCodeConsumerNotFound:
		return ErrConsumerNotFound
	default:
		return resp.Error
	}
}
// pauseConsumer pauses message delivery on the named consumer until
// pauseUntil; a nil pauseUntil resumes delivery (see resumeConsumer).
func pauseConsumer(ctx context.Context, js *jetStream, stream, consumer string, pauseUntil *time.Time) (*ConsumerPauseResponse, error) {
	ctx, cancel := js.wrapContextWithoutDeadline(ctx)
	if cancel != nil {
		defer cancel()
	}
	if err := validateConsumerName(consumer); err != nil {
		return nil, err
	}

	payload, err := json.Marshal(consumerPauseRequest{
		PauseUntil: pauseUntil,
	})
	if err != nil {
		return nil, err
	}

	var resp consumerPauseApiResponse
	subj := fmt.Sprintf(apiConsumerPauseT, stream, consumer)
	if _, err := js.apiRequestJSON(ctx, subj, &resp, payload); err != nil {
		return nil, err
	}
	if apiErr := resp.Error; apiErr != nil {
		if apiErr.ErrorCode == JSErrCodeConsumerNotFound {
			return nil, ErrConsumerNotFound
		}
		return nil, apiErr
	}

	return &ConsumerPauseResponse{
		Paused:         resp.Paused,
		PauseUntil:     resp.PauseUntil,
		PauseRemaining: resp.PauseRemaining,
	}, nil
}
// resumeConsumer lifts a pause on the named consumer by issuing a pause
// request with a nil deadline.
func resumeConsumer(ctx context.Context, js *jetStream, stream, consumer string) (*ConsumerPauseResponse, error) {
	return pauseConsumer(ctx, js, stream, consumer, nil)
}
// validateConsumerName reports ErrInvalidConsumerName (wrapped) when the
// name is empty or contains any of the characters forbidden in consumer
// names: '>', '*', '.', ' ', '/' or '\'.
func validateConsumerName(dur string) error {
	switch {
	case dur == "":
		return fmt.Errorf("%w: '%s'", ErrInvalidConsumerName, "name is required")
	case strings.ContainsAny(dur, ">*. /\\"):
		return fmt.Errorf("%w: '%s'", ErrInvalidConsumerName, dur)
	default:
		return nil
	}
}
// unpinConsumer clears the currently pinned client for the given priority
// group on the named consumer.
func unpinConsumer(ctx context.Context, js *jetStream, stream, consumer, group string) error {
	ctx, cancel := js.wrapContextWithoutDeadline(ctx)
	if cancel != nil {
		defer cancel()
	}
	if err := validateConsumerName(consumer); err != nil {
		return err
	}

	payload, err := json.Marshal(consumerUnpinRequest{
		Group: group,
	})
	if err != nil {
		return err
	}

	var resp apiResponse
	subj := fmt.Sprintf(apiConsumerUnpinT, stream, consumer)
	if _, err := js.apiRequestJSON(ctx, subj, &resp, payload); err != nil {
		return err
	}
	switch {
	case resp.Error == nil:
		return nil
	case resp.Error.ErrorCode == JSErrCodeConsumerNotFound:
		return ErrConsumerNotFound
	default:
		return resp.Error
	}
}

View File

@@ -0,0 +1,544 @@
// Copyright 2022-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"encoding/json"
"fmt"
"time"
)
type (
// ConsumerInfo is the detailed information about a JetStream consumer.
ConsumerInfo struct {
// Stream specifies the name of the stream that the consumer is bound
// to.
Stream string `json:"stream_name"`
// Name represents the unique identifier for the consumer. This can be
// either set explicitly by the client or generated automatically if not
// set.
Name string `json:"name"`
// Created is the timestamp when the consumer was created.
Created time.Time `json:"created"`
// Config contains the configuration settings of the consumer, set when
// creating or updating the consumer.
Config ConsumerConfig `json:"config"`
// Delivered holds information about the most recently delivered
// message, including its sequence numbers and timestamp.
Delivered SequenceInfo `json:"delivered"`
// AckFloor indicates the message before the first unacknowledged
// message.
AckFloor SequenceInfo `json:"ack_floor"`
// NumAckPending is the number of messages that have been delivered but
// not yet acknowledged.
NumAckPending int `json:"num_ack_pending"`
// NumRedelivered counts the number of messages that have been
// redelivered and not yet acknowledged. Each message is counted only
// once, even if it has been redelivered multiple times. This count is
// reset when the message is eventually acknowledged.
NumRedelivered int `json:"num_redelivered"`
// NumWaiting is the count of active pull requests. It is only relevant
// for pull-based consumers.
NumWaiting int `json:"num_waiting"`
// NumPending is the number of messages that match the consumer's
// filter, but have not been delivered yet.
NumPending uint64 `json:"num_pending"`
// Cluster contains information about the cluster to which this consumer
// belongs (if applicable).
Cluster *ClusterInfo `json:"cluster,omitempty"`
// PushBound indicates whether at least one subscription exists for the
// delivery subject of this consumer. This is only applicable to
// push-based consumers.
PushBound bool `json:"push_bound,omitempty"`
// TimeStamp indicates when the info was gathered by the server.
TimeStamp time.Time `json:"ts"`
// PriorityGroups contains the information about the currently defined priority groups
PriorityGroups []PriorityGroupState `json:"priority_groups,omitempty"`
// Paused indicates whether the consumer is paused.
Paused bool `json:"paused,omitempty"`
// PauseRemaining contains the amount of time left until the consumer
// unpauses. It will only be non-zero if the consumer is currently paused.
PauseRemaining time.Duration `json:"pause_remaining,omitempty"`
}
PriorityGroupState struct {
// Group this status is for.
Group string `json:"group"`
// PinnedClientID is the generated ID of the pinned client.
PinnedClientID string `json:"pinned_client_id,omitempty"`
// PinnedTS is the timestamp when the client was pinned.
PinnedTS time.Time `json:"pinned_ts,omitempty"`
}
// ConsumerConfig is the configuration of a JetStream consumer.
ConsumerConfig struct {
// Name is an optional name for the consumer. If not set, one is
// generated automatically.
//
// Name cannot contain whitespace, ., *, >, path separators (forward or
// backwards slash), and non-printable characters.
Name string `json:"name,omitempty"`
// Durable is an optional durable name for the consumer. If both Durable
// and Name are set, they have to be equal. Unless InactiveThreshold is set, a
// durable consumer will not be cleaned up automatically.
//
// Durable cannot contain whitespace, ., *, >, path separators (forward or
// backwards slash), and non-printable characters.
Durable string `json:"durable_name,omitempty"`
// Description provides an optional description of the consumer.
Description string `json:"description,omitempty"`
// DeliverPolicy defines from which point to start delivering messages
// from the stream. Defaults to DeliverAllPolicy.
DeliverPolicy DeliverPolicy `json:"deliver_policy"`
// OptStartSeq is an optional sequence number from which to start
// message delivery. Only applicable when DeliverPolicy is set to
// DeliverByStartSequencePolicy.
OptStartSeq uint64 `json:"opt_start_seq,omitempty"`
// OptStartTime is an optional time from which to start message
// delivery. Only applicable when DeliverPolicy is set to
// DeliverByStartTimePolicy.
OptStartTime *time.Time `json:"opt_start_time,omitempty"`
// AckPolicy defines the acknowledgement policy for the consumer.
// Defaults to AckExplicitPolicy.
AckPolicy AckPolicy `json:"ack_policy"`
// AckWait defines how long the server will wait for an acknowledgement
// before resending a message. If not set, server default is 30 seconds.
AckWait time.Duration `json:"ack_wait,omitempty"`
// MaxDeliver defines the maximum number of delivery attempts for a
// message. Applies to any message that is re-sent due to ack policy.
// If not set, server default is -1 (unlimited).
MaxDeliver int `json:"max_deliver,omitempty"`
// BackOff specifies the optional back-off intervals for retrying
// message delivery after a failed acknowledgement. It overrides
// AckWait.
//
// BackOff only applies to messages not acknowledged in specified time,
// not messages that were nack'ed.
//
// The number of intervals specified must be lower or equal to
// MaxDeliver. If the number of intervals is lower, the last interval is
// used for all remaining attempts.
BackOff []time.Duration `json:"backoff,omitempty"`
// FilterSubject can be used to filter messages delivered from the
// stream. FilterSubject is exclusive with FilterSubjects.
FilterSubject string `json:"filter_subject,omitempty"`
// ReplayPolicy defines the rate at which messages are sent to the
// consumer. If ReplayOriginalPolicy is set, messages are sent in the
// same intervals in which they were stored on stream. This can be used
// e.g. to simulate production traffic in development environments. If
// ReplayInstantPolicy is set, messages are sent as fast as possible.
// Defaults to ReplayInstantPolicy.
ReplayPolicy ReplayPolicy `json:"replay_policy"`
// RateLimit specifies an optional maximum rate of message delivery in
// bits per second.
RateLimit uint64 `json:"rate_limit_bps,omitempty"`
// SampleFrequency is an optional frequency for sampling how often
// acknowledgements are sampled for observability. See
// https://docs.nats.io/running-a-nats-service/nats_admin/monitoring/monitoring_jetstream
SampleFrequency string `json:"sample_freq,omitempty"`
// MaxWaiting is a maximum number of pull requests waiting to be
// fulfilled. If not set, this will inherit settings from stream's
// ConsumerLimits or (if those are not set) from account settings. If
// neither are set, server default is 512.
MaxWaiting int `json:"max_waiting,omitempty"`
// MaxAckPending is a maximum number of outstanding unacknowledged
// messages. Once this limit is reached, the server will suspend sending
// messages to the consumer. If not set, server default is 1000.
// Set to -1 for unlimited.
MaxAckPending int `json:"max_ack_pending,omitempty"`
// HeadersOnly indicates whether only headers of messages should be sent
// (and no payload). Defaults to false.
HeadersOnly bool `json:"headers_only,omitempty"`
// MaxRequestBatch is the optional maximum batch size a single pull
// request can make. When set with MaxRequestMaxBytes, the batch size
// will be constrained by whichever limit is hit first.
MaxRequestBatch int `json:"max_batch,omitempty"`
// MaxRequestExpires is the maximum duration a single pull request will
// wait for messages to be available to pull.
MaxRequestExpires time.Duration `json:"max_expires,omitempty"`
// MaxRequestMaxBytes is the optional maximum total bytes that can be
// requested in a given batch. When set with MaxRequestBatch, the batch
// size will be constrained by whichever limit is hit first.
MaxRequestMaxBytes int `json:"max_bytes,omitempty"`
// InactiveThreshold is a duration which instructs the server to clean
// up the consumer if it has been inactive for the specified duration.
// Durable consumers will not be cleaned up by default, but if
// InactiveThreshold is set, they will be. If not set, this will inherit
// settings from stream's ConsumerLimits. If neither are set, server
// default is 5 seconds.
//
	// A consumer is considered inactive when there are no pull requests
	// received by the server (for pull consumers), or no interest detected
	// on the deliver subject (for push consumers) — not when there are no
	// messages to be delivered.
InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"`
// Replicas the number of replicas for the consumer's state. By default,
// consumers inherit the number of replicas from the stream.
Replicas int `json:"num_replicas"`
// MemoryStorage is a flag to force the consumer to use memory storage
// rather than inherit the storage type from the stream.
MemoryStorage bool `json:"mem_storage,omitempty"`
// FilterSubjects allows filtering messages from a stream by subject.
// This field is exclusive with FilterSubject. Requires nats-server
// v2.10.0 or later.
FilterSubjects []string `json:"filter_subjects,omitempty"`
// Metadata is a set of application-defined key-value pairs for
// associating metadata on the consumer. This feature requires
// nats-server v2.10.0 or later.
Metadata map[string]string `json:"metadata,omitempty"`
// PauseUntil is for suspending the consumer until the deadline.
PauseUntil *time.Time `json:"pause_until,omitempty"`
	// PriorityPolicy represents the priority policy the consumer is set to.
// Requires nats-server v2.11.0 or later.
PriorityPolicy PriorityPolicy `json:"priority_policy,omitempty"`
	// PinnedTTL represents the time after which the client will be unpinned
	// if no new pull requests are sent. Used with PriorityPolicyPinned.
// Requires nats-server v2.11.0 or later.
PinnedTTL time.Duration `json:"priority_timeout,omitempty"`
// PriorityGroups is a list of priority groups this consumer supports.
PriorityGroups []string `json:"priority_groups,omitempty"`
}
// OrderedConsumerConfig is the configuration of an ordered JetStream
// consumer. For more information, see [Ordered Consumers] in README
//
// [Ordered Consumers]: https://github.com/nats-io/nats.go/blob/main/jetstream/README.md#ordered-consumers
OrderedConsumerConfig struct {
// FilterSubjects allows filtering messages from a stream by subject.
// This field is exclusive with FilterSubject. Requires nats-server
// v2.10.0 or later.
FilterSubjects []string `json:"filter_subjects,omitempty"`
// DeliverPolicy defines from which point to start delivering messages
// from the stream. Defaults to DeliverAllPolicy.
DeliverPolicy DeliverPolicy `json:"deliver_policy"`
// OptStartSeq is an optional sequence number from which to start
// message delivery. Only applicable when DeliverPolicy is set to
// DeliverByStartSequencePolicy.
OptStartSeq uint64 `json:"opt_start_seq,omitempty"`
// OptStartTime is an optional time from which to start message
// delivery. Only applicable when DeliverPolicy is set to
// DeliverByStartTimePolicy.
OptStartTime *time.Time `json:"opt_start_time,omitempty"`
// ReplayPolicy defines the rate at which messages are sent to the
// consumer. If ReplayOriginalPolicy is set, messages are sent in the
// same intervals in which they were stored on stream. This can be used
// e.g. to simulate production traffic in development environments. If
// ReplayInstantPolicy is set, messages are sent as fast as possible.
// Defaults to ReplayInstantPolicy.
ReplayPolicy ReplayPolicy `json:"replay_policy"`
// InactiveThreshold is a duration which instructs the server to clean
// up the consumer if it has been inactive for the specified duration.
// Defaults to 5m.
InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"`
// HeadersOnly indicates whether only headers of messages should be sent
// (and no payload). Defaults to false.
HeadersOnly bool `json:"headers_only,omitempty"`
// Maximum number of attempts for the consumer to be recreated in a
// single recreation cycle. Defaults to unlimited.
MaxResetAttempts int
// Metadata is a set of application-defined key-value pairs for
// associating metadata on the consumer. This feature requires
// nats-server v2.10.0 or later.
Metadata map[string]string `json:"metadata,omitempty"`
}
// DeliverPolicy determines from which point to start delivering messages.
DeliverPolicy int
// AckPolicy determines how the consumer should acknowledge delivered
// messages.
AckPolicy int
// ReplayPolicy determines how the consumer should replay messages it
// already has queued in the stream.
ReplayPolicy int
// SequenceInfo has both the consumer and the stream sequence and last
// activity.
SequenceInfo struct {
Consumer uint64 `json:"consumer_seq"`
Stream uint64 `json:"stream_seq"`
Last *time.Time `json:"last_active,omitempty"`
}
// PriorityPolicy determines the priority policy the consumer is set to.
PriorityPolicy int
)
const (
	// PriorityPolicyNone is the default priority policy.
	// It is encoded as an empty JSON string on the wire.
	PriorityPolicyNone PriorityPolicy = iota

	// PriorityPolicyPinned is the priority policy that pins a consumer to a
	// specific client.
	PriorityPolicyPinned

	// PriorityPolicyOverflow is the priority policy that allows for
	// restricting when a consumer will receive messages based on the number of
	// pending messages or acks.
	PriorityPolicyOverflow
)
// UnmarshalJSON decodes the wire representation of a priority policy.
// Recognized values are "" (none), "pinned_client" and "overflow";
// anything else is rejected with an error.
func (p *PriorityPolicy) UnmarshalJSON(data []byte) error {
	byName := map[string]PriorityPolicy{
		jsonString(""):              PriorityPolicyNone,
		jsonString("pinned_client"): PriorityPolicyPinned,
		jsonString("overflow"):      PriorityPolicyOverflow,
	}
	policy, ok := byName[string(data)]
	if !ok {
		return fmt.Errorf("nats: can not unmarshal %q", data)
	}
	*p = policy
	return nil
}
// MarshalJSON encodes the priority policy into its wire representation
// (an empty string, "pinned_client" or "overflow"). An unrecognized
// value yields an error.
func (p PriorityPolicy) MarshalJSON() ([]byte, error) {
	names := map[PriorityPolicy]string{
		PriorityPolicyNone:     "",
		PriorityPolicyPinned:   "pinned_client",
		PriorityPolicyOverflow: "overflow",
	}
	if name, ok := names[p]; ok {
		return json.Marshal(name)
	}
	return nil, fmt.Errorf("nats: unknown priority policy %v", p)
}
// DeliverPolicy values. On the wire these are encoded as JSON strings
// ("all", "last", "new", ...) — see MarshalJSON/UnmarshalJSON.
const (
	// DeliverAllPolicy starts delivering messages from the very beginning of a
	// stream. This is the default.
	DeliverAllPolicy DeliverPolicy = iota

	// DeliverLastPolicy will start the consumer with the last sequence
	// received.
	DeliverLastPolicy

	// DeliverNewPolicy will only deliver new messages that are sent after the
	// consumer is created.
	DeliverNewPolicy

	// DeliverByStartSequencePolicy will deliver messages starting from a given
	// sequence configured with OptStartSeq in ConsumerConfig.
	DeliverByStartSequencePolicy

	// DeliverByStartTimePolicy will deliver messages starting from a given time
	// configured with OptStartTime in ConsumerConfig.
	DeliverByStartTimePolicy

	// DeliverLastPerSubjectPolicy will start the consumer with the last message
	// for all subjects received.
	DeliverLastPerSubjectPolicy
)
// UnmarshalJSON decodes the wire representation of a deliver policy.
// Both "all" and "undefined" map to DeliverAllPolicy; any other
// unrecognized value is rejected with an error.
func (p *DeliverPolicy) UnmarshalJSON(data []byte) error {
	byName := map[string]DeliverPolicy{
		jsonString("all"):               DeliverAllPolicy,
		jsonString("undefined"):         DeliverAllPolicy,
		jsonString("last"):              DeliverLastPolicy,
		jsonString("new"):               DeliverNewPolicy,
		jsonString("by_start_sequence"): DeliverByStartSequencePolicy,
		jsonString("by_start_time"):     DeliverByStartTimePolicy,
		jsonString("last_per_subject"):  DeliverLastPerSubjectPolicy,
	}
	policy, ok := byName[string(data)]
	if !ok {
		return fmt.Errorf("nats: can not unmarshal %q", data)
	}
	*p = policy
	return nil
}
// MarshalJSON encodes the deliver policy into its wire representation
// ("all", "last", "new", ...). An unrecognized value yields an error.
func (p DeliverPolicy) MarshalJSON() ([]byte, error) {
	names := map[DeliverPolicy]string{
		DeliverAllPolicy:             "all",
		DeliverLastPolicy:            "last",
		DeliverNewPolicy:             "new",
		DeliverByStartSequencePolicy: "by_start_sequence",
		DeliverByStartTimePolicy:     "by_start_time",
		DeliverLastPerSubjectPolicy:  "last_per_subject",
	}
	if name, ok := names[p]; ok {
		return json.Marshal(name)
	}
	return nil, fmt.Errorf("nats: unknown deliver policy %v", p)
}
// String returns the human-readable name of the deliver policy (the
// same strings used on the wire), or an empty string for an
// unrecognized value.
func (p DeliverPolicy) String() string {
	// Indexed by the iota values of the DeliverPolicy constants.
	names := []string{
		"all",
		"last",
		"new",
		"by_start_sequence",
		"by_start_time",
		"last_per_subject",
	}
	if int(p) < 0 || int(p) >= len(names) {
		return ""
	}
	return names[int(p)]
}
// AckPolicy values. On the wire these are encoded as the JSON strings
// "explicit", "all" and "none" — see MarshalJSON/UnmarshalJSON.
const (
	// AckExplicitPolicy requires ack or nack for all messages.
	AckExplicitPolicy AckPolicy = iota

	// AckAllPolicy when acking a sequence number, this implicitly acks all
	// sequences below this one as well.
	AckAllPolicy

	// AckNonePolicy requires no acks for delivered messages.
	AckNonePolicy
)
// UnmarshalJSON decodes the wire representation of an acknowledgement
// policy ("none", "all" or "explicit"); anything else is rejected with
// an error.
func (p *AckPolicy) UnmarshalJSON(data []byte) error {
	byName := map[string]AckPolicy{
		jsonString("none"):     AckNonePolicy,
		jsonString("all"):      AckAllPolicy,
		jsonString("explicit"): AckExplicitPolicy,
	}
	policy, ok := byName[string(data)]
	if !ok {
		return fmt.Errorf("nats: can not unmarshal %q", data)
	}
	*p = policy
	return nil
}
// MarshalJSON encodes the acknowledgement policy into its wire
// representation ("none", "all" or "explicit"). An unrecognized value
// yields an error.
func (p AckPolicy) MarshalJSON() ([]byte, error) {
	names := map[AckPolicy]string{
		AckNonePolicy:     "none",
		AckAllPolicy:      "all",
		AckExplicitPolicy: "explicit",
	}
	if name, ok := names[p]; ok {
		return json.Marshal(name)
	}
	return nil, fmt.Errorf("nats: unknown acknowledgement policy %v", p)
}
// String returns a human-readable name for the acknowledgement policy
// ("AckExplicit", "AckAll", "AckNone"), or "Unknown AckPolicy" for an
// unrecognized value. Note these differ from the wire strings.
func (p AckPolicy) String() string {
	// Indexed by the iota values of the AckPolicy constants.
	names := []string{"AckExplicit", "AckAll", "AckNone"}
	if int(p) < 0 || int(p) >= len(names) {
		return "Unknown AckPolicy"
	}
	return names[int(p)]
}
// ReplayPolicy values. On the wire these are encoded as the JSON
// strings "instant" and "original" — see MarshalJSON/UnmarshalJSON.
const (
	// ReplayInstantPolicy will replay messages as fast as possible.
	ReplayInstantPolicy ReplayPolicy = iota

	// ReplayOriginalPolicy will maintain the same timing as the messages were
	// received.
	ReplayOriginalPolicy
)
// UnmarshalJSON decodes the wire representation of a replay policy
// ("instant" or "original"); anything else is rejected with an error.
func (p *ReplayPolicy) UnmarshalJSON(data []byte) error {
	byName := map[string]ReplayPolicy{
		jsonString("instant"):  ReplayInstantPolicy,
		jsonString("original"): ReplayOriginalPolicy,
	}
	policy, ok := byName[string(data)]
	if !ok {
		return fmt.Errorf("nats: can not unmarshal %q", data)
	}
	*p = policy
	return nil
}
// MarshalJSON encodes the replay policy into its wire representation
// ("instant" or "original"). An unrecognized value yields an error.
func (p ReplayPolicy) MarshalJSON() ([]byte, error) {
	names := map[ReplayPolicy]string{
		ReplayInstantPolicy:  "instant",
		ReplayOriginalPolicy: "original",
	}
	if name, ok := names[p]; ok {
		return json.Marshal(name)
	}
	return nil, fmt.Errorf("nats: unknown replay policy %v", p)
}
// String returns the human-readable name of the replay policy (the same
// strings used on the wire), or an empty string for an unrecognized
// value.
func (p ReplayPolicy) String() string {
	// Indexed by the iota values of the ReplayPolicy constants.
	names := []string{"instant", "original"}
	if int(p) < 0 || int(p) >= len(names) {
		return ""
	}
	return names[int(p)]
}

444
vendor/github.com/nats-io/nats.go/jetstream/errors.go generated vendored Normal file
View File

@@ -0,0 +1,444 @@
// Copyright 2022-2025 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"errors"
"fmt"
)
type (
	// JetStreamError is an error result that happens when using JetStream.
	// In case of client-side error, [APIError] returns nil.
	JetStreamError interface {
		APIError() *APIError
		error
	}

	// jsError is the concrete implementation of JetStreamError. It pairs
	// an optional server-side APIError with a client-side message; at
	// least one of the two is set.
	jsError struct {
		apiErr  *APIError
		message string
	}

	// APIError is included in all API responses if there was an error.
	APIError struct {
		// Code is the HTTP-like status code of the response.
		Code int `json:"code"`
		// ErrorCode is the JetStream-specific error code.
		ErrorCode ErrorCode `json:"err_code"`
		// Description is a human-readable description of the error.
		Description string `json:"description,omitempty"`
	}

	// ErrorCode represents error_code returned in response from JetStream API.
	ErrorCode uint16
)
// Error codes returned by the JetStream API in the err_code field of an
// API response. They are matched against via APIError.Is/errors.Is.
const (
	JSErrCodeJetStreamNotEnabledForAccount ErrorCode = 10039
	JSErrCodeJetStreamNotEnabled           ErrorCode = 10076
	JSErrCodeStreamNotFound                ErrorCode = 10059
	JSErrCodeStreamNameInUse               ErrorCode = 10058
	JSErrCodeConsumerCreate                ErrorCode = 10012
	JSErrCodeConsumerNotFound              ErrorCode = 10014
	JSErrCodeConsumerNameExists            ErrorCode = 10013
	JSErrCodeConsumerAlreadyExists         ErrorCode = 10105
	JSErrCodeConsumerExists                ErrorCode = 10148
	JSErrCodeDuplicateFilterSubjects       ErrorCode = 10136
	JSErrCodeOverlappingFilterSubjects     ErrorCode = 10138
	JSErrCodeConsumerEmptyFilter           ErrorCode = 10139
	JSErrCodeConsumerDoesNotExist          ErrorCode = 10149
	JSErrCodeMessageNotFound               ErrorCode = 10037
	JSErrCodeBadRequest                    ErrorCode = 10003
	JSErrCodeStreamWrongLastSequence       ErrorCode = 10071
)
var (
// JetStream API errors
// ErrJetStreamNotEnabled is an error returned when JetStream is not
// enabled.
//
// Note: This error will not be returned in clustered mode, even if each
// server in the cluster does not have JetStream enabled. In clustered mode,
// requests will time out instead.
ErrJetStreamNotEnabled JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabled, Description: "jetstream not enabled", Code: 503}}
// ErrJetStreamNotEnabledForAccount is an error returned when JetStream is
// not enabled for an account.
ErrJetStreamNotEnabledForAccount JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabledForAccount, Description: "jetstream not enabled for account", Code: 503}}
// ErrStreamNotFound is an error returned when stream with given name does
// not exist.
ErrStreamNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNotFound, Description: "stream not found", Code: 404}}
// ErrStreamNameAlreadyInUse is returned when a stream with given name
// already exists and has a different configuration.
ErrStreamNameAlreadyInUse JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNameInUse, Description: "stream name already in use", Code: 400}}
// ErrStreamSubjectTransformNotSupported is returned when the connected
// nats-server version does not support setting the stream subject
// transform. If this error is returned when executing CreateStream(), the
// stream with invalid configuration was already created in the server.
ErrStreamSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"}
// ErrStreamSourceSubjectTransformNotSupported is returned when the
// connected nats-server version does not support setting the stream source
// subject transform. If this error is returned when executing
// CreateStream(), the stream with invalid configuration was already created
// in the server.
ErrStreamSourceSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"}
// ErrStreamSourceNotSupported is returned when the connected nats-server
// version does not support setting the stream sources. If this error is
// returned when executing CreateStream(), the stream with invalid
// configuration was already created in the server.
ErrStreamSourceNotSupported JetStreamError = &jsError{message: "stream sourcing is not supported by nats-server"}
// ErrStreamSourceMultipleFilterSubjectsNotSupported is returned when the
// connected nats-server version does not support setting the stream
// sources. If this error is returned when executing CreateStream(), the
// stream with invalid configuration was already created in the server.
ErrStreamSourceMultipleFilterSubjectsNotSupported JetStreamError = &jsError{message: "stream sourcing with multiple subject filters not supported by nats-server"}
// ErrConsumerNotFound is an error returned when consumer with given name
// does not exist.
ErrConsumerNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerNotFound, Description: "consumer not found", Code: 404}}
// ErrConsumerExists is returned when attempting to create a consumer with
// CreateConsumer but a consumer with given name already exists.
ErrConsumerExists JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerExists, Description: "consumer already exists", Code: 400}}
	// ErrConsumerDoesNotExist is returned when attempting to update a consumer
	// with UpdateConsumer but a consumer with given name does not exist.
ErrConsumerDoesNotExist JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerDoesNotExist, Description: "consumer does not exist", Code: 400}}
// ErrMsgNotFound is returned when message with provided sequence number
// does not exist.
ErrMsgNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeMessageNotFound, Description: "message not found", Code: 404}}
// ErrBadRequest is returned when invalid request is sent to JetStream API.
ErrBadRequest JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeBadRequest, Description: "bad request", Code: 400}}
// ErrConsumerCreate is returned when nats-server reports error when
// creating consumer (e.g. illegal update).
ErrConsumerCreate JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerCreate, Description: "could not create consumer", Code: 500}}
// ErrDuplicateFilterSubjects is returned when both FilterSubject and
// FilterSubjects are specified when creating consumer.
ErrDuplicateFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeDuplicateFilterSubjects, Description: "consumer cannot have both FilterSubject and FilterSubjects specified", Code: 500}}
	// ErrOverlappingFilterSubjects is returned when filter subjects overlap when
// creating consumer.
ErrOverlappingFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeOverlappingFilterSubjects, Description: "consumer subject filters cannot overlap", Code: 500}}
// ErrEmptyFilter is returned when a filter in FilterSubjects is empty.
ErrEmptyFilter JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerEmptyFilter, Description: "consumer filter in FilterSubjects cannot be empty", Code: 500}}
// Client errors
// ErrConsumerMultipleFilterSubjectsNotSupported is returned when the
// connected nats-server version does not support setting multiple filter
// subjects with filter_subjects field. If this error is returned when
// executing AddConsumer(), the consumer with invalid configuration was
// already created in the server.
ErrConsumerMultipleFilterSubjectsNotSupported JetStreamError = &jsError{message: "multiple consumer filter subjects not supported by nats-server"}
	// ErrConsumerNameAlreadyInUse is returned when attempting to create a
	// consumer with a name that is already in use.
ErrConsumerNameAlreadyInUse JetStreamError = &jsError{message: "consumer name already in use"}
// ErrInvalidJSAck is returned when JetStream ack from message publish is
// invalid.
ErrInvalidJSAck JetStreamError = &jsError{message: "invalid jetstream publish response"}
// ErrStreamNameRequired is returned when the provided stream name is empty.
ErrStreamNameRequired JetStreamError = &jsError{message: "stream name is required"}
// ErrMsgAlreadyAckd is returned when attempting to acknowledge message more
// than once.
ErrMsgAlreadyAckd JetStreamError = &jsError{message: "message was already acknowledged"}
// ErrNoStreamResponse is returned when there is no response from stream
// (e.g. no responders error).
ErrNoStreamResponse JetStreamError = &jsError{message: "no response from stream"}
// ErrNotJSMessage is returned when attempting to get metadata from non
// JetStream message.
ErrNotJSMessage JetStreamError = &jsError{message: "not a jetstream message"}
// ErrInvalidStreamName is returned when the provided stream name is invalid
// (contains '.').
ErrInvalidStreamName JetStreamError = &jsError{message: "invalid stream name"}
// ErrInvalidSubject is returned when the provided subject name is invalid.
ErrInvalidSubject JetStreamError = &jsError{message: "invalid subject name"}
// ErrInvalidConsumerName is returned when the provided consumer name is
// invalid (contains '.').
ErrInvalidConsumerName JetStreamError = &jsError{message: "invalid consumer name"}
// ErrNoMessages is returned when no messages are currently available for a
// consumer.
ErrNoMessages JetStreamError = &jsError{message: "no messages"}
// ErrPinIDMismatch is returned when Pin ID sent in the request does not match
// the currently pinned consumer subscriber ID on the server.
ErrPinIDMismatch JetStreamError = &jsError{message: "pin ID mismatch"}
// ErrMaxBytesExceeded is returned when a message would exceed MaxBytes set
// on a pull request.
ErrMaxBytesExceeded JetStreamError = &jsError{message: "message size exceeds max bytes"}
// ErrBatchCompleted is returned when a fetch request sent the whole batch,
// but there are still bytes left. This is applicable only when MaxBytes is
// set on a pull request.
ErrBatchCompleted JetStreamError = &jsError{message: "batch completed"}
// ErrConsumerDeleted is returned when attempting to send pull request to a
// consumer which does not exist.
ErrConsumerDeleted JetStreamError = &jsError{message: "consumer deleted"}
// ErrConsumerLeadershipChanged is returned when pending requests are no
// longer valid after leadership has changed.
ErrConsumerLeadershipChanged JetStreamError = &jsError{message: "leadership change"}
// ErrHandlerRequired is returned when no handler func is provided in
// Stream().
ErrHandlerRequired JetStreamError = &jsError{message: "handler cannot be empty"}
// ErrEndOfData is returned when iterating over paged API from JetStream
// reaches end of data.
ErrEndOfData JetStreamError = &jsError{message: "end of data reached"}
// ErrNoHeartbeat is received when no message is received in IdleHeartbeat
// time (if set).
ErrNoHeartbeat JetStreamError = &jsError{message: "no heartbeat received"}
// ErrConsumerHasActiveSubscription is returned when a consumer is already
// subscribed to a stream.
ErrConsumerHasActiveSubscription JetStreamError = &jsError{message: "consumer has active subscription"}
// ErrMsgNotBound is returned when given message is not bound to any
// subscription.
ErrMsgNotBound JetStreamError = &jsError{message: "message is not bound to subscription/connection"}
// ErrMsgNoReply is returned when attempting to reply to a message without a
// reply subject.
ErrMsgNoReply JetStreamError = &jsError{message: "message does not have a reply"}
// ErrMsgDeleteUnsuccessful is returned when an attempt to delete a message
// is unsuccessful.
ErrMsgDeleteUnsuccessful JetStreamError = &jsError{message: "message deletion unsuccessful"}
// ErrAsyncPublishReplySubjectSet is returned when reply subject is set on
// async message publish.
ErrAsyncPublishReplySubjectSet JetStreamError = &jsError{message: "reply subject should be empty"}
// ErrTooManyStalledMsgs is returned when too many outstanding async
// messages are waiting for ack.
ErrTooManyStalledMsgs JetStreamError = &jsError{message: "stalled with too many outstanding async published messages"}
// ErrInvalidOption is returned when there is a collision between options.
ErrInvalidOption JetStreamError = &jsError{message: "invalid jetstream option"}
// ErrMsgIteratorClosed is returned when attempting to get message from a
// closed iterator.
ErrMsgIteratorClosed JetStreamError = &jsError{message: "messages iterator closed"}
// ErrOrderedConsumerReset is returned when resetting ordered consumer fails
// due to too many attempts.
ErrOrderedConsumerReset JetStreamError = &jsError{message: "recreating ordered consumer"}
// ErrOrderConsumerUsedAsFetch is returned when ordered consumer was already
// used to process messages using Fetch (or FetchBytes).
ErrOrderConsumerUsedAsFetch JetStreamError = &jsError{message: "ordered consumer initialized as fetch"}
// ErrOrderConsumerUsedAsConsume is returned when ordered consumer was
// already used to process messages using Consume or Messages.
ErrOrderConsumerUsedAsConsume JetStreamError = &jsError{message: "ordered consumer initialized as consume"}
// ErrOrderedConsumerConcurrentRequests is returned when attempting to run
// concurrent operations on ordered consumers.
ErrOrderedConsumerConcurrentRequests JetStreamError = &jsError{message: "cannot run concurrent processing using ordered consumer"}
// ErrOrderedConsumerNotCreated is returned when trying to get consumer info
// of an ordered consumer which was not yet created.
ErrOrderedConsumerNotCreated JetStreamError = &jsError{message: "consumer instance not yet created"}
// ErrJetStreamPublisherClosed is returned for each unfinished ack future when JetStream.Cleanup is called.
ErrJetStreamPublisherClosed JetStreamError = &jsError{message: "jetstream context closed"}
// ErrAsyncPublishTimeout is returned when waiting for ack on async publish
ErrAsyncPublishTimeout JetStreamError = &jsError{message: "timeout waiting for ack"}
// KeyValue Errors
// ErrKeyExists is returned when attempting to create a key that already
// exists.
ErrKeyExists JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamWrongLastSequence, Code: 400}, message: "key exists"}
// ErrKeyValueConfigRequired is returned when attempting to create a bucket
// without a config.
ErrKeyValueConfigRequired JetStreamError = &jsError{message: "config required"}
// ErrInvalidBucketName is returned when attempting to create a bucket with
// an invalid name.
ErrInvalidBucketName JetStreamError = &jsError{message: "invalid bucket name"}
// ErrInvalidKey is returned when attempting to create a key with an invalid
// name.
ErrInvalidKey JetStreamError = &jsError{message: "invalid key"}
// ErrBucketExists is returned when attempting to create a bucket that
// already exists and has a different configuration.
ErrBucketExists JetStreamError = &jsError{message: "bucket name already in use"}
// ErrBucketNotFound is returned when attempting to access a bucket that
// does not exist.
ErrBucketNotFound JetStreamError = &jsError{message: "bucket not found"}
// ErrBadBucket is returned when attempting to access a bucket that is not a
// key-value store.
ErrBadBucket JetStreamError = &jsError{message: "bucket not valid key-value store"}
// ErrKeyNotFound is returned when attempting to access a key that does not
// exist.
ErrKeyNotFound JetStreamError = &jsError{message: "key not found"}
// ErrKeyDeleted is returned when attempting to access a key that was
// deleted.
ErrKeyDeleted JetStreamError = &jsError{message: "key was deleted"}
	// ErrHistoryTooLarge is returned when provided history limit is larger
	// than 64.
ErrHistoryTooLarge JetStreamError = &jsError{message: "history limited to a max of 64"}
// ErrNoKeysFound is returned when no keys are found.
ErrNoKeysFound JetStreamError = &jsError{message: "no keys found"}
// ErrTTLOnDeleteNotSupported is returned when attempting to set a TTL
// on a delete operation.
ErrTTLOnDeleteNotSupported JetStreamError = &jsError{message: "TTL is not supported on delete"}
// ErrLimitMarkerTTLNotSupported is returned when the connected jetstream API
// does not support setting the LimitMarkerTTL.
ErrLimitMarkerTTLNotSupported JetStreamError = &jsError{message: "limit marker TTLs not supported by server"}
// ErrObjectConfigRequired is returned when attempting to create an object
// without a config.
ErrObjectConfigRequired JetStreamError = &jsError{message: "object-store config required"}
// ErrBadObjectMeta is returned when the meta information of an object is
// invalid.
ErrBadObjectMeta JetStreamError = &jsError{message: "object-store meta information invalid"}
// ErrObjectNotFound is returned when an object is not found.
ErrObjectNotFound JetStreamError = &jsError{message: "object not found"}
// ErrInvalidStoreName is returned when the name of an object-store is
// invalid.
ErrInvalidStoreName JetStreamError = &jsError{message: "invalid object-store name"}
// ErrDigestMismatch is returned when the digests of an object do not match.
ErrDigestMismatch JetStreamError = &jsError{message: "received a corrupt object, digests do not match"}
// ErrInvalidDigestFormat is returned when the digest hash of an object has
// an invalid format.
ErrInvalidDigestFormat JetStreamError = &jsError{message: "object digest hash has invalid format"}
// ErrNoObjectsFound is returned when no objects are found.
ErrNoObjectsFound JetStreamError = &jsError{message: "no objects found"}
// ErrObjectAlreadyExists is returned when an object with the same name
// already exists.
ErrObjectAlreadyExists JetStreamError = &jsError{message: "an object already exists with that name"}
// ErrNameRequired is returned when a name is required.
ErrNameRequired JetStreamError = &jsError{message: "name is required"}
// ErrLinkNotAllowed is returned when a link cannot be set when putting the
// object in a bucket.
ErrLinkNotAllowed JetStreamError = &jsError{message: "link cannot be set when putting the object in bucket"}
// ErrObjectRequired is returned when an object is required.
ErrObjectRequired = &jsError{message: "object required"}
// ErrNoLinkToDeleted is returned when it is not allowed to link to a
// deleted object.
ErrNoLinkToDeleted JetStreamError = &jsError{message: "not allowed to link to a deleted object"}
// ErrNoLinkToLink is returned when it is not allowed to link to another
// link.
ErrNoLinkToLink JetStreamError = &jsError{message: "not allowed to link to another link"}
// ErrCantGetBucket is returned when an invalid Get is attempted on an
// object that is a link to a bucket.
ErrCantGetBucket JetStreamError = &jsError{message: "invalid Get, object is a link to a bucket"}
// ErrBucketRequired is returned when a bucket is required.
ErrBucketRequired JetStreamError = &jsError{message: "bucket required"}
// ErrBucketMalformed is returned when a bucket is malformed.
ErrBucketMalformed JetStreamError = &jsError{message: "bucket malformed"}
// ErrUpdateMetaDeleted is returned when the meta information of a deleted
// object cannot be updated.
ErrUpdateMetaDeleted JetStreamError = &jsError{message: "cannot update meta for a deleted object"}
)
// Error prints the JetStream API error code and description.
//
// The format mirrors the server response: HTTP-like status code,
// JetStream error code, then the human-readable description.
func (e *APIError) Error() string {
	return fmt.Sprintf("nats: API error: code=%d err_code=%d description=%s", e.Code, e.ErrorCode, e.Description)
}
// APIError implements the JetStreamError interface by returning the
// receiver itself, so a bare *APIError can be used wherever a
// JetStreamError is expected.
func (e *APIError) APIError() *APIError {
	return e
}
// Is matches against an APIError.
//
// Two API errors are considered equal when their JetStream ErrorCode
// values match; Code and Description are ignored. This is used by
// errors.Is when comparing wrapped JetStream errors.
func (e *APIError) Is(err error) bool {
	if e == nil {
		return false
	}
	// Extract internal APIError to match against.
	var target *APIError
	if !errors.As(err, &target) {
		return false
	}
	return e.ErrorCode == target.ErrorCode
}
func (err *jsError) APIError() *APIError {
return err.apiErr
}
// Error prefers the embedded APIError's description when one is present and
// falls back to the static message otherwise.
func (err *jsError) Error() string {
	if err.apiErr != nil && err.apiErr.Description != "" {
		return err.apiErr.Error()
	}
	return "nats: " + err.message
}
// Unwrap exposes the embedded APIError (if any) so errors.Is/errors.As can
// match against it.
func (err *jsError) Unwrap() error {
	if err.apiErr != nil {
		return err.apiErr
	}
	return nil
}

1156
vendor/github.com/nats-io/nats.go/jetstream/jetstream.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,630 @@
// Copyright 2022-2025 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"fmt"
"time"
)
// pullOptFunc adapts a plain function to both the PullConsumeOpt and
// PullMessagesOpt interfaces, so one closure can configure
// Consumer.Consume and Consumer.Messages alike.
type pullOptFunc func(*consumeOpts) error

// configureConsume applies the wrapped function (PullConsumeOpt).
func (fn pullOptFunc) configureConsume(opts *consumeOpts) error {
	return fn(opts)
}

// configureMessages applies the wrapped function (PullMessagesOpt).
func (fn pullOptFunc) configureMessages(opts *consumeOpts) error {
	return fn(opts)
}
// WithClientTrace enables request/response API calls tracing.
func WithClientTrace(ct *ClientTrace) JetStreamOpt {
return func(opts *JetStreamOptions) error {
opts.clientTrace = ct
return nil
}
}
// WithPublishAsyncErrHandler sets error handler for async message publish.
func WithPublishAsyncErrHandler(cb MsgErrHandler) JetStreamOpt {
return func(opts *JetStreamOptions) error {
opts.publisherOpts.aecb = cb
return nil
}
}
// WithPublishAsyncMaxPending sets the maximum outstanding async publishes that
// can be inflight at one time.
func WithPublishAsyncMaxPending(max int) JetStreamOpt {
return func(opts *JetStreamOptions) error {
if max < 1 {
return fmt.Errorf("%w: max ack pending should be >= 1", ErrInvalidOption)
}
opts.publisherOpts.maxpa = max
return nil
}
}
// WithPublishAsyncTimeout sets the timeout for async message publish.
// If not provided, timeout is disabled.
func WithPublishAsyncTimeout(dur time.Duration) JetStreamOpt {
return func(opts *JetStreamOptions) error {
opts.publisherOpts.ackTimeout = dur
return nil
}
}
// WithDefaultTimeout sets the default timeout for JetStream API requests.
// It is used when context used for the request does not have a deadline set.
// If not provided, a default of 5 seconds will be used.
func WithDefaultTimeout(timeout time.Duration) JetStreamOpt {
return func(opts *JetStreamOptions) error {
if timeout <= 0 {
return fmt.Errorf("%w: timeout value must be greater than 0", ErrInvalidOption)
}
opts.DefaultTimeout = timeout
return nil
}
}
// WithPurgeSubject sets a specific subject for which messages on a stream will
// be purged
func WithPurgeSubject(subject string) StreamPurgeOpt {
return func(req *StreamPurgeRequest) error {
req.Subject = subject
return nil
}
}
// WithPurgeSequence is used to set a specific sequence number up to which (but
// not including) messages will be purged from a stream Can be combined with
// [WithPurgeSubject] option, but not with [WithPurgeKeep]
func WithPurgeSequence(sequence uint64) StreamPurgeOpt {
return func(req *StreamPurgeRequest) error {
if req.Keep != 0 {
return fmt.Errorf("%w: both 'keep' and 'sequence' cannot be provided in purge request", ErrInvalidOption)
}
req.Sequence = sequence
return nil
}
}
// WithPurgeKeep sets the number of messages to be kept in the stream after
// purge. Can be combined with [WithPurgeSubject] option, but not with
// [WithPurgeSequence]
func WithPurgeKeep(keep uint64) StreamPurgeOpt {
return func(req *StreamPurgeRequest) error {
if req.Sequence != 0 {
return fmt.Errorf("%w: both 'keep' and 'sequence' cannot be provided in purge request", ErrInvalidOption)
}
req.Keep = keep
return nil
}
}
// WithGetMsgSubject sets the stream subject from which the message should be
// retrieved. Server will return a first message with a seq >= to the input seq
// that has the specified subject.
func WithGetMsgSubject(subject string) GetMsgOpt {
return func(req *apiMsgGetRequest) error {
req.NextFor = subject
return nil
}
}
// PullMaxMessages limits the number of messages to be buffered in the client.
// If not provided, a default of 500 messages will be used.
// This option is exclusive with PullMaxBytes.
//
// PullMaxMessages implements both PullConsumeOpt and PullMessagesOpt, allowing
// it to configure Consumer.Consume and Consumer.Messages.
type PullMaxMessages int

// configureConsume validates the limit (must be >= 1) and stores it on the
// consume options.
func (max PullMaxMessages) configureConsume(opts *consumeOpts) error {
	if max <= 0 {
		return fmt.Errorf("%w: maxMessages size must be at least 1", ErrInvalidOption)
	}
	opts.MaxMessages = int(max)
	return nil
}

// configureMessages applies the same validation and limit as configureConsume.
func (max PullMaxMessages) configureMessages(opts *consumeOpts) error {
	return max.configureConsume(opts)
}
type pullMaxMessagesWithBytesLimit struct {
maxMessages int
maxBytes int
}
// PullMaxMessagesWithBytesLimit limits the number of messages to be buffered
// in the client. Additionally, it sets the maximum size a single fetch request
// can have. Note that this will not limit the total size of messages buffered
// in the client, but rather can serve as a way to limit what nats server will
// have to internally buffer for a single fetch request.
//
// The byte limit should never be set to a value lower than the maximum message
// size that can be expected from the server. If the byte limit is lower than
// the maximum message size, the consumer will stall and not be able to consume
// messages.
//
// This is an advanced option and should be used with caution. Most users should
// use [PullMaxMessages] or [PullMaxBytes] instead.
//
// PullMaxMessagesWithBytesLimit implements both PullConsumeOpt and
// PullMessagesOpt, allowing it to configure Consumer.Consume and Consumer.Messages.
func PullMaxMessagesWithBytesLimit(maxMessages, byteLimit int) pullMaxMessagesWithBytesLimit {
return pullMaxMessagesWithBytesLimit{maxMessages, byteLimit}
}
// configureConsume validates both limits and stores them on the consume
// options. LimitSize marks that MaxBytes bounds a single fetch request rather
// than the client buffer. Returns ErrInvalidOption if either limit is < 1 or
// if MaxMessages was already configured by another option.
func (m pullMaxMessagesWithBytesLimit) configureConsume(opts *consumeOpts) error {
	if m.maxMessages <= 0 {
		return fmt.Errorf("%w: maxMessages size must be at least 1", ErrInvalidOption)
	}
	if m.maxBytes <= 0 {
		return fmt.Errorf("%w: maxBytes size must be at least 1", ErrInvalidOption)
	}
	if opts.MaxMessages > 0 {
		return fmt.Errorf("%w: maxMessages already set", ErrInvalidOption)
	}
	opts.MaxMessages = m.maxMessages
	opts.MaxBytes = m.maxBytes
	opts.LimitSize = true
	return nil
}

// configureMessages applies the same validation and limits as configureConsume.
func (m pullMaxMessagesWithBytesLimit) configureMessages(opts *consumeOpts) error {
	return m.configureConsume(opts)
}
// PullExpiry sets timeout on a single pull request, waiting until at least one
// message is available.
// If not provided, a default of 30 seconds will be used.
//
// PullExpiry implements both PullConsumeOpt and PullMessagesOpt, allowing
// it to configure Consumer.Consume and Consumer.Messages.
type PullExpiry time.Duration

// configureConsume validates the expiry (at least 1s) and stores it on the
// consume options.
func (exp PullExpiry) configureConsume(opts *consumeOpts) error {
	expiry := time.Duration(exp)
	if expiry < time.Second {
		return fmt.Errorf("%w: expires value must be at least 1s", ErrInvalidOption)
	}
	opts.Expires = expiry
	return nil
}

// configureMessages applies the same validation and expiry as configureConsume.
func (exp PullExpiry) configureMessages(opts *consumeOpts) error {
	return exp.configureConsume(opts)
}
// PullMaxBytes limits the number of bytes to be buffered in the client.
// If not provided, the limit is not set (max messages will be used instead).
// This option is exclusive with PullMaxMessages.
//
// The value should be set to a high enough value to accommodate the largest
// message expected from the server. Note that it may not be sufficient to set
// this value to the maximum message size, as this setting controls the client
// buffer size, not the max bytes requested from the server within a single pull
// request. If the value is set too low, the consumer will stall and not be able
// to consume messages.
//
// PullMaxBytes implements both PullConsumeOpt and PullMessagesOpt, allowing
// it to configure Consumer.Consume and Consumer.Messages.
type PullMaxBytes int

// configureConsume validates the byte limit (must be > 0) and stores it on the
// consume options. The error message typo ("then") is fixed to "than".
func (max PullMaxBytes) configureConsume(opts *consumeOpts) error {
	if max <= 0 {
		return fmt.Errorf("%w: max bytes must be greater than 0", ErrInvalidOption)
	}
	opts.MaxBytes = int(max)
	return nil
}

// configureMessages applies the same validation and limit as configureConsume.
func (max PullMaxBytes) configureMessages(opts *consumeOpts) error {
	return max.configureConsume(opts)
}
// PullThresholdMessages sets the message count on which consuming will trigger
// new pull request to the server. Defaults to 50% of MaxMessages.
//
// PullThresholdMessages implements both PullConsumeOpt and PullMessagesOpt,
// allowing it to configure Consumer.Consume and Consumer.Messages.
type PullThresholdMessages int

// configureConsume stores the threshold on the consume options (no validation
// is performed; defaults are applied downstream).
func (t PullThresholdMessages) configureConsume(opts *consumeOpts) error {
	opts.ThresholdMessages = int(t)
	return nil
}

// configureMessages applies the same value as configureConsume.
func (t PullThresholdMessages) configureMessages(opts *consumeOpts) error {
	return t.configureConsume(opts)
}
// PullThresholdBytes sets the byte count on which consuming will trigger
// new pull request to the server. Defaults to 50% of MaxBytes (if set).
//
// PullThresholdBytes implements both PullConsumeOpt and PullMessagesOpt,
// allowing it to configure Consumer.Consume and Consumer.Messages.
type PullThresholdBytes int

// configureConsume stores the threshold on the consume options (no validation
// is performed; defaults are applied downstream).
func (t PullThresholdBytes) configureConsume(opts *consumeOpts) error {
	opts.ThresholdBytes = int(t)
	return nil
}

// configureMessages applies the same value as configureConsume.
func (t PullThresholdBytes) configureMessages(opts *consumeOpts) error {
	return t.configureConsume(opts)
}
// PullMinPending sets the minimum number of messages that should be pending for
// a consumer with PriorityPolicyOverflow to be considered for delivery.
// If provided, PullPriorityGroup must be set as well and the consumer has to have
// PriorityPolicy set to PriorityPolicyOverflow.
//
// PullMinPending implements both PullConsumeOpt and PullMessagesOpt, allowing
// it to configure Consumer.Consume and Consumer.Messages.
type PullMinPending int

// configureConsume validates the threshold (must be >= 1) and stores it on the
// consume options.
func (min PullMinPending) configureConsume(opts *consumeOpts) error {
	if min < 1 {
		return fmt.Errorf("%w: min pending should be more than 0", ErrInvalidOption)
	}
	opts.MinPending = int64(min)
	return nil
}

// configureMessages applies the same validation and value as configureConsume.
func (min PullMinPending) configureMessages(opts *consumeOpts) error {
	return min.configureConsume(opts)
}
// PullMinAckPending sets the minimum number of pending acks that should be
// present for a consumer with PriorityPolicyOverflow to be considered for
// delivery. If provided, PullPriorityGroup must be set as well and the consumer
// has to have PriorityPolicy set to PriorityPolicyOverflow.
//
// PullMinAckPending implements both PullConsumeOpt and PullMessagesOpt, allowing
// it to configure Consumer.Consume and Consumer.Messages.
type PullMinAckPending int

// configureConsume validates the threshold (must be >= 1) and stores it on the
// consume options. The error message says "min ack pending" (not "min
// pending") for consistency with FetchMinAckPending.
func (min PullMinAckPending) configureConsume(opts *consumeOpts) error {
	if min < 1 {
		return fmt.Errorf("%w: min ack pending should be more than 0", ErrInvalidOption)
	}
	opts.MinAckPending = int64(min)
	return nil
}

// configureMessages applies the same validation and value as configureConsume.
func (min PullMinAckPending) configureMessages(opts *consumeOpts) error {
	return min.configureConsume(opts)
}
// PullPriorityGroup sets the priority group for a consumer.
// It has to match one of the priority groups set on the consumer.
//
// PullPriorityGroup implements both PullConsumeOpt and PullMessagesOpt, allowing
// it to configure Consumer.Consume and Consumer.Messages.
type PullPriorityGroup string

// configureConsume stores the group name on the consume options (the server
// validates it against the consumer's configured groups).
func (g PullPriorityGroup) configureConsume(opts *consumeOpts) error {
	opts.Group = string(g)
	return nil
}

// configureMessages applies the same value as configureConsume.
func (g PullPriorityGroup) configureMessages(opts *consumeOpts) error {
	return g.configureConsume(opts)
}
// PullHeartbeat sets the idle heartbeat duration for a pull subscription
// If a client does not receive a heartbeat message from a stream for more
// than the idle heartbeat setting, the subscription will be removed
// and error will be passed to the message handler.
// If not provided, a default PullExpiry / 2 will be used (capped at 30 seconds)
//
// PullHeartbeat implements both PullConsumeOpt and PullMessagesOpt, allowing
// it to configure Consumer.Consume and Consumer.Messages.
type PullHeartbeat time.Duration

// configureConsume validates the heartbeat (500ms-30s) and stores it on the
// consume options.
func (hb PullHeartbeat) configureConsume(opts *consumeOpts) error {
	hbTime := time.Duration(hb)
	if hbTime < 500*time.Millisecond || hbTime > 30*time.Second {
		return fmt.Errorf("%w: idle_heartbeat value must be within 500ms-30s range", ErrInvalidOption)
	}
	opts.Heartbeat = hbTime
	return nil
}

// configureMessages applies the same validation and value as configureConsume.
func (hb PullHeartbeat) configureMessages(opts *consumeOpts) error {
	return hb.configureConsume(opts)
}
// StopAfter sets the number of messages after which the consumer is
// automatically stopped and no more messages are pulled from the server.
//
// StopAfter implements both PullConsumeOpt and PullMessagesOpt, allowing
// it to configure Consumer.Consume and Consumer.Messages.
type StopAfter int

// configureConsume validates the count (must be >= 1) and stores it on the
// consume options.
func (nMsgs StopAfter) configureConsume(opts *consumeOpts) error {
	if nMsgs <= 0 {
		return fmt.Errorf("%w: auto stop after value cannot be less than 1", ErrInvalidOption)
	}
	opts.StopAfter = int(nMsgs)
	return nil
}

// configureMessages applies the same validation and value as configureConsume.
func (nMsgs StopAfter) configureMessages(opts *consumeOpts) error {
	return nMsgs.configureConsume(opts)
}
// ConsumeErrHandler sets custom error handler invoked when an error was
// encountered while consuming messages It will be invoked for both terminal
// (Consumer Deleted, invalid request body) and non-terminal (e.g. missing
// heartbeats) errors.
func ConsumeErrHandler(cb ConsumeErrHandlerFunc) PullConsumeOpt {
return pullOptFunc(func(cfg *consumeOpts) error {
cfg.ErrHandler = cb
return nil
})
}
// WithMessagesErrOnMissingHeartbeat sets whether a missing heartbeat error
// should be reported when calling [MessagesContext.Next] (Default: true).
func WithMessagesErrOnMissingHeartbeat(hbErr bool) PullMessagesOpt {
return pullOptFunc(func(cfg *consumeOpts) error {
cfg.ReportMissingHeartbeats = hbErr
return nil
})
}
// FetchMinPending sets the minimum number of messages that should be pending for
// a consumer with PriorityPolicyOverflow to be considered for delivery.
// If provided, FetchPriorityGroup must be set as well and the consumer has to have
// PriorityPolicy set to PriorityPolicyOverflow.
func FetchMinPending(min int64) FetchOpt {
return func(req *pullRequest) error {
if min < 1 {
return fmt.Errorf("%w: min pending should be more than 0", ErrInvalidOption)
}
req.MinPending = min
return nil
}
}
// FetchMinAckPending sets the minimum number of pending acks that should be
// present for a consumer with PriorityPolicyOverflow to be considered for
// delivery. If provided, FetchPriorityGroup must be set as well and the consumer
// has to have PriorityPolicy set to PriorityPolicyOverflow.
func FetchMinAckPending(min int64) FetchOpt {
return func(req *pullRequest) error {
if min < 1 {
return fmt.Errorf("%w: min ack pending should be more than 0", ErrInvalidOption)
}
req.MinAckPending = min
return nil
}
}
// FetchPriorityGroup sets the priority group for a consumer.
// It has to match one of the priority groups set on the consumer.
func FetchPriorityGroup(group string) FetchOpt {
return func(req *pullRequest) error {
req.Group = group
return nil
}
}
// FetchMaxWait sets custom timeout for fetching predefined batch of messages.
//
// If not provided, a default of 30 seconds will be used.
func FetchMaxWait(timeout time.Duration) FetchOpt {
return func(req *pullRequest) error {
if timeout <= 0 {
return fmt.Errorf("%w: timeout value must be greater than 0", ErrInvalidOption)
}
req.Expires = timeout
return nil
}
}
// FetchHeartbeat sets custom heartbeat for individual fetch request. If a
// client does not receive a heartbeat message from a stream for more than 2
// times the idle heartbeat setting, Fetch will return [ErrNoHeartbeat].
//
// Heartbeat value has to be lower than FetchMaxWait / 2.
//
// If not provided, heartbeat is set to 5s for requests with FetchMaxWait > 10s
// and disabled otherwise.
func FetchHeartbeat(hb time.Duration) FetchOpt {
	return func(req *pullRequest) error {
		if hb <= 0 {
			// The message names the heartbeat; the original text said
			// "timeout", copy-pasted from FetchMaxWait.
			return fmt.Errorf("%w: heartbeat value must be greater than 0", ErrInvalidOption)
		}
		req.Heartbeat = hb
		return nil
	}
}
// WithDeletedDetails can be used to display the information about messages
// deleted from a stream on a stream info request
func WithDeletedDetails(deletedDetails bool) StreamInfoOpt {
return func(req *streamInfoRequest) error {
req.DeletedDetails = deletedDetails
return nil
}
}
// WithSubjectFilter can be used to display the information about messages
// stored on given subjects.
// NOTE: if the subject filter matches over 100k
// subjects, this will result in multiple requests to the server to retrieve all
// the information, and all of the returned subjects will be kept in memory.
func WithSubjectFilter(subject string) StreamInfoOpt {
return func(req *streamInfoRequest) error {
req.SubjectFilter = subject
return nil
}
}
// WithStreamListSubject can be used to filter results of ListStreams and
// StreamNames requests to only streams that have given subject in their
// configuration.
func WithStreamListSubject(subject string) StreamListOpt {
return func(req *streamsRequest) error {
req.Subject = subject
return nil
}
}
// WithMsgID sets the message ID used for deduplication.
func WithMsgID(id string) PublishOpt {
return func(opts *pubOpts) error {
opts.id = id
return nil
}
}
// WithMsgTTL sets per msg TTL.
// Requires [StreamConfig.AllowMsgTTL] to be enabled.
func WithMsgTTL(dur time.Duration) PublishOpt {
return func(opts *pubOpts) error {
opts.ttl = dur
return nil
}
}
// WithExpectStream sets the expected stream the message should be published to.
// If the message is published to a different stream server will reject the
// message and publish will fail.
func WithExpectStream(stream string) PublishOpt {
return func(opts *pubOpts) error {
opts.stream = stream
return nil
}
}
// WithExpectLastSequence sets the expected sequence number the last message
// on a stream should have. If the last message has a different sequence number
// server will reject the message and publish will fail.
func WithExpectLastSequence(seq uint64) PublishOpt {
return func(opts *pubOpts) error {
opts.lastSeq = &seq
return nil
}
}
// WithExpectLastSequencePerSubject sets the expected sequence number the last
// message on a subject the message is published to. If the last message on a
// subject has a different sequence number server will reject the message and
// publish will fail.
func WithExpectLastSequencePerSubject(seq uint64) PublishOpt {
return func(opts *pubOpts) error {
opts.lastSubjectSeq = &seq
return nil
}
}
// WithExpectLastMsgID sets the expected message ID the last message on a stream
// should have. If the last message has a different message ID server will
// reject the message and publish will fail.
func WithExpectLastMsgID(id string) PublishOpt {
return func(opts *pubOpts) error {
opts.lastMsgID = id
return nil
}
}
// WithRetryWait sets the retry wait time when ErrNoResponders is encountered.
// Defaults to 250ms.
func WithRetryWait(dur time.Duration) PublishOpt {
return func(opts *pubOpts) error {
if dur <= 0 {
return fmt.Errorf("%w: retry wait should be more than 0", ErrInvalidOption)
}
opts.retryWait = dur
return nil
}
}
// WithRetryAttempts sets the retry number of attempts when ErrNoResponders is
// encountered. Defaults to 2
func WithRetryAttempts(num int) PublishOpt {
return func(opts *pubOpts) error {
if num < 0 {
return fmt.Errorf("%w: retry attempts cannot be negative", ErrInvalidOption)
}
opts.retryAttempts = num
return nil
}
}
// WithStallWait sets the max wait when the producer stalls while producing
// messages. If a publish call is blocked for this long, ErrTooManyStalledMsgs
// is returned.
func WithStallWait(ttl time.Duration) PublishOpt {
	return func(opts *pubOpts) error {
		if ttl <= 0 {
			return fmt.Errorf("%w: stall wait should be more than 0", ErrInvalidOption)
		}
		opts.stallWait = ttl
		return nil
	}
}

1523
vendor/github.com/nats-io/nats.go/jetstream/kv.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,131 @@
// Copyright 2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"fmt"
"time"
)
// watchOptFn adapts a plain function to the WatchOpt interface.
type watchOptFn func(opts *watchOpts) error

// configureWatcher applies the wrapped function to the watcher options.
func (opt watchOptFn) configureWatcher(opts *watchOpts) error {
	return opt(opts)
}
// IncludeHistory instructs the key watcher to include historical values as
// well (up to KeyValueMaxHistory).
func IncludeHistory() WatchOpt {
return watchOptFn(func(opts *watchOpts) error {
if opts.updatesOnly {
return fmt.Errorf("%w: include history can not be used with updates only", ErrInvalidOption)
}
opts.includeHistory = true
return nil
})
}
// UpdatesOnly instructs the key watcher to only include updates on values
// (without latest values when started).
func UpdatesOnly() WatchOpt {
return watchOptFn(func(opts *watchOpts) error {
if opts.includeHistory {
return fmt.Errorf("%w: updates only can not be used with include history", ErrInvalidOption)
}
opts.updatesOnly = true
return nil
})
}
// IgnoreDeletes will have the key watcher not pass any deleted keys.
func IgnoreDeletes() WatchOpt {
return watchOptFn(func(opts *watchOpts) error {
opts.ignoreDeletes = true
return nil
})
}
// MetaOnly instructs the key watcher to retrieve only the entry meta data, not
// the entry value.
func MetaOnly() WatchOpt {
return watchOptFn(func(opts *watchOpts) error {
opts.metaOnly = true
return nil
})
}
// ResumeFromRevision instructs the key watcher to resume from a specific
// revision number.
func ResumeFromRevision(revision uint64) WatchOpt {
return watchOptFn(func(opts *watchOpts) error {
opts.resumeFromRevision = revision
return nil
})
}
// DeleteMarkersOlderThan indicates that delete or purge markers older than that
// will be deleted as part of [KeyValue.PurgeDeletes] operation, otherwise, only the data
// will be removed but markers that are recent will be kept.
// Note that if no option is specified, the default is 30 minutes. You can set
// this option to a negative value to instruct to always remove the markers,
// regardless of their age.
type DeleteMarkersOlderThan time.Duration
func (ttl DeleteMarkersOlderThan) configurePurge(opts *purgeOpts) error {
opts.dmthr = time.Duration(ttl)
return nil
}
type deleteOptFn func(opts *deleteOpts) error
func (opt deleteOptFn) configureDelete(opts *deleteOpts) error {
return opt(opts)
}
// LastRevision deletes if the latest revision matches the provided one. If the
// provided revision is not the latest, the delete will return an error.
func LastRevision(revision uint64) KVDeleteOpt {
return deleteOptFn(func(opts *deleteOpts) error {
opts.revision = revision
return nil
})
}
// PurgeTTL sets the TTL for the purge operation.
// After the TTL expires, the delete markers will be removed.
// This requires LimitMarkerTTL to be enabled on the bucket.
// Note that this is not the same as the TTL for the key itself, which is set
// using the KeyTTL option when creating the key.
func PurgeTTL(ttl time.Duration) KVDeleteOpt {
return deleteOptFn(func(opts *deleteOpts) error {
opts.ttl = ttl
return nil
})
}
type createOptFn func(opts *createOpts) error
func (opt createOptFn) configureCreate(opts *createOpts) error {
return opt(opts)
}
// KeyTTL sets the TTL for the key. This is the time after which the key will be
// automatically deleted. The TTL is set when the key is created and can not be
// changed later. This requires LimitMarkerTTL to be enabled on the bucket.
func KeyTTL(ttl time.Duration) KVCreateOpt {
return createOptFn(func(opts *createOpts) error {
opts.ttl = ttl
return nil
})
}

471
vendor/github.com/nats-io/nats.go/jetstream/message.go generated vendored Normal file
View File

@@ -0,0 +1,471 @@
// Copyright 2022-2025 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"bytes"
"context"
"errors"
"fmt"
"strconv"
"strings"
"sync"
"time"
"github.com/nats-io/nats.go"
"github.com/nats-io/nats.go/internal/parser"
)
type (
// Msg contains methods to operate on a JetStream message. Metadata, Data,
// Headers, Subject and Reply can be used to retrieve the specific parts of
// the underlying message. Ack, DoubleAck, Nak, NakWithDelay, InProgress and
// Term are various flavors of ack requests.
Msg interface {
// Metadata returns [MsgMetadata] for a JetStream message.
Metadata() (*MsgMetadata, error)
// Data returns the message body.
Data() []byte
// Headers returns a map of headers for a message.
Headers() nats.Header
// Subject returns a subject on which a message was published/received.
Subject() string
// Reply returns a reply subject for a message.
Reply() string
// Ack acknowledges a message. This tells the server that the message was
// successfully processed and it can move on to the next message.
Ack() error
// DoubleAck acknowledges a message and waits for ack reply from the server.
// While it impacts performance, it is useful for scenarios where
// message loss is not acceptable.
DoubleAck(context.Context) error
// Nak negatively acknowledges a message. This tells the server to
// redeliver the message.
//
// Nak does not adhere to AckWait or Backoff configured on the consumer
// and triggers instant redelivery. For a delayed redelivery, use
// NakWithDelay.
Nak() error
// NakWithDelay negatively acknowledges a message. This tells the server
// to redeliver the message after the given delay.
NakWithDelay(delay time.Duration) error
// InProgress tells the server that this message is being worked on. It
// resets the redelivery timer on the server.
InProgress() error
// Term tells the server to not redeliver this message, regardless of
// the value of MaxDeliver.
Term() error
// TermWithReason tells the server to not redeliver this message, regardless of
// the value of MaxDeliver. The provided reason will be included in JetStream
// advisory event sent by the server.
//
// Note: This will only work with JetStream servers >= 2.10.4.
// For older servers, TermWithReason will be ignored by the server and the message
// will not be terminated.
TermWithReason(reason string) error
}
// MsgMetadata is the JetStream metadata associated with received messages.
MsgMetadata struct {
// Sequence is the sequence information for the message.
Sequence SequencePair
// NumDelivered is the number of times this message was delivered to the
// consumer.
NumDelivered uint64
// NumPending is the number of messages that match the consumer's
// filter, but have not been delivered yet.
NumPending uint64
// Timestamp is the time the message was originally stored on a stream.
Timestamp time.Time
// Stream is the stream name this message is stored on.
Stream string
// Consumer is the consumer name this message was delivered to.
Consumer string
// Domain is the domain this message was received on.
Domain string
}
// SequencePair includes the consumer and stream sequence numbers for a
// message.
SequencePair struct {
// Consumer is the consumer sequence number for message deliveries. This
// is the total number of messages the consumer has seen (including
// redeliveries).
Consumer uint64 `json:"consumer_seq"`
// Stream is the stream sequence number for a message.
Stream uint64 `json:"stream_seq"`
}
jetStreamMsg struct {
msg *nats.Msg
ackd bool
js *jetStream
sync.Mutex
}
ackOpts struct {
nakDelay time.Duration
termReason string
}
ackType []byte
)
// Status codes returned by the server in the Status header of pull responses.
// Comments reflect the identifier names; exact server semantics should be
// confirmed against the JetStream API documentation.
const (
	controlMsg       = "100" // control message
	badRequest       = "400" // malformed / invalid request
	noMessages       = "404" // no messages available
	reqTimeout       = "408" // request timed out / expired
	maxBytesExceeded = "409" // max bytes exceeded
	noResponders     = "503" // no responders available
	pinIdMismatch    = "423" // priority-group pin ID mismatch
)
// Headers used when publishing messages.
const (
	// MsgIdHeader is used to specify a user-defined message ID. It can be used
	// e.g. for deduplication in conjunction with the Duplicates duration on
	// ConsumerConfig or to provide optimistic concurrency safety together with
	// [ExpectedLastMsgIDHeader].
	//
	// This can be set when publishing messages using [WithMsgID] option.
	MsgIDHeader = "Nats-Msg-Id"
	// ExpectedStreamHeader contains stream name and is used to assure that the
	// published message is received by expected stream. Server will reject the
	// message if it is not the case.
	//
	// This can be set when publishing messages using [WithExpectStream] option.
	ExpectedStreamHeader = "Nats-Expected-Stream"
	// ExpectedLastSeqHeader contains the expected last sequence number of the
	// stream and can be used to apply optimistic concurrency control at stream
	// level. Server will reject the message if it is not the case.
	//
	// This can be set when publishing messages using [WithExpectLastSequence]
	// option.
	ExpectedLastSeqHeader = "Nats-Expected-Last-Sequence"
	// ExpectedLastSubjSeqHeader contains the expected last sequence number on
	// the subject and can be used to apply optimistic concurrency control at
	// subject level. Server will reject the message if it is not the case.
	//
	// This can be set when publishing messages using
	// [WithExpectLastSequencePerSubject] option.
	ExpectedLastSubjSeqHeader = "Nats-Expected-Last-Subject-Sequence"
	// ExpectedLastMsgIDHeader contains the expected last message ID on the
	// subject and can be used to apply optimistic concurrency control at
	// stream level. Server will reject the message if it is not the case.
	//
	// This can be set when publishing messages using [WithExpectLastMsgID]
	// option.
	ExpectedLastMsgIDHeader = "Nats-Expected-Last-Msg-Id"
	// MsgTTLHeader is used to specify the TTL for a specific message. This will
	// override the default TTL for the stream.
	MsgTTLHeader = "Nats-TTL"
	// MsgRollup is used to apply a purge of all prior messages in the stream
	// ("all") or at the subject ("sub") before this message.
	MsgRollup = "Nats-Rollup"
	// MarkerReasonHeader is used to specify a reason for message deletion.
	MarkerReasonHeader = "Nats-Marker-Reason"
)
// Headers for republished messages and direct gets. Those headers are set by
// the server and should not be set by the client.
const (
	// StreamHeader contains the stream name the message was republished from or
	// the stream name the message was retrieved from using direct get.
	StreamHeader = "Nats-Stream"
	// SequenceHeader contains the original sequence number of the message.
	SequenceHeader = "Nats-Sequence"
	// TimeStampHeader contains the original timestamp of the message.
	TimeStampHeader = "Nats-Time-Stamp"
	// TimeStampHeaer is a misspelled variant of [TimeStampHeader], kept so
	// existing callers referencing the typo'd identifier keep compiling.
	//
	// Deprecated: use TimeStampHeader instead.
	TimeStampHeaer = "Nats-Time-Stamp"
	// SubjectHeader contains the original subject the message was published to.
	SubjectHeader = "Nats-Subject"
	// LastSequenceHeader contains the last sequence of the message having the
	// same subject, otherwise zero if this is the first message for the
	// subject.
	LastSequenceHeader = "Nats-Last-Sequence"
)
// Rollups, can be subject only or all messages.
// These are the valid values carried in the [MsgRollup] header.
const (
	// MsgRollupSubject is used to purge all messages before this message on the
	// message subject.
	MsgRollupSubject = "sub"
	// MsgRollupAll is used to purge all messages before this message on the
	// stream.
	MsgRollupAll = "all"
)
// Ack payloads sent to the server on a message's reply subject (see
// ackReply). The server interprets the verbatim byte sequence.
var (
	ackAck      ackType = []byte("+ACK")
	ackNak      ackType = []byte("-NAK")
	ackProgress ackType = []byte("+WPI")
	ackTerm     ackType = []byte("+TERM")
)
// Metadata returns [MsgMetadata] for a JetStream message.
//
// The metadata is decoded from the message's reply subject, which the server
// formats as a sequence of ack tokens (domain, stream, consumer, sequences,
// delivery count, pending count and timestamp).
func (m *jetStreamMsg) Metadata() (*MsgMetadata, error) {
	if err := m.checkReply(); err != nil {
		return nil, err
	}
	tokens, err := parser.GetMetadataFields(m.msg.Reply)
	if err != nil {
		return nil, fmt.Errorf("%w: %s", ErrNotJSMessage, err)
	}
	meta := &MsgMetadata{
		Domain:       tokens[parser.AckDomainTokenPos],
		NumDelivered: parser.ParseNum(tokens[parser.AckNumDeliveredTokenPos]),
		NumPending:   parser.ParseNum(tokens[parser.AckNumPendingTokenPos]),
		// The ack timestamp token is nanoseconds since the Unix epoch.
		Timestamp: time.Unix(0, int64(parser.ParseNum(tokens[parser.AckTimestampSeqTokenPos]))),
		Stream:    tokens[parser.AckStreamTokenPos],
		Consumer:  tokens[parser.AckConsumerTokenPos],
	}
	meta.Sequence.Stream = parser.ParseNum(tokens[parser.AckStreamSeqTokenPos])
	meta.Sequence.Consumer = parser.ParseNum(tokens[parser.AckConsumerSeqTokenPos])
	return meta, nil
}
// Data returns the message body (payload) of the underlying core NATS message.
func (m *jetStreamMsg) Data() []byte {
	return m.msg.Data
}

// Headers returns a map of headers for a message.
func (m *jetStreamMsg) Headers() nats.Header {
	return m.msg.Header
}

// Subject returns a subject on which a message is published.
func (m *jetStreamMsg) Subject() string {
	return m.msg.Subject
}

// Reply returns a reply subject for a JetStream message. This is the ack
// subject the metadata tokens are parsed from (see Metadata).
func (m *jetStreamMsg) Reply() string {
	return m.msg.Reply
}
// Ack acknowledges a message. This tells the server that the message was
// successfully processed and it can move on to the next message.
// Ack is fire-and-forget; it does not wait for the server's confirmation
// (use DoubleAck for that).
func (m *jetStreamMsg) Ack() error {
	return m.ackReply(context.Background(), ackAck, false, ackOpts{})
}

// DoubleAck acknowledges a message and waits for ack reply from the server.
// While it impacts performance, it is useful for scenarios where
// message loss is not acceptable.
func (m *jetStreamMsg) DoubleAck(ctx context.Context) error {
	return m.ackReply(ctx, ackAck, true, ackOpts{})
}

// Nak negatively acknowledges a message. This tells the server to
// redeliver the message.
func (m *jetStreamMsg) Nak() error {
	return m.ackReply(context.Background(), ackNak, false, ackOpts{})
}

// NakWithDelay negatively acknowledges a message. This tells the server
// to redeliver the message after the given delay.
func (m *jetStreamMsg) NakWithDelay(delay time.Duration) error {
	return m.ackReply(context.Background(), ackNak, false, ackOpts{nakDelay: delay})
}

// InProgress tells the server that this message is being worked on. It
// resets the redelivery timer on the server. Unlike the other ack types,
// it may be sent multiple times for the same message.
func (m *jetStreamMsg) InProgress() error {
	return m.ackReply(context.Background(), ackProgress, false, ackOpts{})
}

// Term tells the server to not redeliver this message, regardless of
// the value of MaxDeliver.
func (m *jetStreamMsg) Term() error {
	return m.ackReply(context.Background(), ackTerm, false, ackOpts{})
}

// TermWithReason tells the server to not redeliver this message, regardless of
// the value of MaxDeliver. The provided reason will be included in JetStream
// advisory event sent by the server.
//
// Note: This will only work with JetStream servers >= 2.10.4.
// For older servers, TermWithReason will be ignored by the server and the message
// will not be terminated.
func (m *jetStreamMsg) TermWithReason(reason string) error {
	return m.ackReply(context.Background(), ackTerm, false, ackOpts{termReason: reason})
}
// ackReply publishes an ack of the given type on the message's reply subject.
// When sync is true it waits for the server's confirmation (used by DoubleAck);
// otherwise the ack is fire-and-forget. opts carry the optional NAK delay or
// TERM reason, which are encoded into the ack payload.
//
// Returns ErrMsgAlreadyAckd when a terminal ack was already sent for this
// message; ackProgress is exempt from this bookkeeping and may repeat.
func (m *jetStreamMsg) ackReply(ctx context.Context, ackType ackType, sync bool, opts ackOpts) error {
	err := m.checkReply()
	if err != nil {
		return err
	}
	m.Lock()
	if m.ackd {
		m.Unlock()
		return ErrMsgAlreadyAckd
	}
	m.Unlock()
	if sync {
		// Ensure the request has a deadline even when the caller's ctx has none.
		var cancel context.CancelFunc
		ctx, cancel = m.js.wrapContextWithoutDeadline(ctx)
		if cancel != nil {
			defer cancel()
		}
	}
	var body []byte
	if opts.nakDelay > 0 {
		body = []byte(fmt.Sprintf("%s {\"delay\": %d}", ackType, opts.nakDelay.Nanoseconds()))
	} else if opts.termReason != "" {
		body = []byte(fmt.Sprintf("%s %s", ackType, opts.termReason))
	} else {
		body = ackType
	}
	if sync {
		_, err = m.js.conn.RequestWithContext(ctx, m.msg.Reply, body)
	} else {
		err = m.js.conn.Publish(m.msg.Reply, body)
	}
	if err != nil {
		return err
	}
	// Mark that the message has been acked unless it is ackProgress
	// which can be sent many times.
	if !bytes.Equal(ackType, ackProgress) {
		m.Lock()
		m.ackd = true
		m.Unlock()
	}
	return nil
}
// checkReply verifies that the message is bound to a subscription and carries
// a reply subject, both of which are required for JetStream ack/metadata
// operations.
func (m *jetStreamMsg) checkReply() error {
	switch {
	case m == nil, m.msg.Sub == nil:
		return ErrMsgNotBound
	case m.msg.Reply == "":
		return ErrMsgNoReply
	default:
		return nil
	}
}
// checkMsg reports whether the given message is a user message. For server
// status messages it maps the Status/Description headers to the matching
// sentinel error (404 no messages, 408 timeout, etc.).
func checkMsg(msg *nats.Msg) (bool, error) {
	// A payload or the absence of headers marks a user message.
	if len(msg.Data) > 0 || len(msg.Header) == 0 {
		return true, nil
	}
	status := msg.Header.Get("Status")
	descr := msg.Header.Get("Description")
	// Without a status header this is still considered a user message.
	if status == "" {
		return true, nil
	}
	switch status {
	case badRequest:
		return false, ErrBadRequest
	case noResponders:
		return false, nats.ErrNoResponders
	case noMessages:
		// 404 indicates that there are no messages.
		return false, ErrNoMessages
	case reqTimeout:
		return false, nats.ErrTimeout
	case controlMsg:
		return false, nil
	case pinIdMismatch:
		return false, ErrPinIDMismatch
	case maxBytesExceeded:
		// The 409 status covers several conditions; disambiguate by
		// inspecting the description text.
		lowered := strings.ToLower(descr)
		switch {
		case strings.Contains(lowered, "message size exceeds maxbytes"):
			return false, ErrMaxBytesExceeded
		case strings.Contains(lowered, "batch completed"):
			return false, ErrBatchCompleted
		case strings.Contains(lowered, "consumer deleted"):
			return false, ErrConsumerDeleted
		case strings.Contains(lowered, "leadership change"):
			return false, ErrConsumerLeadershipChanged
		}
	}
	return false, fmt.Errorf("nats: %s", msg.Header.Get("Description"))
}
// parsePending extracts the pending message and byte counts from the
// Nats-Pending-Messages and Nats-Pending-Bytes headers of a status message.
// A missing header yields zero; a malformed one yields an error.
func parsePending(msg *nats.Msg) (int, int, error) {
	atoiHeader := func(key, errMsg string) (int, error) {
		raw := msg.Header.Get(key)
		if raw == "" {
			return 0, nil
		}
		n, convErr := strconv.Atoi(raw)
		if convErr != nil {
			return 0, errors.New(errMsg)
		}
		return n, nil
	}
	msgsLeft, err := atoiHeader("Nats-Pending-Messages", "nats: invalid format of Nats-Pending-Messages")
	if err != nil {
		return 0, 0, err
	}
	bytesLeft, err := atoiHeader("Nats-Pending-Bytes", "nats: invalid format of Nats-Pending-Bytes")
	if err != nil {
		return 0, 0, err
	}
	return msgsLeft, bytesLeft, nil
}
// toJSMsg converts core [nats.Msg] to [jetStreamMsg], exposing JetStream-specific operations.
// The returned message keeps a reference to js so acks can be published over
// the same connection (see ackReply).
func (js *jetStream) toJSMsg(msg *nats.Msg) *jetStreamMsg {
	return &jetStreamMsg{
		msg: msg,
		js:  js,
	}
}

1625
vendor/github.com/nats-io/nats.go/jetstream/object.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,41 @@
// Copyright 2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
// GetObjectShowDeleted makes [ObjectStore.Get] return object even if it was
// marked as deleted.
func GetObjectShowDeleted() GetObjectOpt {
	showDeleted := func(opts *getObjectOpts) error {
		opts.showDeleted = true
		return nil
	}
	return showDeleted
}
// GetObjectInfoShowDeleted makes [ObjectStore.GetInfo] return object info even
// if it was marked as deleted.
func GetObjectInfoShowDeleted() GetObjectInfoOpt {
	return func(opts *getObjectInfoOpts) error {
		opts.showDeleted = true
		return nil
	}
}
// ListObjectsShowDeleted makes [ObjectStore.ListObjects] also return deleted
// objects.
func ListObjectsShowDeleted() ListObjectsOpt {
	includeDeleted := func(opts *listObjectOpts) error {
		opts.showDeleted = true
		return nil
	}
	return includeDeleted
}

810
vendor/github.com/nats-io/nats.go/jetstream/ordered.go generated vendored Normal file
View File

@@ -0,0 +1,810 @@
// Copyright 2022-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/nats-io/nats.go"
)
type (
	// orderedConsumer provides strictly ordered delivery by wrapping a series
	// of ephemeral pull consumers; whenever a delivery gap is detected it
	// recreates the server-side consumer (see reset) under a new serial.
	orderedConsumer struct {
		js  *jetStream
		cfg *OrderedConsumerConfig
		// stream is the name of the stream consumed from.
		stream          string
		currentConsumer *pullConsumer
		currentSub      *pullSubscription
		// cursor tracks the last successfully delivered sequences.
		cursor cursor
		// namePrefix plus "_<serial>" forms each consumer instance's name.
		namePrefix string
		serial     int
		// consumerType records whether Consume/Messages or Fetch-style
		// calls are in use; the two modes are mutually exclusive.
		consumerType consumerType
		// doReset signals the Consume goroutine to recreate the consumer.
		doReset chan struct{}
		// resetInProgress is 1 while a reset is pending/underway.
		resetInProgress atomic.Uint32
		userErrHandler  ConsumeErrHandlerFunc
		stopAfter       int
		stopAfterMsgsLeft chan int
		withStopAfter   bool
		runningFetch    *fetchResult
		subscription    *orderedSubscription
		sync.Mutex
	}

	// orderedSubscription is the ConsumeContext/MessagesContext returned to
	// callers; it survives resets of the underlying pull subscription.
	orderedSubscription struct {
		consumer *orderedConsumer
		opts     []PullMessagesOpt
		done     chan struct{}
		// closed is 1 once Stop or Drain has been called.
		closed atomic.Uint32
	}

	// cursor holds the last delivered stream and consumer sequence numbers.
	cursor struct {
		streamSeq  uint64
		deliverSeq uint64
	}

	consumerType int
)
// consumerType values; Consume/Messages and Fetch-style usage are mutually
// exclusive on a single ordered consumer.
const (
	consumerTypeNotSet consumerType = iota
	consumerTypeConsume
	consumerTypeFetch
)
var (
	// errOrderedSequenceMismatch signals a delivery gap and triggers a
	// consumer reset; it is never surfaced to the user error handler.
	errOrderedSequenceMismatch = errors.New("sequence mismatch")
	// errOrderedConsumerClosed is returned by reset when the subscription
	// was closed while recreating the consumer.
	errOrderedConsumerClosed = errors.New("ordered consumer closed")
)
// Consume can be used to continuously receive messages and handle them
// with the provided callback function. Consume cannot be used concurrently
// when using ordered consumer.
//
// See [Consumer.Consume] for more details.
func (c *orderedConsumer) Consume(handler MessageHandler, opts ...PullConsumeOpt) (ConsumeContext, error) {
	// Lazily create the first underlying consumer; reject concurrent use
	// and mixing with Fetch-style calls.
	if (c.consumerType == consumerTypeNotSet || c.consumerType == consumerTypeConsume) && c.currentConsumer == nil {
		err := c.reset()
		if err != nil {
			return nil, err
		}
	} else if c.consumerType == consumerTypeConsume && c.currentConsumer != nil {
		return nil, ErrOrderedConsumerConcurrentRequests
	}
	if c.consumerType == consumerTypeFetch {
		return nil, ErrOrderConsumerUsedAsFetch
	}
	c.consumerType = consumerTypeConsume
	consumeOpts, err := parseConsumeOpts(true, opts...)
	if err != nil {
		return nil, fmt.Errorf("%w: %s", ErrInvalidOption, err)
	}
	c.userErrHandler = consumeOpts.ErrHandler
	// Install the internal error handler bound to the current serial; it is
	// replaced with a new serial after every reset (see below).
	opts = append(opts, consumeReconnectNotify(),
		ConsumeErrHandler(c.errHandler(c.serial)))
	if consumeOpts.StopAfter > 0 {
		c.withStopAfter = true
		c.stopAfter = consumeOpts.StopAfter
	}
	c.stopAfterMsgsLeft = make(chan int, 1)
	if c.stopAfter > 0 {
		opts = append(opts, consumeStopAfterNotify(c.stopAfter, c.stopAfterMsgsLeft))
	}
	sub := &orderedSubscription{
		consumer: c,
		done:     make(chan struct{}, 1),
	}
	c.subscription = sub
	internalHandler := func(serial int) func(msg Msg) {
		return func(msg Msg) {
			// handler is a noop if message was delivered for a consumer with different serial
			if serial != c.serial {
				return
			}
			meta, err := msg.Metadata()
			if err != nil {
				c.errHandler(serial)(c.currentSub, err)
				return
			}
			dseq := meta.Sequence.Consumer
			if dseq != c.cursor.deliverSeq+1 {
				// Gap detected: report the mismatch, which schedules a reset.
				c.errHandler(serial)(sub, errOrderedSequenceMismatch)
				return
			}
			c.cursor.deliverSeq = dseq
			c.cursor.streamSeq = meta.Sequence.Stream
			handler(msg)
		}
	}
	cc, err := c.currentConsumer.Consume(internalHandler(c.serial), opts...)
	if err != nil {
		return nil, err
	}
	c.currentSub = cc.(*pullSubscription)
	// Supervisor goroutine: recreates the subscription on reset requests and
	// tears it down when the subscription completes or is stopped.
	go func() {
		for {
			select {
			case <-c.doReset:
				if err := c.reset(); err != nil {
					if errors.Is(err, errOrderedConsumerClosed) {
						continue
					}
					c.errHandler(c.serial)(c.currentSub, err)
				}
				if c.withStopAfter {
					select {
					case c.stopAfter = <-c.stopAfterMsgsLeft:
					default:
					}
					if c.stopAfter <= 0 {
						sub.Stop()
						return
					}
				}
				// Drop the per-serial options appended above before
				// re-appending fresh ones for the new serial.
				if c.stopAfter > 0 {
					opts = opts[:len(opts)-2]
				} else {
					opts = opts[:len(opts)-1]
				}
				// overwrite the previous err handler to use the new serial
				opts = append(opts, ConsumeErrHandler(c.errHandler(c.serial)))
				if c.withStopAfter {
					opts = append(opts, consumeStopAfterNotify(c.stopAfter, c.stopAfterMsgsLeft))
				}
				if cc, err := c.currentConsumer.Consume(internalHandler(c.serial), opts...); err != nil {
					c.errHandler(c.serial)(cc, err)
				} else {
					c.Lock()
					c.currentSub = cc.(*pullSubscription)
					c.Unlock()
				}
			case <-sub.done:
				// NOTE(review): currentSub is read before taking the lock
				// here — confirm this cannot race with the reset path above.
				s := sub.consumer.currentSub
				if s != nil {
					sub.consumer.Lock()
					s.Stop()
					sub.consumer.Unlock()
				}
				return
			case msgsLeft, ok := <-c.stopAfterMsgsLeft:
				if !ok {
					close(sub.done)
				}
				c.stopAfter = msgsLeft
				return
			}
		}
	}()
	return sub, nil
}
// errHandler builds the error callback installed on the underlying pull
// subscription for the consumer instance with the given serial. It forwards
// errors to the user handler (except internal reset triggers) and schedules
// a consumer reset for errors that indicate the consumer is broken.
func (c *orderedConsumer) errHandler(serial int) func(cc ConsumeContext, err error) {
	return func(cc ConsumeContext, err error) {
		c.Lock()
		defer c.Unlock()
		if c.userErrHandler != nil && !errors.Is(err, errOrderedSequenceMismatch) && !errors.Is(err, errConnected) {
			c.userErrHandler(cc, err)
		}
		if errors.Is(err, ErrNoHeartbeat) ||
			errors.Is(err, errOrderedSequenceMismatch) ||
			errors.Is(err, ErrConsumerDeleted) ||
			errors.Is(err, errConnected) ||
			errors.Is(err, nats.ErrNoResponders) {
			// only reset if serial matches the current consumer serial and there is no reset in progress
			if serial == c.serial && c.resetInProgress.Load() == 0 {
				c.resetInProgress.Store(1)
				c.doReset <- struct{}{}
			}
		}
	}
}
// Messages returns MessagesContext, allowing continuously iterating
// over messages on a stream. Messages cannot be used concurrently
// when using ordered consumer.
//
// See [Consumer.Messages] for more details.
func (c *orderedConsumer) Messages(opts ...PullMessagesOpt) (MessagesContext, error) {
	// Lazily create the first underlying consumer; reject concurrent use
	// and mixing with Fetch-style calls (mirrors Consume).
	if (c.consumerType == consumerTypeNotSet || c.consumerType == consumerTypeConsume) && c.currentConsumer == nil {
		err := c.reset()
		if err != nil {
			return nil, err
		}
	} else if c.consumerType == consumerTypeConsume && c.currentConsumer != nil {
		return nil, ErrOrderedConsumerConcurrentRequests
	}
	if c.consumerType == consumerTypeFetch {
		return nil, ErrOrderConsumerUsedAsFetch
	}
	c.consumerType = consumerTypeConsume
	consumeOpts, err := parseMessagesOpts(true, opts...)
	if err != nil {
		return nil, fmt.Errorf("%w: %s", ErrInvalidOption, err)
	}
	// Missing heartbeats must surface as errors so Next() can trigger resets.
	opts = append(opts,
		WithMessagesErrOnMissingHeartbeat(true),
		messagesReconnectNotify())
	c.stopAfterMsgsLeft = make(chan int, 1)
	if consumeOpts.StopAfter > 0 {
		c.withStopAfter = true
		c.stopAfter = consumeOpts.StopAfter
	}
	c.userErrHandler = consumeOpts.ErrHandler
	if c.stopAfter > 0 {
		opts = append(opts, messagesStopAfterNotify(c.stopAfter, c.stopAfterMsgsLeft))
	}
	cc, err := c.currentConsumer.Messages(opts...)
	if err != nil {
		return nil, err
	}
	c.currentSub = cc.(*pullSubscription)
	// opts are retained on the subscription so Next() can recreate the
	// underlying Messages context after a reset.
	sub := &orderedSubscription{
		consumer: c,
		opts:     opts,
		done:     make(chan struct{}, 1),
	}
	c.subscription = sub
	return sub, nil
}
// Next returns the next message in order. On iterator errors or delivery
// gaps it transparently resets the underlying consumer and resumes from the
// last known stream sequence; messages from stale consumer serials are
// skipped.
func (s *orderedSubscription) Next() (Msg, error) {
	for {
		msg, err := s.consumer.currentSub.Next()
		if err != nil {
			if errors.Is(err, ErrMsgIteratorClosed) {
				s.Stop()
				return nil, err
			}
			if s.consumer.withStopAfter {
				// Pick up the remaining-message budget left by the previous
				// subscription, stopping entirely when it is exhausted.
				select {
				case s.consumer.stopAfter = <-s.consumer.stopAfterMsgsLeft:
				default:
				}
				if s.consumer.stopAfter <= 0 {
					s.Stop()
					return nil, ErrMsgIteratorClosed
				}
				// StopAfter is always the last appended option (see Messages).
				s.opts[len(s.opts)-1] = StopAfter(s.consumer.stopAfter)
			}
			if err := s.consumer.reset(); err != nil {
				if errors.Is(err, errOrderedConsumerClosed) {
					return nil, ErrMsgIteratorClosed
				}
				return nil, err
			}
			cc, err := s.consumer.currentConsumer.Messages(s.opts...)
			if err != nil {
				return nil, err
			}
			s.consumer.currentSub = cc.(*pullSubscription)
			continue
		}
		meta, err := msg.Metadata()
		if err != nil {
			return nil, err
		}
		// Drop messages delivered by a previous consumer instance.
		serial := serialNumberFromConsumer(meta.Consumer)
		if serial != s.consumer.serial {
			continue
		}
		dseq := meta.Sequence.Consumer
		if dseq != s.consumer.cursor.deliverSeq+1 {
			// Gap detected: reset and recreate the Messages context.
			if err := s.consumer.reset(); err != nil {
				if errors.Is(err, errOrderedConsumerClosed) {
					return nil, ErrMsgIteratorClosed
				}
				return nil, err
			}
			cc, err := s.consumer.currentConsumer.Messages(s.opts...)
			if err != nil {
				return nil, err
			}
			s.consumer.currentSub = cc.(*pullSubscription)
			continue
		}
		s.consumer.cursor.deliverSeq = dseq
		s.consumer.cursor.streamSeq = meta.Sequence.Stream
		return msg, nil
	}
}
// Stop stops the current underlying subscription and marks the ordered
// subscription closed. It is idempotent: the CAS on the closed flag
// guarantees the done channel is closed at most once.
func (s *orderedSubscription) Stop() {
	if !s.closed.CompareAndSwap(0, 1) {
		return
	}
	s.consumer.Lock()
	defer s.consumer.Unlock()
	if s.consumer.currentSub != nil {
		s.consumer.currentSub.Stop()
	}
	close(s.done)
}
// Drain drains the current underlying subscription (processing already
// delivered messages) and marks the ordered subscription closed. Idempotent
// via the CAS on the closed flag.
//
// NOTE(review): unlike Stop, currentSub is read here without holding the
// consumer's lock, and the lock taken is currentConsumer's — confirm this
// asymmetry is intentional.
func (s *orderedSubscription) Drain() {
	if !s.closed.CompareAndSwap(0, 1) {
		return
	}
	if s.consumer.currentSub != nil {
		s.consumer.currentConsumer.Lock()
		s.consumer.currentSub.Drain()
		s.consumer.currentConsumer.Unlock()
	}
	close(s.done)
}
// Closed returns a channel that is closed when the consuming is
// fully stopped/drained. When the channel is closed, no more messages
// will be received and processing is complete.
func (s *orderedSubscription) Closed() <-chan struct{} {
	s.consumer.Lock()
	defer s.consumer.Unlock()
	closedCh := make(chan struct{})
	go func() {
		for {
			s.consumer.Lock()
			if s.consumer.currentSub == nil {
				// BUGFIX: release the mutex before bailing out; returning
				// while holding it would permanently deadlock every other
				// operation on this consumer.
				s.consumer.Unlock()
				return
			}
			closed := s.consumer.currentSub.Closed()
			s.consumer.Unlock()
			// wait until the underlying pull consumer is closed
			<-closed
			// if the subscription is closed and ordered consumer is closed as well,
			// send a signal that the Consume() is fully stopped
			if s.closed.Load() == 1 {
				close(closedCh)
				return
			}
		}
	}()
	return closedCh
}
// Fetch is used to retrieve up to a provided number of messages from a
// stream. This method will always send a single request and wait until
// either all messages are retrieved or request times out.
//
// It is not efficient to use Fetch with on an ordered consumer, as it will
// reset the consumer for each subsequent Fetch call.
// Consider using [Consumer.Consume] or [Consumer.Messages] instead.
func (c *orderedConsumer) Fetch(batch int, opts ...FetchOpt) (MessageBatch, error) {
	c.Lock()
	if c.consumerType == consumerTypeConsume {
		c.Unlock()
		return nil, ErrOrderConsumerUsedAsConsume
	}
	if c.runningFetch != nil {
		if !c.runningFetch.closed() {
			// BUGFIX: release the mutex before returning; bailing out while
			// holding it would deadlock every subsequent consumer operation.
			c.Unlock()
			return nil, ErrOrderedConsumerConcurrentRequests
		}
		// Carry the previous fetch's position forward so the next consumer
		// resumes after the last retrieved stream sequence.
		if c.runningFetch.sseq != 0 {
			c.cursor.streamSeq = c.runningFetch.sseq
		}
	}
	c.consumerType = consumerTypeFetch
	sub := orderedSubscription{
		consumer: c,
		done:     make(chan struct{}),
	}
	c.subscription = &sub
	c.Unlock()
	// Each Fetch recreates the underlying consumer at the cursor position.
	err := c.reset()
	if err != nil {
		return nil, err
	}
	msgs, err := c.currentConsumer.Fetch(batch, opts...)
	if err != nil {
		return nil, err
	}
	c.runningFetch = msgs.(*fetchResult)
	return msgs, nil
}
// FetchBytes is used to retrieve up to a provided bytes from the
// stream. This method will always send a single request and wait until
// provided number of bytes is exceeded or request times out.
//
// It is not efficient to use FetchBytes with on an ordered consumer, as it will
// reset the consumer for each subsequent Fetch call.
// Consider using [Consumer.Consume] or [Consumer.Messages] instead.
func (c *orderedConsumer) FetchBytes(maxBytes int, opts ...FetchOpt) (MessageBatch, error) {
	c.Lock()
	if c.consumerType == consumerTypeConsume {
		c.Unlock()
		return nil, ErrOrderConsumerUsedAsConsume
	}
	if c.runningFetch != nil {
		if !c.runningFetch.closed() {
			// BUGFIX: release the mutex before returning; bailing out while
			// holding it would deadlock every subsequent consumer operation.
			c.Unlock()
			return nil, ErrOrderedConsumerConcurrentRequests
		}
		// Carry the previous fetch's position forward so the next consumer
		// resumes after the last retrieved stream sequence.
		if c.runningFetch.sseq != 0 {
			c.cursor.streamSeq = c.runningFetch.sseq
		}
	}
	c.consumerType = consumerTypeFetch
	sub := orderedSubscription{
		consumer: c,
		done:     make(chan struct{}),
	}
	c.subscription = &sub
	c.Unlock()
	// Each FetchBytes recreates the underlying consumer at the cursor position.
	err := c.reset()
	if err != nil {
		return nil, err
	}
	msgs, err := c.currentConsumer.FetchBytes(maxBytes, opts...)
	if err != nil {
		return nil, err
	}
	c.runningFetch = msgs.(*fetchResult)
	return msgs, nil
}
// FetchNoWait is used to retrieve up to a provided number of messages
// from a stream. This method will always send a single request and
// immediately return up to a provided number of messages or wait until
// at least one message is available or request times out.
//
// It is not efficient to use FetchNoWait with on an ordered consumer, as it will
// reset the consumer for each subsequent Fetch call.
// Consider using [Consumer.Consume] or [Consumer.Messages] instead.
//
// NOTE(review): unlike Fetch/FetchBytes this method takes no lock on c,
// checks runningFetch.done instead of runningFetch.closed(), and does not
// carry the cursor forward from a previous fetch — confirm these
// differences are intentional.
func (c *orderedConsumer) FetchNoWait(batch int) (MessageBatch, error) {
	if c.consumerType == consumerTypeConsume {
		return nil, ErrOrderConsumerUsedAsConsume
	}
	if c.runningFetch != nil && !c.runningFetch.done {
		return nil, ErrOrderedConsumerConcurrentRequests
	}
	c.consumerType = consumerTypeFetch
	sub := orderedSubscription{
		consumer: c,
		done:     make(chan struct{}),
	}
	c.subscription = &sub
	err := c.reset()
	if err != nil {
		return nil, err
	}
	return c.currentConsumer.FetchNoWait(batch)
}
// Next is used to retrieve the next message from the stream. This
// method will block until the message is retrieved or timeout is
// reached. It is a convenience wrapper over a single-message Fetch.
//
// It is not efficient to use Next with on an ordered consumer, as it will
// reset the consumer for each subsequent Fetch call.
// Consider using [Consumer.Consume] or [Consumer.Messages] instead.
func (c *orderedConsumer) Next(opts ...FetchOpt) (Msg, error) {
	batch, err := c.Fetch(1, opts...)
	if err != nil {
		return nil, err
	}
	if msg := <-batch.Messages(); msg != nil {
		return msg, nil
	}
	// No message arrived: surface the batch error, or a timeout when the
	// batch completed cleanly but empty.
	if batchErr := batch.Error(); batchErr != nil {
		return nil, batchErr
	}
	return nil, nats.ErrTimeout
}
// serialNumberFromConsumer extracts the trailing "_<serial>" suffix from an
// ordered consumer name. It returns 0 when the name has no underscore or the
// suffix is not a valid integer.
func serialNumberFromConsumer(name string) int {
	sep := strings.LastIndex(name, "_")
	if sep == -1 {
		return 0
	}
	serial, err := strconv.Atoi(name[sep+1:])
	if err != nil {
		return 0
	}
	return serial
}
// reset stops the current subscription, asynchronously deletes the old
// server-side consumer, and creates a fresh one (under a new serial, see
// getConsumerConfig) that resumes just after the last delivered stream
// sequence. Creation is retried with exponential backoff until it succeeds,
// MaxResetAttempts is exhausted, or the subscription is closed.
func (c *orderedConsumer) reset() error {
	c.Lock()
	defer c.Unlock()
	defer c.resetInProgress.Store(0)
	if c.currentConsumer != nil {
		c.currentConsumer.Lock()
		if c.currentSub != nil {
			c.currentSub.Stop()
		}
		consName := c.currentConsumer.CachedInfo().Name
		c.currentConsumer.Unlock()
		// Best-effort deletion of the previous consumer; failures are
		// ignored since the server reaps it via InactiveThreshold anyway.
		go func() {
			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
			_ = c.js.DeleteConsumer(ctx, c.stream, consName)
			cancel()
		}()
	}
	c.cursor.deliverSeq = 0
	consumerConfig := c.getConsumerConfig()
	var err error
	var cons Consumer
	backoffOpts := backoffOpts{
		attempts:        c.cfg.MaxResetAttempts,
		initialInterval: time.Second,
		factor:          2,
		maxInterval:     10 * time.Second,
		cancel:          c.subscription.done,
	}
	err = retryWithBackoff(func(attempt int) (bool, error) {
		// Abort retries once the subscription has been stopped/drained.
		isClosed := c.subscription.closed.Load() == 1
		if isClosed {
			return false, errOrderedConsumerClosed
		}
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		cons, err = c.js.CreateOrUpdateConsumer(ctx, c.stream, *consumerConfig)
		if err != nil {
			return true, err
		}
		return false, nil
	}, backoffOpts)
	if err != nil {
		return err
	}
	c.currentConsumer = cons.(*pullConsumer)
	return nil
}
// getConsumerConfig builds the configuration for the next ephemeral consumer
// instance, incrementing the serial. After the first message has been
// consumed the consumer resumes at cursor.streamSeq+1; before that, the
// user-provided deliver policy and start options apply.
func (c *orderedConsumer) getConsumerConfig() *ConsumerConfig {
	c.serial++
	var nextSeq uint64

	// if stream sequence is not initialized, no message was consumed yet
	// therefore, start from the beginning (either from 1 or from the provided sequence)
	if c.cursor.streamSeq == 0 {
		if c.cfg.OptStartSeq != 0 {
			nextSeq = c.cfg.OptStartSeq
		} else {
			nextSeq = 1
		}
	} else {
		// otherwise, start from the next sequence
		nextSeq = c.cursor.streamSeq + 1
	}

	// Normalize "unset" to unlimited reset attempts.
	if c.cfg.MaxResetAttempts == 0 {
		c.cfg.MaxResetAttempts = -1
	}
	name := fmt.Sprintf("%s_%d", c.namePrefix, c.serial)
	cfg := &ConsumerConfig{
		Name:              name,
		DeliverPolicy:     DeliverByStartSequencePolicy,
		OptStartSeq:       nextSeq,
		AckPolicy:         AckNonePolicy,
		InactiveThreshold: 5 * time.Minute,
		Replicas:          1,
		HeadersOnly:       c.cfg.HeadersOnly,
		MemoryStorage:     true,
		Metadata:          c.cfg.Metadata,
	}
	if len(c.cfg.FilterSubjects) == 1 {
		cfg.FilterSubject = c.cfg.FilterSubjects[0]
	} else {
		cfg.FilterSubjects = c.cfg.FilterSubjects
	}
	if c.cfg.InactiveThreshold != 0 {
		cfg.InactiveThreshold = c.cfg.InactiveThreshold
	}

	// if the cursor is not yet set, use the provided deliver policy
	if c.cursor.streamSeq != 0 {
		return cfg
	}

	// initial request, some options may be modified at that point
	cfg.DeliverPolicy = c.cfg.DeliverPolicy
	if c.cfg.DeliverPolicy == DeliverLastPerSubjectPolicy ||
		c.cfg.DeliverPolicy == DeliverLastPolicy ||
		c.cfg.DeliverPolicy == DeliverNewPolicy ||
		c.cfg.DeliverPolicy == DeliverAllPolicy {
		cfg.OptStartSeq = 0
	} else if c.cfg.DeliverPolicy == DeliverByStartTimePolicy {
		cfg.OptStartSeq = 0
		cfg.OptStartTime = c.cfg.OptStartTime
	} else {
		cfg.OptStartSeq = c.cfg.OptStartSeq
	}

	if cfg.DeliverPolicy == DeliverLastPerSubjectPolicy && len(c.cfg.FilterSubjects) == 0 {
		cfg.FilterSubjects = []string{">"}
	}
	return cfg
}
// consumeStopAfterNotify limits a Consume to numMsgs messages and reports the
// remaining budget on msgsLeftAfterStop when the subscription stops.
func consumeStopAfterNotify(numMsgs int, msgsLeftAfterStop chan int) PullConsumeOpt {
	return pullOptFunc(func(opts *consumeOpts) error {
		opts.StopAfter = numMsgs
		opts.stopAfterMsgsLeft = msgsLeftAfterStop
		return nil
	})
}

// messagesStopAfterNotify is the Messages-context counterpart of
// consumeStopAfterNotify.
func messagesStopAfterNotify(numMsgs int, msgsLeftAfterStop chan int) PullMessagesOpt {
	return pullOptFunc(func(opts *consumeOpts) error {
		opts.StopAfter = numMsgs
		opts.stopAfterMsgsLeft = msgsLeftAfterStop
		return nil
	})
}

// consumeReconnectNotify makes the pull subscription surface reconnect
// events, which the ordered consumer uses to trigger resets.
func consumeReconnectNotify() PullConsumeOpt {
	return pullOptFunc(func(opts *consumeOpts) error {
		opts.notifyOnReconnect = true
		return nil
	})
}

// messagesReconnectNotify is the Messages-context counterpart of
// consumeReconnectNotify.
func messagesReconnectNotify() PullMessagesOpt {
	return pullOptFunc(func(opts *consumeOpts) error {
		opts.notifyOnReconnect = true
		return nil
	})
}
// Info returns information about the ordered consumer.
// Note that this method will fetch the latest instance of the
// consumer from the server, which can be deleted by the library at any time.
func (c *orderedConsumer) Info(ctx context.Context) (*ConsumerInfo, error) {
	c.Lock()
	defer c.Unlock()
	if c.currentConsumer == nil {
		return nil, ErrOrderedConsumerNotCreated
	}
	infoSubject := fmt.Sprintf(apiConsumerInfoT, c.stream, c.currentConsumer.name)
	var resp consumerInfoResponse
	if _, err := c.js.apiRequestJSON(ctx, infoSubject, &resp); err != nil {
		return nil, err
	}
	if resp.Error != nil {
		if resp.Error.ErrorCode == JSErrCodeConsumerNotFound {
			return nil, ErrConsumerNotFound
		}
		return nil, resp.Error
	}
	// Defensive: a response with neither error nor payload is treated as
	// "consumer not found".
	if resp.Error == nil && resp.ConsumerInfo == nil {
		return nil, ErrConsumerNotFound
	}
	// Refresh the cached info returned by CachedInfo.
	c.currentConsumer.info = resp.ConsumerInfo
	return resp.ConsumerInfo, nil
}
// CachedInfo returns cached information about the consumer currently
// used by the ordered consumer. Cached info will be updated on every call
// to [Consumer.Info] or on consumer reset.
func (c *orderedConsumer) CachedInfo() *ConsumerInfo {
	c.Lock()
	defer c.Unlock()
	if current := c.currentConsumer; current != nil {
		return current.info
	}
	return nil
}
// backoffOpts configures retryWithBackoff.
type backoffOpts struct {
	// total retry attempts
	// -1 for unlimited
	attempts int
	// initial interval after which first retry will be performed
	// defaults to 1s
	initialInterval time.Duration
	// determines whether first function execution should be performed immediately
	disableInitialExecution bool
	// multiplier on each attempt
	// defaults to 2
	factor float64
	// max interval between retries
	// after reaching this value, all subsequent
	// retries will be performed with this interval
	// defaults to 1 minute
	maxInterval time.Duration
	// custom backoff intervals
	// if set, overrides all other options except attempts
	// if attempts are set, then the last interval will be used
	// for all subsequent retries after reaching the limit
	customBackoff []time.Duration
	// cancel channel
	// if set, retry will be canceled when this channel is closed
	cancel <-chan struct{}
}

// retryWithBackoff repeatedly invokes f (passing the attempt index) until f
// returns retry=false, the attempt budget is exhausted, or opts.cancel is
// closed (which yields a nil error). The wait between attempts grows by
// opts.factor up to opts.maxInterval, unless explicit customBackoff
// intervals are supplied.
func retryWithBackoff(f func(int) (bool, error), opts backoffOpts) error {
	var (
		retry bool
		err   error
	)
	// Custom intervals override every knob except attempts (which must then
	// be unset).
	if len(opts.customBackoff) > 0 {
		if opts.attempts != 0 {
			return errors.New("cannot use custom backoff intervals when attempts are set")
		}
		for attempt, wait := range opts.customBackoff {
			select {
			case <-opts.cancel:
				return nil
			case <-time.After(wait):
			}
			retry, err = f(attempt)
			if !retry {
				return err
			}
		}
		return err
	}
	// Fill in defaults for the exponential schedule.
	if opts.initialInterval == 0 {
		opts.initialInterval = 1 * time.Second
	}
	if opts.factor == 0 {
		opts.factor = 2
	}
	if opts.maxInterval == 0 {
		opts.maxInterval = 1 * time.Minute
	}
	if opts.attempts == 0 {
		return errors.New("retry attempts have to be set when not using custom backoff intervals")
	}
	wait := opts.initialInterval
	for attempt := 0; ; attempt++ {
		if attempt == 0 && opts.disableInitialExecution {
			time.Sleep(wait)
			continue
		}
		retry, err = f(attempt)
		if !retry {
			return err
		}
		if opts.attempts > 0 && attempt >= opts.attempts-1 {
			break
		}
		select {
		case <-opts.cancel:
			return nil
		case <-time.After(wait):
		}
		wait = time.Duration(float64(wait) * opts.factor)
		if wait > opts.maxInterval {
			wait = opts.maxInterval
		}
	}
	return err
}

661
vendor/github.com/nats-io/nats.go/jetstream/publish.go generated vendored Normal file
View File

@@ -0,0 +1,661 @@
// Copyright 2022-2025 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"context"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"math/rand"
"strconv"
"strings"
"sync"
"time"
"github.com/nats-io/nats.go"
"github.com/nats-io/nuid"
)
type (
	// asyncPublisherOpts holds the connection-wide settings that govern
	// asynchronous publishes.
	asyncPublisherOpts struct {
		// For async publish error handling.
		aecb MsgErrHandler
		// Max async pub ack in flight
		maxpa int
		// ackTimeout is the max time to wait for an ack.
		ackTimeout time.Duration
	}

	// PublishOpt are the options that can be passed to Publish methods.
	PublishOpt func(*pubOpts) error

	// pubOpts accumulates the per-publish options applied by PublishOpt.
	pubOpts struct {
		id             string
		lastMsgID      string        // Expected last msgId
		stream         string        // Expected stream name
		lastSeq        *uint64       // Expected last sequence
		lastSubjectSeq *uint64       // Expected last sequence per subject
		ttl            time.Duration // Message TTL

		// Publish retries for NoResponders err.
		retryWait     time.Duration // Retry wait between attempts
		retryAttempts int           // Retry attempts

		// stallWait is the max wait of a async pub ack.
		stallWait time.Duration

		// internal option to re-use existing paf in case of retry.
		pafRetry *pubAckFuture
	}

	// PubAckFuture is a future for a PubAck.
	// It can be used to wait for a PubAck or an error after an async publish.
	PubAckFuture interface {
		// Ok returns a receive only channel that can be used to get a PubAck.
		Ok() <-chan *PubAck

		// Err returns a receive only channel that can be used to get the error from an async publish.
		Err() <-chan error

		// Msg returns the message that was sent to the server.
		Msg() *nats.Msg
	}

	// pubAckFuture is the concrete PubAckFuture implementation, tracking one
	// in-flight async publish and its retry state.
	pubAckFuture struct {
		jsClient   *jetStreamClient
		msg        *nats.Msg
		retries    int
		maxRetries int
		retryWait  time.Duration
		ack        *PubAck
		err        error
		errCh      chan error
		doneCh     chan *PubAck
		reply      string
		timeout    *time.Timer
	}

	// jetStreamClient bundles the shared async publish state with its
	// configured options.
	jetStreamClient struct {
		asyncPublishContext
		asyncPublisherOpts
	}

	// MsgErrHandler is used to process asynchronous errors from JetStream
	// PublishAsync. It will return the original message sent to the server for
	// possible retransmitting and the error encountered.
	MsgErrHandler func(JetStream, *nats.Msg, error)

	// asyncPublishContext tracks outstanding async publishes awaiting acks,
	// keyed by reply subject.
	asyncPublishContext struct {
		sync.RWMutex
		replyPrefix string
		replySub    *nats.Subscription
		acks        map[string]*pubAckFuture
		stallCh     chan struct{}
		doneCh      chan struct{}
		rr          *rand.Rand

		// channel to signal when server is disconnected or conn is closed
		connStatusCh chan (nats.Status)
	}

	// pubAckResponse is the JetStream API response wrapper for a publish ack.
	pubAckResponse struct {
		apiResponse
		*PubAck
	}

	// PubAck is an ack received after successfully publishing a message.
	PubAck struct {
		// Stream is the stream name the message was published to.
		Stream string `json:"stream"`

		// Sequence is the stream sequence number of the message.
		Sequence uint64 `json:"seq"`

		// Duplicate indicates whether the message was a duplicate.
		// Duplicate can be detected using the [MsgIDHeader] and [StreamConfig.Duplicates].
		Duplicate bool `json:"duplicate,omitempty"`

		// Domain is the domain the message was published to.
		Domain string `json:"domain,omitempty"`
	}
)
const (
	// DefaultPubRetryWait is the default time to wait between retries on
	// Publish if err is ErrNoResponders.
	DefaultPubRetryWait = 250 * time.Millisecond

	// DefaultPubRetryAttempts is the default number of retries.
	DefaultPubRetryAttempts = 2
)
const (
	// statusHdr is the header the server sets on empty status replies
	// (e.g. the no-responders case checked in handleAsyncReply).
	statusHdr = "Status"

	// rdigits is the base-62 alphabet used to generate reply tokens.
	rdigits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

	// base is the size of the rdigits alphabet.
	base = 62
)
// Publish performs a synchronous publish to a stream and waits for ack
// from server. It accepts subject name (which must be bound to a stream)
// and message payload.
func (js *jetStream) Publish(ctx context.Context, subj string, data []byte, opts ...PublishOpt) (*PubAck, error) {
	msg := &nats.Msg{
		Subject: subj,
		Data:    data,
	}
	return js.PublishMsg(ctx, msg, opts...)
}
// PublishMsg performs a synchronous publish to a stream and waits for
// ack from server. It accepts subject name (which must be bound to a
// stream) and nats.Message.
func (js *jetStream) PublishMsg(ctx context.Context, m *nats.Msg, opts ...PublishOpt) (*PubAck, error) {
	ctx, cancel := js.wrapContextWithoutDeadline(ctx)
	if cancel != nil {
		defer cancel()
	}
	// Defaults: retry no-responders errors a bounded number of times.
	o := pubOpts{
		retryWait:     DefaultPubRetryWait,
		retryAttempts: DefaultPubRetryAttempts,
	}
	if len(opts) > 0 {
		// Options may set headers, so make sure the header map exists.
		if m.Header == nil {
			m.Header = nats.Header{}
		}
		for _, opt := range opts {
			if err := opt(&o); err != nil {
				return nil, err
			}
		}
	}
	// Stall waiting only makes sense for async publishes.
	if o.stallWait > 0 {
		return nil, fmt.Errorf("%w: stall wait cannot be set to sync publish", ErrInvalidOption)
	}
	// Translate the accumulated options into JetStream headers.
	if o.id != "" {
		m.Header.Set(MsgIDHeader, o.id)
	}
	if o.lastMsgID != "" {
		m.Header.Set(ExpectedLastMsgIDHeader, o.lastMsgID)
	}
	if o.stream != "" {
		m.Header.Set(ExpectedStreamHeader, o.stream)
	}
	if o.lastSeq != nil {
		m.Header.Set(ExpectedLastSeqHeader, strconv.FormatUint(*o.lastSeq, 10))
	}
	if o.lastSubjectSeq != nil {
		m.Header.Set(ExpectedLastSubjSeqHeader, strconv.FormatUint(*o.lastSubjectSeq, 10))
	}
	if o.ttl > 0 {
		m.Header.Set(MsgTTLHeader, o.ttl.String())
	}

	var resp *nats.Msg
	var err error

	resp, err = js.conn.RequestMsgWithContext(ctx, m)
	if err != nil {
		// Retry only while the error is no-responders; a negative
		// retryAttempts means retry indefinitely (until ctx fails).
		for r := 0; errors.Is(err, nats.ErrNoResponders) && (r < o.retryAttempts || o.retryAttempts < 0); r++ {
			// To protect against small blips in leadership changes etc, if we get a no responders here retry.
			select {
			case <-ctx.Done():
				// Fall through; the next request returns the ctx error,
				// which is not ErrNoResponders and so ends the loop.
			case <-time.After(o.retryWait):
			}
			resp, err = js.conn.RequestMsgWithContext(ctx, m)
		}
		if err != nil {
			if errors.Is(err, nats.ErrNoResponders) {
				return nil, ErrNoStreamResponse
			}
			return nil, err
		}
	}

	// Decode and validate the pub-ack from the server.
	var ackResp pubAckResponse
	if err := json.Unmarshal(resp.Data, &ackResp); err != nil {
		return nil, ErrInvalidJSAck
	}
	if ackResp.Error != nil {
		return nil, fmt.Errorf("nats: %w", ackResp.Error)
	}
	if ackResp.PubAck == nil || ackResp.PubAck.Stream == "" {
		return nil, ErrInvalidJSAck
	}
	return ackResp.PubAck, nil
}
// PublishAsync performs an asynchronous publish to a stream and returns
// [PubAckFuture] interface. It accepts subject name (which must be bound
// to a stream) and message payload.
func (js *jetStream) PublishAsync(subj string, data []byte, opts ...PublishOpt) (PubAckFuture, error) {
	msg := &nats.Msg{
		Subject: subj,
		Data:    data,
	}
	return js.PublishMsgAsync(msg, opts...)
}
// PublishMsgAsync performs an asynchronous publish to a stream and
// returns [PubAckFuture] interface. It accepts subject name (which must
// be bound to a stream) and nats.Message.
func (js *jetStream) PublishMsgAsync(m *nats.Msg, opts ...PublishOpt) (PubAckFuture, error) {
	// Defaults: retry no-responders errors a bounded number of times.
	o := pubOpts{
		retryWait:     DefaultPubRetryWait,
		retryAttempts: DefaultPubRetryAttempts,
	}
	if len(opts) > 0 {
		// Options may set headers, so make sure the header map exists.
		if m.Header == nil {
			m.Header = nats.Header{}
		}
		for _, opt := range opts {
			if err := opt(&o); err != nil {
				return nil, err
			}
		}
	}
	// Max time to wait for room when too many acks are in flight.
	defaultStallWait := 200 * time.Millisecond

	stallWait := defaultStallWait
	if o.stallWait > 0 {
		stallWait = o.stallWait
	}

	// Translate the accumulated options into JetStream headers.
	if o.id != "" {
		m.Header.Set(MsgIDHeader, o.id)
	}
	if o.lastMsgID != "" {
		m.Header.Set(ExpectedLastMsgIDHeader, o.lastMsgID)
	}
	if o.stream != "" {
		m.Header.Set(ExpectedStreamHeader, o.stream)
	}
	if o.lastSeq != nil {
		m.Header.Set(ExpectedLastSeqHeader, strconv.FormatUint(*o.lastSeq, 10))
	}
	if o.lastSubjectSeq != nil {
		m.Header.Set(ExpectedLastSubjSeqHeader, strconv.FormatUint(*o.lastSubjectSeq, 10))
	}
	if o.ttl > 0 {
		m.Header.Set(MsgTTLHeader, o.ttl.String())
	}

	paf := o.pafRetry
	// The reply subject is managed by the library; user-set replies are rejected.
	if paf == nil && m.Reply != "" {
		return nil, ErrAsyncPublishReplySubjectSet
	}

	var id string
	var reply string

	// register new paf if not retrying
	if paf == nil {
		var err error
		reply, err = js.newAsyncReply()
		if err != nil {
			return nil, fmt.Errorf("nats: error creating async reply handler: %s", err)
		}
		// The pending-ack id is the random token after the reply prefix.
		id = reply[js.opts.replyPrefixLen:]
		paf = &pubAckFuture{msg: m, jsClient: js.publisher, maxRetries: o.retryAttempts, retryWait: o.retryWait, reply: reply}
		numPending, maxPending := js.registerPAF(id, paf)

		if maxPending > 0 && numPending > maxPending {
			// Too many unacknowledged publishes in flight: wait for room
			// (the stall channel closes) or give up after stallWait.
			select {
			case <-js.asyncStall():
			case <-time.After(stallWait):
				js.clearPAF(id)
				return nil, ErrTooManyStalledMsgs
			}
		}
		if js.publisher.ackTimeout > 0 {
			// Arm a per-publish timer that fails the future with
			// ErrAsyncPublishTimeout if no ack arrives in time.
			paf.timeout = time.AfterFunc(js.publisher.ackTimeout, func() {
				js.publisher.Lock()
				defer js.publisher.Unlock()

				if _, ok := js.publisher.acks[id]; !ok {
					// paf has already been resolved
					// while waiting for the lock
					return
				}

				// ack timed out, remove from pending acks
				delete(js.publisher.acks, id)

				// check on anyone stalled and waiting.
				if js.publisher.stallCh != nil && len(js.publisher.acks) < js.publisher.maxpa {
					close(js.publisher.stallCh)
					js.publisher.stallCh = nil
				}

				// send error to user
				paf.err = ErrAsyncPublishTimeout
				if paf.errCh != nil {
					paf.errCh <- paf.err
				}

				// call error callback if set
				// NOTE(review): unlike handleAsyncReply, the callback runs
				// while the publisher lock is held — confirm this is intended.
				if js.publisher.asyncPublisherOpts.aecb != nil {
					js.publisher.asyncPublisherOpts.aecb(js, paf.msg, ErrAsyncPublishTimeout)
				}

				// check on anyone one waiting on done status.
				if js.publisher.doneCh != nil && len(js.publisher.acks) == 0 {
					close(js.publisher.doneCh)
					js.publisher.doneCh = nil
				}
			})
		}
	} else {
		// when retrying, get the ID from existing reply subject
		reply = paf.reply
		if paf.timeout != nil {
			paf.timeout.Reset(js.publisher.ackTimeout)
		}
		id = reply[js.opts.replyPrefixLen:]
	}

	// Copy the relevant fields so the caller's message is not mutated with
	// the library-managed reply subject.
	pubMsg := &nats.Msg{
		Subject: m.Subject,
		Reply:   reply,
		Data:    m.Data,
		Header:  m.Header,
	}
	if err := js.conn.PublishMsg(pubMsg); err != nil {
		js.clearPAF(id)
		return nil, err
	}

	return paf, nil
}
// For quick token lookup etc.
const (
	// aReplyTokensize is the number of random base-62 characters appended
	// to the reply prefix to form a unique async reply subject.
	aReplyTokensize = 6
)
// newAsyncReply returns a new unique reply subject for an async publish,
// lazily creating the shared wildcard reply subscription and the
// reconnect watcher goroutine on first use. The generated subject is
// the reply prefix followed by aReplyTokensize random base-62 characters.
func (js *jetStream) newAsyncReply() (string, error) {
	js.publisher.Lock()
	if js.publisher.replySub == nil {
		// Create our wildcard reply subject.
		sha := sha256.New()
		sha.Write([]byte(nuid.Next()))
		b := sha.Sum(nil)
		for i := 0; i < aReplyTokensize; i++ {
			b[i] = rdigits[int(b[i]%base)]
		}
		js.publisher.replyPrefix = fmt.Sprintf("%s%s.", js.opts.replyPrefix, b[:aReplyTokensize])
		sub, err := js.conn.Subscribe(fmt.Sprintf("%s*", js.publisher.replyPrefix), js.handleAsyncReply)
		if err != nil {
			js.publisher.Unlock()
			return "", err
		}
		js.publisher.replySub = sub
		js.publisher.rr = rand.New(rand.NewSource(time.Now().UnixNano()))
	}
	if js.publisher.connStatusCh == nil {
		// Watch for reconnects/closes so pending acks can be failed.
		js.publisher.connStatusCh = js.conn.StatusChanged(nats.RECONNECTING, nats.CLOSED)
		go js.resetPendingAcksOnReconnect()
	}
	var sb strings.Builder
	sb.WriteString(js.publisher.replyPrefix)
	// Generate a random token that is not already used as a pending-ack id.
	for {
		rn := js.publisher.rr.Int63()
		var b [aReplyTokensize]byte
		for i, l := 0, rn; i < len(b); i++ {
			b[i] = rdigits[l%base]
			l /= base
		}
		if _, ok := js.publisher.acks[string(b[:])]; ok {
			continue
		}
		sb.Write(b[:])
		break
	}
	js.publisher.Unlock()
	return sb.String(), nil
}
// Handle an async reply from PublishAsync.
// This is the subscription callback for the wildcard reply subject; it
// resolves the matching pubAckFuture, wakes stalled publishers and
// PublishAsyncComplete waiters, and schedules retries on no-responders.
func (js *jetStream) handleAsyncReply(m *nats.Msg) {
	if len(m.Subject) <= js.opts.replyPrefixLen {
		return
	}
	// The pending-ack id is the token after the reply prefix.
	id := m.Subject[js.opts.replyPrefixLen:]

	js.publisher.Lock()
	paf := js.getPAF(id)
	if paf == nil {
		js.publisher.Unlock()
		return
	}

	closeStc := func() {
		// Check on anyone stalled and waiting.
		if js.publisher.stallCh != nil && len(js.publisher.acks) < js.publisher.maxpa {
			close(js.publisher.stallCh)
			js.publisher.stallCh = nil
		}
	}

	closeDchFn := func() func() {
		var dch chan struct{}
		// Check on anyone one waiting on done status.
		if js.publisher.doneCh != nil && len(js.publisher.acks) == 0 {
			dch = js.publisher.doneCh
			js.publisher.doneCh = nil
		}
		// Return function to close done channel which
		// should be deferred so that error is processed and
		// can be checked.
		return func() {
			if dch != nil {
				close(dch)
			}
		}
	}

	// doErr resolves the future with err, invokes the error callback
	// outside the lock and — note — UNLOCKS js.publisher.
	doErr := func(err error) {
		paf.err = err
		if paf.errCh != nil {
			paf.errCh <- paf.err
		}
		cb := js.publisher.asyncPublisherOpts.aecb
		js.publisher.Unlock()
		if cb != nil {
			cb(js, paf.msg, err)
		}
	}

	// A reply arrived; stop the ack-timeout timer if one was armed.
	if paf.timeout != nil {
		paf.timeout.Stop()
	}

	// Process no responders etc.
	if len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders {
		if paf.retries < paf.maxRetries {
			// Schedule a retry; the same paf is re-used via pafRetry.
			paf.retries++
			time.AfterFunc(paf.retryWait, func() {
				js.publisher.Lock()
				paf := js.getPAF(id)
				js.publisher.Unlock()
				if paf == nil {
					return
				}
				_, err := js.PublishMsgAsync(paf.msg, func(po *pubOpts) error {
					po.pafRetry = paf
					return nil
				})
				if err != nil {
					js.publisher.Lock()
					// doErr unlocks the publisher again.
					doErr(err)
				}
			})
			js.publisher.Unlock()
			return
		}
		// Out of retries: fail the publish with ErrNoStreamResponse.
		delete(js.publisher.acks, id)
		closeStc()
		defer closeDchFn()()
		doErr(ErrNoStreamResponse)
		return
	}

	// Remove
	delete(js.publisher.acks, id)
	closeStc()
	defer closeDchFn()()

	var pa pubAckResponse
	if err := json.Unmarshal(m.Data, &pa); err != nil {
		doErr(ErrInvalidJSAck)
		return
	}
	if pa.Error != nil {
		doErr(pa.Error)
		return
	}
	if pa.PubAck == nil || pa.PubAck.Stream == "" {
		doErr(ErrInvalidJSAck)
		return
	}

	// So here we have received a proper puback.
	paf.ack = pa.PubAck
	if paf.doneCh != nil {
		paf.doneCh <- paf.ack
	}
	js.publisher.Unlock()
}
// resetPendingAcksOnReconnect fails all pending async publishes with
// nats.ErrDisconnected whenever the connection status changes to
// reconnecting, and exits when the connection is closed (or the status
// channel is closed).
func (js *jetStream) resetPendingAcksOnReconnect() {
	js.publisher.Lock()
	connStatusCh := js.publisher.connStatusCh
	js.publisher.Unlock()
	for {
		newStatus, ok := <-connStatusCh
		if !ok || newStatus == nats.CLOSED {
			return
		}
		js.publisher.Lock()
		errCb := js.publisher.asyncPublisherOpts.aecb
		for id, paf := range js.publisher.acks {
			paf.err = nats.ErrDisconnected
			if paf.errCh != nil {
				paf.errCh <- paf.err
			}
			if errCb != nil {
				// NOTE(review): deferred callbacks accumulate and only run
				// when this goroutine exits (connection closed), outside the
				// lock — confirm the delay is intended for RECONNECTING.
				defer errCb(js, paf.msg, nats.ErrDisconnected)
			}
			delete(js.publisher.acks, id)
		}
		// Wake anyone blocked in PublishAsyncComplete.
		if js.publisher.doneCh != nil {
			close(js.publisher.doneCh)
			js.publisher.doneCh = nil
		}
		js.publisher.Unlock()
	}
}
// registerPAF will register for a PubAckFuture. It returns the number of
// acks pending after registration and the configured maximum number of
// pending acks (0 meaning unlimited).
func (js *jetStream) registerPAF(id string, paf *pubAckFuture) (int, int) {
	js.publisher.Lock()
	defer js.publisher.Unlock()
	if js.publisher.acks == nil {
		js.publisher.acks = make(map[string]*pubAckFuture)
	}
	js.publisher.acks[id] = paf
	return len(js.publisher.acks), js.publisher.asyncPublisherOpts.maxpa
}
// getPAF returns the PubAckFuture registered under id, or nil if there
// is none. Lock should be held.
func (js *jetStream) getPAF(id string) *pubAckFuture {
	// Indexing a nil map yields the zero value, so no nil check is needed.
	return js.publisher.acks[id]
}
// clearPAF will remove a PubAckFuture that was registered.
func (js *jetStream) clearPAF(id string) {
	js.publisher.Lock()
	defer js.publisher.Unlock()
	delete(js.publisher.acks, id)
}
// asyncStall returns the channel that is closed when room frees up for
// more pending acks, creating it lazily.
func (js *jetStream) asyncStall() <-chan struct{} {
	js.publisher.Lock()
	defer js.publisher.Unlock()
	if js.publisher.stallCh == nil {
		js.publisher.stallCh = make(chan struct{})
	}
	return js.publisher.stallCh
}
// Ok returns a receive only channel that can be used to get a PubAck.
// The channel is created lazily and pre-filled if the ack has already
// arrived, so a receive completes immediately in that case.
func (paf *pubAckFuture) Ok() <-chan *PubAck {
	paf.jsClient.Lock()
	defer paf.jsClient.Unlock()

	if paf.doneCh != nil {
		return paf.doneCh
	}
	paf.doneCh = make(chan *PubAck, 1)
	if paf.ack != nil {
		paf.doneCh <- paf.ack
	}
	return paf.doneCh
}
// Err returns a receive only channel that can be used to get the error
// from an async publish. The channel is created lazily and pre-filled if
// the publish already failed.
func (paf *pubAckFuture) Err() <-chan error {
	paf.jsClient.Lock()
	defer paf.jsClient.Unlock()

	if paf.errCh != nil {
		return paf.errCh
	}
	paf.errCh = make(chan error, 1)
	if paf.err != nil {
		paf.errCh <- paf.err
	}
	return paf.errCh
}
// Msg returns the message that was sent to the server.
func (paf *pubAckFuture) Msg() *nats.Msg {
	paf.jsClient.RLock()
	msg := paf.msg
	paf.jsClient.RUnlock()
	return msg
}
// PublishAsyncPending returns the number of async publishes outstanding
// for this context.
func (js *jetStream) PublishAsyncPending() int {
	js.publisher.RLock()
	pending := len(js.publisher.acks)
	js.publisher.RUnlock()
	return pending
}
// PublishAsyncComplete returns a channel that will be closed when all
// outstanding asynchronously published messages are acknowledged by the
// server. If nothing is pending, the returned channel is already closed.
func (js *jetStream) PublishAsyncComplete() <-chan struct{} {
	js.publisher.Lock()
	defer js.publisher.Unlock()

	dch := js.publisher.doneCh
	if dch == nil {
		dch = make(chan struct{})
		js.publisher.doneCh = dch
	}
	if len(js.publisher.acks) == 0 {
		// Nothing in flight: close immediately and reset for the next caller.
		close(dch)
		js.publisher.doneCh = nil
	}
	return dch
}

1127
vendor/github.com/nats-io/nats.go/jetstream/pull.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

766
vendor/github.com/nats-io/nats.go/jetstream/stream.go generated vendored Normal file
View File

@@ -0,0 +1,766 @@
// Copyright 2022-2025 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"context"
"encoding/json"
"errors"
"fmt"
"strconv"
"time"
"github.com/nats-io/nats.go"
"github.com/nats-io/nuid"
)
type (
	// Stream contains CRUD methods on a consumer via [ConsumerManager], as well
	// as operations on an existing stream. It allows fetching and removing
	// messages from a stream, as well as purging a stream.
	Stream interface {
		ConsumerManager

		// Info returns StreamInfo from the server.
		Info(ctx context.Context, opts ...StreamInfoOpt) (*StreamInfo, error)

		// CachedInfo returns ConsumerInfo currently cached on this stream.
		// This method does not perform any network requests. The cached
		// StreamInfo is updated on every call to Info and Update.
		CachedInfo() *StreamInfo

		// Purge removes messages from a stream. It is a destructive operation.
		// Use with caution. See StreamPurgeOpt for available options.
		Purge(ctx context.Context, opts ...StreamPurgeOpt) error

		// GetMsg retrieves a raw stream message stored in JetStream by sequence number.
		GetMsg(ctx context.Context, seq uint64, opts ...GetMsgOpt) (*RawStreamMsg, error)

		// GetLastMsgForSubject retrieves the last raw stream message stored in
		// JetStream on a given subject.
		GetLastMsgForSubject(ctx context.Context, subject string) (*RawStreamMsg, error)

		// DeleteMsg deletes a message from a stream.
		// On the server, the message is marked as erased, but not overwritten.
		DeleteMsg(ctx context.Context, seq uint64) error

		// SecureDeleteMsg deletes a message from a stream. The deleted message
		// is overwritten with random data. As a result, this operation is slower
		// than DeleteMsg.
		SecureDeleteMsg(ctx context.Context, seq uint64) error
	}

	// ConsumerManager provides CRUD API for managing consumers. It is
	// available as a part of [Stream] interface. CreateConsumer,
	// UpdateConsumer, CreateOrUpdateConsumer and Consumer methods return a
	// [Consumer] interface, allowing to operate on a consumer (e.g. consume
	// messages).
	ConsumerManager interface {
		// CreateOrUpdateConsumer creates a consumer on a given stream with
		// given config. If consumer already exists, it will be updated (if
		// possible). Consumer interface is returned, allowing to operate on a
		// consumer (e.g. fetch messages).
		CreateOrUpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error)

		// CreateConsumer creates a consumer on a given stream with given
		// config. If consumer already exists and the provided configuration
		// differs from its configuration, ErrConsumerExists is returned. If the
		// provided configuration is the same as the existing consumer, the
		// existing consumer is returned. Consumer interface is returned,
		// allowing to operate on a consumer (e.g. fetch messages).
		CreateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error)

		// UpdateConsumer updates an existing consumer. If consumer does not
		// exist, ErrConsumerDoesNotExist is returned. Consumer interface is
		// returned, allowing to operate on a consumer (e.g. fetch messages).
		UpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error)

		// OrderedConsumer returns an OrderedConsumer instance. OrderedConsumer
		// are managed by the library and provide a simple way to consume
		// messages from a stream. Ordered consumers are ephemeral in-memory
		// pull consumers and are resilient to deletes and restarts.
		OrderedConsumer(ctx context.Context, cfg OrderedConsumerConfig) (Consumer, error)

		// Consumer returns an interface to an existing consumer, allowing processing
		// of messages. If consumer does not exist, ErrConsumerNotFound is
		// returned.
		Consumer(ctx context.Context, consumer string) (Consumer, error)

		// DeleteConsumer removes a consumer with given name from a stream.
		// If consumer does not exist, ErrConsumerNotFound is returned.
		DeleteConsumer(ctx context.Context, consumer string) error

		// PauseConsumer pauses a consumer.
		PauseConsumer(ctx context.Context, consumer string, pauseUntil time.Time) (*ConsumerPauseResponse, error)

		// ResumeConsumer resumes a consumer.
		ResumeConsumer(ctx context.Context, consumer string) (*ConsumerPauseResponse, error)

		// ListConsumers returns ConsumerInfoLister enabling iterating over a
		// channel of consumer infos.
		ListConsumers(context.Context) ConsumerInfoLister

		// ConsumerNames returns a ConsumerNameLister enabling iterating over a
		// channel of consumer names.
		ConsumerNames(context.Context) ConsumerNameLister

		// UnpinConsumer unpins the currently pinned client for a consumer for the given group name.
		// If consumer does not exist, ErrConsumerNotFound is returned.
		UnpinConsumer(ctx context.Context, consumer string, group string) error
	}

	// RawStreamMsg is a raw message read directly from a stream.
	RawStreamMsg struct {
		Subject  string
		Sequence uint64
		Header   nats.Header
		Data     []byte
		Time     time.Time
	}

	// stream is the concrete Stream implementation bound to a named stream
	// on a JetStream context.
	stream struct {
		name string      // stream name
		info *StreamInfo // cached info, refreshed by Info
		js   *jetStream
	}

	// StreamInfoOpt is a function setting options for [Stream.Info]
	StreamInfoOpt func(*streamInfoRequest) error

	// streamInfoRequest is the API request body for a stream info request.
	streamInfoRequest struct {
		apiPaged
		DeletedDetails bool   `json:"deleted_details,omitempty"`
		SubjectFilter  string `json:"subjects_filter,omitempty"`
	}

	// consumerInfoResponse is the API response wrapping a ConsumerInfo.
	consumerInfoResponse struct {
		apiResponse
		*ConsumerInfo
	}

	// StreamPurgeOpt is a function setting options for [Stream.Purge]
	StreamPurgeOpt func(*StreamPurgeRequest) error

	// StreamPurgeRequest is an API request body to purge a stream.
	StreamPurgeRequest struct {
		// Purge up to but not including sequence.
		Sequence uint64 `json:"seq,omitempty"`
		// Subject to match against messages for the purge command.
		Subject string `json:"filter,omitempty"`
		// Number of messages to keep.
		Keep uint64 `json:"keep,omitempty"`
	}

	// streamPurgeResponse is the API response to a purge request.
	streamPurgeResponse struct {
		apiResponse
		Success bool   `json:"success,omitempty"`
		Purged  uint64 `json:"purged"`
	}

	// consumerDeleteResponse is the API response to a consumer delete request.
	consumerDeleteResponse struct {
		apiResponse
		Success bool `json:"success,omitempty"`
	}

	// consumerPauseRequest is the API request body to pause/resume a consumer.
	consumerPauseRequest struct {
		PauseUntil *time.Time `json:"pause_until,omitempty"`
	}

	// ConsumerPauseResponse is returned when pausing or resuming a consumer.
	ConsumerPauseResponse struct {
		// Paused is true if the consumer is paused.
		Paused bool `json:"paused"`
		// PauseUntil is the time until the consumer is paused.
		PauseUntil time.Time `json:"pause_until"`
		// PauseRemaining is the time remaining until the consumer is paused.
		PauseRemaining time.Duration `json:"pause_remaining,omitempty"`
	}

	// consumerPauseApiResponse is the API response to a consumer pause request.
	consumerPauseApiResponse struct {
		apiResponse
		ConsumerPauseResponse
	}

	// GetMsgOpt is a function setting options for [Stream.GetMsg]
	GetMsgOpt func(*apiMsgGetRequest) error

	// apiMsgGetRequest is the API request body for fetching a single
	// stored message.
	apiMsgGetRequest struct {
		Seq     uint64 `json:"seq,omitempty"`
		LastFor string `json:"last_by_subj,omitempty"`
		NextFor string `json:"next_by_subj,omitempty"`
	}

	// apiMsgGetResponse is the response for a Stream get request.
	apiMsgGetResponse struct {
		apiResponse
		Message *storedMsg `json:"message,omitempty"`
	}

	// storedMsg is a raw message stored in JetStream.
	storedMsg struct {
		Subject  string    `json:"subject"`
		Sequence uint64    `json:"seq"`
		Header   []byte    `json:"hdrs,omitempty"` // wire-format headers, decoded via nats.DecodeHeadersMsg
		Data     []byte    `json:"data,omitempty"`
		Time     time.Time `json:"time"`
	}

	// msgDeleteRequest is the API request body for deleting a message.
	msgDeleteRequest struct {
		Seq     uint64 `json:"seq"`
		NoErase bool   `json:"no_erase,omitempty"` // true: mark deleted without overwriting data
	}

	// msgDeleteResponse is the API response to a message delete request.
	msgDeleteResponse struct {
		apiResponse
		Success bool `json:"success,omitempty"`
	}

	// ConsumerInfoLister is used to iterate over a channel of consumer infos.
	// Err method can be used to check for errors encountered during iteration.
	// Info channel is always closed and therefore can be used in a range loop.
	ConsumerInfoLister interface {
		Info() <-chan *ConsumerInfo
		Err() error
	}

	// ConsumerNameLister is used to iterate over a channel of consumer names.
	// Err method can be used to check for errors encountered during iteration.
	// Name channel is always closed and therefore can be used in a range loop.
	ConsumerNameLister interface {
		Name() <-chan string
		Err() error
	}

	// consumerLister implements both ConsumerInfoLister and
	// ConsumerNameLister, paging through the consumer list APIs.
	consumerLister struct {
		js        *jetStream
		offset    int       // offset of the next page to request
		pageInfo  *apiPaged // paging info from the last response
		consumers chan *ConsumerInfo
		names     chan string
		err       error // error encountered during iteration, if any
	}

	// consumerListResponse is the paged API response listing consumer infos.
	consumerListResponse struct {
		apiResponse
		apiPaged
		Consumers []*ConsumerInfo `json:"consumers"`
	}

	// consumerNamesResponse is the paged API response listing consumer names.
	consumerNamesResponse struct {
		apiResponse
		apiPaged
		Consumers []string `json:"consumers"`
	}

	// consumerUnpinRequest is the API request body to unpin a pinned client.
	consumerUnpinRequest struct {
		Group string `json:"group"`
	}
)
// CreateOrUpdateConsumer creates a consumer on a given stream with
// given config. If consumer already exists, it will be updated (if
// possible). Consumer interface is returned, allowing to operate on a
// consumer (e.g. fetch messages).
//
// Thin wrapper delegating to upsertConsumer with the create-or-update action.
func (s *stream) CreateOrUpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) {
	return upsertConsumer(ctx, s.js, s.name, cfg, consumerActionCreateOrUpdate)
}
// CreateConsumer creates a consumer on a given stream with given
// config. If consumer already exists and the provided configuration
// differs from its configuration, ErrConsumerExists is returned. If the
// provided configuration is the same as the existing consumer, the
// existing consumer is returned. Consumer interface is returned,
// allowing to operate on a consumer (e.g. fetch messages).
//
// Thin wrapper delegating to upsertConsumer with the create-only action.
func (s *stream) CreateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) {
	return upsertConsumer(ctx, s.js, s.name, cfg, consumerActionCreate)
}
// UpdateConsumer updates an existing consumer. If consumer does not
// exist, ErrConsumerDoesNotExist is returned. Consumer interface is
// returned, allowing to operate on a consumer (e.g. fetch messages).
//
// Thin wrapper delegating to upsertConsumer with the update-only action.
func (s *stream) UpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) {
	return upsertConsumer(ctx, s.js, s.name, cfg, consumerActionUpdate)
}
// OrderedConsumer returns an OrderedConsumer instance. OrderedConsumer
// are managed by the library and provide a simple way to consume
// messages from a stream. Ordered consumers are ephemeral in-memory
// pull consumers and are resilient to deletes and restarts.
func (s *stream) OrderedConsumer(ctx context.Context, cfg OrderedConsumerConfig) (Consumer, error) {
	oc := &orderedConsumer{
		js:         s.js,
		cfg:        &cfg,
		stream:     s.name,
		namePrefix: nuid.Next(),
		doReset:    make(chan struct{}, 1),
	}
	// Create the backing ephemeral pull consumer for the ordered wrapper.
	cons, err := s.CreateOrUpdateConsumer(ctx, *oc.getConsumerConfig())
	if err != nil {
		return nil, err
	}
	oc.currentConsumer = cons.(*pullConsumer)
	return oc, nil
}
// Consumer returns an interface to an existing consumer, allowing processing
// of messages. If consumer does not exist, ErrConsumerNotFound is
// returned.
//
// Thin wrapper delegating to getConsumer with this stream's name.
func (s *stream) Consumer(ctx context.Context, name string) (Consumer, error) {
	return getConsumer(ctx, s.js, s.name, name)
}
// DeleteConsumer removes a consumer with given name from a stream.
// If consumer does not exist, ErrConsumerNotFound is returned.
//
// Thin wrapper delegating to deleteConsumer with this stream's name.
func (s *stream) DeleteConsumer(ctx context.Context, name string) error {
	return deleteConsumer(ctx, s.js, s.name, name)
}
// PauseConsumer pauses a consumer until the given time.
func (s *stream) PauseConsumer(ctx context.Context, name string, pauseUntil time.Time) (*ConsumerPauseResponse, error) {
	return pauseConsumer(ctx, s.js, s.name, name, &pauseUntil)
}
// ResumeConsumer resumes a previously paused consumer.
func (s *stream) ResumeConsumer(ctx context.Context, name string) (*ConsumerPauseResponse, error) {
	return resumeConsumer(ctx, s.js, s.name, name)
}
// Info returns StreamInfo from the server.
//
// When a subject-filter option is set, the server pages the per-subject
// message counts; this method iterates the pages and merges them into the
// returned info. The cached copy (see CachedInfo) is stored without the
// subjects map.
func (s *stream) Info(ctx context.Context, opts ...StreamInfoOpt) (*StreamInfo, error) {
	ctx, cancel := s.js.wrapContextWithoutDeadline(ctx)
	if cancel != nil {
		defer cancel()
	}
	// A request body is only sent if at least one option was provided.
	var infoReq *streamInfoRequest
	for _, opt := range opts {
		if infoReq == nil {
			infoReq = &streamInfoRequest{}
		}
		if err := opt(infoReq); err != nil {
			return nil, err
		}
	}
	var req []byte
	var err error
	var subjectMap map[string]uint64
	var offset int

	infoSubject := fmt.Sprintf(apiStreamInfoT, s.name)
	var info *StreamInfo
	for {
		if infoReq != nil {
			if infoReq.SubjectFilter != "" {
				if subjectMap == nil {
					subjectMap = make(map[string]uint64)
				}
				infoReq.Offset = offset
			}
			req, err = json.Marshal(infoReq)
			if err != nil {
				return nil, err
			}
		}
		var resp streamInfoResponse
		if _, err = s.js.apiRequestJSON(ctx, infoSubject, &resp, req); err != nil {
			return nil, err
		}
		if resp.Error != nil {
			if resp.Error.ErrorCode == JSErrCodeStreamNotFound {
				return nil, ErrStreamNotFound
			}
			return nil, resp.Error
		}
		info = resp.StreamInfo
		var total int
		if resp.Total != 0 {
			total = resp.Total
		}
		// NOTE(review): assumes the server only returns State.Subjects when
		// a subject filter was requested; otherwise this would write to a
		// nil map — confirm against the JetStream API contract.
		if len(resp.StreamInfo.State.Subjects) > 0 {
			for subj, msgs := range resp.StreamInfo.State.Subjects {
				subjectMap[subj] = msgs
			}
			offset = len(subjectMap)
		}
		// Stop once the full (possibly paged) subject map is assembled.
		if total == 0 || total <= offset {
			info.State.Subjects = nil
			// we don't want to store subjects in cache
			cached := *info
			s.info = &cached
			info.State.Subjects = subjectMap
			break
		}
	}

	return info, nil
}
// CachedInfo returns ConsumerInfo currently cached on this stream.
// This method does not perform any network requests. The cached
// StreamInfo is updated on every call to Info and Update.
//
// NOTE(review): the cached copy is stored without the per-subject state
// (Info clears State.Subjects before caching).
func (s *stream) CachedInfo() *StreamInfo {
	return s.info
}
// Purge removes messages from a stream. It is a destructive operation.
// Use with caution. See StreamPurgeOpt for available options.
func (s *stream) Purge(ctx context.Context, opts ...StreamPurgeOpt) error {
	ctx, cancel := s.js.wrapContextWithoutDeadline(ctx)
	if cancel != nil {
		defer cancel()
	}
	// Collect the purge options into a single request body.
	var purgeReq StreamPurgeRequest
	for _, applyOpt := range opts {
		if err := applyOpt(&purgeReq); err != nil {
			return err
		}
	}
	body, err := json.Marshal(purgeReq)
	if err != nil {
		return err
	}
	var resp streamPurgeResponse
	if _, err = s.js.apiRequestJSON(ctx, fmt.Sprintf(apiStreamPurgeT, s.name), &resp, body); err != nil {
		return err
	}
	if resp.Error != nil {
		return resp.Error
	}
	return nil
}
// GetMsg retrieves a raw stream message stored in JetStream by sequence number.
func (s *stream) GetMsg(ctx context.Context, seq uint64, opts ...GetMsgOpt) (*RawStreamMsg, error) {
	mreq := &apiMsgGetRequest{Seq: seq}
	for _, applyOpt := range opts {
		if err := applyOpt(mreq); err != nil {
			return nil, err
		}
	}
	return s.getMsg(ctx, mreq)
}
// GetLastMsgForSubject retrieves the last raw stream message stored in
// JetStream on a given subject.
func (s *stream) GetLastMsgForSubject(ctx context.Context, subject string) (*RawStreamMsg, error) {
	return s.getMsg(ctx, &apiMsgGetRequest{LastFor: subject})
}
// getMsg fetches a single message from the stream, using the direct get
// API when the stream config allows it (AllowDirect) and the standard
// message get API otherwise.
func (s *stream) getMsg(ctx context.Context, mreq *apiMsgGetRequest) (*RawStreamMsg, error) {
	ctx, cancel := s.js.wrapContextWithoutDeadline(ctx)
	if cancel != nil {
		defer cancel()
	}
	req, err := json.Marshal(mreq)
	if err != nil {
		return nil, err
	}
	var gmSubj string

	// handle direct gets
	if s.info.Config.AllowDirect {
		if mreq.LastFor != "" {
			// Direct get of the last message for a subject encodes the
			// subject in the API subject itself, so no body is sent.
			gmSubj = fmt.Sprintf(apiDirectMsgGetLastBySubjectT, s.name, mreq.LastFor)
			r, err := s.js.apiRequest(ctx, gmSubj, nil)
			if err != nil {
				return nil, err
			}
			return convertDirectGetMsgResponseToMsg(r.msg)
		}
		gmSubj = fmt.Sprintf(apiDirectMsgGetT, s.name)
		r, err := s.js.apiRequest(ctx, gmSubj, req)
		if err != nil {
			return nil, err
		}
		return convertDirectGetMsgResponseToMsg(r.msg)
	}

	// Standard (non-direct) get: the message comes back JSON-encoded.
	var resp apiMsgGetResponse
	dsSubj := fmt.Sprintf(apiMsgGetT, s.name)
	_, err = s.js.apiRequestJSON(ctx, dsSubj, &resp, req)
	if err != nil {
		return nil, err
	}
	if resp.Error != nil {
		if resp.Error.ErrorCode == JSErrCodeMessageNotFound {
			return nil, ErrMsgNotFound
		}
		return nil, resp.Error
	}

	msg := resp.Message

	// Decode the wire-format headers, if present.
	var hdr nats.Header
	if len(msg.Header) > 0 {
		hdr, err = nats.DecodeHeadersMsg(msg.Header)
		if err != nil {
			return nil, err
		}
	}

	return &RawStreamMsg{
		Subject:  msg.Subject,
		Sequence: msg.Sequence,
		Header:   hdr,
		Data:     msg.Data,
		Time:     msg.Time,
	}, nil
}
// convertDirectGetMsgResponseToMsg reconstructs a RawStreamMsg from a
// direct-get reply, where message metadata is carried in headers rather
// than in a JSON body. It returns ErrMsgNotFound for "no messages"
// status replies and descriptive errors for other failures.
func convertDirectGetMsgResponseToMsg(r *nats.Msg) (*RawStreamMsg, error) {
	// Check for 404/408. We would get a no-payload message and a "Status" header
	if len(r.Data) == 0 {
		val := r.Header.Get(statusHdr)
		if val != "" {
			switch val {
			case noMessages:
				return nil, ErrMsgNotFound
			default:
				desc := r.Header.Get("Description")
				if desc == "" {
					desc = "unable to get message"
				}
				return nil, fmt.Errorf("nats: %s", desc)
			}
		}
	}
	// Check for headers that give us the required information to
	// reconstruct the message.
	if len(r.Header) == 0 {
		return nil, errors.New("nats: response should have headers")
	}
	stream := r.Header.Get(StreamHeader)
	if stream == "" {
		return nil, errors.New("nats: missing stream header")
	}

	seqStr := r.Header.Get(SequenceHeader)
	if seqStr == "" {
		return nil, errors.New("nats: missing sequence header")
	}
	seq, err := strconv.ParseUint(seqStr, 10, 64)
	if err != nil {
		return nil, fmt.Errorf("nats: invalid sequence header '%s': %v", seqStr, err)
	}

	// NOTE(review): TimeStampHeaer looks misspelled but must match the
	// header-name constant declared elsewhere in this package.
	timeStr := r.Header.Get(TimeStampHeaer)
	if timeStr == "" {
		return nil, errors.New("nats: missing timestamp header")
	}
	tm, err := time.Parse(time.RFC3339Nano, timeStr)
	if err != nil {
		return nil, fmt.Errorf("nats: invalid timestamp header '%s': %v", timeStr, err)
	}

	subj := r.Header.Get(SubjectHeader)
	if subj == "" {
		return nil, errors.New("nats: missing subject header")
	}
	return &RawStreamMsg{
		Subject:  subj,
		Sequence: seq,
		Header:   r.Header,
		Data:     r.Data,
		Time:     tm,
	}, nil
}
// DeleteMsg deletes a message from a stream.
// On the server, the message is marked as erased, but not overwritten
// (NoErase is set so the payload is left in place).
func (s *stream) DeleteMsg(ctx context.Context, seq uint64) error {
	return s.deleteMsg(ctx, &msgDeleteRequest{Seq: seq, NoErase: true})
}
// SecureDeleteMsg deletes a message from a stream. The deleted message
// is overwritten with random data. As a result, this operation is slower
// than DeleteMsg. (NoErase is left unset so the server erases the data.)
func (s *stream) SecureDeleteMsg(ctx context.Context, seq uint64) error {
	return s.deleteMsg(ctx, &msgDeleteRequest{Seq: seq})
}
// deleteMsg deletes a single message from the stream using the given
// request. req.NoErase selects whether the server merely marks the
// message deleted (DeleteMsg) or overwrites it (SecureDeleteMsg).
func (s *stream) deleteMsg(ctx context.Context, req *msgDeleteRequest) error {
	ctx, cancel := s.js.wrapContextWithoutDeadline(ctx)
	if cancel != nil {
		defer cancel()
	}
	r, err := json.Marshal(req)
	if err != nil {
		return err
	}
	subj := fmt.Sprintf(apiMsgDeleteT, s.name)
	var resp msgDeleteResponse
	if _, err = s.js.apiRequestJSON(ctx, subj, &resp, r); err != nil {
		return err
	}
	if !resp.Success {
		// Guard against a missing error object: the previous code
		// dereferenced resp.Error unconditionally, which panics if the
		// server reports success=false without an api error payload.
		if resp.Error != nil {
			return fmt.Errorf("%w: %s", ErrMsgDeleteUnsuccessful, resp.Error.Error())
		}
		return ErrMsgDeleteUnsuccessful
	}
	return nil
}
// ListConsumers returns ConsumerInfoLister enabling iterating over a
// channel of consumer infos.
//
// The listing goroutine terminates when all pages have been delivered,
// when the context is cancelled, or when a request fails; in the latter
// two cases the error is recorded and exposed via the lister's Err().
func (s *stream) ListConsumers(ctx context.Context) ConsumerInfoLister {
	l := &consumerLister{
		js:        s.js,
		consumers: make(chan *ConsumerInfo),
	}
	go func() {
		defer close(l.consumers)
		ctx, cancel := s.js.wrapContextWithoutDeadline(ctx)
		if cancel != nil {
			defer cancel()
		}
		for {
			page, err := l.consumerInfos(ctx, s.name)
			if err != nil && !errors.Is(err, ErrEndOfData) {
				l.err = err
				return
			}
			for _, info := range page {
				if info == nil {
					continue
				}
				// Select on ctx.Done while sending so this goroutine cannot
				// block forever on an abandoned channel after cancellation.
				// This matches the send pattern used by ConsumerNames.
				select {
				case l.consumers <- info:
				case <-ctx.Done():
					l.err = ctx.Err()
					return
				}
			}
			if errors.Is(err, ErrEndOfData) {
				return
			}
		}
	}()
	return l
}
// Info returns the channel on which consumer infos produced by
// ListConsumers are delivered. The channel is closed when iteration ends.
func (s *consumerLister) Info() <-chan *ConsumerInfo {
	return s.consumers
}
// Err returns the terminal error of the listing, if any. It should only
// be inspected after the info/name channel has been closed.
func (s *consumerLister) Err() error {
	return s.err
}
// ConsumerNames returns a ConsumerNameLister enabling iterating over a
// channel of consumer names. The background goroutine stops once all
// pages are delivered, the context is cancelled, or a request fails;
// failures are exposed via the lister's Err().
func (s *stream) ConsumerNames(ctx context.Context) ConsumerNameLister {
	lister := &consumerLister{
		js:    s.js,
		names: make(chan string),
	}
	go func() {
		defer close(lister.names)
		ctx, cancel := s.js.wrapContextWithoutDeadline(ctx)
		if cancel != nil {
			defer cancel()
		}
		for {
			// Fetch the next page; ErrEndOfData still delivers any names
			// returned alongside it before terminating below.
			page, err := lister.consumerNames(ctx, s.name)
			if err != nil && !errors.Is(err, ErrEndOfData) {
				lister.err = err
				return
			}
			for _, name := range page {
				select {
				case lister.names <- name:
				case <-ctx.Done():
					lister.err = ctx.Err()
					return
				}
			}
			if errors.Is(err, ErrEndOfData) {
				return
			}
		}
	}()
	return lister
}
// Name returns the channel on which consumer names produced by
// ConsumerNames are delivered. The channel is closed when iteration ends.
func (s *consumerLister) Name() <-chan string {
	return s.names
}
// consumerInfos fetches the next page of ConsumerInfo entries for the
// given stream, advancing the lister's offset. Returns ErrEndOfData once
// the recorded total has been consumed.
func (s *consumerLister) consumerInfos(ctx context.Context, stream string) ([]*ConsumerInfo, error) {
	if s.pageInfo != nil && s.offset >= s.pageInfo.Total {
		return nil, ErrEndOfData
	}
	body, err := json.Marshal(apiPagedRequest{Offset: s.offset})
	if err != nil {
		return nil, err
	}
	subject := fmt.Sprintf(apiConsumerListT, stream)
	var resp consumerListResponse
	if _, err := s.js.apiRequestJSON(ctx, subject, &resp, body); err != nil {
		return nil, err
	}
	if resp.Error != nil {
		return nil, resp.Error
	}
	// Remember paging state so the next call knows where to resume.
	s.pageInfo = &resp.apiPaged
	s.offset += len(resp.Consumers)
	return resp.Consumers, nil
}
// consumerNames fetches the next page of consumer names for the given
// stream, advancing the lister's offset. Returns ErrEndOfData once the
// recorded total has been consumed.
func (s *consumerLister) consumerNames(ctx context.Context, stream string) ([]string, error) {
	if s.pageInfo != nil && s.offset >= s.pageInfo.Total {
		return nil, ErrEndOfData
	}
	body, err := json.Marshal(apiPagedRequest{Offset: s.offset})
	if err != nil {
		return nil, err
	}
	subject := fmt.Sprintf(apiConsumerNamesT, stream)
	var resp consumerNamesResponse
	if _, err := s.js.apiRequestJSON(ctx, subject, &resp, body); err != nil {
		return nil, err
	}
	if resp.Error != nil {
		return nil, resp.Error
	}
	// Remember paging state so the next call knows where to resume.
	s.pageInfo = &resp.apiPaged
	s.offset += len(resp.Consumers)
	return resp.Consumers, nil
}
// UnpinConsumer unpins the currently pinned client for a consumer for the
// given priority group name.
// If the consumer does not exist, ErrConsumerNotFound is returned.
func (s *stream) UnpinConsumer(ctx context.Context, consumer string, group string) error {
	return unpinConsumer(ctx, s.js, s.name, consumer, group)
}

View File

@@ -0,0 +1,611 @@
// Copyright 2022-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"encoding/json"
"errors"
"fmt"
"strings"
"time"
)
type (
	// StreamInfo shows config and current state for this stream.
	StreamInfo struct {
		// Config contains the configuration settings of the stream, set when
		// creating or updating the stream.
		Config StreamConfig `json:"config"`
		// Created is the timestamp when the stream was created.
		Created time.Time `json:"created"`
		// State provides the state of the stream at the time of request,
		// including metrics like the number of messages in the stream, total
		// bytes, etc.
		State StreamState `json:"state"`
		// Cluster contains information about the cluster to which this stream
		// belongs (if applicable).
		Cluster *ClusterInfo `json:"cluster,omitempty"`
		// Mirror contains information about another stream this one is
		// mirroring. Mirroring is used to create replicas of another stream's
		// data. This field is omitted if the stream is not mirroring another
		// stream.
		Mirror *StreamSourceInfo `json:"mirror,omitempty"`
		// Sources is a list of source streams from which this stream collects
		// data.
		Sources []*StreamSourceInfo `json:"sources,omitempty"`
		// TimeStamp indicates when the info was gathered by the server.
		TimeStamp time.Time `json:"ts"`
	}
	// StreamConfig is the configuration of a JetStream stream.
	StreamConfig struct {
		// Name is the name of the stream. It is required and must be unique
		// across the JetStream account.
		//
		// Names cannot contain whitespace, ., *, >, path separators
		// (forward or backwards slash), and non-printable characters.
		Name string `json:"name"`
		// Description is an optional description of the stream.
		Description string `json:"description,omitempty"`
		// Subjects is a list of subjects that the stream is listening on.
		// Wildcards are supported. Subjects cannot be set if the stream is
		// created as a mirror.
		Subjects []string `json:"subjects,omitempty"`
		// Retention defines the message retention policy for the stream.
		// Defaults to LimitsPolicy.
		Retention RetentionPolicy `json:"retention"`
		// MaxConsumers specifies the maximum number of consumers allowed for
		// the stream.
		MaxConsumers int `json:"max_consumers"`
		// MaxMsgs is the maximum number of messages the stream will store.
		// After reaching the limit, stream adheres to the discard policy.
		// If not set, server default is -1 (unlimited).
		MaxMsgs int64 `json:"max_msgs"`
		// MaxBytes is the maximum total size of messages the stream will store.
		// After reaching the limit, stream adheres to the discard policy.
		// If not set, server default is -1 (unlimited).
		MaxBytes int64 `json:"max_bytes"`
		// Discard defines the policy for handling messages when the stream
		// reaches its limits in terms of number of messages or total bytes.
		Discard DiscardPolicy `json:"discard"`
		// DiscardNewPerSubject is a flag to enable discarding new messages per
		// subject when limits are reached. Requires DiscardPolicy to be
		// DiscardNew and the MaxMsgsPerSubject to be set.
		DiscardNewPerSubject bool `json:"discard_new_per_subject,omitempty"`
		// MaxAge is the maximum age of messages that the stream will retain.
		MaxAge time.Duration `json:"max_age"`
		// MaxMsgsPerSubject is the maximum number of messages per subject that
		// the stream will retain.
		MaxMsgsPerSubject int64 `json:"max_msgs_per_subject"`
		// MaxMsgSize is the maximum size of any single message in the stream.
		MaxMsgSize int32 `json:"max_msg_size,omitempty"`
		// Storage specifies the type of storage backend used for the stream
		// (file or memory).
		Storage StorageType `json:"storage"`
		// Replicas is the number of stream replicas in clustered JetStream.
		// Defaults to 1, maximum is 5.
		Replicas int `json:"num_replicas"`
		// NoAck is a flag to disable acknowledging messages received by this
		// stream.
		//
		// If set to true, publish methods from the JetStream client will not
		// work as expected, since they rely on acknowledgements. Core NATS
		// publish methods should be used instead. Note that this will make
		// message delivery less reliable.
		NoAck bool `json:"no_ack,omitempty"`
		// Duplicates is the window within which to track duplicate messages.
		// If not set, server default is 2 minutes.
		Duplicates time.Duration `json:"duplicate_window,omitempty"`
		// Placement is used to declare where the stream should be placed via
		// tags and/or an explicit cluster name.
		Placement *Placement `json:"placement,omitempty"`
		// Mirror defines the configuration for mirroring another stream.
		Mirror *StreamSource `json:"mirror,omitempty"`
		// Sources is a list of other streams this stream sources messages from.
		Sources []*StreamSource `json:"sources,omitempty"`
		// Sealed streams do not allow messages to be published or deleted via limits or API,
		// sealed streams can not be unsealed via configuration update. Can only
		// be set on already created streams via the Update API.
		Sealed bool `json:"sealed,omitempty"`
		// DenyDelete restricts the ability to delete messages from a stream via
		// the API. Defaults to false.
		DenyDelete bool `json:"deny_delete,omitempty"`
		// DenyPurge restricts the ability to purge messages from a stream via
		// the API. Defaults to false.
		DenyPurge bool `json:"deny_purge,omitempty"`
		// AllowRollup allows the use of the Nats-Rollup header to replace all
		// contents of a stream, or subject in a stream, with a single new
		// message.
		AllowRollup bool `json:"allow_rollup_hdrs,omitempty"`
		// Compression specifies the message storage compression algorithm.
		// Defaults to NoCompression.
		Compression StoreCompression `json:"compression"`
		// FirstSeq is the initial sequence number of the first message in the
		// stream.
		FirstSeq uint64 `json:"first_seq,omitempty"`
		// SubjectTransform allows applying a transformation to matching
		// messages' subjects.
		SubjectTransform *SubjectTransformConfig `json:"subject_transform,omitempty"`
		// RePublish allows immediate republishing a message to the configured
		// subject after it's stored.
		RePublish *RePublish `json:"republish,omitempty"`
		// AllowDirect enables direct access to individual messages using direct
		// get API. Defaults to false.
		AllowDirect bool `json:"allow_direct"`
		// MirrorDirect enables direct access to individual messages from the
		// origin stream using direct get API. Defaults to false.
		MirrorDirect bool `json:"mirror_direct"`
		// ConsumerLimits defines limits of certain values that consumers can
		// set, defaults for those who don't set these settings
		ConsumerLimits StreamConsumerLimits `json:"consumer_limits,omitempty"`
		// Metadata is a set of application-defined key-value pairs for
		// associating metadata on the stream. This feature requires nats-server
		// v2.10.0 or later.
		Metadata map[string]string `json:"metadata,omitempty"`
		// Template identifies the template that manages the Stream.
		// Deprecated: This feature is no longer supported.
		Template string `json:"template_owner,omitempty"`
		// AllowMsgTTL allows header initiated per-message TTLs.
		// This feature requires nats-server v2.11.0 or later.
		AllowMsgTTL bool `json:"allow_msg_ttl"`
		// SubjectDeleteMarkerTTL enables and sets a duration for adding server
		// markers for delete, purge and max age limits.
		// This feature requires nats-server v2.11.0 or later.
		SubjectDeleteMarkerTTL time.Duration `json:"subject_delete_marker_ttl,omitempty"`
	}
	// StreamSourceInfo shows information about an upstream stream
	// source/mirror.
	StreamSourceInfo struct {
		// Name is the name of the stream that is being replicated.
		Name string `json:"name"`
		// Lag informs how many messages behind the source/mirror operation is.
		// This will only show correctly if there is active communication
		// with stream/mirror.
		Lag uint64 `json:"lag"`
		// Active informs when last the mirror or sourced stream had activity.
		// Value will be -1 when there has been no activity.
		Active time.Duration `json:"active"`
		// FilterSubject is the subject filter defined for this source/mirror.
		FilterSubject string `json:"filter_subject,omitempty"`
		// SubjectTransforms is a list of subject transforms defined for this
		// source/mirror.
		SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"`
	}
	// StreamState is the state of a JetStream stream at the time of request.
	StreamState struct {
		// Msgs is the number of messages stored in the stream.
		Msgs uint64 `json:"messages"`
		// Bytes is the number of bytes stored in the stream.
		Bytes uint64 `json:"bytes"`
		// FirstSeq is the sequence number of the first message in the stream.
		FirstSeq uint64 `json:"first_seq"`
		// FirstTime is the timestamp of the first message in the stream.
		FirstTime time.Time `json:"first_ts"`
		// LastSeq is the sequence number of the last message in the stream.
		LastSeq uint64 `json:"last_seq"`
		// LastTime is the timestamp of the last message in the stream.
		LastTime time.Time `json:"last_ts"`
		// Consumers is the number of consumers on the stream.
		Consumers int `json:"consumer_count"`
		// Deleted is a list of sequence numbers that have been removed from the
		// stream. This field will only be returned if the stream has been
		// fetched with the DeletedDetails option.
		Deleted []uint64 `json:"deleted"`
		// NumDeleted is the number of messages that have been removed from the
		// stream. Only deleted messages causing a gap in stream sequence numbers
		// are counted. Messages deleted at the beginning or end of the stream
		// are not counted.
		NumDeleted int `json:"num_deleted"`
		// NumSubjects is the number of unique subjects the stream has received
		// messages on.
		NumSubjects uint64 `json:"num_subjects"`
		// Subjects is a map of subjects the stream has received messages on
		// with message count per subject. This field will only be returned if
		// the stream has been fetched with the SubjectFilter option.
		Subjects map[string]uint64 `json:"subjects"`
	}
	// ClusterInfo shows information about the underlying set of servers that
	// make up the stream or consumer.
	ClusterInfo struct {
		// Name is the name of the cluster.
		Name string `json:"name,omitempty"`
		// Leader is the server name of the RAFT leader.
		Leader string `json:"leader,omitempty"`
		// Replicas is the list of members of the RAFT cluster
		Replicas []*PeerInfo `json:"replicas,omitempty"`
	}
	// PeerInfo shows information about the peers in the cluster that are
	// supporting the stream or consumer.
	PeerInfo struct {
		// Name is the server name of the peer.
		Name string `json:"name"`
		// Current indicates if the peer is up to date and synchronized with the
		// leader.
		Current bool `json:"current"`
		// Offline indicates if the peer is considered offline by the group.
		Offline bool `json:"offline,omitempty"`
		// Active is the duration since this peer was last seen.
		Active time.Duration `json:"active"`
		// Lag is the number of uncommitted operations this peer is behind the
		// leader.
		Lag uint64 `json:"lag,omitempty"`
	}
	// SubjectTransformConfig is for applying a subject transform (to matching
	// messages) before doing anything else when a new message is received.
	SubjectTransformConfig struct {
		// Source is the subject pattern to match incoming messages against.
		Source string `json:"src"`
		// Destination is the subject pattern to remap the subject to.
		Destination string `json:"dest"`
	}
	// RePublish is for republishing messages once committed to a stream. The
	// original subject is remapped from the subject pattern to the destination
	// pattern.
	RePublish struct {
		// Source is the subject pattern to match incoming messages against.
		Source string `json:"src,omitempty"`
		// Destination is the subject pattern to republish the subject to.
		Destination string `json:"dest"`
		// HeadersOnly is a flag to indicate that only the headers should be
		// republished.
		HeadersOnly bool `json:"headers_only,omitempty"`
	}
	// Placement is used to guide placement of streams in clustered JetStream.
	Placement struct {
		// Cluster is the name of the cluster to which the stream should be
		// assigned.
		Cluster string `json:"cluster"`
		// Tags are used to match streams to servers in the cluster. A stream
		// will be assigned to a server with a matching tag.
		Tags []string `json:"tags,omitempty"`
	}
	// StreamSource dictates how streams can source from other streams.
	StreamSource struct {
		// Name is the name of the stream to source from.
		Name string `json:"name"`
		// OptStartSeq is the sequence number to start sourcing from.
		OptStartSeq uint64 `json:"opt_start_seq,omitempty"`
		// OptStartTime is the timestamp of messages to start sourcing from.
		OptStartTime *time.Time `json:"opt_start_time,omitempty"`
		// FilterSubject is the subject filter used to only replicate messages
		// with matching subjects.
		FilterSubject string `json:"filter_subject,omitempty"`
		// SubjectTransforms is a list of subject transforms to apply to
		// matching messages.
		//
		// Subject transforms on sources and mirrors are also used as subject
		// filters with optional transformations.
		SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"`
		// External is a configuration referencing a stream source in another
		// account or JetStream domain.
		External *ExternalStream `json:"external,omitempty"`
		// Domain is used to configure a stream source in another JetStream
		// domain. This setting will set the External field with the appropriate
		// APIPrefix.
		Domain string `json:"-"`
	}
	// ExternalStream allows you to qualify access to a stream source in another
	// account.
	ExternalStream struct {
		// APIPrefix is the subject prefix that imports the other account/domain
		// $JS.API.CONSUMER.> subjects.
		APIPrefix string `json:"api"`
		// DeliverPrefix is the delivery subject to use for the push consumer.
		DeliverPrefix string `json:"deliver"`
	}
	// StreamConsumerLimits are the limits for a consumer on a stream. These can
	// be overridden on a per consumer basis.
	StreamConsumerLimits struct {
		// InactiveThreshold is a duration which instructs the server to clean
		// up the consumer if it has been inactive for the specified duration.
		InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"`
		// MaxAckPending is a maximum number of outstanding unacknowledged
		// messages for a consumer.
		MaxAckPending int `json:"max_ack_pending,omitempty"`
	}
	// DiscardPolicy determines how to proceed when limits of messages or bytes
	// are reached.
	DiscardPolicy int
	// RetentionPolicy determines how messages in a stream are retained.
	RetentionPolicy int
	// StorageType determines how messages are stored for retention.
	StorageType int
	// StoreCompression determines how messages are compressed.
	StoreCompression uint8
)
// Retention policies. LimitsPolicy is the zero value and thus the default.
const (
	// LimitsPolicy (default) means that messages are retained until any given
	// limit is reached. This could be one of MaxMsgs, MaxBytes, or MaxAge.
	LimitsPolicy RetentionPolicy = iota
	// InterestPolicy specifies that when all known observables have
	// acknowledged a message it can be removed.
	InterestPolicy
	// WorkQueuePolicy specifies that when the first worker or subscriber
	// acknowledges the message it can be removed.
	WorkQueuePolicy
)
// Discard policies. DiscardOld is the zero value and thus the default.
const (
	// DiscardOld will remove older messages to return to the limits. This is
	// the default.
	DiscardOld DiscardPolicy = iota
	// DiscardNew will fail to store new messages once the limits are reached.
	DiscardNew
)
// JSON wire representations of the retention policies, used by
// RetentionPolicy's MarshalJSON/UnmarshalJSON.
const (
	limitsPolicyString    = "limits"
	interestPolicyString  = "interest"
	workQueuePolicyString = "workqueue"
)
// String returns a human-readable name of the retention policy.
func (rp RetentionPolicy) String() string {
	switch rp {
	case LimitsPolicy:
		return "Limits"
	case InterestPolicy:
		return "Interest"
	case WorkQueuePolicy:
		return "WorkQueue"
	default:
		return "Unknown Retention Policy"
	}
}
// MarshalJSON serializes the retention policy to its JSON string form
// ("limits", "interest" or "workqueue"); unknown values are an error.
func (rp RetentionPolicy) MarshalJSON() ([]byte, error) {
	switch rp {
	case LimitsPolicy:
		return json.Marshal(limitsPolicyString)
	case InterestPolicy:
		return json.Marshal(interestPolicyString)
	case WorkQueuePolicy:
		return json.Marshal(workQueuePolicyString)
	default:
		return nil, fmt.Errorf("nats: can not marshal %v", rp)
	}
}
// UnmarshalJSON sets the retention policy from its quoted JSON string
// representation; unrecognized values yield an error.
func (rp *RetentionPolicy) UnmarshalJSON(data []byte) error {
	policies := map[string]RetentionPolicy{
		jsonString(limitsPolicyString):    LimitsPolicy,
		jsonString(interestPolicyString):  InterestPolicy,
		jsonString(workQueuePolicyString): WorkQueuePolicy,
	}
	policy, ok := policies[string(data)]
	if !ok {
		return fmt.Errorf("nats: can not unmarshal %q", data)
	}
	*rp = policy
	return nil
}
// String returns a human-readable name of the discard policy.
func (dp DiscardPolicy) String() string {
	switch dp {
	case DiscardOld:
		return "DiscardOld"
	case DiscardNew:
		return "DiscardNew"
	default:
		return "Unknown Discard Policy"
	}
}
// MarshalJSON serializes the discard policy to its JSON string form
// ("old" or "new"); unknown values are an error.
func (dp DiscardPolicy) MarshalJSON() ([]byte, error) {
	switch dp {
	case DiscardOld:
		return json.Marshal("old")
	case DiscardNew:
		return json.Marshal("new")
	default:
		return nil, fmt.Errorf("nats: can not marshal %v", dp)
	}
}
// UnmarshalJSON sets the discard policy from its quoted JSON string
// representation. Matching is case-insensitive (note: unlike the other
// enum unmarshalers in this file, which compare case-sensitively).
func (dp *DiscardPolicy) UnmarshalJSON(data []byte) error {
	switch strings.ToLower(string(data)) {
	case jsonString("old"):
		*dp = DiscardOld
	case jsonString("new"):
		*dp = DiscardNew
	default:
		return fmt.Errorf("nats: can not unmarshal %q", data)
	}
	return nil
}
// Storage types. FileStorage is the zero value and thus the default.
const (
	// FileStorage specifies on disk storage. It's the default.
	FileStorage StorageType = iota
	// MemoryStorage specifies in memory only.
	MemoryStorage
)
// JSON wire representations of the storage types, used by StorageType's
// MarshalJSON/UnmarshalJSON.
const (
	memoryStorageString = "memory"
	fileStorageString   = "file"
)
// String returns a human-readable name of the storage type.
func (st StorageType) String() string {
	switch st {
	case MemoryStorage:
		return "Memory"
	case FileStorage:
		return "File"
	default:
		return "Unknown Storage Type"
	}
}
// MarshalJSON serializes the storage type to its JSON string form
// ("memory" or "file"); unknown values are an error.
func (st StorageType) MarshalJSON() ([]byte, error) {
	switch st {
	case MemoryStorage:
		return json.Marshal(memoryStorageString)
	case FileStorage:
		return json.Marshal(fileStorageString)
	default:
		return nil, fmt.Errorf("nats: can not marshal %v", st)
	}
}
// UnmarshalJSON sets the storage type from its quoted JSON string
// representation; unrecognized values yield an error.
func (st *StorageType) UnmarshalJSON(data []byte) error {
	types := map[string]StorageType{
		jsonString(memoryStorageString): MemoryStorage,
		jsonString(fileStorageString):   FileStorage,
	}
	storage, ok := types[string(data)]
	if !ok {
		return fmt.Errorf("nats: can not unmarshal %q", data)
	}
	*st = storage
	return nil
}
// jsonString wraps s in double quotes so it can be compared against the
// raw JSON encoding of a plain string value. It performs no escaping.
func jsonString(s string) string {
	const quote = `"`
	return quote + s + quote
}
// Store compression algorithms. NoCompression is the zero value and thus
// the default.
const (
	// NoCompression disables compression on the stream. This is the default.
	NoCompression StoreCompression = iota
	// S2Compression enables S2 compression on the stream.
	S2Compression
)
// String returns a human-readable name of the compression algorithm.
func (alg StoreCompression) String() string {
	switch alg {
	case NoCompression:
		return "None"
	case S2Compression:
		return "S2"
	default:
		return "Unknown StoreCompression"
	}
}
// MarshalJSON serializes the compression algorithm to its JSON string
// form ("none" or "s2"); unknown values are an error.
func (alg StoreCompression) MarshalJSON() ([]byte, error) {
	switch alg {
	case NoCompression:
		return json.Marshal("none")
	case S2Compression:
		return json.Marshal("s2")
	default:
		return nil, errors.New("unknown compression algorithm")
	}
}
// UnmarshalJSON sets the compression algorithm from its JSON string
// representation ("s2" or "none"); anything else, or malformed JSON,
// yields an error.
func (alg *StoreCompression) UnmarshalJSON(b []byte) error {
	var str string
	if err := json.Unmarshal(b, &str); err != nil {
		return err
	}
	switch str {
	case "s2":
		*alg = S2Compression
	case "none":
		*alg = NoCompression
	default:
		return errors.New("unknown compression algorithm")
	}
	return nil
}

View File

@@ -33,10 +33,11 @@ type Config struct {
ProductVersion string `mapstructure:"product_version"`
AllowPropfindDepthInfinitiy bool `mapstructure:"allow_depth_infinity"`
TransferSharedSecret string `mapstructure:"transfer_shared_secret"`
NameValidation NameValidation `mapstructure:"validation"`
// SharedSecret used to sign the 'oc:download' URLs
URLSigningSharedSecret string `mapstructure:"url_signing_shared_secret"`
MachineAuthAPIKey string `mapstructure:"machine_auth_apikey"`
}

View File

@@ -20,6 +20,7 @@ package ocdav
import (
"context"
"fmt"
"io"
"net/http"
"path"
@@ -41,6 +42,7 @@ import (
"github.com/opencloud-eu/reva/v2/pkg/rhttp"
"github.com/opencloud-eu/reva/v2/pkg/rhttp/global"
"github.com/opencloud-eu/reva/v2/pkg/rhttp/router"
"github.com/opencloud-eu/reva/v2/pkg/signedurl"
"github.com/opencloud-eu/reva/v2/pkg/storage/favorite"
"github.com/opencloud-eu/reva/v2/pkg/storage/favorite/registry"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/templates"
@@ -69,6 +71,7 @@ type svc struct {
LockSystem LockSystem
userIdentifierCache *ttlcache.Cache
nameValidators []Validator
urlSigner signedurl.Signer
}
func (s *svc) Config() *config.Config {
@@ -116,6 +119,15 @@ func NewWith(conf *config.Config, fm favorite.Manager, ls LockSystem, _ *zerolog
// be safe - init the conf again
conf.Init()
var signer signedurl.Signer
if conf.URLSigningSharedSecret != "" {
var err error
signer, err = signedurl.NewJWTSignedURL(signedurl.WithSecret(conf.URLSigningSharedSecret))
if err != nil {
return nil, fmt.Errorf("failed to initialize URL signer: %w", err)
}
}
s := &svc{
c: conf,
webDavHandler: new(WebDavHandler),
@@ -129,6 +141,7 @@ func NewWith(conf *config.Config, fm favorite.Manager, ls LockSystem, _ *zerolog
LockSystem: ls,
userIdentifierCache: ttlcache.NewCache(),
nameValidators: ValidatorsFromConfig(conf),
urlSigner: signer,
}
_ = s.userIdentifierCache.SetTTL(60 * time.Second)

View File

@@ -52,6 +52,7 @@ import (
rstatus "github.com/opencloud-eu/reva/v2/pkg/rgrpc/status"
"github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool"
"github.com/opencloud-eu/reva/v2/pkg/rhttp/router"
"github.com/opencloud-eu/reva/v2/pkg/signedurl"
"github.com/opencloud-eu/reva/v2/pkg/storagespace"
"github.com/opencloud-eu/reva/v2/pkg/utils"
"github.com/rs/zerolog"
@@ -214,14 +215,16 @@ type Handler struct {
PublicURL string
selector pool.Selectable[gateway.GatewayAPIClient]
c *config.Config
urlSigner signedurl.Signer
}
// NewHandler returns a new PropfindHandler instance
func NewHandler(publicURL string, selector pool.Selectable[gateway.GatewayAPIClient], c *config.Config) *Handler {
func NewHandler(publicURL string, selector pool.Selectable[gateway.GatewayAPIClient], signer signedurl.Signer, c *config.Config) *Handler {
return &Handler{
PublicURL: publicURL,
selector: selector,
c: c,
urlSigner: signer,
}
}
@@ -494,7 +497,7 @@ func (p *Handler) propfindResponse(ctx context.Context, w http.ResponseWriter, r
prefer := net.ParsePrefer(r.Header.Get(net.HeaderPrefer))
returnMinimal := prefer[net.HeaderPreferReturn] == "minimal"
propRes, err := MultistatusResponse(ctx, &pf, resourceInfos, p.PublicURL, namespace, linkshares, returnMinimal)
propRes, err := MultistatusResponse(ctx, &pf, resourceInfos, p.PublicURL, namespace, linkshares, returnMinimal, p.urlSigner)
if err != nil {
log.Error().Err(err).Msg("error formatting propfind")
w.WriteHeader(http.StatusInternalServerError)
@@ -985,7 +988,7 @@ func ReadPropfind(r io.Reader) (pf XML, status int, err error) {
}
// MultistatusResponse converts a list of resource infos into a multistatus response string
func MultistatusResponse(ctx context.Context, pf *XML, mds []*provider.ResourceInfo, publicURL, ns string, linkshares map[string]struct{}, returnMinimal bool) ([]byte, error) {
func MultistatusResponse(ctx context.Context, pf *XML, mds []*provider.ResourceInfo, publicURL, ns string, linkshares map[string]struct{}, returnMinimal bool, downloadURLSigner signedurl.Signer) ([]byte, error) {
g, ctx := errgroup.WithContext(ctx)
type work struct {
@@ -1020,7 +1023,7 @@ func MultistatusResponse(ctx context.Context, pf *XML, mds []*provider.ResourceI
for i := 0; i < numWorkers; i++ {
g.Go(func() error {
for work := range workChan {
res, err := mdToPropResponse(ctx, pf, work.info, publicURL, ns, linkshares, returnMinimal)
res, err := mdToPropResponse(ctx, pf, work.info, publicURL, ns, linkshares, returnMinimal, downloadURLSigner)
if err != nil {
return err
}
@@ -1061,7 +1064,7 @@ func MultistatusResponse(ctx context.Context, pf *XML, mds []*provider.ResourceI
// mdToPropResponse converts the CS3 metadata into a webdav PropResponse
// ns is the CS3 namespace that needs to be removed from the CS3 path before
// prefixing it with the baseURI
func mdToPropResponse(ctx context.Context, pf *XML, md *provider.ResourceInfo, publicURL, ns string, linkshares map[string]struct{}, returnMinimal bool) (*ResponseXML, error) {
func mdToPropResponse(ctx context.Context, pf *XML, md *provider.ResourceInfo, publicURL, ns string, linkshares map[string]struct{}, returnMinimal bool, urlSigner signedurl.Signer) (*ResponseXML, error) {
ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "md_to_prop_response")
span.SetAttributes(attribute.KeyValue{Key: "publicURL", Value: attribute.StringValue(publicURL)})
span.SetAttributes(attribute.KeyValue{Key: "ns", Value: attribute.StringValue(ns)})
@@ -1516,23 +1519,14 @@ func mdToPropResponse(ctx context.Context, pf *XML, md *provider.ResourceInfo, p
appendToNotFound(prop.NotFound("oc:owner-display-name"))
}
case "downloadURL": // desktop
if isPublic && md.Type == provider.ResourceType_RESOURCE_TYPE_FILE {
var path string
if !ls.PasswordProtected {
path = p
if md.Type == provider.ResourceType_RESOURCE_TYPE_FILE {
url := downloadURL(ctx, sublog, isPublic, p, ls, publicURL, baseURI, urlSigner)
if url != "" {
appendToOK(prop.Escaped("oc:downloadURL", url))
} else {
expiration := time.Unix(int64(ls.Signature.SignatureExpiration.Seconds), int64(ls.Signature.SignatureExpiration.Nanos))
var sb strings.Builder
sb.WriteString(p)
sb.WriteString("?signature=")
sb.WriteString(ls.Signature.Signature)
sb.WriteString("&expiration=")
sb.WriteString(url.QueryEscape(expiration.Format(time.RFC3339)))
path = sb.String()
appendToNotFound(prop.NotFound("oc:" + pf.Prop[i].Local))
}
appendToOK(prop.Escaped("oc:downloadURL", publicURL+baseURI+path))
} else {
appendToNotFound(prop.NotFound("oc:" + pf.Prop[i].Local))
}
@@ -1738,6 +1732,42 @@ func hasPreview(md *provider.ResourceInfo, appendToOK func(p ...prop.PropertyXML
}
}
func downloadURL(ctx context.Context, log zerolog.Logger, isPublic bool, path string, ls *link.PublicShare, publicURL string, baseURI string, urlSigner signedurl.Signer) string {
switch {
case isPublic:
var queryString string
if !ls.PasswordProtected {
queryString = path
} else {
expiration := time.Unix(int64(ls.Signature.SignatureExpiration.Seconds), int64(ls.Signature.SignatureExpiration.Nanos))
var sb strings.Builder
sb.WriteString(path)
sb.WriteString("?signature=")
sb.WriteString(ls.Signature.Signature)
sb.WriteString("&expiration=")
sb.WriteString(url.QueryEscape(expiration.Format(time.RFC3339)))
queryString = sb.String()
}
return publicURL + baseURI + queryString
case urlSigner != nil:
u, ok := ctxpkg.ContextGetUser(ctx)
if !ok {
log.Error().Msg("could not get user from context for download URL signing")
return ""
}
signedURL, err := urlSigner.Sign(publicURL+baseURI+path, u.Id.OpaqueId, 30*time.Minute)
if err != nil {
log.Error().Err(err).Msg("failed to sign download URL")
return ""
} else {
return signedURL
}
}
return ""
}
func activeLocks(log *zerolog.Logger, lock *provider.Lock) string {
if lock == nil || lock.Type == provider.LockType_LOCK_TYPE_INVALID {
return ""

View File

@@ -147,7 +147,7 @@ func (s *svc) handlePropfindOnToken(w http.ResponseWriter, r *http.Request, ns s
prefer := net.ParsePrefer(r.Header.Get("prefer"))
returnMinimal := prefer[net.HeaderPreferReturn] == "minimal"
propRes, err := propfind.MultistatusResponse(ctx, &pf, infos, s.c.PublicURL, ns, nil, returnMinimal)
propRes, err := propfind.MultistatusResponse(ctx, &pf, infos, s.c.PublicURL, ns, nil, returnMinimal, nil)
if err != nil {
sublog.Error().Err(err).Msg("error formatting propfind")
w.WriteHeader(http.StatusInternalServerError)

View File

@@ -117,7 +117,7 @@ func (s *svc) doFilterFiles(w http.ResponseWriter, r *http.Request, ff *reportFi
prefer := net.ParsePrefer(r.Header.Get("prefer"))
returnMinimal := prefer[net.HeaderPreferReturn] == "minimal"
responsesXML, err := propfind.MultistatusResponse(ctx, &propfind.XML{Prop: ff.Prop}, infos, s.c.PublicURL, namespace, nil, returnMinimal)
responsesXML, err := propfind.MultistatusResponse(ctx, &propfind.XML{Prop: ff.Prop}, infos, s.c.PublicURL, namespace, nil, returnMinimal, nil)
if err != nil {
log.Error().Err(err).Msg("error formatting propfind")
w.WriteHeader(http.StatusInternalServerError)

View File

@@ -82,7 +82,7 @@ func (h *SpacesHandler) Handler(s *svc, trashbinHandler *TrashbinHandler) http.H
var err error
switch r.Method {
case MethodPropfind:
p := propfind.NewHandler(config.PublicURL, s.gatewaySelector, config)
p := propfind.NewHandler(config.PublicURL, s.gatewaySelector, s.urlSigner, config)
p.HandleSpacesPropfind(w, r, spaceID)
case MethodProppatch:
status, err = s.handleSpacesProppatch(w, r, spaceID)

View File

@@ -200,7 +200,7 @@ func (h *VersionsHandler) doListVersions(w http.ResponseWriter, r *http.Request,
prefer := net.ParsePrefer(r.Header.Get("prefer"))
returnMinimal := prefer[net.HeaderPreferReturn] == "minimal"
propRes, err := propfind.MultistatusResponse(ctx, &pf, infos, s.c.PublicURL, "", nil, returnMinimal)
propRes, err := propfind.MultistatusResponse(ctx, &pf, infos, s.c.PublicURL, "", nil, returnMinimal, nil)
if err != nil {
sublog.Error().Err(err).Msg("error formatting propfind")
w.WriteHeader(http.StatusInternalServerError)

View File

@@ -72,7 +72,7 @@ func (h *WebDavHandler) Handler(s *svc) http.Handler {
var status int // status 0 means the handler already sent the response
switch r.Method {
case MethodPropfind:
p := propfind.NewHandler(config.PublicURL, s.gatewaySelector, config)
p := propfind.NewHandler(config.PublicURL, s.gatewaySelector, s.urlSigner, config)
p.HandlePathPropfind(w, r, ns)
case MethodLock:
status, err = s.handleLock(w, r, ns)

View File

@@ -40,14 +40,13 @@ func (_m *Stream) EXPECT() *Stream_Expecter {
// Consume provides a mock function with given fields: _a0, _a1
func (_m *Stream) Consume(_a0 string, _a1 ...events.ConsumeOption) (<-chan events.Event, error) {
_va := make([]interface{}, len(_a1))
for _i := range _a1 {
_va[_i] = _a1[_i]
var tmpRet mock.Arguments
if len(_a1) > 0 {
tmpRet = _m.Called(_a0, _a1)
} else {
tmpRet = _m.Called(_a0)
}
var _ca []interface{}
_ca = append(_ca, _a0)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
ret := tmpRet
if len(ret) == 0 {
panic("no return value specified for Consume")
@@ -113,14 +112,13 @@ func (_c *Stream_Consume_Call) RunAndReturn(run func(string, ...events.ConsumeOp
// Publish provides a mock function with given fields: _a0, _a1, _a2
func (_m *Stream) Publish(_a0 string, _a1 interface{}, _a2 ...events.PublishOption) error {
_va := make([]interface{}, len(_a2))
for _i := range _a2 {
_va[_i] = _a2[_i]
var tmpRet mock.Arguments
if len(_a2) > 0 {
tmpRet = _m.Called(_a0, _a1, _a2)
} else {
tmpRet = _m.Called(_a0, _a1)
}
var _ca []interface{}
_ca = append(_ca, _a0, _a1)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
ret := tmpRet
if len(ret) == 0 {
panic("no return value specified for Publish")

View File

@@ -0,0 +1,27 @@
package raw
import (
"bytes"
"crypto/x509"
"errors"
"io"
)
// newCertPoolFromPEM reads certificates from io.Reader and returns a x509.CertPool
// containing those certificates.
func newCertPoolFromPEM(crts ...io.Reader) (*x509.CertPool, error) {
certPool := x509.NewCertPool()
var buf bytes.Buffer
for _, c := range crts {
if _, err := io.Copy(&buf, c); err != nil {
return nil, err
}
if !certPool.AppendCertsFromPEM(buf.Bytes()) {
return nil, errors.New("failed to append cert from PEM")
}
buf.Reset()
}
return certPool, nil
}

View File

@@ -0,0 +1,127 @@
// Copyright 2018-2022 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
// Code generated by mockery v2.53.2. DO NOT EDIT.
package mocks
import (
events "github.com/opencloud-eu/reva/v2/pkg/events"
mock "github.com/stretchr/testify/mock"
raw "github.com/opencloud-eu/reva/v2/pkg/events/raw"
)
// Stream is an autogenerated mock type for the Stream type
type Stream struct {
	mock.Mock
}
// Stream_Expecter provides a typed, fluent API for registering expectations
// on the Stream mock.
type Stream_Expecter struct {
	mock *mock.Mock
}
// EXPECT returns the expecter for this mock.
func (_m *Stream) EXPECT() *Stream_Expecter {
	return &Stream_Expecter{mock: &_m.Mock}
}
// Consume provides a mock function with given fields: group, evs
//
// NOTE(review): unlike stock mockery output, the variadic unmarshallers are
// recorded as a single slice argument to Called instead of being spread,
// while the Stream_Expecter.Consume helper below spreads them — confirm that
// expectations set via EXPECT() actually match calls made this way.
func (_m *Stream) Consume(group string, evs ...events.Unmarshaller) (<-chan raw.Event, error) {
	var tmpRet mock.Arguments
	if len(evs) > 0 {
		tmpRet = _m.Called(group, evs)
	} else {
		// No variadic args: record the call with the group only.
		tmpRet = _m.Called(group)
	}
	ret := tmpRet
	if len(ret) == 0 {
		panic("no return value specified for Consume")
	}
	var r0 <-chan raw.Event
	var r1 error
	// A single registered function may produce both return values at once.
	if rf, ok := ret.Get(0).(func(string, ...events.Unmarshaller) (<-chan raw.Event, error)); ok {
		return rf(group, evs...)
	}
	// Otherwise resolve each return value individually, allowing either a
	// computing function or a plain value to have been registered.
	if rf, ok := ret.Get(0).(func(string, ...events.Unmarshaller) <-chan raw.Event); ok {
		r0 = rf(group, evs...)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(<-chan raw.Event)
		}
	}
	if rf, ok := ret.Get(1).(func(string, ...events.Unmarshaller) error); ok {
		r1 = rf(group, evs...)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}
// Stream_Consume_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Consume'
type Stream_Consume_Call struct {
	*mock.Call
}
// Consume is a helper method to define mock.On call
//   - group string
//   - evs ...events.Unmarshaller
func (_e *Stream_Expecter) Consume(group interface{}, evs ...interface{}) *Stream_Consume_Call {
	return &Stream_Consume_Call{Call: _e.mock.On("Consume",
		append([]interface{}{group}, evs...)...)}
}
// Run registers a callback that is invoked with the typed call arguments.
func (_c *Stream_Consume_Call) Run(run func(group string, evs ...events.Unmarshaller)) *Stream_Consume_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// Convert the recorded mock.Arguments back into the typed variadic
		// slice, skipping nil entries to avoid failing type assertions.
		variadicArgs := make([]events.Unmarshaller, len(args)-1)
		for i, a := range args[1:] {
			if a != nil {
				variadicArgs[i] = a.(events.Unmarshaller)
			}
		}
		run(args[0].(string), variadicArgs...)
	})
	return _c
}
// Return sets the values the mocked Consume call will return.
func (_c *Stream_Consume_Call) Return(_a0 <-chan raw.Event, _a1 error) *Stream_Consume_Call {
	_c.Call.Return(_a0, _a1)
	return _c
}
// RunAndReturn registers a function that both receives the call arguments and
// computes the return values.
func (_c *Stream_Consume_Call) RunAndReturn(run func(string, ...events.Unmarshaller) (<-chan raw.Event, error)) *Stream_Consume_Call {
	_c.Call.Return(run)
	return _c
}
// NewStream creates a new instance of Stream. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewStream(t interface {
	mock.TestingT
	Cleanup(func())
}) *Stream {
	mock := &Stream{}
	mock.Mock.Test(t)
	// Fail the test at cleanup time if any registered expectation was not met.
	t.Cleanup(func() { mock.AssertExpectations(t) })
	return mock
}

View File

@@ -0,0 +1,216 @@
package raw
import (
"context"
"crypto/tls"
"crypto/x509"
"encoding/json"
"fmt"
"os"
"reflect"
"time"
"github.com/cenkalti/backoff"
"github.com/nats-io/nats.go"
"github.com/nats-io/nats.go/jetstream"
"github.com/opencloud-eu/reva/v2/pkg/events"
"github.com/pkg/errors"
)
// Config is the configuration needed for a NATS event stream
type Config struct {
	Endpoint             string        `mapstructure:"address"`          // Endpoint of the nats server
	Cluster              string        `mapstructure:"clusterID"`        // ClusterID of the nats cluster
	TLSInsecure          bool          `mapstructure:"tls-insecure"`     // Whether to skip verification of the server's TLS certificate
	TLSRootCACertificate string        `mapstructure:"tls-root-ca-cert"` // The root CA certificate used to validate the TLS certificate
	EnableTLS            bool          `mapstructure:"enable-tls"`       // Enable TLS
	AuthUsername         string        `mapstructure:"username"`         // Username for authentication
	AuthPassword         string        `mapstructure:"password"`         // Password for authentication
	MaxAckPending        int           `mapstructure:"max-ack-pending"`  // Maximum number of unacknowledged messages
	AckWait              time.Duration `mapstructure:"ack-wait"`         // Time to wait for an ack
}
// RawEvent is an event as read from the stream, with its payload still
// marshalled.
type RawEvent struct {
	Timestamp time.Time         // time the event was published
	Metadata  map[string]string // event metadata (type, id, traceparent, initiator)
	ID        string
	Topic     string
	Payload   []byte        // marshalled event body
	msg       jetstream.Msg // underlying jetstream message, kept for acking
}
// Event is an unmarshalled event bundled with its underlying jetstream
// message so receivers can acknowledge it.
type Event struct {
	events.Event
	msg jetstream.Msg
}
// Ack acknowledges the underlying jetstream message of the event.
func (re *Event) Ack() error {
	if m := re.msg; m != nil {
		return m.Ack()
	}
	return errors.New("cannot ack event without message")
}
// InProgress signals the server that processing of the underlying jetstream
// message is still ongoing, resetting its ack wait timer.
func (re *Event) InProgress() error {
	if m := re.msg; m != nil {
		return m.InProgress()
	}
	return errors.New("cannot mark event as in progress without message")
}
// Stream is the consumer-side interface of a raw event stream.
type Stream interface {
	// Consume returns a channel of events for the given consumer group,
	// limited to the event types of the given unmarshallers.
	Consume(group string, evs ...events.Unmarshaller) (<-chan Event, error)
}
// RawStream implements Stream on top of a NATS jetstream stream.
type RawStream struct {
	Js jetstream.Stream // underlying jetstream stream
	c  Config
}
// FromConfig connects to the NATS jetstream configured in cfg and returns a
// Stream for the main event queue. The connection attempt is retried with
// exponential backoff; on final failure the last error is returned wrapped.
func FromConfig(ctx context.Context, name string, cfg Config) (Stream, error) {
	var s Stream
	b := backoff.NewExponentialBackOff()
	connect := func() error {
		var tlsConf *tls.Config
		if cfg.EnableTLS {
			var rootCAPool *x509.CertPool
			if cfg.TLSRootCACertificate != "" {
				rootCrtFile, err := os.Open(cfg.TLSRootCACertificate)
				if err != nil {
					return err
				}
				rootCAPool, err = newCertPoolFromPEM(rootCrtFile)
				// The file is fully read at this point; close it before the
				// error check so the descriptor is not leaked on every retry.
				_ = rootCrtFile.Close()
				if err != nil {
					return err
				}
				// An explicitly configured root CA implies that certificates
				// must be verified.
				cfg.TLSInsecure = false
			}
			tlsConf = &tls.Config{
				MinVersion:         tls.VersionTLS12,
				InsecureSkipVerify: cfg.TLSInsecure,
				RootCAs:            rootCAPool,
			}
		}
		nopts := nats.GetDefaultOptions()
		nopts.Name = name
		if tlsConf != nil {
			nopts.Secure = true
			nopts.TLSConfig = tlsConf
		}
		if len(cfg.Endpoint) > 0 {
			nopts.Servers = []string{cfg.Endpoint}
		}
		if cfg.AuthUsername != "" && cfg.AuthPassword != "" {
			nopts.User = cfg.AuthUsername
			nopts.Password = cfg.AuthPassword
		}
		conn, err := nopts.Connect()
		if err != nil {
			return err
		}
		jsConn, err := jetstream.New(conn)
		if err != nil {
			return err
		}
		js, err := jsConn.Stream(ctx, events.MainQueueName)
		if err != nil {
			return err
		}
		s = &RawStream{
			Js: js,
			c:  cfg,
		}
		return nil
	}
	err := backoff.Retry(connect, b)
	if err != nil {
		return s, errors.Wrap(err, "could not connect to nats jetstream")
	}
	return s, nil
}
// Consume subscribes to the stream with the given consumer group and returns
// a channel of unmarshalled events. Only event types matching one of the
// given unmarshallers are forwarded; all other events are acknowledged and
// discarded. The returned channel is closed when the underlying raw channel
// closes.
func (s *RawStream) Consume(group string, evs ...events.Unmarshaller) (<-chan Event, error) {
	c, err := s.consumeRaw(group)
	if err != nil {
		return nil, err
	}

	// Index the registered unmarshallers by their concrete type name, which
	// is what the publisher stores in the event-type metadata.
	registeredEvents := map[string]events.Unmarshaller{}
	for _, e := range evs {
		registeredEvents[reflect.TypeOf(e).String()] = e
	}

	outchan := make(chan Event)
	go func() {
		// Range over the raw channel instead of a bare receive: a closed
		// channel would otherwise yield zero-value RawEvents with a nil msg
		// in a tight loop and panic in e.msg.Ack().
		defer close(outchan)
		for e := range c {
			eventType := e.Metadata[events.MetadatakeyEventType]
			ev, ok := registeredEvents[eventType]
			if !ok {
				_ = e.msg.Ack() // Discard. We are not interested in this event type
				continue
			}

			event, err := ev.Unmarshal(e.Payload)
			if err != nil {
				// Leave the message unacknowledged so it is redelivered.
				continue
			}

			outchan <- Event{
				Event: events.Event{
					Type:        eventType,
					ID:          e.Metadata[events.MetadatakeyEventID],
					TraceParent: e.Metadata[events.MetadatakeyTraceParent],
					InitiatorID: e.Metadata[events.MetadatakeyInitiatorID],
					Event:       event,
				},
				msg: e.msg,
			}
		}
	}()
	return outchan, nil
}
// consumeRaw creates (or updates) a durable jetstream consumer for the given
// group and returns a channel emitting the raw, still-marshalled events.
// Messages must be acknowledged explicitly by the receiver.
func (s *RawStream) consumeRaw(group string) (<-chan RawEvent, error) {
	consumer, err := s.Js.CreateOrUpdateConsumer(context.Background(), jetstream.ConsumerConfig{
		Durable:       group,
		DeliverPolicy: jetstream.DeliverNewPolicy,
		AckPolicy:     jetstream.AckExplicitPolicy, // Require manual acknowledgment
		MaxAckPending: s.c.MaxAckPending,           // Maximum number of unacknowledged messages
		AckWait:       s.c.AckWait,                 // Time to wait for an ack
	})
	if err != nil {
		return nil, err
	}
	channel := make(chan RawEvent)
	callback := func(msg jetstream.Msg) {
		var rawEvent RawEvent
		if err := json.Unmarshal(msg.Data(), &rawEvent); err != nil {
			// NOTE(review): an unparsable message is neither acked nor
			// terminated here, so the server will keep redelivering it after
			// each AckWait — confirm this is intended. Also consider using a
			// proper logger instead of writing to stdout.
			fmt.Printf("error unmarshalling event: %v\n", err)
			return
		}
		rawEvent.msg = msg
		channel <- rawEvent
	}
	_, err = consumer.Consume(callback)
	if err != nil {
		return nil, err
	}
	return channel, nil
}

View File

@@ -401,3 +401,10 @@ func RegisterInterval(interval time.Duration) Option {
o.RegisterInterval = interval
}
}
// URLSigningSharedSecret provides a function to set the URLSigningSharedSecret
// config option, i.e. the shared secret used when signing URLs.
func URLSigningSharedSecret(secret string) Option {
	return func(o *Options) {
		o.config.URLSigningSharedSecret = secret
	}
}

View File

@@ -0,0 +1,116 @@
package signedurl
import (
"errors"
"fmt"
"net/url"
"time"
"github.com/golang-jwt/jwt/v5"
)
// JWTSignedURL implements the Signer and Verifier interfaces using JWT for signing URLs.
type JWTSignedURL struct {
	JWTOptions
}
// claims is the JWT payload: the normalized target URL plus the standard
// registered claims (expiry, issuer, issued-at, subject).
type claims struct {
	TargetURL string `json:"target_url"`
	jwt.RegisteredClaims
}
// JWTOption defines a single option function.
type JWTOption func(o *JWTOptions)
// JWTOptions defines the available options for this package.
type JWTOptions struct {
	secret     string // Secret key used for signing and verifying JWTs
	queryParam string // Name of the query parameter for the signature
}
// NewJWTSignedURL constructs a JWTSignedURL from the given options. A
// non-empty secret is required; the signature query parameter defaults to
// "oc-jwt-sig" when not configured.
func NewJWTSignedURL(opts ...JWTOption) (*JWTSignedURL, error) {
	options := JWTOptions{}
	for _, apply := range opts {
		apply(&options)
	}
	if options.secret == "" {
		return nil, ErrInvalidKey
	}
	if options.queryParam == "" {
		options.queryParam = "oc-jwt-sig"
	}
	return &JWTSignedURL{options}, nil
}
// WithSecret sets the shared secret used for signing and verifying JWTs.
func WithSecret(secret string) JWTOption {
	return func(o *JWTOptions) {
		o.secret = secret
	}
}
// WithQueryParam sets the name of the query parameter that carries the
// signature.
func WithQueryParam(queryParam string) JWTOption {
	return func(o *JWTOptions) {
		o.queryParam = queryParam
	}
}
// Sign signs a URL using JWT with a specified time-to-live (ttl). The token
// is appended to the URL in the configured query parameter.
func (j *JWTSignedURL) Sign(unsignedURL, subject string, ttl time.Duration) (string, error) {
	// Re-encode the Query parameters to ensure they are "normalized"
	// (Values.Encode() does return them alphabetically ordered), so Verify
	// can reconstruct the exact same target URL later.
	u, err := url.Parse(unsignedURL)
	if err != nil {
		return "", NewSignedURLError(err, "failed to parse url")
	}
	query := u.Query()
	u.RawQuery = query.Encode()

	// Use a single timestamp for both IssuedAt and ExpiresAt.
	now := time.Now()
	c := claims{
		TargetURL: u.String(),
		RegisteredClaims: jwt.RegisteredClaims{
			ExpiresAt: jwt.NewNumericDate(now.Add(ttl)),
			Issuer:    "reva",
			IssuedAt:  jwt.NewNumericDate(now),
			Subject:   subject,
		},
	}
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, c)
	signedToken, err := token.SignedString([]byte(j.secret))
	if err != nil {
		// Use the package's error type for consistency with the parse error
		// above; Unwrap still exposes the original error.
		return "", NewSignedURLError(err, "signing failed")
	}
	query.Set(j.queryParam, signedToken)
	u.RawQuery = query.Encode()
	return u.String(), nil
}
// Verify verifies a signed URL using a JWT. Returns the subject of the JWT if
// verification is successful.
func (j *JWTSignedURL) Verify(signedURL string) (string, error) {
	u, err := url.Parse(signedURL)
	if err != nil {
		return "", NewSignatureVerificationError(fmt.Errorf("could not parse URL: %w", err))
	}
	query := u.Query()
	tokenString := query.Get(j.queryParam)
	if tokenString == "" {
		return "", NewSignatureVerificationError(errors.New("no signature in url"))
	}
	// Restrict the accepted signing methods to the one used by Sign to rule
	// out algorithm-confusion attacks.
	token, err := jwt.ParseWithClaims(tokenString, &claims{},
		func(token *jwt.Token) (any, error) { return []byte(j.secret), nil },
		jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Alg()}))
	if err != nil {
		return "", NewSignatureVerificationError(err)
	}
	c, ok := token.Claims.(*claims)
	if !ok {
		return "", NewSignatureVerificationError(errors.New("invalid JWT claims"))
	}
	// Strip the signature parameter and compare the remaining, re-normalized
	// URL against the URL that was signed.
	query.Del(j.queryParam)
	u.RawQuery = query.Encode()
	if c.TargetURL != u.String() {
		return "", NewSignatureVerificationError(errors.New("url mismatch"))
	}
	return c.Subject, nil
}

View File

@@ -0,0 +1,64 @@
// Package signedurl provides interfaces and implementations for signing and verifying URLs.
package signedurl
import (
"time"
)
// Signer signs URLs so their integrity can later be checked by a matching
// Verifier.
type Signer interface {
	// Sign signs a URL
	Sign(url, principal string, ttl time.Duration) (string, error)
}
// Verifier verifies URLs produced by a matching Signer.
type Verifier interface {
	// Verify verifies a signed URL
	Verify(signedURL string) (string, error)
}
// SignedURLError is the error type used by this package. It carries a message
// and an optional wrapped inner error.
type SignedURLError struct {
	innerErr error
	message  string
}
// NewSignedURLError creates a new SignedURLError with the provided inner error and message.
func NewSignedURLError(innerErr error, message string) SignedURLError {
	return SignedURLError{innerErr: innerErr, message: message}
}
// ErrInvalidKey is returned when no usable signing key was provided.
var ErrInvalidKey = NewSignedURLError(nil, "invalid key provided")
// SignatureVerificationError indicates that a signed URL failed verification.
type SignatureVerificationError struct {
	SignedURLError
}
// NewSignatureVerificationError wraps innerErr in a SignatureVerificationError
// with a fixed "signature verification failed" message.
func NewSignatureVerificationError(innerErr error) SignatureVerificationError {
	inner := SignedURLError{
		innerErr: innerErr,
		message:  "signature verification failed",
	}
	return SignatureVerificationError{SignedURLError: inner}
}
// Is reports whether tgt is a SignatureVerificationError, so errors.Is
// matches any error of this type regardless of the wrapped inner error.
func (e SignatureVerificationError) Is(tgt error) bool {
	_, ok := tgt.(SignatureVerificationError)
	return ok
}
// Error implements the error interface, appending the inner error's message
// when one is present.
func (e SignedURLError) Error() string {
	msg := e.message
	if e.innerErr != nil {
		msg += ": " + e.innerErr.Error()
	}
	return msg
}
// Unwrap returns the wrapped inner error (may be nil), enabling errors.Is and
// errors.As chains.
func (e SignedURLError) Unwrap() error {
	return e.innerErr
}

View File

@@ -98,11 +98,13 @@ func (w *CephFSWatcher) Watch(topic string) {
switch {
case mask&CEPH_MDS_NOTIFY_DELETE > 0:
err = w.tree.Scan(path, ActionDelete, isDir)
case mask&CEPH_MDS_NOTIFY_CREATE > 0 || mask&CEPH_MDS_NOTIFY_MOVED_TO > 0:
case mask&CEPH_MDS_NOTIFY_MOVED_TO > 0:
if ev.SrcMask > 0 {
// This is a move, clean up the old path
err = w.tree.Scan(filepath.Join(w.tree.options.WatchRoot, ev.SrcPath), ActionMoveFrom, isDir)
}
err = w.tree.Scan(path, ActionMove, isDir)
case mask&CEPH_MDS_NOTIFY_CREATE > 0:
err = w.tree.Scan(path, ActionCreate, isDir)
case mask&CEPH_MDS_NOTIFY_CLOSE_WRITE > 0:
err = w.tree.Scan(path, ActionUpdate, isDir)

View File

@@ -92,7 +92,9 @@ func (iw *InotifyWatcher) Watch(path string) {
err = iw.tree.Scan(event.Filename, ActionDelete, event.IsDir)
case inotifywaitgo.MOVED_FROM:
err = iw.tree.Scan(event.Filename, ActionMoveFrom, event.IsDir)
case inotifywaitgo.CREATE, inotifywaitgo.MOVED_TO:
case inotifywaitgo.MOVED_TO:
err = iw.tree.Scan(event.Filename, ActionMove, event.IsDir)
case inotifywaitgo.CREATE:
err = iw.tree.Scan(event.Filename, ActionCreate, event.IsDir)
case inotifywaitgo.CLOSE_WRITE:
err = iw.tree.Scan(event.Filename, ActionUpdate, event.IsDir)

View File

@@ -1117,22 +1117,22 @@ func (n *Node) ReadUserPermissions(ctx context.Context, u *userpb.User) (ap *pro
continue
}
if isGrantExpired(g) {
continue
}
switch {
case err == nil:
if isGrantExpired(g) {
continue
}
// If all permissions are set to false we have a deny grant
if grants.PermissionsEqual(g.Permissions, &provider.ResourcePermissions{}) {
return NoPermissions(), true, nil
}
AddPermissions(ap, g.GetPermissions())
case metadata.IsAttrUnset(err):
appctx.GetLogger(ctx).Error().Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("grant", grantees[i]).Interface("grantees", grantees).Msg("grant vanished from node after listing")
appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("path", n.InternalPath()).Str("grant", grantees[i]).Interface("grantees", grantees).Msg("grant vanished from node after listing")
// continue with next segment
default:
appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("grant", grantees[i]).Msg("error reading permissions")
appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("path", n.InternalPath()).Str("grant", grantees[i]).Msg("error reading permissions")
// continue with next segment
}
}

View File

@@ -45,14 +45,13 @@ func (_m *CollaborationAPIClient) EXPECT() *CollaborationAPIClient_Expecter {
// CreateShare provides a mock function with given fields: ctx, in, opts
func (_m *CollaborationAPIClient) CreateShare(ctx context.Context, in *collaborationv1beta1.CreateShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.CreateShareResponse, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
var tmpRet mock.Arguments
if len(opts) > 0 {
tmpRet = _m.Called(ctx, in, opts)
} else {
tmpRet = _m.Called(ctx, in)
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
ret := tmpRet
if len(ret) == 0 {
panic("no return value specified for CreateShare")
@@ -119,14 +118,13 @@ func (_c *CollaborationAPIClient_CreateShare_Call) RunAndReturn(run func(context
// GetReceivedShare provides a mock function with given fields: ctx, in, opts
func (_m *CollaborationAPIClient) GetReceivedShare(ctx context.Context, in *collaborationv1beta1.GetReceivedShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.GetReceivedShareResponse, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
var tmpRet mock.Arguments
if len(opts) > 0 {
tmpRet = _m.Called(ctx, in, opts)
} else {
tmpRet = _m.Called(ctx, in)
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
ret := tmpRet
if len(ret) == 0 {
panic("no return value specified for GetReceivedShare")
@@ -193,14 +191,13 @@ func (_c *CollaborationAPIClient_GetReceivedShare_Call) RunAndReturn(run func(co
// GetShare provides a mock function with given fields: ctx, in, opts
func (_m *CollaborationAPIClient) GetShare(ctx context.Context, in *collaborationv1beta1.GetShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.GetShareResponse, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
var tmpRet mock.Arguments
if len(opts) > 0 {
tmpRet = _m.Called(ctx, in, opts)
} else {
tmpRet = _m.Called(ctx, in)
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
ret := tmpRet
if len(ret) == 0 {
panic("no return value specified for GetShare")
@@ -267,14 +264,13 @@ func (_c *CollaborationAPIClient_GetShare_Call) RunAndReturn(run func(context.Co
// ListReceivedShares provides a mock function with given fields: ctx, in, opts
func (_m *CollaborationAPIClient) ListReceivedShares(ctx context.Context, in *collaborationv1beta1.ListReceivedSharesRequest, opts ...grpc.CallOption) (*collaborationv1beta1.ListReceivedSharesResponse, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
var tmpRet mock.Arguments
if len(opts) > 0 {
tmpRet = _m.Called(ctx, in, opts)
} else {
tmpRet = _m.Called(ctx, in)
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
ret := tmpRet
if len(ret) == 0 {
panic("no return value specified for ListReceivedShares")
@@ -341,14 +337,13 @@ func (_c *CollaborationAPIClient_ListReceivedShares_Call) RunAndReturn(run func(
// ListShares provides a mock function with given fields: ctx, in, opts
func (_m *CollaborationAPIClient) ListShares(ctx context.Context, in *collaborationv1beta1.ListSharesRequest, opts ...grpc.CallOption) (*collaborationv1beta1.ListSharesResponse, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
var tmpRet mock.Arguments
if len(opts) > 0 {
tmpRet = _m.Called(ctx, in, opts)
} else {
tmpRet = _m.Called(ctx, in)
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
ret := tmpRet
if len(ret) == 0 {
panic("no return value specified for ListShares")
@@ -415,14 +410,13 @@ func (_c *CollaborationAPIClient_ListShares_Call) RunAndReturn(run func(context.
// RemoveShare provides a mock function with given fields: ctx, in, opts
func (_m *CollaborationAPIClient) RemoveShare(ctx context.Context, in *collaborationv1beta1.RemoveShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.RemoveShareResponse, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
var tmpRet mock.Arguments
if len(opts) > 0 {
tmpRet = _m.Called(ctx, in, opts)
} else {
tmpRet = _m.Called(ctx, in)
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
ret := tmpRet
if len(ret) == 0 {
panic("no return value specified for RemoveShare")
@@ -489,14 +483,13 @@ func (_c *CollaborationAPIClient_RemoveShare_Call) RunAndReturn(run func(context
// UpdateReceivedShare provides a mock function with given fields: ctx, in, opts
func (_m *CollaborationAPIClient) UpdateReceivedShare(ctx context.Context, in *collaborationv1beta1.UpdateReceivedShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.UpdateReceivedShareResponse, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
var tmpRet mock.Arguments
if len(opts) > 0 {
tmpRet = _m.Called(ctx, in, opts)
} else {
tmpRet = _m.Called(ctx, in)
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
ret := tmpRet
if len(ret) == 0 {
panic("no return value specified for UpdateReceivedShare")
@@ -563,14 +556,13 @@ func (_c *CollaborationAPIClient_UpdateReceivedShare_Call) RunAndReturn(run func
// UpdateShare provides a mock function with given fields: ctx, in, opts
func (_m *CollaborationAPIClient) UpdateShare(ctx context.Context, in *collaborationv1beta1.UpdateShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.UpdateShareResponse, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
var tmpRet mock.Arguments
if len(opts) > 0 {
tmpRet = _m.Called(ctx, in, opts)
} else {
tmpRet = _m.Called(ctx, in)
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
ret := tmpRet
if len(ret) == 0 {
panic("no return value specified for UpdateShare")

File diff suppressed because it is too large Load Diff

View File

@@ -26,7 +26,7 @@ import (
var (
// MinClusterVersion is the min cluster version this etcd binary is compatible with.
MinClusterVersion = "3.0.0"
Version = "3.6.1"
Version = "3.6.2"
APIVersion = "unknown"
// Git SHA Value will be set during build

View File

@@ -12,6 +12,8 @@ import (
// XOF defines the interface to hash functions that
// support arbitrary-length output.
//
// New callers should prefer the standard library [hash.XOF].
type XOF interface {
// Write absorbs more data into the hash's state. It panics if called
// after Read.
@@ -47,6 +49,8 @@ const maxOutputLength = (1 << 32) * 64
//
// A non-nil key turns the hash into a MAC. The key must between
// zero and 32 bytes long.
//
// The result can be safely interface-upgraded to [hash.XOF].
func NewXOF(size uint32, key []byte) (XOF, error) {
if len(key) > Size {
return nil, errKeySize
@@ -93,6 +97,10 @@ func (x *xof) Clone() XOF {
return &clone
}
func (x *xof) BlockSize() int {
return x.d.BlockSize()
}
func (x *xof) Reset() {
x.cfg[0] = byte(Size)
binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length

11
vendor/golang.org/x/crypto/blake2b/go125.go generated vendored Normal file
View File

@@ -0,0 +1,11 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.25
package blake2b
import "hash"
var _ hash.XOF = (*xof)(nil)

View File

@@ -233,7 +233,11 @@ func parseCert(in []byte, privAlgo string) (*Certificate, error) {
if err != nil {
return nil, err
}
// The Type() function is intended to return only certificate key types, but
// we use certKeyAlgoNames anyway for safety, to match [Certificate.Type].
if _, ok := certKeyAlgoNames[k.Type()]; ok {
return nil, fmt.Errorf("ssh: the signature key type %q is invalid for certificates", k.Type())
}
c.SignatureKey = k
c.Signature, rest, ok = parseSignatureBody(g.Signature)
if !ok || len(rest) > 0 {
@@ -301,16 +305,13 @@ type CertChecker struct {
SupportedCriticalOptions []string
// IsUserAuthority should return true if the key is recognized as an
// authority for the given user certificate. This allows for
// certificates to be signed by other certificates. This must be set
// if this CertChecker will be checking user certificates.
// authority for user certificate. This must be set if this CertChecker
// will be checking user certificates.
IsUserAuthority func(auth PublicKey) bool
// IsHostAuthority should report whether the key is recognized as
// an authority for this host. This allows for certificates to be
// signed by other keys, and for those other keys to only be valid
// signers for particular hostnames. This must be set if this
// CertChecker will be checking host certificates.
// an authority for this host. This must be set if this CertChecker
// will be checking host certificates.
IsHostAuthority func(auth PublicKey, address string) bool
// Clock is used for verifying time stamps. If nil, time.Now
@@ -447,12 +448,19 @@ func (c *CertChecker) CheckCert(principal string, cert *Certificate) error {
// SignCert signs the certificate with an authority, setting the Nonce,
// SignatureKey, and Signature fields. If the authority implements the
// MultiAlgorithmSigner interface the first algorithm in the list is used. This
// is useful if you want to sign with a specific algorithm.
// is useful if you want to sign with a specific algorithm. As specified in
// [SSH-CERTS], Section 2.1.1, authority can't be a [Certificate].
func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
c.Nonce = make([]byte, 32)
if _, err := io.ReadFull(rand, c.Nonce); err != nil {
return err
}
// The Type() function is intended to return only certificate key types, but
// we use certKeyAlgoNames anyway for safety, to match [Certificate.Type].
if _, ok := certKeyAlgoNames[authority.PublicKey().Type()]; ok {
return fmt.Errorf("ssh: certificates cannot be used as authority (public key type %q)",
authority.PublicKey().Type())
}
c.SignatureKey = authority.PublicKey()
if v, ok := authority.(MultiAlgorithmSigner); ok {

View File

@@ -289,7 +289,7 @@ func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiA
}
}
algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos)
algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos, true)
if err != nil {
// If there is no overlap, return the fallback algorithm to support
// servers that fail to list all supported algorithms.

View File

@@ -336,7 +336,7 @@ func parseError(tag uint8) error {
return fmt.Errorf("ssh: parse error in message type %d", tag)
}
func findCommon(what string, client []string, server []string) (common string, err error) {
func findCommon(what string, client []string, server []string, isClient bool) (string, error) {
for _, c := range client {
for _, s := range server {
if c == s {
@@ -344,7 +344,32 @@ func findCommon(what string, client []string, server []string) (common string, e
}
}
}
return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server)
err := &AlgorithmNegotiationError{
What: what,
}
if isClient {
err.SupportedAlgorithms = client
err.RequestedAlgorithms = server
} else {
err.SupportedAlgorithms = server
err.RequestedAlgorithms = client
}
return "", err
}
// AlgorithmNegotiationError defines the error returned if the client and the
// server cannot agree on an algorithm for key exchange, host key, cipher, MAC.
type AlgorithmNegotiationError struct {
What string
// RequestedAlgorithms lists the algorithms supported by the peer.
RequestedAlgorithms []string
// SupportedAlgorithms lists the algorithms supported on our side.
SupportedAlgorithms []string
}
func (a *AlgorithmNegotiationError) Error() string {
return fmt.Sprintf("ssh: no common algorithm for %s; we offered: %v, peer offered: %v",
a.What, a.SupportedAlgorithms, a.RequestedAlgorithms)
}
// DirectionAlgorithms defines the algorithms negotiated in one direction
@@ -379,12 +404,12 @@ var aeadCiphers = map[string]bool{
func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMsg) (algs *NegotiatedAlgorithms, err error) {
result := &NegotiatedAlgorithms{}
result.KeyExchange, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos)
result.KeyExchange, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos, isClient)
if err != nil {
return
}
result.HostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos)
result.HostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos, isClient)
if err != nil {
return
}
@@ -394,36 +419,36 @@ func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMs
ctos, stoc = stoc, ctos
}
ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer)
ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer, isClient)
if err != nil {
return
}
stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient)
stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient, isClient)
if err != nil {
return
}
if !aeadCiphers[ctos.Cipher] {
ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer, isClient)
if err != nil {
return
}
}
if !aeadCiphers[stoc.Cipher] {
stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient, isClient)
if err != nil {
return
}
}
ctos.compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer)
ctos.compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer, isClient)
if err != nil {
return
}
stoc.compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient)
stoc.compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient, isClient)
if err != nil {
return
}

View File

@@ -16,6 +16,7 @@ References:
[PROTOCOL]: https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=HEAD
[PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
[SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
[SSH-CERTS]: https://datatracker.ietf.org/doc/html/draft-miller-ssh-cert-01
This package does not fall under the stability promise of the Go language itself,
so its API may be changed when pressing needs arise.

View File

@@ -273,7 +273,7 @@ func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []str
return nil, "", nil, nil, errors.New("ssh: no key found")
}
// ParsePublicKey parses an SSH public key formatted for use in
// ParsePublicKey parses an SSH public key or certificate formatted for use in
// the SSH wire protocol according to RFC 4253, section 6.6.
func ParsePublicKey(in []byte) (out PublicKey, err error) {
algo, in, ok := parseString(in)

15
vendor/modules.txt vendored
View File

@@ -1018,6 +1018,8 @@ github.com/nats-io/nats-server/v2/server/tpm
github.com/nats-io/nats.go
github.com/nats-io/nats.go/encoders/builtin
github.com/nats-io/nats.go/internal/parser
github.com/nats-io/nats.go/internal/syncx
github.com/nats-io/nats.go/jetstream
github.com/nats-io/nats.go/util
# github.com/nats-io/nkeys v0.4.11
## explicit; go 1.23.0
@@ -1211,7 +1213,7 @@ github.com/open-policy-agent/opa/v1/version
# github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250707143759-32eaae12b2ce
## explicit; go 1.18
github.com/opencloud-eu/libre-graph-api-go
# github.com/opencloud-eu/reva/v2 v2.34.1-0.20250704134423-74abc5f04717
# github.com/opencloud-eu/reva/v2 v2.34.1-0.20250716074813-cfe225225b23
## explicit; go 1.24.1
github.com/opencloud-eu/reva/v2/cmd/revad/internal/grace
github.com/opencloud-eu/reva/v2/cmd/revad/runtime
@@ -1363,6 +1365,8 @@ github.com/opencloud-eu/reva/v2/pkg/eosclient/eosgrpc/eos_grpc
github.com/opencloud-eu/reva/v2/pkg/errtypes
github.com/opencloud-eu/reva/v2/pkg/events
github.com/opencloud-eu/reva/v2/pkg/events/mocks
github.com/opencloud-eu/reva/v2/pkg/events/raw
github.com/opencloud-eu/reva/v2/pkg/events/raw/mocks
github.com/opencloud-eu/reva/v2/pkg/events/stream
github.com/opencloud-eu/reva/v2/pkg/group
github.com/opencloud-eu/reva/v2/pkg/group/manager/json
@@ -1470,6 +1474,7 @@ github.com/opencloud-eu/reva/v2/pkg/share/manager/memory
github.com/opencloud-eu/reva/v2/pkg/share/manager/owncloudsql
github.com/opencloud-eu/reva/v2/pkg/share/manager/registry
github.com/opencloud-eu/reva/v2/pkg/sharedconf
github.com/opencloud-eu/reva/v2/pkg/signedurl
github.com/opencloud-eu/reva/v2/pkg/siteacc
github.com/opencloud-eu/reva/v2/pkg/siteacc/account
github.com/opencloud-eu/reva/v2/pkg/siteacc/account/contact
@@ -2010,7 +2015,7 @@ go.etcd.io/bbolt
go.etcd.io/bbolt/errors
go.etcd.io/bbolt/internal/common
go.etcd.io/bbolt/internal/freelist
# go.etcd.io/etcd/api/v3 v3.6.1
# go.etcd.io/etcd/api/v3 v3.6.2
## explicit; go 1.23.0
go.etcd.io/etcd/api/v3/authpb
go.etcd.io/etcd/api/v3/etcdserverpb
@@ -2019,7 +2024,7 @@ go.etcd.io/etcd/api/v3/mvccpb
go.etcd.io/etcd/api/v3/v3rpc/rpctypes
go.etcd.io/etcd/api/v3/version
go.etcd.io/etcd/api/v3/versionpb
# go.etcd.io/etcd/client/pkg/v3 v3.6.1
# go.etcd.io/etcd/client/pkg/v3 v3.6.2
## explicit; go 1.23.0
go.etcd.io/etcd/client/pkg/v3/fileutil
go.etcd.io/etcd/client/pkg/v3/logutil
@@ -2028,7 +2033,7 @@ go.etcd.io/etcd/client/pkg/v3/tlsutil
go.etcd.io/etcd/client/pkg/v3/transport
go.etcd.io/etcd/client/pkg/v3/types
go.etcd.io/etcd/client/pkg/v3/verify
# go.etcd.io/etcd/client/v3 v3.6.1
# go.etcd.io/etcd/client/v3 v3.6.2
## explicit; go 1.23.0
go.etcd.io/etcd/client/v3
go.etcd.io/etcd/client/v3/credentials
@@ -2157,7 +2162,7 @@ go.uber.org/zap/internal/pool
go.uber.org/zap/internal/stacktrace
go.uber.org/zap/zapcore
go.uber.org/zap/zapgrpc
# golang.org/x/crypto v0.39.0
# golang.org/x/crypto v0.40.0
## explicit; go 1.23.0
golang.org/x/crypto/argon2
golang.org/x/crypto/bcrypt