From 63ab8f789ed1df9b56b091cf3bb044bdd5ea6711 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Duffeck?= Date: Tue, 15 Jul 2025 09:59:45 +0200 Subject: [PATCH] Bump reva --- go.mod | 10 +- go.sum | 20 +- .../nats-io/nats.go/internal/syncx/map.go | 73 + .../nats-io/nats.go/jetstream/README.md | 1040 +++++++++++ .../nats-io/nats.go/jetstream/api.go | 158 ++ .../nats-io/nats.go/jetstream/consumer.go | 410 +++++ .../nats.go/jetstream/consumer_config.go | 544 ++++++ .../nats-io/nats.go/jetstream/errors.go | 444 +++++ .../nats-io/nats.go/jetstream/jetstream.go | 1156 ++++++++++++ .../nats.go/jetstream/jetstream_options.go | 630 +++++++ .../nats-io/nats.go/jetstream/kv.go | 1523 +++++++++++++++ .../nats-io/nats.go/jetstream/kv_options.go | 131 ++ .../nats-io/nats.go/jetstream/message.go | 471 +++++ .../nats-io/nats.go/jetstream/object.go | 1625 +++++++++++++++++ .../nats.go/jetstream/object_options.go | 41 + .../nats-io/nats.go/jetstream/ordered.go | 810 ++++++++ .../nats-io/nats.go/jetstream/publish.go | 661 +++++++ .../nats-io/nats.go/jetstream/pull.go | 1127 ++++++++++++ .../nats-io/nats.go/jetstream/stream.go | 766 ++++++++ .../nats.go/jetstream/stream_config.go | 611 +++++++ .../services/owncloud/ocdav/config/config.go | 5 +- .../http/services/owncloud/ocdav/ocdav.go | 13 + .../owncloud/ocdav/propfind/propfind.go | 70 +- .../services/owncloud/ocdav/publicfile.go | 2 +- .../http/services/owncloud/ocdav/report.go | 2 +- .../http/services/owncloud/ocdav/spaces.go | 2 +- .../http/services/owncloud/ocdav/versions.go | 2 +- .../http/services/owncloud/ocdav/webdav.go | 2 +- .../reva/v2/pkg/events/mocks/Stream.go | 26 +- .../reva/v2/pkg/events/raw/certs.go | 27 + .../reva/v2/pkg/events/raw/mocks/Stream.go | 127 ++ .../reva/v2/pkg/events/raw/raw.go | 216 +++ .../reva/v2/pkg/micro/ocdav/option.go | 7 + .../opencloud-eu/reva/v2/pkg/signedurl/jwt.go | 116 ++ .../reva/v2/pkg/signedurl/signedurl.go | 64 + .../storage/fs/posix/tree/cephfswatcher.go | 4 +- .../storage/fs/posix/tree/inotifywatcher.go | 4 +- .../pkg/storage/pkg/decomposedfs/node/node.go | 12 +- .../cs3mocks/mocks/CollaborationAPIClient.go | 104 +- .../tests/cs3mocks/mocks/GatewayAPIClient.go | 1300 ++++++------- .../go.etcd.io/etcd/api/v3/version/version.go | 2 +- vendor/golang.org/x/crypto/blake2b/blake2x.go | 8 + vendor/golang.org/x/crypto/blake2b/go125.go | 11 + vendor/golang.org/x/crypto/ssh/certs.go | 26 +- vendor/golang.org/x/crypto/ssh/client_auth.go | 2 +- vendor/golang.org/x/crypto/ssh/common.go | 45 +- vendor/golang.org/x/crypto/ssh/doc.go | 1 + vendor/golang.org/x/crypto/ssh/keys.go | 2 +- vendor/modules.txt | 15 +- 49 files changed, 13621 insertions(+), 847 deletions(-) create mode 100644 vendor/github.com/nats-io/nats.go/internal/syncx/map.go create mode 100644 vendor/github.com/nats-io/nats.go/jetstream/README.md create mode 100644 vendor/github.com/nats-io/nats.go/jetstream/api.go create mode 100644 vendor/github.com/nats-io/nats.go/jetstream/consumer.go create mode 100644 vendor/github.com/nats-io/nats.go/jetstream/consumer_config.go create mode 100644 vendor/github.com/nats-io/nats.go/jetstream/errors.go create mode 100644 vendor/github.com/nats-io/nats.go/jetstream/jetstream.go create mode 100644 vendor/github.com/nats-io/nats.go/jetstream/jetstream_options.go create mode 100644 vendor/github.com/nats-io/nats.go/jetstream/kv.go create mode 100644 vendor/github.com/nats-io/nats.go/jetstream/kv_options.go create mode 100644 vendor/github.com/nats-io/nats.go/jetstream/message.go create mode 100644 
vendor/github.com/nats-io/nats.go/jetstream/object.go create mode 100644 vendor/github.com/nats-io/nats.go/jetstream/object_options.go create mode 100644 vendor/github.com/nats-io/nats.go/jetstream/ordered.go create mode 100644 vendor/github.com/nats-io/nats.go/jetstream/publish.go create mode 100644 vendor/github.com/nats-io/nats.go/jetstream/pull.go create mode 100644 vendor/github.com/nats-io/nats.go/jetstream/stream.go create mode 100644 vendor/github.com/nats-io/nats.go/jetstream/stream_config.go create mode 100644 vendor/github.com/opencloud-eu/reva/v2/pkg/events/raw/certs.go create mode 100644 vendor/github.com/opencloud-eu/reva/v2/pkg/events/raw/mocks/Stream.go create mode 100644 vendor/github.com/opencloud-eu/reva/v2/pkg/events/raw/raw.go create mode 100644 vendor/github.com/opencloud-eu/reva/v2/pkg/signedurl/jwt.go create mode 100644 vendor/github.com/opencloud-eu/reva/v2/pkg/signedurl/signedurl.go create mode 100644 vendor/golang.org/x/crypto/blake2b/go125.go diff --git a/go.mod b/go.mod index e6809d334..2bad80f19 100644 --- a/go.mod +++ b/go.mod @@ -64,7 +64,7 @@ require ( github.com/onsi/gomega v1.37.0 github.com/open-policy-agent/opa v1.6.0 github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250707143759-32eaae12b2ce - github.com/opencloud-eu/reva/v2 v2.34.1-0.20250704134423-74abc5f04717 + github.com/opencloud-eu/reva/v2 v2.34.1-0.20250716074813-cfe225225b23 github.com/orcaman/concurrent-map v1.0.0 github.com/pkg/errors v0.9.1 github.com/pkg/xattr v0.4.12 @@ -97,7 +97,7 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 go.opentelemetry.io/otel/sdk v1.37.0 go.opentelemetry.io/otel/trace v1.37.0 - golang.org/x/crypto v0.39.0 + golang.org/x/crypto v0.40.0 golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac golang.org/x/image v0.28.0 golang.org/x/net v0.41.0 @@ -317,9 +317,9 @@ require ( github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect github.com/yashtewari/glob-intersection v0.2.0 // indirect - go.etcd.io/etcd/api/v3 v3.6.1 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.6.1 // indirect - go.etcd.io/etcd/client/v3 v3.6.1 // indirect + go.etcd.io/etcd/api/v3 v3.6.2 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.6.2 // indirect + go.etcd.io/etcd/client/v3 v3.6.2 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect diff --git a/go.sum b/go.sum index f838aa4b1..b112e3f14 100644 --- a/go.sum +++ b/go.sum @@ -868,8 +868,8 @@ github.com/opencloud-eu/go-micro-plugins/v4/store/nats-js-kv v0.0.0-202505121527 github.com/opencloud-eu/go-micro-plugins/v4/store/nats-js-kv v0.0.0-20250512152754-23325793059a/go.mod h1:pjcozWijkNPbEtX5SIQaxEW/h8VAVZYTLx+70bmB3LY= github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250707143759-32eaae12b2ce h1:tjbIYsW5CFsEbCf5B/KN0Mo1oKU/K+oipgFm2B6wzG4= github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250707143759-32eaae12b2ce/go.mod h1:pzatilMEHZFT3qV7C/X3MqOa3NlRQuYhlRhZTL+hN6Q= -github.com/opencloud-eu/reva/v2 v2.34.1-0.20250704134423-74abc5f04717 h1:khqL0AenfN0vt6oXgvbqH4UIuJk+2+oxWSJKcal4GYQ= -github.com/opencloud-eu/reva/v2 v2.34.1-0.20250704134423-74abc5f04717/go.mod h1:hSIUWU8JUaX+y0cVjbh6VaW6Mh0uJ/azFPx5cSVhQfc= +github.com/opencloud-eu/reva/v2 v2.34.1-0.20250716074813-cfe225225b23 h1:FY6l12zi57efPXe9kVU1U6FB6HMuAV/t0XJPEU2XVDw= +github.com/opencloud-eu/reva/v2 v2.34.1-0.20250716074813-cfe225225b23/go.mod 
h1:5Zur6s3GoCbhdU09voU8EO+Ls71NiHgWYmhcvmngjwY= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= @@ -1157,12 +1157,12 @@ github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I= go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM= -go.etcd.io/etcd/api/v3 v3.6.1 h1:yJ9WlDih9HT457QPuHt/TH/XtsdN2tubyxyQHSHPsEo= -go.etcd.io/etcd/api/v3 v3.6.1/go.mod h1:lnfuqoGsXMlZdTJlact3IB56o3bWp1DIlXPIGKRArto= -go.etcd.io/etcd/client/pkg/v3 v3.6.1 h1:CxDVv8ggphmamrXM4Of8aCC8QHzDM4tGcVr9p2BSoGk= -go.etcd.io/etcd/client/pkg/v3 v3.6.1/go.mod h1:aTkCp+6ixcVTZmrJGa7/Mc5nMNs59PEgBbq+HCmWyMc= -go.etcd.io/etcd/client/v3 v3.6.1 h1:KelkcizJGsskUXlsxjVrSmINvMMga0VWwFF0tSPGEP0= -go.etcd.io/etcd/client/v3 v3.6.1/go.mod h1:fCbPUdjWNLfx1A6ATo9syUmFVxqHH9bCnPLBZmnLmMY= +go.etcd.io/etcd/api/v3 v3.6.2 h1:25aCkIMjUmiiOtnBIp6PhNj4KdcURuBak0hU2P1fgRc= +go.etcd.io/etcd/api/v3 v3.6.2/go.mod h1:eFhhvfR8Px1P6SEuLT600v+vrhdDTdcfMzmnxVXXSbk= +go.etcd.io/etcd/client/pkg/v3 v3.6.2 h1:zw+HRghi/G8fKpgKdOcEKpnBTE4OO39T6MegA0RopVU= +go.etcd.io/etcd/client/pkg/v3 v3.6.2/go.mod h1:sbdzr2cl3HzVmxNw//PH7aLGVtY4QySjQFuaCgcRFAI= +go.etcd.io/etcd/client/v3 v3.6.2 h1:RgmcLJxkpHqpFvgKNwAQHX3K+wsSARMXKgjmUSpoSKQ= +go.etcd.io/etcd/client/v3 v3.6.2/go.mod h1:PL7e5QMKzjybn0FosgiWvCUDzvdChpo5UgGR4Sk4Gzc= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -1239,8 +1239,8 @@ golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= -golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= +golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= diff --git a/vendor/github.com/nats-io/nats.go/internal/syncx/map.go b/vendor/github.com/nats-io/nats.go/internal/syncx/map.go new file mode 100644 index 000000000..d2278e62a --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/internal/syncx/map.go @@ -0,0 +1,73 @@ +// Copyright 2024 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package syncx + +import "sync" + +// Map is a type-safe wrapper around sync.Map. +// It is safe for concurrent use. +// The zero value of Map is an empty map ready to use. +type Map[K comparable, V any] struct { + m sync.Map +} + +func (m *Map[K, V]) Load(key K) (V, bool) { + v, ok := m.m.Load(key) + if !ok { + var empty V + return empty, false + } + return v.(V), true +} + +func (m *Map[K, V]) Store(key K, value V) { + m.m.Store(key, value) +} + +func (m *Map[K, V]) Delete(key K) { + m.m.Delete(key) +} + +func (m *Map[K, V]) Range(f func(key K, value V) bool) { + m.m.Range(func(key, value any) bool { + return f(key.(K), value.(V)) + }) +} + +func (m *Map[K, V]) LoadOrStore(key K, value V) (V, bool) { + v, loaded := m.m.LoadOrStore(key, value) + return v.(V), loaded +} + +func (m *Map[K, V]) LoadAndDelete(key K) (V, bool) { + v, ok := m.m.LoadAndDelete(key) + if !ok { + var empty V + return empty, false + } + return v.(V), true +} + +func (m *Map[K, V]) CompareAndSwap(key K, old, new V) bool { + return m.m.CompareAndSwap(key, old, new) +} + +func (m *Map[K, V]) CompareAndDelete(key K, value V) bool { + return m.m.CompareAndDelete(key, value) +} + +func (m *Map[K, V]) Swap(key K, value V) (V, bool) { + previous, loaded := m.m.Swap(key, value) + return previous.(V), loaded +} diff --git a/vendor/github.com/nats-io/nats.go/jetstream/README.md b/vendor/github.com/nats-io/nats.go/jetstream/README.md new file mode 100644 index 000000000..9aca8cd09 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/jetstream/README.md @@ -0,0 +1,1040 @@ + +# JetStream Simplified Client [![JetStream API Reference](https://pkg.go.dev/badge/github.com/nats-io/nats.go/jetstream.svg)](https://pkg.go.dev/github.com/nats-io/nats.go/jetstream) + +This doc covers the basic usage of the `jetstream` package in `nats.go` client. 
+ +- [Overview](#overview) +- [Basic usage](#basic-usage) +- [Streams](#streams) +- [Stream management (CRUD)](#stream-management-crud) +- [Listing streams and stream names](#listing-streams-and-stream-names) +- [Stream-specific operations](#stream-specific-operations) +- [Consumers](#consumers) +- [Consumers management](#consumers-management) +- [Listing consumers and consumer + names](#listing-consumers-and-consumer-names) +- [Ordered consumers](#ordered-consumers) +- [Receiving messages from the + consumer](#receiving-messages-from-the-consumer) + - [Single fetch](#single-fetch) + - [Continuous polling](#continuous-polling) + - [Using `Consume()` to receive messages in a + callback](#using-consume-to-receive-messages-in-a-callback) + - [Using `Messages()` to iterate over incoming + messages](#using-messages-to-iterate-over-incoming-messages) +- [Publishing on stream](#publishing-on-stream) +- [Synchronous publish](#synchronous-publish) +- [Async publish](#async-publish) +- [KeyValue Store](#keyvalue-store) +- [Basic usage of KV bucket](#basic-usage-of-kv-bucket) +- [Watching for changes on a bucket](#watching-for-changes-on-a-bucket) +- [Additional operations on a bucket](#additional-operations-on-a-bucket) +- [Object Store](#object-store) +- [Basic usage of Object Store](#basic-usage-of-object-store) +- [Watching for changes on a store](#watching-for-changes-on-a-store) +- [Additional operations on a store](#additional-operations-on-a-store) +- [Examples](#examples) + +## Overview + +The `jetstream` package is a new client API to interact with NATS JetStream, aiming +to replace the JetStream client implementation from the `nats` package. The main +goal of this package is to provide a simple and clear way to interact with the +JetStream API. Key differences between the `jetstream` and `nats` packages include: + +- Using smaller, simpler interfaces to manage streams and consumers +- Using a more granular and predictable approach to consuming messages from a + stream, instead of relying on the often complicated and unpredictable + `Subscribe()` method (and all of its flavors) +- Allowing the usage of pull consumers to continuously receive incoming messages + (including ordered consumer functionality) +- Separating the JetStream context from core NATS + +The `jetstream` package provides several ways of interacting with the API: + +- `JetStream` - top-level interface, used to create and manage streams and + consumers, and to publish messages +- `Stream` - used to manage consumers for a specific stream, as well as + performing stream-specific operations (purging, fetching and deleting messages + by sequence number, fetching stream info) +- `Consumer` - used to get information about a consumer as well as consuming + messages +- `Msg` - used for message-specific operations - reading data, headers and + metadata, as well as performing various types of acknowledgements + +Additionally, `jetstream` exposes [KeyValue Store](#keyvalue-store) and +[ObjectStore](#object-store) capabilities. KV and Object stores are abstraction +layers on top of JetStream Streams, simplifying key value and large data +storage on Streams. + +> __NOTE__: `jetstream` requires nats-server >= 2.9.0 to work correctly. 
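+ +All of the above start from a top-level `JetStream` handle created on top of an existing core NATS connection. Besides `jetstream.New`, the package also provides constructors for custom API prefixes and JetStream domains; a minimal sketch, where `custom.prefix` and `hub` are placeholder values: + +```go +// standard handle, using the default $JS.API. prefix +js, _ := jetstream.New(nc) + +// handle using a custom JetStream API prefix +jsPrefix, _ := jetstream.NewWithAPIPrefix(nc, "custom.prefix") + +// handle bound to a specific JetStream domain +jsDomain, _ := jetstream.NewWithDomain(nc, "hub") +```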
+ +## Basic usage + +```go +package main + +import ( + "context" + "fmt" + "strconv" + "time" + + "github.com/nats-io/nats.go" + "github.com/nats-io/nats.go/jetstream" +) + +func main() { + // In the `jetstream` package, almost all API calls rely on `context.Context` for timeout/cancellation handling + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + nc, _ := nats.Connect(nats.DefaultURL) + + // Create a JetStream management interface + js, _ := jetstream.New(nc) + + // Create a stream + s, _ := js.CreateStream(ctx, jetstream.StreamConfig{ + Name: "ORDERS", + Subjects: []string{"ORDERS.*"}, + }) + + // Publish some messages + for i := 0; i < 100; i++ { + js.Publish(ctx, "ORDERS.new", []byte("hello message "+strconv.Itoa(i))) + fmt.Printf("Published hello message %d\n", i) + } + + // Create durable consumer + c, _ := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ + Durable: "CONS", + AckPolicy: jetstream.AckExplicitPolicy, + }) + + // Get 10 messages from the consumer + messageCounter := 0 + msgs, err := c.Fetch(10) + if err != nil { + // handle error + } + + for msg := range msgs.Messages() { + msg.Ack() + fmt.Printf("Received a JetStream message via fetch: %s\n", string(msg.Data())) + messageCounter++ + } + + fmt.Printf("received %d messages\n", messageCounter) + + if msgs.Error() != nil { + fmt.Println("Error during Fetch(): ", msgs.Error()) + } + + // Receive messages continuously in a callback + cons, _ := c.Consume(func(msg jetstream.Msg) { + msg.Ack() + fmt.Printf("Received a JetStream message via callback: %s\n", string(msg.Data())) + messageCounter++ + }) + defer cons.Stop() + + // Iterate over messages continuously + it, _ := c.Messages() + for i := 0; i < 10; i++ { + msg, _ := it.Next() + msg.Ack() + fmt.Printf("Received a JetStream message via iterator: %s\n", string(msg.Data())) + messageCounter++ + } + it.Stop() + + // block until all 100 published messages have been processed + for messageCounter < 100 { + time.Sleep(10 * time.Millisecond) + } +} +``` + +## Streams + +`jetstream` provides methods to manage and list streams, as well as perform +stream-specific operations (purging, fetching/deleting messages by sequence id) + +### Stream management (CRUD) + +```go +js, _ := jetstream.New(nc) + +// create a stream (this is an idempotent operation) +s, _ := js.CreateStream(ctx, jetstream.StreamConfig{ + Name: "ORDERS", + Subjects: []string{"ORDERS.*"}, +}) + +// update a stream +s, _ = js.UpdateStream(ctx, jetstream.StreamConfig{ + Name: "ORDERS", + Subjects: []string{"ORDERS.*"}, + Description: "updated stream", +}) + +// get stream handle +s, _ = js.Stream(ctx, "ORDERS") + +// delete a stream +js.DeleteStream(ctx, "ORDERS") +``` + +### Listing streams and stream names + +```go +// list streams +streams := js.ListStreams(ctx) +for s := range streams.Info() { + fmt.Println(s.Config.Name) +} +if streams.Err() != nil { + fmt.Println("Unexpected error occurred") +} + +// list stream names +names := js.StreamNames(ctx) +for name := range names.Name() { + fmt.Println(name) +} +if names.Err() != nil { + fmt.Println("Unexpected error occurred") +} +``` + +### Stream-specific operations + +Using `Stream` interface, it is also possible to: + +- Purge a stream + +```go +// remove all messages from a stream +_ = s.Purge(ctx) + +// remove all messages from a stream that are stored on a specific subject +_ = s.Purge(ctx, jetstream.WithPurgeSubject("ORDERS.new")) + +// remove all messages up to specified sequence number +_ = s.Purge(ctx, 
jetstream.WithPurgeSequence(100)) + +// remove messages, but keep 10 newest +_ = s.Purge(ctx, jetstream.WithPurgeKeep(10)) +``` + +- Get and delete messages from a stream + +```go +// get message from stream with sequence number == 100 +msg, _ := s.GetMsg(ctx, 100) + +// get last message from "ORDERS.new" subject +msg, _ = s.GetLastMsgForSubject(ctx, "ORDERS.new") + +// delete a message with sequence number == 100 +_ = s.DeleteMsg(ctx, 100) +``` + +- Get information about a stream + +```go +// Fetches latest stream info from server +info, _ := s.Info(ctx) +fmt.Println(info.Config.Name) + +// Returns the most recently fetched StreamInfo, without making an API call to the server +cachedInfo := s.CachedInfo() +fmt.Println(cachedInfo.Config.Name) +``` + +## Consumers + +Only pull consumers are supported in the `jetstream` package. However, unlike the +JetStream API in the `nats` package, pull consumers allow for continuous message +retrieval (similarly to how `nats.Subscribe()` works). Because of that, push +consumers can be easily replaced by pull consumers for most use cases. + +### Consumers management + +CRUD operations on consumers can be performed at 2 levels: + +- on `JetStream` interface + +```go +js, _ := jetstream.New(nc) + +// create a consumer (this is an idempotent operation) +// an error will be returned if consumer already exists and has different configuration. +cons, _ := js.CreateConsumer(ctx, "ORDERS", jetstream.ConsumerConfig{ + Durable: "foo", + AckPolicy: jetstream.AckExplicitPolicy, +}) + +// create an ephemeral pull consumer by not providing `Durable` +ephemeral, _ := js.CreateConsumer(ctx, "ORDERS", jetstream.ConsumerConfig{ + AckPolicy: jetstream.AckExplicitPolicy, +}) + + +// a consumer can also be created using CreateOrUpdateConsumer +// this method will either create a consumer if it does not exist +// or update an existing consumer (if possible) +cons2, _ := js.CreateOrUpdateConsumer(ctx, "ORDERS", jetstream.ConsumerConfig{ + Name: "bar", +}) + +// consumers can be updated +// an error will be returned if a consumer with the given name does not exist +// or an illegal property is to be updated (e.g. 
AckPolicy) +updated, _ := js.UpdateConsumer(ctx, "ORDERS", jetstream.ConsumerConfig{ + AckPolicy: jetstream.AckExplicitPolicy, + Description: "updated consumer", +}) + +// get consumer handle +cons, _ = js.Consumer(ctx, "ORDERS", "foo") + +// delete a consumer +js.DeleteConsumer(ctx, "ORDERS", "foo") +``` + +- on `Stream` interface + +```go +// Create a JetStream management interface +js, _ := jetstream.New(nc) + +// get stream handle +stream, _ := js.Stream(ctx, "ORDERS") + +// create consumer +cons, _ := stream.CreateConsumer(ctx, jetstream.ConsumerConfig{ + Durable: "foo", + AckPolicy: jetstream.AckExplicitPolicy, +}) + +// get consumer handle +cons, _ = stream.Consumer(ctx, "foo") + +// delete a consumer +stream.DeleteConsumer(ctx, "foo") +``` + +The `Consumer` interface, returned when creating/fetching consumers, allows fetching +`ConsumerInfo`: + +```go +// Fetches latest consumer info from server +info, _ := cons.Info(ctx) +fmt.Println(info.Config.Durable) + +// Returns the most recently fetched ConsumerInfo, without making an API call to the server +cachedInfo := cons.CachedInfo() +fmt.Println(cachedInfo.Config.Durable) +``` + +### Listing consumers and consumer names + +```go +// list consumers +consumers := s.ListConsumers(ctx) +for cons := range consumers.Info() { + fmt.Println(cons.Name) +} +if consumers.Err() != nil { + fmt.Println("Unexpected error occurred") +} + +// list consumer names +names := s.ConsumerNames(ctx) +for name := range names.Name() { + fmt.Println(name) +} +if names.Err() != nil { + fmt.Println("Unexpected error occurred") +} +``` + +### Ordered consumers + +`jetstream`, in addition to basic named/ephemeral consumers, supports ordered +consumer functionality. An ordered consumer processes messages strictly in the order +in which they were stored on the stream, providing consistent and deterministic +message ordering. It is also resilient to consumer deletion. + +Ordered consumers present the same set of message consumption methods as +standard pull consumers. + +```go +js, _ := jetstream.New(nc) + +// create a consumer (this is an idempotent operation) +cons, _ := js.OrderedConsumer(ctx, "ORDERS", jetstream.OrderedConsumerConfig{ + // Filter results from "ORDERS" stream by specific subject + FilterSubjects: []string{"ORDERS.A"}, +}) +``` + +### Receiving messages from the consumer + +The `Consumer` interface allows fetching messages on demand, with a +pre-defined batch size or bytes limit, or continuous push-like receiving of +messages. + +#### __Single fetch__ + +This pattern allows fetching a defined number of messages in a single +RPC. + +- Using `Fetch` or `FetchBytes`, the consumer will return up to the provided number +of messages/bytes. 
By default, `Fetch()` will wait 30 seconds before timing out +(this behavior can be configured using the `FetchMaxWait()` option): + +```go +// receive up to 10 messages from the stream +msgs, err := c.Fetch(10) +if err != nil { + // handle error +} + +for msg := range msgs.Messages() { + fmt.Printf("Received a JetStream message: %s\n", string(msg.Data())) +} + +if msgs.Error() != nil { + // handle error +} + +// receive up to 1024 B of data +msgs, err = c.FetchBytes(1024) +if err != nil { + // handle error +} + +for msg := range msgs.Messages() { + fmt.Printf("Received a JetStream message: %s\n", string(msg.Data())) +} + +if msgs.Error() != nil { + // handle error +} +``` + +Similarly, `FetchNoWait()` can be used to only return messages that are available in the +stream at the time the request is sent: + +```go +// FetchNoWait will not wait for new messages if the whole batch is not available at the time the request is sent. +msgs, err := c.FetchNoWait(10) +if err != nil { + // handle error +} + +for msg := range msgs.Messages() { + fmt.Printf("Received a JetStream message: %s\n", string(msg.Data())) +} + +if msgs.Error() != nil { + // handle error +} +``` + +> __Warning__: Both `Fetch()` and `FetchNoWait()` have worse performance when +> used to continuously retrieve messages in comparison to `Messages()` or +`Consume()` methods, as they do not perform any optimizations (pre-buffering) +and a new subscription is created for each execution. + +#### Continuous polling + +There are 2 ways to achieve push-like behavior using pull consumers in the +`jetstream` package. Both `Messages()` and `Consume()` methods perform similar optimizations +and for most cases can be used interchangeably. + +There is an advantage of using `Messages()` instead of `Consume()` for work-queue scenarios, +where messages should be fetched one by one, as it allows for finer control over fetching +single messages on demand. + +Subject filtering is achieved by configuring a consumer with a `FilterSubject` +value. + +##### Using `Consume()` to receive messages in a callback + +```go +cons, _ := js.CreateOrUpdateConsumer(ctx, "ORDERS", jetstream.ConsumerConfig{ + AckPolicy: jetstream.AckExplicitPolicy, + // receive messages from ORDERS.A subject only + FilterSubject: "ORDERS.A", +}) + +consContext, _ := cons.Consume(func(msg jetstream.Msg) { + fmt.Printf("Received a JetStream message: %s\n", string(msg.Data())) +}) +defer consContext.Stop() +``` + +Similarly to `Messages()`, `Consume()` can be supplied with options to modify +the behavior of a single pull request: + +- `PullMaxMessages(int)` - up to provided number of messages will be buffered +- `PullMaxBytes(int)` - up to provided number of bytes will be buffered. This + setting and `PullMaxMessages` are mutually exclusive. + The value should be set to a high enough value to accommodate the largest + message expected from the server. Note that it may not be sufficient to set + this value to the maximum message size, as this setting controls the client + buffer size, not the max bytes requested from the server within a single pull + request. If the value is set too low, the consumer will stall and not be able + to consume messages. 
+- `PullExpiry(time.Duration)` - timeout on a single pull request to the server +- `PullThresholdMessages(int)` - amount of messages which triggers refilling the + buffer +- `PullThresholdBytes(int)` - amount of bytes which triggers refilling the + buffer +- `PullHeartbeat(time.Duration)` - idle heartbeat duration for a single pull +request. An error will be triggered if at least 2 heartbeats are missed +- `ConsumeErrHandler(func (ConsumeContext, error))` - when used, sets a + custom error handler on `Consume()`, allowing e.g. tracking missing + heartbeats. +- `PullMaxMessagesWithBytesLimit` - up to the provided number of messages will + be buffered and a single fetch size will be limited to the provided value. + This is an advanced option and should be used with caution. Most of the time, + `PullMaxMessages` or `PullMaxBytes` should be used instead. Note that the byte + limit should never be set to a value lower than the maximum message size that + can be expected from the server. If the byte limit is lower than the maximum + message size, the consumer will stall and not be able to consume messages. + +> __NOTE__: `Stop()` should always be called on `ConsumeContext` to avoid +> leaking goroutines. + +##### Using `Messages()` to iterate over incoming messages + +```go +iter, _ := cons.Messages() +for { + msg, err := iter.Next() + // Next can return an error, e.g. when the iterator is closed or no heartbeats were received + if err != nil { + // handle error + } + fmt.Printf("Received a JetStream message: %s\n", string(msg.Data())) + msg.Ack() +} +iter.Stop() +``` + +It can also be configured to only store up to a defined number of messages/bytes +in the buffer. + +```go +// a maximum of 10 messages or 1024 bytes will be stored in memory (whichever is encountered first) +iter, _ := cons.Messages(jetstream.PullMaxMessages(10), jetstream.PullMaxBytes(1024)) +``` + +`Messages()` exposes the following options: + +- `PullMaxMessages(int)` - up to provided number of messages will be buffered +- `PullMaxBytes(int)` - up to provided number of bytes will be buffered. This + setting and `PullMaxMessages` are mutually exclusive. + The value should be set to a high enough value to accommodate the largest + message expected from the server. Note that it may not be sufficient to set + this value to the maximum message size, as this setting controls the client + buffer size, not the max bytes requested from the server within a single pull + request. If the value is set too low, the consumer will stall and not be able + to consume messages. +- `PullExpiry(time.Duration)` - timeout on a single pull request to the server +- `PullThresholdMessages(int)` - amount of messages which triggers refilling the + buffer +- `PullThresholdBytes(int)` - amount of bytes which triggers refilling the + buffer +- `PullHeartbeat(time.Duration)` - idle heartbeat duration for a single pull +request. An error will be triggered if at least 2 heartbeats are missed (unless +`WithMessagesErrOnMissingHeartbeat(false)` is used) +- `PullMaxMessagesWithBytesLimit` - up to the provided number of messages will + be buffered and a single fetch size will be limited to the provided value. + This is an advanced option and should be used with caution. Most of the time, + `PullMaxMessages` or `PullMaxBytes` should be used instead. Note that the byte + limit should never be set to a value lower than the maximum message size that + can be expected from the server. 
If the byte limit is lower than the maximum + message size, the consumer will stall and not be able to consume messages. + +##### Using `Messages()` to fetch single messages one by one + +When implementing a work queue, it is possible to use `Messages()` to +fetch messages from the server one by one, without optimizations and +pre-buffering (to avoid redeliveries when processing messages at a slow rate). + +```go +// PullMaxMessages determines how many messages will be sent to the client in a single pull request +iter, _ := cons.Messages(jetstream.PullMaxMessages(1)) +numWorkers := 5 +sem := make(chan struct{}, numWorkers) +for { + sem <- struct{}{} + go func() { + defer func() { + <-sem + }() + msg, err := iter.Next() + if err != nil { + // handle err + } + fmt.Printf("Processing msg: %s\n", string(msg.Data())) + doWork() + msg.Ack() + }() +} +``` + +## Publishing on stream + +The `JetStream` interface allows publishing messages on a stream in 2 ways: + +### __Synchronous publish__ + +```go +js, _ := jetstream.New(nc) + +// Publish message on subject ORDERS.new +// Given subject has to belong to a stream +ack, err := js.PublishMsg(ctx, &nats.Msg{ + Data: []byte("hello"), + Subject: "ORDERS.new", +}) +fmt.Printf("Published msg with sequence number %d on stream %q", ack.Sequence, ack.Stream) + +// A helper method accepting subject and data as parameters +ack, err = js.Publish(ctx, "ORDERS.new", []byte("hello")) +``` + +Both `Publish()` and `PublishMsg()` can be supplied with options allowing +setting various headers. Additionally, for `PublishMsg()` headers can be set +directly on `nats.Msg`. + +```go +// All 3 invocations work identically +ack, err := js.PublishMsg(ctx, &nats.Msg{ + Data: []byte("hello"), + Subject: "ORDERS.new", + Header: nats.Header{ + "Nats-Msg-Id": []string{"id"}, + }, +}) + +ack, err = js.PublishMsg(ctx, &nats.Msg{ + Data: []byte("hello"), + Subject: "ORDERS.new", +}, jetstream.WithMsgID("id")) + +ack, err = js.Publish(ctx, "ORDERS.new", []byte("hello"), jetstream.WithMsgID("id")) +``` + +### __Async publish__ + +```go +js, _ := jetstream.New(nc) + +// publish message and do not wait for ack +ackF, err := js.PublishMsgAsync(&nats.Msg{ + Data: []byte("hello"), + Subject: "ORDERS.new", +}) + +// block and wait for ack +select { +case ack := <-ackF.Ok(): + fmt.Printf("Published msg with sequence number %d on stream %q", ack.Sequence, ack.Stream) +case err := <-ackF.Err(): + fmt.Println(err) +} + +// similarly to synchronous publish, there is a helper method accepting subject and data +ackF, err = js.PublishAsync("ORDERS.new", []byte("hello")) +``` + +Just as for synchronous publish, `PublishAsync()` and `PublishMsgAsync()` accept +options for setting headers. + +## KeyValue Store + +JetStream KeyValue Stores offer a straightforward method for storing key-value +pairs within JetStream. These stores are supported by a specially configured +stream, designed to efficiently and compactly store these pairs. This structure +ensures rapid and convenient access to the data. 
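+ +Because each KV store is implemented on top of a regular stream, the backing stream can also be inspected with the ordinary stream API. A small sketch, assuming the conventional `KV_<bucket>` naming of the backing stream: + +```go +js, _ := jetstream.New(nc) +ctx := context.Background() + +// creating a KV store provisions a backing stream behind the scenes +kv, _ := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "profiles"}) +kv.Put(ctx, "sue.color", []byte("blue")) + +// the backing stream can be accessed like any other stream +stream, _ := js.Stream(ctx, "KV_profiles") +info, _ := stream.Info(ctx) +fmt.Println(info.Config.Name) // prints `KV_profiles` +```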
+ +The KV Store, also known as a bucket, enables the execution of various operations: + +- create/update a value for a given key +- get a value for a given key +- delete a value for a given key +- purge all values from a bucket +- list all keys in a bucket +- watch for changes on a given key set or the whole bucket +- retrieve history of changes for a given key + +### Basic usage of KV bucket + +The most basic usage of a KV bucket is to create or retrieve a bucket and perform +basic CRUD operations on keys. + +```go +js, _ := jetstream.New(nc) +ctx := context.Background() + +// Create a new bucket. Bucket name is required and has to be unique within a JetStream account. +kv, _ := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "profiles"}) + +// Set a value for a given key +// Put will either create or update a value for a given key +kv.Put(ctx, "sue.color", []byte("blue")) + +// Get an entry for a given key +// Entry contains key/value, but also metadata (revision, timestamp, etc.) +entry, _ := kv.Get(ctx, "sue.color") + +// Prints `sue.color @ 1 -> "blue"` +fmt.Printf("%s @ %d -> %q\n", entry.Key(), entry.Revision(), string(entry.Value())) + +// Update a value for a given key +// Update will fail if the key does not exist or the revision has changed +kv.Update(ctx, "sue.color", []byte("red"), 1) + +// Create will fail if the key already exists +_, err := kv.Create(ctx, "sue.color", []byte("purple")) +fmt.Println(err) // prints `nats: key exists` + +// Delete a value for a given key. +// Delete is not destructive, it will add a delete marker for a given key +// and all previous revisions will still be available +kv.Delete(ctx, "sue.color") + +// getting a deleted key will return an error +_, err = kv.Get(ctx, "sue.color") +fmt.Println(err) // prints `nats: key not found` + +// A bucket can be deleted once it is no longer needed +js.DeleteKeyValue(ctx, "profiles") +``` + +### Watching for changes on a bucket + +KV buckets support Watchers, which can be used to watch for changes on a given +key or the whole bucket. The watcher will receive a notification on a channel when a +change occurs. By default, the watcher will return initial values for all matching +keys. After sending all initial values, the watcher will send nil on the channel to +signal that all initial values have been sent, and will then send updates as +changes occur. + +Watcher supports several configuration options: + +- `IncludeHistory` will have the key watcher send all historical values +for each key (up to KeyValueMaxHistory). +- `IgnoreDeletes` will have the key watcher not pass any keys with +delete markers. +- `UpdatesOnly` will have the key watcher only pass updates on values +(without values already present when starting). +- `MetaOnly` will have the key watcher retrieve only the entry metadata, not the entry value. +- `ResumeFromRevision` instructs the key watcher to resume from a +specific revision number. + +```go +js, _ := jetstream.New(nc) +ctx := context.Background() +kv, _ := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "profiles"}) + +kv.Put(ctx, "sue.color", []byte("blue")) + +// A watcher can be created to watch for changes on a given key or the whole bucket +// By default, a watcher will return the most recent values for all matching keys. +// A watcher can be configured to only return updates by using the jetstream.UpdatesOnly() option. 
+watcher, _ := kv.Watch(ctx, "sue.*") +defer watcher.Stop() + +kv.Put(ctx, "sue.age", []byte("43")) +kv.Put(ctx, "sue.color", []byte("red")) + +// First, the watcher sends most recent values for all matching keys. +// In this case, it will send a single entry for `sue.color`. +entry := <-watcher.Updates() +// Prints `sue.color @ 1 -> "blue"` +fmt.Printf("%s @ %d -> %q\n", entry.Key(), entry.Revision(), string(entry.Value())) + +// After all current values have been sent, watcher will send nil on the channel. +entry = <-watcher.Updates() +if entry != nil { + fmt.Println("Unexpected entry received") +} + +// After that, watcher will send updates when changes occur +// In this case, it will send an entry for `sue.age` and `sue.color`. + +entry = <-watcher.Updates() +// Prints `sue.age @ 2 -> "43"` +fmt.Printf("%s @ %d -> %q\n", entry.Key(), entry.Revision(), string(entry.Value())) + +entry = <-watcher.Updates() +// Prints `sue.color @ 3 -> "red"` +fmt.Printf("%s @ %d -> %q\n", entry.Key(), entry.Revision(), string(entry.Value())) +``` + +### Additional operations on a bucket + +In addition to basic CRUD operations and watching for changes, KV buckets +support several additional operations: + +- `ListKeys` will return all keys in a bucket + +```go +js, _ := jetstream.New(nc) +ctx := context.Background() +kv, _ := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "profiles"}) + +kv.Put(ctx, "sue.color", []byte("blue")) +kv.Put(ctx, "sue.age", []byte("43")) +kv.Put(ctx, "bucket", []byte("profiles")) + +keys, _ := kv.ListKeys(ctx) + +// Prints all 3 keys +for key := range keys.Keys() { + fmt.Println(key) +} +``` + +- `Purge` and `PurgeDeletes` for removing all keys from a bucket + +```go +js, _ := jetstream.New(nc) +ctx := context.Background() +kv, _ := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "profiles"}) + +kv.Put(ctx, "sue.color", []byte("blue")) +kv.Put(ctx, "sue.age", []byte("43")) +kv.Put(ctx, "bucket", []byte("profiles")) + +// Purge will remove all keys from a bucket. +// The latest revision of each key will be kept +// with a delete marker, all previous revisions will be removed +// permanently. +kv.Purge(ctx) + +// PurgeDeletes will remove all keys from a bucket +// with a delete marker. +kv.PurgeDeletes(ctx) +``` + +- `Status` will return the current status of a bucket + +```go +js, _ := jetstream.New(nc) +ctx := context.Background() +kv, _ := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "profiles"}) + +kv.Put(ctx, "sue.color", []byte("blue")) +kv.Put(ctx, "sue.age", []byte("43")) +kv.Put(ctx, "bucket", []byte("profiles")) + +status, _ := kv.Status(ctx) + +fmt.Println(status.Bucket()) // prints `profiles` +fmt.Println(status.Values()) // prints `3` +fmt.Println(status.Bytes()) // prints the size of all values in bytes +``` + +## Object Store + +JetStream Object Stores offer a straightforward method for storing large objects +within JetStream. These stores are backed by a specially configured stream, +designed to efficiently and compactly store these objects. + +The Object Store, also known as a bucket, enables the execution of various +operations: + +- create/update an object +- get an object +- delete an object +- list all objects in a bucket +- watch for changes on objects in a bucket +- create links to other objects or other buckets + +### Basic usage of Object Store + +The most basic usage of an Object bucket is to create or retrieve a bucket and +perform basic CRUD operations on objects. 
+ +```go +js, _ := jetstream.New(nc) +ctx := context.Background() + +// Create a new bucket. Bucket name is required and has to be unique within a JetStream account. +os, _ := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "configs"}) + +config1 := bytes.NewBufferString("first config") +// Put an object in a bucket. Put expects an object metadata and a reader +// to read the object data from. +os.Put(ctx, jetstream.ObjectMeta{Name: "config-1"}, config1) + +// Objects can also be created using various helper methods + +// 1. As raw strings +os.PutString(ctx, "config-2", "second config") + +// 2. As raw bytes +os.PutBytes(ctx, "config-3", []byte("third config")) + +// 3. As a file +os.PutFile(ctx, "config-4.txt") + +// Get an object +// Get returns a reader and object info +// Similar to Put, Get can also be used with helper methods +// to retrieve object data as a string, bytes or to save it to a file +object, _ := os.Get(ctx, "config-1") +data, _ := io.ReadAll(object) +info, _ := object.Info() + +// Prints `configs.config-1 -> "first config"` +fmt.Printf("%s.%s -> %q\n", info.Bucket, info.Name, string(data)) + +// Delete an object. +// Delete will remove object data from stream, but object metadata will be kept +// with a delete marker. +os.Delete(ctx, "config-1") + +// getting a deleted object will return an error +_, err := os.Get(ctx, "config-1") +fmt.Println(err) // prints `nats: object not found` + +// A bucket can be deleted once it is no longer needed +js.DeleteObjectStore(ctx, "configs") +``` + +### Watching for changes on a store + +Object Stores support Watchers, which can be used to watch for changes on +objects in a given bucket. Watcher will receive a notification on a channel when +a change occurs. By default, watcher will return latest information for all +objects in a bucket. After sending all initial values, watcher will send nil on +the channel to signal that all initial values have been sent and it will start +sending updates when changes occur. + +>__NOTE:__ Watchers do not retrieve values for objects, only metadata (containing +>information such as object name, bucket name, object size etc.). If object data +>is required, `Get` method should be used. + +Watcher supports several configuration options: + +- `IncludeHistory` will have the watcher send historical updates for each + object. +- `IgnoreDeletes` will have the watcher not pass any objects with delete + markers. +- `UpdatesOnly` will have the watcher only pass updates on objects (without + objects already present when starting). + +```go +js, _ := jetstream.New(nc) +ctx := context.Background() +os, _ := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "configs"}) + +os.PutString(ctx, "config-1", "first config") + +// By default, watcher will return most recent values for all objects in a bucket. +// Watcher can be configured to only return updates by using jetstream.UpdatesOnly() option. +watcher, _ := os.Watch(ctx) +defer watcher.Stop() + +// create a second object +os.PutString(ctx, "config-2", "second config") + +// update metadata of the first object +os.UpdateMeta(ctx, "config-1", jetstream.ObjectMeta{Name: "config-1", Description: "updated config"}) + +// First, the watcher sends most recent values for all matching objects. +// In this case, it will send a single entry for `config-1`. 
+object := <-watcher.Updates() +// Prints `configs.config-1 -> ""` +fmt.Printf("%s.%s -> %q\n", object.Bucket, object.Name, object.Description) + +// After all current values have been sent, watcher will send nil on the channel. +object = <-watcher.Updates() +if object != nil { + fmt.Println("Unexpected object received") +} + +// After that, watcher will send updates when changes occur +// In this case, it will send an entry for `config-2` and `config-1`. +object = <-watcher.Updates() +// Prints `configs.config-2 -> ""` +fmt.Printf("%s.%s -> %q\n", object.Bucket, object.Name, object.Description) + +object = <-watcher.Updates() +// Prints `configs.config-1 -> "updated config"` +fmt.Printf("%s.%s -> %q\n", object.Bucket, object.Name, object.Description) +``` + +### Additional operations on a store + +In addition to basic CRUD operations and watching for changes, Object Stores +support several additional operations: + +- `UpdateMeta` for updating object metadata, such as name, description, etc. + +```go +js, _ := jetstream.New(nc) +ctx := context.Background() +os, _ := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "configs"}) + +os.PutString(ctx, "config", "data") + +// update metadata of the object to e.g. add a description +os.UpdateMeta(ctx, "config", jetstream.ObjectMeta{Name: "config", Description: "this is a config"}) + +// object can be moved under a new name (unless it already exists) +os.UpdateMeta(ctx, "config", jetstream.ObjectMeta{Name: "config-1", Description: "updated config"}) +``` + +- `List` for listing information about all objects in a bucket: + +```go +js, _ := jetstream.New(nc) +ctx := context.Background() +os, _ := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "configs"}) + +os.PutString(ctx, "config-1", "cfg1") +os.PutString(ctx, "config-2", "cfg1") +os.PutString(ctx, "config-3", "cfg1") + +// List will return information about all objects in a bucket +objects, _ := os.List(ctx) + +// Prints all 3 objects +for _, object := range objects { + fmt.Println(object.Name) +} +``` + +- `Status` will return the current status of a bucket + +```go +js, _ := jetstream.New(nc) +ctx := context.Background() +os, _ := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "configs"}) + +os.PutString(ctx, "config-1", "cfg1") +os.PutString(ctx, "config-2", "cfg1") +os.PutString(ctx, "config-3", "cfg1") + +status, _ := os.Status(ctx) + +fmt.Println(status.Bucket()) // prints `configs` +fmt.Println(status.Size()) // prints the size of the bucket in bytes +``` + +## Examples + +You can find more examples of `jetstream` usage [here](https://github.com/nats-io/nats.go/tree/main/examples/jetstream). diff --git a/vendor/github.com/nats-io/nats.go/jetstream/api.go b/vendor/github.com/nats-io/nats.go/jetstream/api.go new file mode 100644 index 000000000..0fc5d91ae --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/jetstream/api.go @@ -0,0 +1,158 @@ +// Copyright 2022-2025 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jetstream + +import ( + "context" + "encoding/json" + "strings" +) + +type ( + apiResponse struct { + Type string `json:"type"` + Error *APIError `json:"error,omitempty"` + } + + // apiPaged includes variables used to create paged responses from the JSON API + apiPaged struct { + Total int `json:"total"` + Offset int `json:"offset"` + Limit int `json:"limit"` + } +) + +// Request API subjects for JetStream. +const ( + // DefaultAPIPrefix is the default prefix for the JetStream API. + DefaultAPIPrefix = "$JS.API." + + // jsDomainT is used to create JetStream API prefix by specifying only Domain + jsDomainT = "$JS.%s.API." + + // jsExtDomainT is used to create a StreamSource External APIPrefix + jsExtDomainT = "$JS.%s.API" + + // apiAccountInfo is for obtaining general information about JetStream. + apiAccountInfo = "INFO" + + // apiConsumerCreateT is used to create consumers. + apiConsumerCreateT = "CONSUMER.CREATE.%s.%s" + + // apiConsumerCreateWithFilterSubjectT is used to create consumers. + // It accepts stream name, consumer name and filter subject + apiConsumerCreateWithFilterSubjectT = "CONSUMER.CREATE.%s.%s.%s" + + // apiConsumerInfoT is used to get consumer information. + apiConsumerInfoT = "CONSUMER.INFO.%s.%s" + + // apiRequestNextT is the prefix for the request next message(s) for a consumer in worker/pull mode. + apiRequestNextT = "CONSUMER.MSG.NEXT.%s.%s" + + // apiConsumerDeleteT is used to delete consumers. + apiConsumerDeleteT = "CONSUMER.DELETE.%s.%s" + + // apiConsumerPauseT is used to pause a consumer. + apiConsumerPauseT = "CONSUMER.PAUSE.%s.%s" + + // apiConsumerListT is used to return all detailed consumer information + apiConsumerListT = "CONSUMER.LIST.%s" + + // apiConsumerNamesT is used to return a list with all consumer names for the stream. + apiConsumerNamesT = "CONSUMER.NAMES.%s" + + // apiStreams can lookup a stream by subject. + apiStreams = "STREAM.NAMES" + + // apiStreamCreateT is the endpoint to create new streams. + apiStreamCreateT = "STREAM.CREATE.%s" + + // apiStreamInfoT is the endpoint to get information on a stream. + apiStreamInfoT = "STREAM.INFO.%s" + + // apiStreamUpdateT is the endpoint to update existing streams. + apiStreamUpdateT = "STREAM.UPDATE.%s" + + // apiStreamDeleteT is the endpoint to delete streams. + apiStreamDeleteT = "STREAM.DELETE.%s" + + // apiStreamPurgeT is the endpoint to purge streams. + apiStreamPurgeT = "STREAM.PURGE.%s" + + // apiStreamListT is the endpoint that will return all detailed stream information + apiStreamListT = "STREAM.LIST" + + // apiMsgGetT is the endpoint to get a message. + apiMsgGetT = "STREAM.MSG.GET.%s" + + // apiDirectMsgGetT is the endpoint to perform a direct get of a message. + apiDirectMsgGetT = "DIRECT.GET.%s" + + // apiDirectMsgGetLastBySubjectT is the endpoint to perform a direct get of a message by subject. + apiDirectMsgGetLastBySubjectT = "DIRECT.GET.%s.%s" + + // apiMsgDeleteT is the endpoint to remove a message. + apiMsgDeleteT = "STREAM.MSG.DELETE.%s" + + // apiConsumerUnpinT is the endpoint to unpin a consumer. + apiConsumerUnpinT = "CONSUMER.UNPIN.%s.%s" +) + +func (js *jetStream) apiRequestJSON(ctx context.Context, subject string, resp any, data ...[]byte) (*jetStreamMsg, error) { + jsMsg, err := js.apiRequest(ctx, subject, data...) 
+ if err != nil { + return nil, err + } + if err := json.Unmarshal(jsMsg.Data(), resp); err != nil { + return nil, err + } + return jsMsg, nil +} + +// apiRequest performs a RequestWithContext, with optional tracing via the ClientTrace callbacks +func (js *jetStream) apiRequest(ctx context.Context, subj string, data ...[]byte) (*jetStreamMsg, error) { + subj = js.apiSubject(subj) + var req []byte + if len(data) > 0 { + req = data[0] + } + if js.opts.clientTrace != nil { + ctrace := js.opts.clientTrace + if ctrace.RequestSent != nil { + ctrace.RequestSent(subj, req) + } + } + resp, err := js.conn.RequestWithContext(ctx, subj, req) + if err != nil { + return nil, err + } + if js.opts.clientTrace != nil { + ctrace := js.opts.clientTrace + if ctrace.ResponseReceived != nil { + ctrace.ResponseReceived(subj, resp.Data, resp.Header) + } + } + + return js.toJSMsg(resp), nil +} + +func (js *jetStream) apiSubject(subj string) string { + if js.opts.apiPrefix == "" { + return subj + } + var b strings.Builder + b.WriteString(js.opts.apiPrefix) + b.WriteString(subj) + return b.String() +} diff --git a/vendor/github.com/nats-io/nats.go/jetstream/consumer.go b/vendor/github.com/nats-io/nats.go/jetstream/consumer.go new file mode 100644 index 000000000..3f390627d --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/jetstream/consumer.go @@ -0,0 +1,410 @@ +// Copyright 2022-2025 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jetstream + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/nats-io/nats.go/internal/syncx" + "github.com/nats-io/nuid" +) + +type ( + + // Consumer contains methods for fetching/processing messages from a stream, + // as well as fetching consumer info. + // + // This package provides two implementations of the Consumer interface: + // + // - Standard named/ephemeral pull consumers. These consumers are created using + // CreateConsumer method on Stream or JetStream interface. They can be + // explicitly configured (using [ConsumerConfig]) and managed by the user, + // either from this package or externally. + // + // - Ordered consumers. These consumers are created using OrderedConsumer + // method on Stream or JetStream interface. They are managed by the library + // and provide a simple way to consume messages from a stream. Ordered + // consumers are ephemeral in-memory pull consumers and are resilient to + // deletes and restarts. They provide limited configuration options + // using [OrderedConsumerConfig]. + // + // Consumer provides methods for optimized continuous consumption of messages + // using Consume and Messages methods, as well as simple one-off message + // retrieval using Fetch and Next methods. + Consumer interface { + // Fetch is used to retrieve up to a provided number of messages from a + // stream. This method will send a single request and deliver all + // requested messages unless the timeout is reached earlier. Fetch timeout + // defaults to 30 seconds and can be configured using FetchMaxWait + // option. 
+ // + // By default, Fetch uses a 5s idle heartbeat for requests longer than + // 10 seconds. For shorter requests, the idle heartbeat is disabled. + // This can be configured using FetchHeartbeat option. If a client does + // not receive a heartbeat message from a stream for more than 2 times + // the idle heartbeat setting, Fetch will return [ErrNoHeartbeat]. + // + // Fetch is non-blocking and returns MessageBatch, exposing a channel + // for delivered messages. + // + // Messages channel is always closed, thus it is safe to range over it + // without additional checks. After the channel is closed, + // MessageBatch.Error() should be checked to see if there was an error + // during message delivery (e.g. missing heartbeat). + Fetch(batch int, opts ...FetchOpt) (MessageBatch, error) + + // FetchBytes is used to retrieve up to a provided number of bytes from the + // stream. This method will send a single request and deliver up to the + // provided number of bytes unless the timeout is reached earlier. FetchBytes + // timeout defaults to 30 seconds and can be configured using + // FetchMaxWait option. + // + // By default, FetchBytes uses a 5s idle heartbeat for requests longer than + // 10 seconds. For shorter requests, the idle heartbeat is disabled. + // This can be configured using FetchHeartbeat option. If a client does + // not receive a heartbeat message from a stream for more than 2 times + // the idle heartbeat setting, FetchBytes will return ErrNoHeartbeat. + // + // FetchBytes is non-blocking and returns MessageBatch, exposing a channel + // for delivered messages. + // + // Messages channel is always closed, thus it is safe to range over it + // without additional checks. After the channel is closed, + // MessageBatch.Error() should be checked to see if there was an error + // during message delivery (e.g. missing heartbeat). + FetchBytes(maxBytes int, opts ...FetchOpt) (MessageBatch, error) + + // FetchNoWait is used to retrieve up to a provided number of messages + // from a stream. Unlike Fetch, FetchNoWait will only deliver messages + // that are currently available in the stream and will not wait for new + // messages to arrive, even if batch size is not met. + // + // FetchNoWait is non-blocking and returns MessageBatch, exposing a + // channel for delivered messages. + // + // Messages channel is always closed, thus it is safe to range over it + // without additional checks. After the channel is closed, + // MessageBatch.Error() should be checked to see if there was an error + // during message delivery (e.g. missing heartbeat). + FetchNoWait(batch int) (MessageBatch, error) + + // Consume will continuously receive messages and handle them + // with the provided callback function. Consume can be configured using + // PullConsumeOpt options: + // + // - Error handling and monitoring can be configured using ConsumeErrHandler + // option, which provides information about errors encountered during + // consumption (both transient and terminal) + // - Consume can be configured to stop after a certain number of + // messages is received using StopAfter option. + // - Consume can be optimized for throughput or memory usage using + // PullExpiry, PullMaxMessages, PullMaxBytes and PullHeartbeat options. + // Unless there is a specific use case, these options should not be used. + // + // Consume returns a ConsumeContext, which can be used to stop or drain + // the consumer. 
+ Consume(handler MessageHandler, opts ...PullConsumeOpt) (ConsumeContext, error) + + // Messages returns MessagesContext, allowing continuously iterating + // over messages on a stream. Messages can be configured using + // PullMessagesOpt options: + // + // - Messages can be optimized for throughput or memory usage using + // PullExpiry, PullMaxMessages, PullMaxBytes and PullHeartbeat options. + // Unless there is a specific use case, these options should not be used. + // - WithMessagesErrOnMissingHeartbeat can be used to enable/disable + // erroring out on MessagesContext.Next when a heartbeat is missing. + // This option is enabled by default. + Messages(opts ...PullMessagesOpt) (MessagesContext, error) + + // Next is used to retrieve the next message from the consumer. This + // method will block until the message is retrieved or timeout is + // reached. + Next(opts ...FetchOpt) (Msg, error) + + // Info fetches current ConsumerInfo from the server. + Info(context.Context) (*ConsumerInfo, error) + + // CachedInfo returns ConsumerInfo currently cached on this consumer. + // This method does not perform any network requests. The cached + // ConsumerInfo is updated on every call to Info and Update. + CachedInfo() *ConsumerInfo + } + + createConsumerRequest struct { + Stream string `json:"stream_name"` + Config *ConsumerConfig `json:"config"` + Action string `json:"action"` + } +) + +// Info fetches current ConsumerInfo from the server. +func (p *pullConsumer) Info(ctx context.Context) (*ConsumerInfo, error) { + ctx, cancel := p.js.wrapContextWithoutDeadline(ctx) + if cancel != nil { + defer cancel() + } + infoSubject := fmt.Sprintf(apiConsumerInfoT, p.stream, p.name) + var resp consumerInfoResponse + + if _, err := p.js.apiRequestJSON(ctx, infoSubject, &resp); err != nil { + return nil, err + } + if resp.Error != nil { + if resp.Error.ErrorCode == JSErrCodeConsumerNotFound { + return nil, ErrConsumerNotFound + } + return nil, resp.Error + } + if resp.Error == nil && resp.ConsumerInfo == nil { + return nil, ErrConsumerNotFound + } + + p.info = resp.ConsumerInfo + return resp.ConsumerInfo, nil +} + +// CachedInfo returns ConsumerInfo currently cached on this consumer. +// This method does not perform any network requests. The cached +// ConsumerInfo is updated on every call to Info and Update. 
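The Messages and Next methods documented above support an iterator-style loop; a short sketch under the same assumptions (cons is a Consumer obtained as in the previous sketch):

func messagesSketch(cons jetstream.Consumer) error {
	it, err := cons.Messages()
	if err != nil {
		return err
	}
	defer it.Stop()
	for i := 0; i < 10; i++ {
		msg, err := it.Next()
		if err != nil {
			// e.g. ErrMsgIteratorClosed, or a missing-heartbeat error
			return err
		}
		if err := msg.Ack(); err != nil {
			return err
		}
	}
	return nil
}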
+func (p *pullConsumer) CachedInfo() *ConsumerInfo { + return p.info +} + +func upsertConsumer(ctx context.Context, js *jetStream, stream string, cfg ConsumerConfig, action string) (Consumer, error) { + ctx, cancel := js.wrapContextWithoutDeadline(ctx) + if cancel != nil { + defer cancel() + } + req := createConsumerRequest{ + Stream: stream, + Config: &cfg, + Action: action, + } + reqJSON, err := json.Marshal(req) + if err != nil { + return nil, err + } + + consumerName := cfg.Name + if consumerName == "" { + if cfg.Durable != "" { + consumerName = cfg.Durable + } else { + consumerName = generateConsName() + } + } + if err := validateConsumerName(consumerName); err != nil { + return nil, err + } + + var ccSubj string + if cfg.FilterSubject != "" && len(cfg.FilterSubjects) == 0 { + if err := validateSubject(cfg.FilterSubject); err != nil { + return nil, err + } + ccSubj = fmt.Sprintf(apiConsumerCreateWithFilterSubjectT, stream, consumerName, cfg.FilterSubject) + } else { + ccSubj = fmt.Sprintf(apiConsumerCreateT, stream, consumerName) + } + var resp consumerInfoResponse + + if _, err := js.apiRequestJSON(ctx, ccSubj, &resp, reqJSON); err != nil { + return nil, err + } + if resp.Error != nil { + if resp.Error.ErrorCode == JSErrCodeStreamNotFound { + return nil, ErrStreamNotFound + } + return nil, resp.Error + } + + // check whether multiple filter subjects (if used) are reflected in the returned ConsumerInfo + if len(cfg.FilterSubjects) != 0 && len(resp.Config.FilterSubjects) == 0 { + return nil, ErrConsumerMultipleFilterSubjectsNotSupported + } + + return &pullConsumer{ + js: js, + stream: stream, + name: resp.Name, + durable: cfg.Durable != "", + info: resp.ConsumerInfo, + subs: syncx.Map[string, *pullSubscription]{}, + }, nil +} + +const ( + consumerActionCreate = "create" + consumerActionUpdate = "update" + consumerActionCreateOrUpdate = "" +) + +func generateConsName() string { + name := nuid.Next() + sha := sha256.New() + sha.Write([]byte(name)) + b := sha.Sum(nil) + for i := 0; i < 8; i++ { + b[i] = rdigits[int(b[i]%base)] + } + return string(b[:8]) +} + +func getConsumer(ctx context.Context, js *jetStream, stream, name string) (Consumer, error) { + ctx, cancel := js.wrapContextWithoutDeadline(ctx) + if cancel != nil { + defer cancel() + } + if err := validateConsumerName(name); err != nil { + return nil, err + } + infoSubject := fmt.Sprintf(apiConsumerInfoT, stream, name) + + var resp consumerInfoResponse + + if _, err := js.apiRequestJSON(ctx, infoSubject, &resp); err != nil { + return nil, err + } + if resp.Error != nil { + if resp.Error.ErrorCode == JSErrCodeConsumerNotFound { + return nil, ErrConsumerNotFound + } + return nil, resp.Error + } + if resp.Error == nil && resp.ConsumerInfo == nil { + return nil, ErrConsumerNotFound + } + + cons := &pullConsumer{ + js: js, + stream: stream, + name: name, + durable: resp.Config.Durable != "", + info: resp.ConsumerInfo, + subs: syncx.Map[string, *pullSubscription]{}, + } + + return cons, nil +} + +func deleteConsumer(ctx context.Context, js *jetStream, stream, consumer string) error { + ctx, cancel := js.wrapContextWithoutDeadline(ctx) + if cancel != nil { + defer cancel() + } + if err := validateConsumerName(consumer); err != nil { + return err + } + deleteSubject := fmt.Sprintf(apiConsumerDeleteT, stream, consumer) + + var resp consumerDeleteResponse + + if _, err := js.apiRequestJSON(ctx, deleteSubject, &resp); err != nil { + return err + } + if resp.Error != nil { + if resp.Error.ErrorCode == JSErrCodeConsumerNotFound { + return 
ErrConsumerNotFound + } + return resp.Error + } + return nil +} + +func pauseConsumer(ctx context.Context, js *jetStream, stream, consumer string, pauseUntil *time.Time) (*ConsumerPauseResponse, error) { + ctx, cancel := js.wrapContextWithoutDeadline(ctx) + if cancel != nil { + defer cancel() + } + if err := validateConsumerName(consumer); err != nil { + return nil, err + } + subject := fmt.Sprintf(apiConsumerPauseT, stream, consumer) + + var resp consumerPauseApiResponse + req, err := json.Marshal(consumerPauseRequest{ + PauseUntil: pauseUntil, + }) + if err != nil { + return nil, err + } + if _, err := js.apiRequestJSON(ctx, subject, &resp, req); err != nil { + return nil, err + } + if resp.Error != nil { + if resp.Error.ErrorCode == JSErrCodeConsumerNotFound { + return nil, ErrConsumerNotFound + } + return nil, resp.Error + } + return &ConsumerPauseResponse{ + Paused: resp.Paused, + PauseUntil: resp.PauseUntil, + PauseRemaining: resp.PauseRemaining, + }, nil +} + +func resumeConsumer(ctx context.Context, js *jetStream, stream, consumer string) (*ConsumerPauseResponse, error) { + return pauseConsumer(ctx, js, stream, consumer, nil) +} + +func validateConsumerName(dur string) error { + if dur == "" { + return fmt.Errorf("%w: '%s'", ErrInvalidConsumerName, "name is required") + } + if strings.ContainsAny(dur, ">*. /\\") { + return fmt.Errorf("%w: '%s'", ErrInvalidConsumerName, dur) + } + return nil +} + +func unpinConsumer(ctx context.Context, js *jetStream, stream, consumer, group string) error { + ctx, cancel := js.wrapContextWithoutDeadline(ctx) + if cancel != nil { + defer cancel() + } + if err := validateConsumerName(consumer); err != nil { + return err + } + unpinSubject := fmt.Sprintf(apiConsumerUnpinT, stream, consumer) + + var req = consumerUnpinRequest{ + Group: group, + } + + reqJSON, err := json.Marshal(req) + if err != nil { + return err + } + + var resp apiResponse + + if _, err := js.apiRequestJSON(ctx, unpinSubject, &resp, reqJSON); err != nil { + return err + } + if resp.Error != nil { + if resp.Error.ErrorCode == JSErrCodeConsumerNotFound { + return ErrConsumerNotFound + } + return resp.Error + } + + return nil +} diff --git a/vendor/github.com/nats-io/nats.go/jetstream/consumer_config.go b/vendor/github.com/nats-io/nats.go/jetstream/consumer_config.go new file mode 100644 index 000000000..e93f43ca3 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/jetstream/consumer_config.go @@ -0,0 +1,544 @@ +// Copyright 2022-2024 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jetstream + +import ( + "encoding/json" + "fmt" + "time" +) + +type ( + // ConsumerInfo is the detailed information about a JetStream consumer. + ConsumerInfo struct { + // Stream specifies the name of the stream that the consumer is bound + // to. + Stream string `json:"stream_name"` + + // Name represents the unique identifier for the consumer. This can be + // either set explicitly by the client or generated automatically if not + // set. 
+ Name string `json:"name"` + + // Created is the timestamp when the consumer was created. + Created time.Time `json:"created"` + + // Config contains the configuration settings of the consumer, set when + // creating or updating the consumer. + Config ConsumerConfig `json:"config"` + + // Delivered holds information about the most recently delivered + // message, including its sequence numbers and timestamp. + Delivered SequenceInfo `json:"delivered"` + + // AckFloor indicates the message before the first unacknowledged + // message. + AckFloor SequenceInfo `json:"ack_floor"` + + // NumAckPending is the number of messages that have been delivered but + // not yet acknowledged. + NumAckPending int `json:"num_ack_pending"` + + // NumRedelivered counts the number of messages that have been + // redelivered and not yet acknowledged. Each message is counted only + // once, even if it has been redelivered multiple times. This count is + // reset when the message is eventually acknowledged. + NumRedelivered int `json:"num_redelivered"` + + // NumWaiting is the count of active pull requests. It is only relevant + // for pull-based consumers. + NumWaiting int `json:"num_waiting"` + + // NumPending is the number of messages that match the consumer's + // filter, but have not been delivered yet. + NumPending uint64 `json:"num_pending"` + + // Cluster contains information about the cluster to which this consumer + // belongs (if applicable). + Cluster *ClusterInfo `json:"cluster,omitempty"` + + // PushBound indicates whether at least one subscription exists for the + // delivery subject of this consumer. This is only applicable to + // push-based consumers. + PushBound bool `json:"push_bound,omitempty"` + + // TimeStamp indicates when the info was gathered by the server. + TimeStamp time.Time `json:"ts"` + + // PriorityGroups contains the information about the currently defined priority groups + PriorityGroups []PriorityGroupState `json:"priority_groups,omitempty"` + + // Paused indicates whether the consumer is paused. + Paused bool `json:"paused,omitempty"` + + // PauseRemaining contains the amount of time left until the consumer + // unpauses. It will only be non-zero if the consumer is currently paused. + PauseRemaining time.Duration `json:"pause_remaining,omitempty"` + } + + PriorityGroupState struct { + // Group this status is for. + Group string `json:"group"` + + // PinnedClientID is the generated ID of the pinned client. + PinnedClientID string `json:"pinned_client_id,omitempty"` + + // PinnedTS is the timestamp when the client was pinned. + PinnedTS time.Time `json:"pinned_ts,omitempty"` + } + + // ConsumerConfig is the configuration of a JetStream consumer. + ConsumerConfig struct { + // Name is an optional name for the consumer. If not set, one is + // generated automatically. + // + // Name cannot contain whitespace, ., *, >, path separators (forward or + // backwards slash), and non-printable characters. + Name string `json:"name,omitempty"` + + // Durable is an optional durable name for the consumer. If both Durable + // and Name are set, they have to be equal. Unless InactiveThreshold is set, a + // durable consumer will not be cleaned up automatically. + // + // Durable cannot contain whitespace, ., *, >, path separators (forward or + // backwards slash), and non-printable characters. + Durable string `json:"durable_name,omitempty"` + + // Description provides an optional description of the consumer. 
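A small sketch of the Name/Durable rules spelled out above (the Description field and the remaining config fields continue below); the stream and consumer names are placeholders:

func durableConsumerSketch(ctx context.Context, js jetstream.JetStream) (jetstream.Consumer, error) {
	// A durable pull consumer: Durable and Name, if both are set, must match.
	return js.CreateOrUpdateConsumer(ctx, "ORDERS", jetstream.ConsumerConfig{
		Durable:     "processor",        // survives client restarts
		Description: "processes orders", // optional, for operators
		AckPolicy:   jetstream.AckExplicitPolicy,
	})
}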
+ Description string `json:"description,omitempty"` + + // DeliverPolicy defines from which point to start delivering messages + // from the stream. Defaults to DeliverAllPolicy. + DeliverPolicy DeliverPolicy `json:"deliver_policy"` + + // OptStartSeq is an optional sequence number from which to start + // message delivery. Only applicable when DeliverPolicy is set to + // DeliverByStartSequencePolicy. + OptStartSeq uint64 `json:"opt_start_seq,omitempty"` + + // OptStartTime is an optional time from which to start message + // delivery. Only applicable when DeliverPolicy is set to + // DeliverByStartTimePolicy. + OptStartTime *time.Time `json:"opt_start_time,omitempty"` + + // AckPolicy defines the acknowledgement policy for the consumer. + // Defaults to AckExplicitPolicy. + AckPolicy AckPolicy `json:"ack_policy"` + + // AckWait defines how long the server will wait for an acknowledgement + // before resending a message. If not set, server default is 30 seconds. + AckWait time.Duration `json:"ack_wait,omitempty"` + + // MaxDeliver defines the maximum number of delivery attempts for a + // message. Applies to any message that is re-sent due to ack policy. + // If not set, server default is -1 (unlimited). + MaxDeliver int `json:"max_deliver,omitempty"` + + // BackOff specifies the optional back-off intervals for retrying + // message delivery after a failed acknowledgement. It overrides + // AckWait. + // + // BackOff only applies to messages not acknowledged in specified time, + // not messages that were nack'ed. + // + // The number of intervals specified must be lower or equal to + // MaxDeliver. If the number of intervals is lower, the last interval is + // used for all remaining attempts. + BackOff []time.Duration `json:"backoff,omitempty"` + + // FilterSubject can be used to filter messages delivered from the + // stream. FilterSubject is exclusive with FilterSubjects. + FilterSubject string `json:"filter_subject,omitempty"` + + // ReplayPolicy defines the rate at which messages are sent to the + // consumer. If ReplayOriginalPolicy is set, messages are sent in the + // same intervals in which they were stored on stream. This can be used + // e.g. to simulate production traffic in development environments. If + // ReplayInstantPolicy is set, messages are sent as fast as possible. + // Defaults to ReplayInstantPolicy. + ReplayPolicy ReplayPolicy `json:"replay_policy"` + + // RateLimit specifies an optional maximum rate of message delivery in + // bits per second. + RateLimit uint64 `json:"rate_limit_bps,omitempty"` + + // SampleFrequency is an optional frequency for sampling how often + // acknowledgements are sampled for observability. See + // https://docs.nats.io/running-a-nats-service/nats_admin/monitoring/monitoring_jetstream + SampleFrequency string `json:"sample_freq,omitempty"` + + // MaxWaiting is a maximum number of pull requests waiting to be + // fulfilled. If not set, this will inherit settings from stream's + // ConsumerLimits or (if those are not set) from account settings. If + // neither are set, server default is 512. + MaxWaiting int `json:"max_waiting,omitempty"` + + // MaxAckPending is a maximum number of outstanding unacknowledged + // messages. Once this limit is reached, the server will suspend sending + // messages to the consumer. If not set, server default is 1000. + // Set to -1 for unlimited. + MaxAckPending int `json:"max_ack_pending,omitempty"` + + // HeadersOnly indicates whether only headers of messages should be sent + // (and no payload). 
Defaults to false.
+ HeadersOnly bool `json:"headers_only,omitempty"`
+
+ // MaxRequestBatch is the optional maximum batch size a single pull
+ // request can make. When set with MaxRequestMaxBytes, the batch size
+ // will be constrained by whichever limit is hit first.
+ MaxRequestBatch int `json:"max_batch,omitempty"`
+
+ // MaxRequestExpires is the maximum duration a single pull request will
+ // wait for messages to be available to pull.
+ MaxRequestExpires time.Duration `json:"max_expires,omitempty"`
+
+ // MaxRequestMaxBytes is the optional maximum total bytes that can be
+ // requested in a given batch. When set with MaxRequestBatch, the batch
+ // size will be constrained by whichever limit is hit first.
+ MaxRequestMaxBytes int `json:"max_bytes,omitempty"`
+
+ // InactiveThreshold is a duration which instructs the server to clean
+ // up the consumer if it has been inactive for the specified duration.
+ // Durable consumers will not be cleaned up by default, but if
+ // InactiveThreshold is set, they will be. If not set, this will inherit
+ // settings from stream's ConsumerLimits. If neither are set, server
+ // default is 5 seconds.
+ //
+ // A consumer is considered inactive when there are no pull requests
+ // received by the server (for pull consumers), or no interest detected
+ // on the deliver subject (for push consumers), not if there are no
+ // messages to be delivered.
+ InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"`
+
+ // Replicas is the number of replicas for the consumer's state. By default,
+ // consumers inherit the number of replicas from the stream.
+ Replicas int `json:"num_replicas"`
+
+ // MemoryStorage is a flag to force the consumer to use memory storage
+ // rather than inherit the storage type from the stream.
+ MemoryStorage bool `json:"mem_storage,omitempty"`
+
+ // FilterSubjects allows filtering messages from a stream by subject.
+ // This field is exclusive with FilterSubject. Requires nats-server
+ // v2.10.0 or later.
+ FilterSubjects []string `json:"filter_subjects,omitempty"`
+
+ // Metadata is a set of application-defined key-value pairs for
+ // associating metadata on the consumer. This feature requires
+ // nats-server v2.10.0 or later.
+ Metadata map[string]string `json:"metadata,omitempty"`
+
+ // PauseUntil is for suspending the consumer until the deadline.
+ PauseUntil *time.Time `json:"pause_until,omitempty"`
+
+ // PriorityPolicy represents the priority policy the consumer is set to.
+ // Requires nats-server v2.11.0 or later.
+ PriorityPolicy PriorityPolicy `json:"priority_policy,omitempty"`
+
+ // PinnedTTL represents the time after which the client will be unpinned
+ // if no new pull requests are sent. Used with PriorityPolicyPinned.
+ // Requires nats-server v2.11.0 or later.
+ PinnedTTL time.Duration `json:"priority_timeout,omitempty"`
+
+ // PriorityGroups is a list of priority groups this consumer supports.
+ PriorityGroups []string `json:"priority_groups,omitempty"`
+ }
+
+ // OrderedConsumerConfig is the configuration of an ordered JetStream
+ // consumer. For more information, see [Ordered Consumers] in README
+ //
+ // [Ordered Consumers]: https://github.com/nats-io/nats.go/blob/main/jetstream/README.md#ordered-consumers
+ OrderedConsumerConfig struct {
+ // FilterSubjects allows filtering messages from a stream by subject.
+ // This field is exclusive with FilterSubject. Requires nats-server
+ // v2.10.0 or later.
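By contrast with the fully configurable ConsumerConfig above, the ordered-consumer options are deliberately narrow; a sketch of creating one under the same js/ctx assumptions (the struct's fields continue below, names are placeholders):

func orderedSketch(ctx context.Context, js jetstream.JetStream) (jetstream.Consumer, error) {
	// Ordered consumers are ephemeral and recreated by the library on
	// failures; only a subset of options is exposed.
	return js.OrderedConsumer(ctx, "ORDERS", jetstream.OrderedConsumerConfig{
		FilterSubjects: []string{"ORDERS.created", "ORDERS.updated"},
		DeliverPolicy:  jetstream.DeliverNewPolicy,
	})
}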
+ FilterSubjects []string `json:"filter_subjects,omitempty"` + + // DeliverPolicy defines from which point to start delivering messages + // from the stream. Defaults to DeliverAllPolicy. + DeliverPolicy DeliverPolicy `json:"deliver_policy"` + + // OptStartSeq is an optional sequence number from which to start + // message delivery. Only applicable when DeliverPolicy is set to + // DeliverByStartSequencePolicy. + OptStartSeq uint64 `json:"opt_start_seq,omitempty"` + + // OptStartTime is an optional time from which to start message + // delivery. Only applicable when DeliverPolicy is set to + // DeliverByStartTimePolicy. + OptStartTime *time.Time `json:"opt_start_time,omitempty"` + + // ReplayPolicy defines the rate at which messages are sent to the + // consumer. If ReplayOriginalPolicy is set, messages are sent in the + // same intervals in which they were stored on stream. This can be used + // e.g. to simulate production traffic in development environments. If + // ReplayInstantPolicy is set, messages are sent as fast as possible. + // Defaults to ReplayInstantPolicy. + ReplayPolicy ReplayPolicy `json:"replay_policy"` + + // InactiveThreshold is a duration which instructs the server to clean + // up the consumer if it has been inactive for the specified duration. + // Defaults to 5m. + InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"` + + // HeadersOnly indicates whether only headers of messages should be sent + // (and no payload). Defaults to false. + HeadersOnly bool `json:"headers_only,omitempty"` + + // Maximum number of attempts for the consumer to be recreated in a + // single recreation cycle. Defaults to unlimited. + MaxResetAttempts int + + // Metadata is a set of application-defined key-value pairs for + // associating metadata on the consumer. This feature requires + // nats-server v2.10.0 or later. + Metadata map[string]string `json:"metadata,omitempty"` + } + + // DeliverPolicy determines from which point to start delivering messages. + DeliverPolicy int + + // AckPolicy determines how the consumer should acknowledge delivered + // messages. + AckPolicy int + + // ReplayPolicy determines how the consumer should replay messages it + // already has queued in the stream. + ReplayPolicy int + + // SequenceInfo has both the consumer and the stream sequence and last + // activity. + SequenceInfo struct { + Consumer uint64 `json:"consumer_seq"` + Stream uint64 `json:"stream_seq"` + Last *time.Time `json:"last_active,omitempty"` + } + + // PriorityPolicy determines the priority policy the consumer is set to. + PriorityPolicy int +) + +const ( + // PriorityPolicyNone is the default priority policy. + PriorityPolicyNone PriorityPolicy = iota + + // PriorityPolicyPinned is the priority policy that pins a consumer to a + // specific client. + PriorityPolicyPinned + + // PriorityPolicyOverflow is the priority policy that allows for + // restricting when a consumer will receive messages based on the number of + // pending messages or acks. 
+ PriorityPolicyOverflow +) + +func (p *PriorityPolicy) UnmarshalJSON(data []byte) error { + switch string(data) { + case jsonString(""): + *p = PriorityPolicyNone + case jsonString("pinned_client"): + *p = PriorityPolicyPinned + case jsonString("overflow"): + *p = PriorityPolicyOverflow + default: + return fmt.Errorf("nats: can not unmarshal %q", data) + } + return nil +} + +func (p PriorityPolicy) MarshalJSON() ([]byte, error) { + switch p { + case PriorityPolicyNone: + return json.Marshal("") + case PriorityPolicyPinned: + return json.Marshal("pinned_client") + case PriorityPolicyOverflow: + return json.Marshal("overflow") + } + return nil, fmt.Errorf("nats: unknown priority policy %v", p) +} + +const ( + // DeliverAllPolicy starts delivering messages from the very beginning of a + // stream. This is the default. + DeliverAllPolicy DeliverPolicy = iota + + // DeliverLastPolicy will start the consumer with the last sequence + // received. + DeliverLastPolicy + + // DeliverNewPolicy will only deliver new messages that are sent after the + // consumer is created. + DeliverNewPolicy + + // DeliverByStartSequencePolicy will deliver messages starting from a given + // sequence configured with OptStartSeq in ConsumerConfig. + DeliverByStartSequencePolicy + + // DeliverByStartTimePolicy will deliver messages starting from a given time + // configured with OptStartTime in ConsumerConfig. + DeliverByStartTimePolicy + + // DeliverLastPerSubjectPolicy will start the consumer with the last message + // for all subjects received. + DeliverLastPerSubjectPolicy +) + +func (p *DeliverPolicy) UnmarshalJSON(data []byte) error { + switch string(data) { + case jsonString("all"), jsonString("undefined"): + *p = DeliverAllPolicy + case jsonString("last"): + *p = DeliverLastPolicy + case jsonString("new"): + *p = DeliverNewPolicy + case jsonString("by_start_sequence"): + *p = DeliverByStartSequencePolicy + case jsonString("by_start_time"): + *p = DeliverByStartTimePolicy + case jsonString("last_per_subject"): + *p = DeliverLastPerSubjectPolicy + default: + return fmt.Errorf("nats: can not unmarshal %q", data) + } + + return nil +} + +func (p DeliverPolicy) MarshalJSON() ([]byte, error) { + switch p { + case DeliverAllPolicy: + return json.Marshal("all") + case DeliverLastPolicy: + return json.Marshal("last") + case DeliverNewPolicy: + return json.Marshal("new") + case DeliverByStartSequencePolicy: + return json.Marshal("by_start_sequence") + case DeliverByStartTimePolicy: + return json.Marshal("by_start_time") + case DeliverLastPerSubjectPolicy: + return json.Marshal("last_per_subject") + } + return nil, fmt.Errorf("nats: unknown deliver policy %v", p) +} + +func (p DeliverPolicy) String() string { + switch p { + case DeliverAllPolicy: + return "all" + case DeliverLastPolicy: + return "last" + case DeliverNewPolicy: + return "new" + case DeliverByStartSequencePolicy: + return "by_start_sequence" + case DeliverByStartTimePolicy: + return "by_start_time" + case DeliverLastPerSubjectPolicy: + return "last_per_subject" + } + return "" +} + +const ( + // AckExplicitPolicy requires ack or nack for all messages. + AckExplicitPolicy AckPolicy = iota + + // AckAllPolicy when acking a sequence number, this implicitly acks all + // sequences below this one as well. + AckAllPolicy + + // AckNonePolicy requires no acks for delivered messages. 
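The policy types round-trip through JSON as the wire strings listed above; a tiny sketch (encoding/json and fmt imports assumed):

func policyJSONSketch() {
	// Marshal a DeliverPolicy to its wire representation.
	b, _ := json.Marshal(jetstream.DeliverByStartSequencePolicy)
	fmt.Println(string(b)) // "by_start_sequence"

	// Unmarshal a wire string back into the typed constant.
	var p jetstream.DeliverPolicy
	_ = json.Unmarshal([]byte(`"last_per_subject"`), &p)
	fmt.Println(p == jetstream.DeliverLastPerSubjectPolicy) // true
}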
+ AckNonePolicy +) + +func (p *AckPolicy) UnmarshalJSON(data []byte) error { + switch string(data) { + case jsonString("none"): + *p = AckNonePolicy + case jsonString("all"): + *p = AckAllPolicy + case jsonString("explicit"): + *p = AckExplicitPolicy + default: + return fmt.Errorf("nats: can not unmarshal %q", data) + } + return nil +} + +func (p AckPolicy) MarshalJSON() ([]byte, error) { + switch p { + case AckNonePolicy: + return json.Marshal("none") + case AckAllPolicy: + return json.Marshal("all") + case AckExplicitPolicy: + return json.Marshal("explicit") + } + return nil, fmt.Errorf("nats: unknown acknowledgement policy %v", p) +} + +func (p AckPolicy) String() string { + switch p { + case AckNonePolicy: + return "AckNone" + case AckAllPolicy: + return "AckAll" + case AckExplicitPolicy: + return "AckExplicit" + } + return "Unknown AckPolicy" +} + +const ( + // ReplayInstantPolicy will replay messages as fast as possible. + ReplayInstantPolicy ReplayPolicy = iota + + // ReplayOriginalPolicy will maintain the same timing as the messages were + // received. + ReplayOriginalPolicy +) + +func (p *ReplayPolicy) UnmarshalJSON(data []byte) error { + switch string(data) { + case jsonString("instant"): + *p = ReplayInstantPolicy + case jsonString("original"): + *p = ReplayOriginalPolicy + default: + return fmt.Errorf("nats: can not unmarshal %q", data) + } + return nil +} + +func (p ReplayPolicy) MarshalJSON() ([]byte, error) { + switch p { + case ReplayOriginalPolicy: + return json.Marshal("original") + case ReplayInstantPolicy: + return json.Marshal("instant") + } + return nil, fmt.Errorf("nats: unknown replay policy %v", p) +} + +func (p ReplayPolicy) String() string { + switch p { + case ReplayOriginalPolicy: + return "original" + case ReplayInstantPolicy: + return "instant" + } + return "" +} diff --git a/vendor/github.com/nats-io/nats.go/jetstream/errors.go b/vendor/github.com/nats-io/nats.go/jetstream/errors.go new file mode 100644 index 000000000..c7d06588f --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/jetstream/errors.go @@ -0,0 +1,444 @@ +// Copyright 2022-2025 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jetstream + +import ( + "errors" + "fmt" +) + +type ( + // JetStreamError is an error result that happens when using JetStream. + // In case of client-side error, [APIError] returns nil. + JetStreamError interface { + APIError() *APIError + error + } + + jsError struct { + apiErr *APIError + message string + } + + // APIError is included in all API responses if there was an error. + APIError struct { + Code int `json:"code"` + ErrorCode ErrorCode `json:"err_code"` + Description string `json:"description,omitempty"` + } + + // ErrorCode represents error_code returned in response from JetStream API. 
+ ErrorCode uint16 +) + +const ( + JSErrCodeJetStreamNotEnabledForAccount ErrorCode = 10039 + JSErrCodeJetStreamNotEnabled ErrorCode = 10076 + + JSErrCodeStreamNotFound ErrorCode = 10059 + JSErrCodeStreamNameInUse ErrorCode = 10058 + + JSErrCodeConsumerCreate ErrorCode = 10012 + JSErrCodeConsumerNotFound ErrorCode = 10014 + JSErrCodeConsumerNameExists ErrorCode = 10013 + JSErrCodeConsumerAlreadyExists ErrorCode = 10105 + JSErrCodeConsumerExists ErrorCode = 10148 + JSErrCodeDuplicateFilterSubjects ErrorCode = 10136 + JSErrCodeOverlappingFilterSubjects ErrorCode = 10138 + JSErrCodeConsumerEmptyFilter ErrorCode = 10139 + JSErrCodeConsumerDoesNotExist ErrorCode = 10149 + + JSErrCodeMessageNotFound ErrorCode = 10037 + + JSErrCodeBadRequest ErrorCode = 10003 + + JSErrCodeStreamWrongLastSequence ErrorCode = 10071 +) + +var ( + // JetStream API errors + + // ErrJetStreamNotEnabled is an error returned when JetStream is not + // enabled. + // + // Note: This error will not be returned in clustered mode, even if each + // server in the cluster does not have JetStream enabled. In clustered mode, + // requests will time out instead. + ErrJetStreamNotEnabled JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabled, Description: "jetstream not enabled", Code: 503}} + + // ErrJetStreamNotEnabledForAccount is an error returned when JetStream is + // not enabled for an account. + ErrJetStreamNotEnabledForAccount JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabledForAccount, Description: "jetstream not enabled for account", Code: 503}} + + // ErrStreamNotFound is an error returned when stream with given name does + // not exist. + ErrStreamNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNotFound, Description: "stream not found", Code: 404}} + + // ErrStreamNameAlreadyInUse is returned when a stream with given name + // already exists and has a different configuration. + ErrStreamNameAlreadyInUse JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNameInUse, Description: "stream name already in use", Code: 400}} + + // ErrStreamSubjectTransformNotSupported is returned when the connected + // nats-server version does not support setting the stream subject + // transform. If this error is returned when executing CreateStream(), the + // stream with invalid configuration was already created in the server. + ErrStreamSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"} + + // ErrStreamSourceSubjectTransformNotSupported is returned when the + // connected nats-server version does not support setting the stream source + // subject transform. If this error is returned when executing + // CreateStream(), the stream with invalid configuration was already created + // in the server. + ErrStreamSourceSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"} + + // ErrStreamSourceNotSupported is returned when the connected nats-server + // version does not support setting the stream sources. If this error is + // returned when executing CreateStream(), the stream with invalid + // configuration was already created in the server. 
+ ErrStreamSourceNotSupported JetStreamError = &jsError{message: "stream sourcing is not supported by nats-server"}
+
+ // ErrStreamSourceMultipleFilterSubjectsNotSupported is returned when the
+ // connected nats-server version does not support setting the stream
+ // sources. If this error is returned when executing CreateStream(), the
+ // stream with invalid configuration was already created in the server.
+ ErrStreamSourceMultipleFilterSubjectsNotSupported JetStreamError = &jsError{message: "stream sourcing with multiple subject filters not supported by nats-server"}
+
+ // ErrConsumerNotFound is an error returned when consumer with given name
+ // does not exist.
+ ErrConsumerNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerNotFound, Description: "consumer not found", Code: 404}}
+
+ // ErrConsumerExists is returned when attempting to create a consumer with
+ // CreateConsumer but a consumer with given name already exists.
+ ErrConsumerExists JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerExists, Description: "consumer already exists", Code: 400}}
+
+ // ErrConsumerDoesNotExist is returned when attempting to update a consumer
+ // with UpdateConsumer but a consumer with given name does not exist.
+ ErrConsumerDoesNotExist JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerDoesNotExist, Description: "consumer does not exist", Code: 400}}
+
+ // ErrMsgNotFound is returned when message with provided sequence number
+ // does not exist.
+ ErrMsgNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeMessageNotFound, Description: "message not found", Code: 404}}
+
+ // ErrBadRequest is returned when invalid request is sent to JetStream API.
+ ErrBadRequest JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeBadRequest, Description: "bad request", Code: 400}}
+
+ // ErrConsumerCreate is returned when nats-server reports error when
+ // creating consumer (e.g. illegal update).
+ ErrConsumerCreate JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerCreate, Description: "could not create consumer", Code: 500}}
+
+ // ErrDuplicateFilterSubjects is returned when both FilterSubject and
+ // FilterSubjects are specified when creating consumer.
+ ErrDuplicateFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeDuplicateFilterSubjects, Description: "consumer cannot have both FilterSubject and FilterSubjects specified", Code: 500}}
+
+ // ErrOverlappingFilterSubjects is returned when filter subjects overlap when
+ // creating consumer.
+ ErrOverlappingFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeOverlappingFilterSubjects, Description: "consumer subject filters cannot overlap", Code: 500}}
+
+ // ErrEmptyFilter is returned when a filter in FilterSubjects is empty.
+ ErrEmptyFilter JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerEmptyFilter, Description: "consumer filter in FilterSubjects cannot be empty", Code: 500}}
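Because every API error variable above wraps an APIError with a distinct ErrorCode, callers can match failures with errors.Is and errors.As; a sketch under the same js/ctx assumptions, with a placeholder stream name:

func errorMatchingSketch(ctx context.Context, js jetstream.JetStream) {
	_, err := js.Stream(ctx, "NO_SUCH_STREAM") // placeholder name
	if errors.Is(err, jetstream.ErrStreamNotFound) {
		log.Println("stream is missing")
	}
	// The embedded *APIError is reachable via Unwrap for detailed codes.
	var apiErr *jetstream.APIError
	if errors.As(err, &apiErr) {
		log.Printf("code=%d err_code=%d", apiErr.Code, apiErr.ErrorCode)
	}
}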
+
+ // Client errors
+
+ // ErrConsumerMultipleFilterSubjectsNotSupported is returned when the
+ // connected nats-server version does not support setting multiple filter
+ // subjects with filter_subjects field. If this error is returned when
+ // executing AddConsumer(), the consumer with invalid configuration was
+ // already created in the server.
+ ErrConsumerMultipleFilterSubjectsNotSupported JetStreamError = &jsError{message: "multiple consumer filter subjects not supported by nats-server"}
+
+ // ErrConsumerNameAlreadyInUse is returned when attempting to create a
+ // consumer with a name that is already in use.
+ ErrConsumerNameAlreadyInUse JetStreamError = &jsError{message: "consumer name already in use"}
+
+ // ErrInvalidJSAck is returned when JetStream ack from message publish is
+ // invalid.
+ ErrInvalidJSAck JetStreamError = &jsError{message: "invalid jetstream publish response"}
+
+ // ErrStreamNameRequired is returned when the provided stream name is empty.
+ ErrStreamNameRequired JetStreamError = &jsError{message: "stream name is required"}
+
+ // ErrMsgAlreadyAckd is returned when attempting to acknowledge message more
+ // than once.
+ ErrMsgAlreadyAckd JetStreamError = &jsError{message: "message was already acknowledged"}
+
+ // ErrNoStreamResponse is returned when there is no response from stream
+ // (e.g. no responders error).
+ ErrNoStreamResponse JetStreamError = &jsError{message: "no response from stream"}
+
+ // ErrNotJSMessage is returned when attempting to get metadata from non
+ // JetStream message.
+ ErrNotJSMessage JetStreamError = &jsError{message: "not a jetstream message"}
+
+ // ErrInvalidStreamName is returned when the provided stream name is invalid
+ // (contains '.').
+ ErrInvalidStreamName JetStreamError = &jsError{message: "invalid stream name"}
+
+ // ErrInvalidSubject is returned when the provided subject name is invalid.
+ ErrInvalidSubject JetStreamError = &jsError{message: "invalid subject name"}
+
+ // ErrInvalidConsumerName is returned when the provided consumer name is
+ // invalid (contains '.').
+ ErrInvalidConsumerName JetStreamError = &jsError{message: "invalid consumer name"}
+
+ // ErrNoMessages is returned when no messages are currently available for a
+ // consumer.
+ ErrNoMessages JetStreamError = &jsError{message: "no messages"}
+
+ // ErrPinIDMismatch is returned when Pin ID sent in the request does not match
+ // the currently pinned consumer subscriber ID on the server.
+ ErrPinIDMismatch JetStreamError = &jsError{message: "pin ID mismatch"}
+
+ // ErrMaxBytesExceeded is returned when a message would exceed MaxBytes set
+ // on a pull request.
+ ErrMaxBytesExceeded JetStreamError = &jsError{message: "message size exceeds max bytes"}
+
+ // ErrBatchCompleted is returned when a fetch request sent the whole batch,
+ // but there are still bytes left. This is applicable only when MaxBytes is
+ // set on a pull request.
+ ErrBatchCompleted JetStreamError = &jsError{message: "batch completed"}
+
+ // ErrConsumerDeleted is returned when attempting to send pull request to a
+ // consumer which does not exist.
+ ErrConsumerDeleted JetStreamError = &jsError{message: "consumer deleted"}
+
+ // ErrConsumerLeadershipChanged is returned when pending requests are no
+ // longer valid after leadership has changed.
+ ErrConsumerLeadershipChanged JetStreamError = &jsError{message: "leadership change"}
+
+ // ErrHandlerRequired is returned when no handler func is provided in
+ // Stream().
+ ErrHandlerRequired JetStreamError = &jsError{message: "handler cannot be empty"}
+
+ // ErrEndOfData is returned when iterating over paged API from JetStream
+ // reaches end of data.
+ ErrEndOfData JetStreamError = &jsError{message: "end of data reached"}
+
+ // ErrNoHeartbeat is returned when no message is received in IdleHeartbeat
+ // time (if set).
+ ErrNoHeartbeat JetStreamError = &jsError{message: "no heartbeat received"} + + // ErrConsumerHasActiveSubscription is returned when a consumer is already + // subscribed to a stream. + ErrConsumerHasActiveSubscription JetStreamError = &jsError{message: "consumer has active subscription"} + + // ErrMsgNotBound is returned when given message is not bound to any + // subscription. + ErrMsgNotBound JetStreamError = &jsError{message: "message is not bound to subscription/connection"} + + // ErrMsgNoReply is returned when attempting to reply to a message without a + // reply subject. + ErrMsgNoReply JetStreamError = &jsError{message: "message does not have a reply"} + + // ErrMsgDeleteUnsuccessful is returned when an attempt to delete a message + // is unsuccessful. + ErrMsgDeleteUnsuccessful JetStreamError = &jsError{message: "message deletion unsuccessful"} + + // ErrAsyncPublishReplySubjectSet is returned when reply subject is set on + // async message publish. + ErrAsyncPublishReplySubjectSet JetStreamError = &jsError{message: "reply subject should be empty"} + + // ErrTooManyStalledMsgs is returned when too many outstanding async + // messages are waiting for ack. + ErrTooManyStalledMsgs JetStreamError = &jsError{message: "stalled with too many outstanding async published messages"} + + // ErrInvalidOption is returned when there is a collision between options. + ErrInvalidOption JetStreamError = &jsError{message: "invalid jetstream option"} + + // ErrMsgIteratorClosed is returned when attempting to get message from a + // closed iterator. + ErrMsgIteratorClosed JetStreamError = &jsError{message: "messages iterator closed"} + + // ErrOrderedConsumerReset is returned when resetting ordered consumer fails + // due to too many attempts. + ErrOrderedConsumerReset JetStreamError = &jsError{message: "recreating ordered consumer"} + + // ErrOrderConsumerUsedAsFetch is returned when ordered consumer was already + // used to process messages using Fetch (or FetchBytes). + ErrOrderConsumerUsedAsFetch JetStreamError = &jsError{message: "ordered consumer initialized as fetch"} + + // ErrOrderConsumerUsedAsConsume is returned when ordered consumer was + // already used to process messages using Consume or Messages. + ErrOrderConsumerUsedAsConsume JetStreamError = &jsError{message: "ordered consumer initialized as consume"} + + // ErrOrderedConsumerConcurrentRequests is returned when attempting to run + // concurrent operations on ordered consumers. + ErrOrderedConsumerConcurrentRequests JetStreamError = &jsError{message: "cannot run concurrent processing using ordered consumer"} + + // ErrOrderedConsumerNotCreated is returned when trying to get consumer info + // of an ordered consumer which was not yet created. + ErrOrderedConsumerNotCreated JetStreamError = &jsError{message: "consumer instance not yet created"} + + // ErrJetStreamPublisherClosed is returned for each unfinished ack future when JetStream.Cleanup is called. + ErrJetStreamPublisherClosed JetStreamError = &jsError{message: "jetstream context closed"} + + // ErrAsyncPublishTimeout is returned when waiting for ack on async publish + ErrAsyncPublishTimeout JetStreamError = &jsError{message: "timeout waiting for ack"} + + // KeyValue Errors + + // ErrKeyExists is returned when attempting to create a key that already + // exists. 
+ ErrKeyExists JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamWrongLastSequence, Code: 400}, message: "key exists"}
+
+ // ErrKeyValueConfigRequired is returned when attempting to create a bucket
+ // without a config.
+ ErrKeyValueConfigRequired JetStreamError = &jsError{message: "config required"}
+
+ // ErrInvalidBucketName is returned when attempting to create a bucket with
+ // an invalid name.
+ ErrInvalidBucketName JetStreamError = &jsError{message: "invalid bucket name"}
+
+ // ErrInvalidKey is returned when attempting to create a key with an invalid
+ // name.
+ ErrInvalidKey JetStreamError = &jsError{message: "invalid key"}
+
+ // ErrBucketExists is returned when attempting to create a bucket that
+ // already exists and has a different configuration.
+ ErrBucketExists JetStreamError = &jsError{message: "bucket name already in use"}
+
+ // ErrBucketNotFound is returned when attempting to access a bucket that
+ // does not exist.
+ ErrBucketNotFound JetStreamError = &jsError{message: "bucket not found"}
+
+ // ErrBadBucket is returned when attempting to access a bucket that is not a
+ // key-value store.
+ ErrBadBucket JetStreamError = &jsError{message: "bucket not valid key-value store"}
+
+ // ErrKeyNotFound is returned when attempting to access a key that does not
+ // exist.
+ ErrKeyNotFound JetStreamError = &jsError{message: "key not found"}
+
+ // ErrKeyDeleted is returned when attempting to access a key that was
+ // deleted.
+ ErrKeyDeleted JetStreamError = &jsError{message: "key was deleted"}
+
+ // ErrHistoryTooLarge is returned when provided history limit is larger than
+ // 64.
+ ErrHistoryTooLarge JetStreamError = &jsError{message: "history limited to a max of 64"}
+
+ // ErrNoKeysFound is returned when no keys are found.
+ ErrNoKeysFound JetStreamError = &jsError{message: "no keys found"}
+
+ // ErrTTLOnDeleteNotSupported is returned when attempting to set a TTL
+ // on a delete operation.
+ ErrTTLOnDeleteNotSupported JetStreamError = &jsError{message: "TTL is not supported on delete"}
+
+ // ErrLimitMarkerTTLNotSupported is returned when the connected jetstream API
+ // does not support setting the LimitMarkerTTL.
+ ErrLimitMarkerTTLNotSupported JetStreamError = &jsError{message: "limit marker TTLs not supported by server"}
+
+ // ErrObjectConfigRequired is returned when attempting to create an object
+ // without a config.
+ ErrObjectConfigRequired JetStreamError = &jsError{message: "object-store config required"}
+
+ // ErrBadObjectMeta is returned when the meta information of an object is
+ // invalid.
+ ErrBadObjectMeta JetStreamError = &jsError{message: "object-store meta information invalid"}
+
+ // ErrObjectNotFound is returned when an object is not found.
+ ErrObjectNotFound JetStreamError = &jsError{message: "object not found"}
+
+ // ErrInvalidStoreName is returned when the name of an object-store is
+ // invalid.
+ ErrInvalidStoreName JetStreamError = &jsError{message: "invalid object-store name"}
+
+ // ErrDigestMismatch is returned when the digests of an object do not match.
+ ErrDigestMismatch JetStreamError = &jsError{message: "received a corrupt object, digests do not match"}
+
+ // ErrInvalidDigestFormat is returned when the digest hash of an object has
+ // an invalid format.
+ ErrInvalidDigestFormat JetStreamError = &jsError{message: "object digest hash has invalid format"}
+
+ // ErrNoObjectsFound is returned when no objects are found.
+ ErrNoObjectsFound JetStreamError = &jsError{message: "no objects found"} + + // ErrObjectAlreadyExists is returned when an object with the same name + // already exists. + ErrObjectAlreadyExists JetStreamError = &jsError{message: "an object already exists with that name"} + + // ErrNameRequired is returned when a name is required. + ErrNameRequired JetStreamError = &jsError{message: "name is required"} + + // ErrLinkNotAllowed is returned when a link cannot be set when putting the + // object in a bucket. + ErrLinkNotAllowed JetStreamError = &jsError{message: "link cannot be set when putting the object in bucket"} + + // ErrObjectRequired is returned when an object is required. + ErrObjectRequired = &jsError{message: "object required"} + + // ErrNoLinkToDeleted is returned when it is not allowed to link to a + // deleted object. + ErrNoLinkToDeleted JetStreamError = &jsError{message: "not allowed to link to a deleted object"} + + // ErrNoLinkToLink is returned when it is not allowed to link to another + // link. + ErrNoLinkToLink JetStreamError = &jsError{message: "not allowed to link to another link"} + + // ErrCantGetBucket is returned when an invalid Get is attempted on an + // object that is a link to a bucket. + ErrCantGetBucket JetStreamError = &jsError{message: "invalid Get, object is a link to a bucket"} + + // ErrBucketRequired is returned when a bucket is required. + ErrBucketRequired JetStreamError = &jsError{message: "bucket required"} + + // ErrBucketMalformed is returned when a bucket is malformed. + ErrBucketMalformed JetStreamError = &jsError{message: "bucket malformed"} + + // ErrUpdateMetaDeleted is returned when the meta information of a deleted + // object cannot be updated. + ErrUpdateMetaDeleted JetStreamError = &jsError{message: "cannot update meta for a deleted object"} +) + +// Error prints the JetStream API error code and description. +func (e *APIError) Error() string { + return fmt.Sprintf("nats: API error: code=%d err_code=%d description=%s", e.Code, e.ErrorCode, e.Description) +} + +// APIError implements the JetStreamError interface. +func (e *APIError) APIError() *APIError { + return e +} + +// Is matches against an APIError. +func (e *APIError) Is(err error) bool { + if e == nil { + return false + } + // Extract internal APIError to match against. + var aerr *APIError + ok := errors.As(err, &aerr) + if !ok { + return ok + } + return e.ErrorCode == aerr.ErrorCode +} + +func (err *jsError) APIError() *APIError { + return err.apiErr +} + +func (err *jsError) Error() string { + if err.apiErr != nil && err.apiErr.Description != "" { + return err.apiErr.Error() + } + return fmt.Sprintf("nats: %s", err.message) +} + +func (err *jsError) Unwrap() error { + // Allow matching to embedded APIError in case there is one. + if err.apiErr == nil { + return nil + } + return err.apiErr +} diff --git a/vendor/github.com/nats-io/nats.go/jetstream/jetstream.go b/vendor/github.com/nats-io/nats.go/jetstream/jetstream.go new file mode 100644 index 000000000..960febeb0 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/jetstream/jetstream.go @@ -0,0 +1,1156 @@ +// Copyright 2022-2024 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jetstream + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "regexp" + "strings" + "time" + + "github.com/nats-io/nats.go" + "github.com/nats-io/nuid" +) + +type ( + + // JetStream is the top-level interface for interacting with JetStream. + // The capabilities of JetStream include: + // + // - Publishing messages to a stream using [Publisher]. + // - Managing streams using [StreamManager]. + // - Managing consumers using [StreamConsumerManager]. Those are the same + // methods as on [Stream], but are available as a shortcut to a consumer + // bypassing stream lookup. + // - Managing KeyValue stores using [KeyValueManager]. + // - Managing Object Stores using [ObjectStoreManager]. + // + // JetStream can be created using [New], [NewWithAPIPrefix] or + // [NewWithDomain] methods. + JetStream interface { + // AccountInfo fetches account information from the server, containing details + // about the account associated with this JetStream connection. If account is + // not enabled for JetStream, ErrJetStreamNotEnabledForAccount is returned. If + // the server does not have JetStream enabled, ErrJetStreamNotEnabled is + // returned. + AccountInfo(ctx context.Context) (*AccountInfo, error) + + // Conn returns the underlying NATS connection. + Conn() *nats.Conn + + // Options returns read-only JetStreamOptions used + // when making requests to JetStream. + Options() JetStreamOptions + + StreamConsumerManager + StreamManager + Publisher + KeyValueManager + ObjectStoreManager + } + + // Publisher provides methods for publishing messages to a stream. + // It is available as a part of [JetStream] interface. + // The behavior of Publisher can be customized using [PublishOpt] options. + Publisher interface { + // Publish performs a synchronous publish to a stream and waits for ack + // from server. It accepts subject name (which must be bound to a stream) + // and message payload. + Publish(ctx context.Context, subject string, payload []byte, opts ...PublishOpt) (*PubAck, error) + + // PublishMsg performs a synchronous publish to a stream and waits for + // ack from server. It accepts subject name (which must be bound to a + // stream) and nats.Message. + PublishMsg(ctx context.Context, msg *nats.Msg, opts ...PublishOpt) (*PubAck, error) + + // PublishAsync performs a publish to a stream and returns + // [PubAckFuture] interface, not blocking while waiting for an + // acknowledgement. It accepts subject name (which must be bound to a + // stream) and message payload. + // + // PublishAsync does not guarantee that the message has been + // received by the server. It only guarantees that the message has been + // sent to the server and thus messages can be stored in the stream + // out of order in case of retries. + PublishAsync(subject string, payload []byte, opts ...PublishOpt) (PubAckFuture, error) + + // PublishMsgAsync performs a publish to a stream and returns + // [PubAckFuture] interface, not blocking while waiting for an + // acknowledgement. It accepts subject name (which must + // be bound to a stream) and nats.Message. 
+ //
+ // PublishMsgAsync does not guarantee that the message has been
+ // received by the server. It only guarantees that the message has been
+ // sent to the server and thus messages can be stored in the stream
+ // out of order in case of retries.
+ PublishMsgAsync(msg *nats.Msg, opts ...PublishOpt) (PubAckFuture, error)
+
+ // PublishAsyncPending returns the number of async publishes outstanding
+ // for this context. An outstanding publish is one that has been
+ // sent by the publisher but has not yet received an ack.
+ PublishAsyncPending() int
+
+ // PublishAsyncComplete returns a channel that will be closed when all
+ // outstanding asynchronously published messages are acknowledged by the
+ // server.
+ PublishAsyncComplete() <-chan struct{}
+
+ // CleanupPublisher will clean up the publishing side of JetStreamContext.
+ //
+ // This will unsubscribe from the internal reply subject if needed.
+ // All pending async publishes will fail with ErrJetStreamPublisherClosed.
+ //
+ // If an error handler was provided, it will be called for each pending async
+ // publish and PublishAsyncComplete will be closed.
+ //
+ // After completing, JetStreamContext is still usable - internal subscription
+ // will be recreated on next publish, but the acks from previous publishes will
+ // be lost.
+ CleanupPublisher()
+ }
+
+ // StreamManager provides CRUD API for managing streams. It is available as
+ // a part of [JetStream] interface. CreateStream, UpdateStream,
+ // CreateOrUpdateStream and Stream methods return a [Stream] interface, allowing
+ // to operate on a stream.
+ StreamManager interface {
+ // CreateStream creates a new stream with given config and returns an
+ // interface to operate on it. If stream with given name already exists
+ // and its configuration differs from the provided one,
+ // ErrStreamNameAlreadyInUse is returned.
+ CreateStream(ctx context.Context, cfg StreamConfig) (Stream, error)
+
+ // UpdateStream updates an existing stream. If stream does not exist,
+ // ErrStreamNotFound is returned.
+ UpdateStream(ctx context.Context, cfg StreamConfig) (Stream, error)
+
+ // CreateOrUpdateStream creates a stream with given config. If stream
+ // already exists, it will be updated (if possible).
+ CreateOrUpdateStream(ctx context.Context, cfg StreamConfig) (Stream, error)
+
+ // Stream fetches [StreamInfo] and returns a [Stream] interface for a given stream name.
+ // If stream does not exist, ErrStreamNotFound is returned.
+ Stream(ctx context.Context, stream string) (Stream, error)
+
+ // StreamNameBySubject returns the name of a stream listening on the given
+ // subject. If no stream is bound to given subject, ErrStreamNotFound
+ // is returned.
+ StreamNameBySubject(ctx context.Context, subject string) (string, error)
+
+ // DeleteStream removes a stream with given name. If stream does not
+ // exist, ErrStreamNotFound is returned.
+ DeleteStream(ctx context.Context, stream string) error
+
+ // ListStreams returns StreamInfoLister, enabling iterating over a
+ // channel of stream infos.
+ ListStreams(context.Context, ...StreamListOpt) StreamInfoLister
+
+ // StreamNames returns a StreamNameLister, enabling iterating over a
+ // channel of stream names.
+ StreamNames(context.Context, ...StreamListOpt) StreamNameLister
+ }
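To make the asynchronous publishing contract of the Publisher interface above concrete (PublishAsync, PublishAsyncPending, PublishAsyncComplete), a sketch with an explicit completion barrier; the subject and timeout are illustrative:

func asyncPublishSketch(js jetstream.JetStream) error {
	for i := 0; i < 100; i++ {
		// Returns as soon as the message is buffered; acks arrive later.
		if _, err := js.PublishAsync("ORDERS.created", []byte("payload")); err != nil {
			return err
		}
	}
	select {
	case <-js.PublishAsyncComplete():
		return nil // every outstanding publish has been acknowledged
	case <-time.After(5 * time.Second):
		return fmt.Errorf("%d publishes still unacked", js.PublishAsyncPending())
	}
}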
CreateConsumer, + // UpdateConsumer, CreateOrUpdateConsumer and Consumer methods return a + // [Consumer] interface, allowing to operate on a consumer (e.g. consume + // messages). + StreamConsumerManager interface { + // CreateOrUpdateConsumer creates a consumer on a given stream with + // given config. If consumer already exists, it will be updated (if + // possible). Consumer interface is returned, allowing to operate on a + // consumer (e.g. fetch messages). + CreateOrUpdateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error) + + // CreateConsumer creates a consumer on a given stream with given + // config. If consumer already exists and the provided configuration + // differs from its configuration, ErrConsumerExists is returned. If the + // provided configuration is the same as the existing consumer, the + // existing consumer is returned. Consumer interface is returned, + // allowing to operate on a consumer (e.g. fetch messages). + CreateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error) + + // UpdateConsumer updates an existing consumer. If consumer does not + // exist, ErrConsumerDoesNotExist is returned. Consumer interface is + // returned, allowing to operate on a consumer (e.g. fetch messages). + UpdateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error) + + // OrderedConsumer returns an OrderedConsumer instance. OrderedConsumer + // are managed by the library and provide a simple way to consume + // messages from a stream. Ordered consumers are ephemeral in-memory + // pull consumers and are resilient to deletes and restarts. + OrderedConsumer(ctx context.Context, stream string, cfg OrderedConsumerConfig) (Consumer, error) + + // Consumer returns an interface to an existing consumer, allowing processing + // of messages. If consumer does not exist, ErrConsumerNotFound is + // returned. + Consumer(ctx context.Context, stream string, consumer string) (Consumer, error) + + // DeleteConsumer removes a consumer with given name from a stream. + // If consumer does not exist, ErrConsumerNotFound is returned. + DeleteConsumer(ctx context.Context, stream string, consumer string) error + + // PauseConsumer pauses a consumer until the given time. + PauseConsumer(ctx context.Context, stream string, consumer string, pauseUntil time.Time) (*ConsumerPauseResponse, error) + + // ResumeConsumer resumes a paused consumer. + ResumeConsumer(ctx context.Context, stream string, consumer string) (*ConsumerPauseResponse, error) + } + + // StreamListOpt is a functional option for [StreamManager.ListStreams] and + // [StreamManager.StreamNames] methods. + StreamListOpt func(*streamsRequest) error + + // AccountInfo contains information about the JetStream usage from the + // current account. + AccountInfo struct { + // Tier is the current account usage tier. + Tier + + // Domain is the domain name associated with this account. + Domain string `json:"domain"` + + // API is the API usage statistics for this account. + API APIStats `json:"api"` + + // Tiers is the list of available tiers for this account. + Tiers map[string]Tier `json:"tiers"` + } + + // Tier represents a JetStream account usage tier. + Tier struct { + // Memory is the memory storage being used for Stream Message storage. + Memory uint64 `json:"memory"` + + // Store is the disk storage being used for Stream Message storage. 
+ Store uint64 `json:"storage"` + + // ReservedMemory is the number of bytes reserved for memory usage by + // this account on the server + ReservedMemory uint64 `json:"reserved_memory"` + + // ReservedStore is the number of bytes reserved for disk usage by this + // account on the server + ReservedStore uint64 `json:"reserved_storage"` + + // Streams is the number of streams currently defined for this account. + Streams int `json:"streams"` + + // Consumers is the number of consumers currently defined for this + // account. + Consumers int `json:"consumers"` + + // Limits are the JetStream limits for this account. + Limits AccountLimits `json:"limits"` + } + + // APIStats reports on API calls to JetStream for this account. + APIStats struct { + // Level is the API level for this account. + Level int `json:"level"` + + // Total is the total number of API calls. + Total uint64 `json:"total"` + + // Errors is the total number of API errors. + Errors uint64 `json:"errors"` + + // Inflight is the number of API calls currently in flight. + Inflight uint64 `json:"inflight,omitempty"` + } + + // AccountLimits includes the JetStream limits of the current account. + AccountLimits struct { + // MaxMemory is the maximum amount of memory available for this account. + MaxMemory int64 `json:"max_memory"` + + // MaxStore is the maximum amount of disk storage available for this + // account. + MaxStore int64 `json:"max_storage"` + + // MaxStreams is the maximum number of streams allowed for this account. + MaxStreams int `json:"max_streams"` + + // MaxConsumers is the maximum number of consumers allowed for this + // account. + MaxConsumers int `json:"max_consumers"` + } + + jetStream struct { + conn *nats.Conn + opts JetStreamOptions + + publisher *jetStreamClient + } + + // JetStreamOpt is a functional option for [New], [NewWithAPIPrefix] and + // [NewWithDomain] methods. + JetStreamOpt func(*JetStreamOptions) error + + // JetStreamOptions are used to configure JetStream. + JetStreamOptions struct { + // APIPrefix is the prefix used for JetStream API requests. + APIPrefix string + + // Domain is the domain name token used when sending JetStream requests. + Domain string + + // DefaultTimeout is the default timeout used for JetStream API requests. + // This applies when the context passed to JetStream methods does not have + // a deadline set. + DefaultTimeout time.Duration + + publisherOpts asyncPublisherOpts + + // this is the actual prefix used in the API requests + // it is either APIPrefix or a domain specific prefix + apiPrefix string + replyPrefix string + replyPrefixLen int + clientTrace *ClientTrace + } + + // ClientTrace can be used to trace API interactions for [JetStream]. + ClientTrace struct { + // RequestSent is called when an API request is sent to the server. + RequestSent func(subj string, payload []byte) + + // ResponseReceived is called when a response is received from the + // server. + ResponseReceived func(subj string, payload []byte, hdr nats.Header) + } + streamInfoResponse struct { + apiResponse + apiPaged + *StreamInfo + } + + accountInfoResponse struct { + apiResponse + AccountInfo + } + + streamDeleteResponse struct { + apiResponse + Success bool `json:"success,omitempty"` + } + + // StreamInfoLister is used to iterate over a channel of stream infos. + // Err method can be used to check for errors encountered during iteration. + // Info channel is always closed and therefore can be used in a range loop. 
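+	//
+	// A short illustrative sketch (editor's note; lister is a hypothetical
+	// variable returned by ListStreams): drain the channel in a range loop,
+	// then check Err:
+	//
+	//	for info := range lister.Info() {
+	//		fmt.Println(info.Config.Name)
+	//	}
+	//	if err := lister.Err(); err != nil {
+	//		// handle listing error
+	//	}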
+	StreamInfoLister interface {
+		Info() <-chan *StreamInfo
+		Err() error
+	}
+
+	// StreamNameLister is used to iterate over a channel of stream names.
+	// Err method can be used to check for errors encountered during iteration.
+	// Name channel is always closed and therefore can be used in a range loop.
+	StreamNameLister interface {
+		Name() <-chan string
+		Err() error
+	}
+
+	apiPagedRequest struct {
+		Offset int `json:"offset"`
+	}
+
+	streamLister struct {
+		js       *jetStream
+		offset   int
+		pageInfo *apiPaged
+
+		streams chan *StreamInfo
+		names   chan string
+		err     error
+	}
+
+	streamListResponse struct {
+		apiResponse
+		apiPaged
+		Streams []*StreamInfo `json:"streams"`
+	}
+
+	streamNamesResponse struct {
+		apiResponse
+		apiPaged
+		Streams []string `json:"streams"`
+	}
+
+	streamsRequest struct {
+		apiPagedRequest
+		Subject string `json:"subject,omitempty"`
+	}
+)
+
+// defaultAPITimeout is used if context.Background() or context.TODO() is passed to API calls.
+const defaultAPITimeout = 5 * time.Second
+
+var subjectRegexp = regexp.MustCompile(`^[^ >]*[>]?$`)
+
+// New returns a new JetStream instance.
+// It uses default API prefix ($JS.API) for JetStream API requests.
+// If a custom API prefix is required, use [NewWithAPIPrefix] or [NewWithDomain].
+//
+// Available options:
+//   - [WithClientTrace] - enables request/response tracing.
+//   - [WithPublishAsyncErrHandler] - sets error handler for async message publish.
+//   - [WithPublishAsyncMaxPending] - sets the maximum outstanding async publishes
+//     that can be inflight at one time.
+func New(nc *nats.Conn, opts ...JetStreamOpt) (JetStream, error) {
+	jsOpts := JetStreamOptions{
+		apiPrefix: DefaultAPIPrefix,
+		publisherOpts: asyncPublisherOpts{
+			maxpa: defaultAsyncPubAckInflight,
+		},
+		DefaultTimeout: defaultAPITimeout,
+	}
+	setReplyPrefix(nc, &jsOpts)
+	for _, opt := range opts {
+		if err := opt(&jsOpts); err != nil {
+			return nil, err
+		}
+	}
+	js := &jetStream{
+		conn:      nc,
+		opts:      jsOpts,
+		publisher: &jetStreamClient{asyncPublisherOpts: jsOpts.publisherOpts},
+	}
+
+	return js, nil
+}
+
+const (
+	// defaultAsyncPubAckInflight is the number of async pub acks inflight.
+	defaultAsyncPubAckInflight = 4000
+)
+
+func setReplyPrefix(nc *nats.Conn, jsOpts *JetStreamOptions) {
+	jsOpts.replyPrefix = nats.InboxPrefix
+	if nc.Opts.InboxPrefix != "" {
+		jsOpts.replyPrefix = nc.Opts.InboxPrefix + "."
+	}
+	// Add 1 for the dot separator.
+	jsOpts.replyPrefixLen = len(jsOpts.replyPrefix) + aReplyTokensize + 1
+}
+
+// NewWithAPIPrefix returns a new JetStream instance and sets the API prefix to be used in requests to JetStream API.
+// The API prefix will be used in API requests to JetStream, e.g. <prefix>.STREAM.INFO.<stream>.
+//
+// Available options:
+//   - [WithClientTrace] - enables request/response tracing.
+//   - [WithPublishAsyncErrHandler] - sets error handler for async message publish.
+//   - [WithPublishAsyncMaxPending] - sets the maximum outstanding async publishes
+//     that can be inflight at one time.
+func NewWithAPIPrefix(nc *nats.Conn, apiPrefix string, opts ...JetStreamOpt) (JetStream, error) {
+	jsOpts := JetStreamOptions{
+		publisherOpts: asyncPublisherOpts{
+			maxpa: defaultAsyncPubAckInflight,
+		},
+		APIPrefix:      apiPrefix,
+		DefaultTimeout: defaultAPITimeout,
+	}
+	setReplyPrefix(nc, &jsOpts)
+	for _, opt := range opts {
+		if err := opt(&jsOpts); err != nil {
+			return nil, err
+		}
+	}
+	if apiPrefix == "" {
+		return nil, errors.New("API prefix cannot be empty")
+	}
+	if !strings.HasSuffix(apiPrefix, ".") {
+		jsOpts.apiPrefix = fmt.Sprintf("%s.", apiPrefix)
+	} else {
+		jsOpts.apiPrefix = apiPrefix
+	}
+	js := &jetStream{
+		conn:      nc,
+		opts:      jsOpts,
+		publisher: &jetStreamClient{asyncPublisherOpts: jsOpts.publisherOpts},
+	}
+	return js, nil
+}
+
+// NewWithDomain returns a new JetStream instance and sets the domain name token used when sending JetStream requests.
+// The domain name token will be used in API requests to JetStream, e.g. $JS.<domain>.API.STREAM.INFO.<stream>.
+//
+// Available options:
+//   - [WithClientTrace] - enables request/response tracing.
+//   - [WithPublishAsyncErrHandler] - sets error handler for async message publish.
+//   - [WithPublishAsyncMaxPending] - sets the maximum outstanding async publishes
+//     that can be inflight at one time.
+func NewWithDomain(nc *nats.Conn, domain string, opts ...JetStreamOpt) (JetStream, error) {
+	jsOpts := JetStreamOptions{
+		publisherOpts: asyncPublisherOpts{
+			maxpa: defaultAsyncPubAckInflight,
+		},
+		Domain:         domain,
+		DefaultTimeout: defaultAPITimeout,
+	}
+	setReplyPrefix(nc, &jsOpts)
+	for _, opt := range opts {
+		if err := opt(&jsOpts); err != nil {
+			return nil, err
+		}
+	}
+	if domain == "" {
+		return nil, errors.New("domain cannot be empty")
+	}
+	jsOpts.apiPrefix = fmt.Sprintf(jsDomainT, domain)
+	js := &jetStream{
+		conn:      nc,
+		opts:      jsOpts,
+		publisher: &jetStreamClient{asyncPublisherOpts: jsOpts.publisherOpts},
+	}
+	return js, nil
+}
+
+// Conn returns the underlying NATS connection.
+func (js *jetStream) Conn() *nats.Conn {
+	return js.conn
+}
+
+func (js *jetStream) Options() JetStreamOptions {
+	return js.opts
+}
+
+// CreateStream creates a new stream with given config and returns an
+// interface to operate on it. If stream with given name already exists,
+// ErrStreamNameAlreadyInUse is returned.
+func (js *jetStream) CreateStream(ctx context.Context, cfg StreamConfig) (Stream, error) {
+	if err := validateStreamName(cfg.Name); err != nil {
+		return nil, err
+	}
+	ctx, cancel := js.wrapContextWithoutDeadline(ctx)
+	if cancel != nil {
+		defer cancel()
+	}
+	ncfg := cfg
+	// If we have a mirror and an external domain, convert to ext.APIPrefix.
+	if ncfg.Mirror != nil && ncfg.Mirror.Domain != "" {
+		// Copy so we do not change the caller's version.
+		ncfg.Mirror = ncfg.Mirror.copy()
+		if err := ncfg.Mirror.convertDomain(); err != nil {
+			return nil, err
+		}
+	}
+
+	// Check sources for the same.
+	if len(ncfg.Sources) > 0 {
+		ncfg.Sources = append([]*StreamSource(nil), ncfg.Sources...)
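+		// (Editor's note) The append above copies the slice header, so the
+		// loop below can swap in converted copies of individual sources
+		// without mutating the caller's StreamConfig.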
+ for i, ss := range ncfg.Sources { + if ss.Domain != "" { + ncfg.Sources[i] = ss.copy() + if err := ncfg.Sources[i].convertDomain(); err != nil { + return nil, err + } + } + } + } + + req, err := json.Marshal(ncfg) + if err != nil { + return nil, err + } + + createSubject := fmt.Sprintf(apiStreamCreateT, cfg.Name) + var resp streamInfoResponse + + if _, err = js.apiRequestJSON(ctx, createSubject, &resp, req); err != nil { + return nil, err + } + if resp.Error != nil { + if resp.Error.ErrorCode == JSErrCodeStreamNameInUse { + return nil, ErrStreamNameAlreadyInUse + } + return nil, resp.Error + } + + // check that input subject transform (if used) is reflected in the returned StreamInfo + if cfg.SubjectTransform != nil && resp.StreamInfo.Config.SubjectTransform == nil { + return nil, ErrStreamSubjectTransformNotSupported + } + + if len(cfg.Sources) != 0 { + if len(cfg.Sources) != len(resp.Config.Sources) { + return nil, ErrStreamSourceNotSupported + } + for i := range cfg.Sources { + if len(cfg.Sources[i].SubjectTransforms) != 0 && len(resp.Sources[i].SubjectTransforms) == 0 { + return nil, ErrStreamSourceMultipleFilterSubjectsNotSupported + } + } + } + + return &stream{ + js: js, + name: cfg.Name, + info: resp.StreamInfo, + }, nil +} + +// If we have a Domain, convert to the appropriate ext.APIPrefix. +// This will change the stream source, so should be a copy passed in. +func (ss *StreamSource) convertDomain() error { + if ss.Domain == "" { + return nil + } + if ss.External != nil { + return errors.New("nats: domain and external are both set") + } + ss.External = &ExternalStream{APIPrefix: fmt.Sprintf(jsExtDomainT, ss.Domain)} + return nil +} + +// Helper for copying when we do not want to change user's version. +func (ss *StreamSource) copy() *StreamSource { + nss := *ss + // Check pointers + if ss.OptStartTime != nil { + t := *ss.OptStartTime + nss.OptStartTime = &t + } + if ss.External != nil { + ext := *ss.External + nss.External = &ext + } + return &nss +} + +// UpdateStream updates an existing stream. If stream does not exist, +// ErrStreamNotFound is returned. +func (js *jetStream) UpdateStream(ctx context.Context, cfg StreamConfig) (Stream, error) { + if err := validateStreamName(cfg.Name); err != nil { + return nil, err + } + ctx, cancel := js.wrapContextWithoutDeadline(ctx) + if cancel != nil { + defer cancel() + } + + req, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + updateSubject := fmt.Sprintf(apiStreamUpdateT, cfg.Name) + var resp streamInfoResponse + + if _, err = js.apiRequestJSON(ctx, updateSubject, &resp, req); err != nil { + return nil, err + } + if resp.Error != nil { + if resp.Error.ErrorCode == JSErrCodeStreamNotFound { + return nil, ErrStreamNotFound + } + return nil, resp.Error + } + + // check that input subject transform (if used) is reflected in the returned StreamInfo + if cfg.SubjectTransform != nil && resp.StreamInfo.Config.SubjectTransform == nil { + return nil, ErrStreamSubjectTransformNotSupported + } + + if len(cfg.Sources) != 0 { + if len(cfg.Sources) != len(resp.Config.Sources) { + return nil, ErrStreamSourceNotSupported + } + for i := range cfg.Sources { + if len(cfg.Sources[i].SubjectTransforms) != 0 && len(resp.Sources[i].SubjectTransforms) == 0 { + return nil, ErrStreamSourceMultipleFilterSubjectsNotSupported + } + } + } + + return &stream{ + js: js, + name: cfg.Name, + info: resp.StreamInfo, + }, nil +} + +// CreateOrUpdateStream creates a stream with given config. 
If stream +// already exists, it will be updated (if possible). +func (js *jetStream) CreateOrUpdateStream(ctx context.Context, cfg StreamConfig) (Stream, error) { + s, err := js.UpdateStream(ctx, cfg) + if err != nil { + if !errors.Is(err, ErrStreamNotFound) { + return nil, err + } + return js.CreateStream(ctx, cfg) + } + + return s, nil +} + +// Stream fetches [StreamInfo] and returns a [Stream] interface for a given stream name. +// If stream does not exist, ErrStreamNotFound is returned. +func (js *jetStream) Stream(ctx context.Context, name string) (Stream, error) { + if err := validateStreamName(name); err != nil { + return nil, err + } + ctx, cancel := js.wrapContextWithoutDeadline(ctx) + if cancel != nil { + defer cancel() + } + infoSubject := fmt.Sprintf(apiStreamInfoT, name) + + var resp streamInfoResponse + + if _, err := js.apiRequestJSON(ctx, infoSubject, &resp); err != nil { + return nil, err + } + if resp.Error != nil { + if resp.Error.ErrorCode == JSErrCodeStreamNotFound { + return nil, ErrStreamNotFound + } + return nil, resp.Error + } + return &stream{ + js: js, + name: name, + info: resp.StreamInfo, + }, nil +} + +// DeleteStream removes a stream with given name +func (js *jetStream) DeleteStream(ctx context.Context, name string) error { + if err := validateStreamName(name); err != nil { + return err + } + ctx, cancel := js.wrapContextWithoutDeadline(ctx) + if cancel != nil { + defer cancel() + } + deleteSubject := fmt.Sprintf(apiStreamDeleteT, name) + var resp streamDeleteResponse + + if _, err := js.apiRequestJSON(ctx, deleteSubject, &resp); err != nil { + return err + } + if resp.Error != nil { + if resp.Error.ErrorCode == JSErrCodeStreamNotFound { + return ErrStreamNotFound + } + return resp.Error + } + return nil +} + +// CreateOrUpdateConsumer creates a consumer on a given stream with +// given config. If consumer already exists, it will be updated (if +// possible). Consumer interface is returned, allowing to operate on a +// consumer (e.g. fetch messages). +func (js *jetStream) CreateOrUpdateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error) { + if err := validateStreamName(stream); err != nil { + return nil, err + } + return upsertConsumer(ctx, js, stream, cfg, consumerActionCreateOrUpdate) +} + +// CreateConsumer creates a consumer on a given stream with given +// config. If consumer already exists and the provided configuration +// differs from its configuration, ErrConsumerExists is returned. If the +// provided configuration is the same as the existing consumer, the +// existing consumer is returned. Consumer interface is returned, +// allowing to operate on a consumer (e.g. fetch messages). +func (js *jetStream) CreateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error) { + if err := validateStreamName(stream); err != nil { + return nil, err + } + return upsertConsumer(ctx, js, stream, cfg, consumerActionCreate) +} + +// UpdateConsumer updates an existing consumer. If consumer does not +// exist, ErrConsumerDoesNotExist is returned. Consumer interface is +// returned, allowing to operate on a consumer (e.g. fetch messages). +func (js *jetStream) UpdateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error) { + if err := validateStreamName(stream); err != nil { + return nil, err + } + return upsertConsumer(ctx, js, stream, cfg, consumerActionUpdate) +} + +// OrderedConsumer returns an OrderedConsumer instance. 
OrderedConsumer +// are managed by the library and provide a simple way to consume +// messages from a stream. Ordered consumers are ephemeral in-memory +// pull consumers and are resilient to deletes and restarts. +func (js *jetStream) OrderedConsumer(ctx context.Context, stream string, cfg OrderedConsumerConfig) (Consumer, error) { + if err := validateStreamName(stream); err != nil { + return nil, err + } + oc := &orderedConsumer{ + js: js, + cfg: &cfg, + stream: stream, + namePrefix: nuid.Next(), + doReset: make(chan struct{}, 1), + } + consCfg := oc.getConsumerConfig() + cons, err := js.CreateOrUpdateConsumer(ctx, stream, *consCfg) + if err != nil { + return nil, err + } + oc.currentConsumer = cons.(*pullConsumer) + + return oc, nil +} + +// Consumer returns an interface to an existing consumer, allowing processing +// of messages. If consumer does not exist, ErrConsumerNotFound is +// returned. +func (js *jetStream) Consumer(ctx context.Context, stream string, name string) (Consumer, error) { + if err := validateStreamName(stream); err != nil { + return nil, err + } + return getConsumer(ctx, js, stream, name) +} + +// DeleteConsumer removes a consumer with given name from a stream. +// If consumer does not exist, ErrConsumerNotFound is returned. +func (js *jetStream) DeleteConsumer(ctx context.Context, stream string, name string) error { + if err := validateStreamName(stream); err != nil { + return err + } + return deleteConsumer(ctx, js, stream, name) +} + +func (js *jetStream) PauseConsumer(ctx context.Context, stream string, consumer string, pauseUntil time.Time) (*ConsumerPauseResponse, error) { + if err := validateStreamName(stream); err != nil { + return nil, err + } + return pauseConsumer(ctx, js, stream, consumer, &pauseUntil) +} + +func (js *jetStream) ResumeConsumer(ctx context.Context, stream string, consumer string) (*ConsumerPauseResponse, error) { + if err := validateStreamName(stream); err != nil { + return nil, err + } + return resumeConsumer(ctx, js, stream, consumer) +} + +func validateStreamName(stream string) error { + if stream == "" { + return ErrStreamNameRequired + } + if strings.ContainsAny(stream, ">*. /\\") { + return fmt.Errorf("%w: '%s'", ErrInvalidStreamName, stream) + } + return nil +} + +func validateSubject(subject string) error { + if subject == "" { + return fmt.Errorf("%w: %s", ErrInvalidSubject, "subject cannot be empty") + } + if subject[0] == '.' || subject[len(subject)-1] == '.' || !subjectRegexp.MatchString(subject) { + return fmt.Errorf("%w: %s", ErrInvalidSubject, subject) + } + return nil +} + +// AccountInfo fetches account information from the server, containing details +// about the account associated with this JetStream connection. If account is +// not enabled for JetStream, ErrJetStreamNotEnabledForAccount is returned. +// +// If the server does not have JetStream enabled, ErrJetStreamNotEnabled is +// returned (for a single server setup). For clustered topologies, AccountInfo +// will time out. 
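+//
+// An illustrative sketch (editor's addition; js and ctx are hypothetical) of
+// distinguishing the error cases:
+//
+//	info, err := js.AccountInfo(ctx)
+//	switch {
+//	case errors.Is(err, jetstream.ErrJetStreamNotEnabledForAccount):
+//		// account is not enabled for JetStream
+//	case errors.Is(err, jetstream.ErrJetStreamNotEnabled):
+//		// server does not run JetStream
+//	case err != nil:
+//		// other API error
+//	default:
+//		fmt.Println("streams in account:", info.Streams)
+//	}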
+func (js *jetStream) AccountInfo(ctx context.Context) (*AccountInfo, error) {
+	ctx, cancel := js.wrapContextWithoutDeadline(ctx)
+	if cancel != nil {
+		defer cancel()
+	}
+	var resp accountInfoResponse
+
+	if _, err := js.apiRequestJSON(ctx, apiAccountInfo, &resp); err != nil {
+		if errors.Is(err, nats.ErrNoResponders) {
+			return nil, ErrJetStreamNotEnabled
+		}
+		return nil, err
+	}
+	if resp.Error != nil {
+		if resp.Error.ErrorCode == JSErrCodeJetStreamNotEnabledForAccount {
+			return nil, ErrJetStreamNotEnabledForAccount
+		}
+		if resp.Error.ErrorCode == JSErrCodeJetStreamNotEnabled {
+			return nil, ErrJetStreamNotEnabled
+		}
+		return nil, resp.Error
+	}
+
+	return &resp.AccountInfo, nil
+}
+
+// ListStreams returns StreamInfoLister, enabling iterating over a
+// channel of stream infos.
+func (js *jetStream) ListStreams(ctx context.Context, opts ...StreamListOpt) StreamInfoLister {
+	l := &streamLister{
+		js:      js,
+		streams: make(chan *StreamInfo),
+	}
+	var streamsReq streamsRequest
+	for _, opt := range opts {
+		if err := opt(&streamsReq); err != nil {
+			l.err = err
+			close(l.streams)
+			return l
+		}
+	}
+	go func() {
+		defer close(l.streams)
+		ctx, cancel := js.wrapContextWithoutDeadline(ctx)
+		if cancel != nil {
+			defer cancel()
+		}
+		for {
+			page, err := l.streamInfos(ctx, streamsReq)
+			if err != nil && !errors.Is(err, ErrEndOfData) {
+				l.err = err
+				return
+			}
+			for _, info := range page {
+				select {
+				case l.streams <- info:
+				case <-ctx.Done():
+					l.err = ctx.Err()
+					return
+				}
+			}
+			if errors.Is(err, ErrEndOfData) {
+				return
+			}
+		}
+	}()
+
+	return l
+}
+
+// Info returns a channel allowing retrieval of stream infos returned by [ListStreams].
+func (s *streamLister) Info() <-chan *StreamInfo {
+	return s.streams
+}
+
+// Err returns any error encountered during a [ListStreams] or [StreamNames] request.
+func (s *streamLister) Err() error {
+	return s.err
+}
+
+// StreamNames returns a StreamNameLister, enabling iterating over a
+// channel of stream names.
+func (js *jetStream) StreamNames(ctx context.Context, opts ...StreamListOpt) StreamNameLister {
+	l := &streamLister{
+		js:    js,
+		names: make(chan string),
+	}
+	var streamsReq streamsRequest
+	for _, opt := range opts {
+		if err := opt(&streamsReq); err != nil {
+			l.err = err
+			close(l.names)
+			return l
+		}
+	}
+	go func() {
+		ctx, cancel := js.wrapContextWithoutDeadline(ctx)
+		if cancel != nil {
+			defer cancel()
+		}
+		defer close(l.names)
+		for {
+			page, err := l.streamNames(ctx, streamsReq)
+			if err != nil && !errors.Is(err, ErrEndOfData) {
+				l.err = err
+				return
+			}
+			for _, info := range page {
+				select {
+				case l.names <- info:
+				case <-ctx.Done():
+					l.err = ctx.Err()
+					return
+				}
+			}
+			if errors.Is(err, ErrEndOfData) {
+				return
+			}
+		}
+	}()
+
+	return l
+}
+
+// StreamNameBySubject returns the name of a stream listening on the given
+// subject. If no stream is bound to the given subject, ErrStreamNotFound
+// is returned.
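+//
+// For example (editor's sketch; the subject is hypothetical):
+//
+//	name, err := js.StreamNameBySubject(ctx, "ORDERS.new")
+//	if errors.Is(err, jetstream.ErrStreamNotFound) {
+//		// no stream is bound to this subject
+//	}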
+func (js *jetStream) StreamNameBySubject(ctx context.Context, subject string) (string, error) { + ctx, cancel := js.wrapContextWithoutDeadline(ctx) + if cancel != nil { + defer cancel() + } + if err := validateSubject(subject); err != nil { + return "", err + } + + r := &streamsRequest{Subject: subject} + req, err := json.Marshal(r) + if err != nil { + return "", err + } + var resp streamNamesResponse + _, err = js.apiRequestJSON(ctx, apiStreams, &resp, req) + if err != nil { + return "", err + } + if resp.Error != nil { + return "", resp.Error + } + if len(resp.Streams) == 0 { + return "", ErrStreamNotFound + } + + return resp.Streams[0], nil +} + +// Name returns a channel allowing retrieval of stream names returned by [StreamNames] +func (s *streamLister) Name() <-chan string { + return s.names +} + +// infos fetches the next [StreamInfo] page +func (s *streamLister) streamInfos(ctx context.Context, streamsReq streamsRequest) ([]*StreamInfo, error) { + if s.pageInfo != nil && s.offset >= s.pageInfo.Total { + return nil, ErrEndOfData + } + + req := streamsRequest{ + apiPagedRequest: apiPagedRequest{ + Offset: s.offset, + }, + Subject: streamsReq.Subject, + } + reqJSON, err := json.Marshal(req) + if err != nil { + return nil, err + } + + var resp streamListResponse + _, err = s.js.apiRequestJSON(ctx, apiStreamListT, &resp, reqJSON) + if err != nil { + return nil, err + } + if resp.Error != nil { + return nil, resp.Error + } + + s.pageInfo = &resp.apiPaged + s.offset += len(resp.Streams) + return resp.Streams, nil +} + +// streamNames fetches the next stream names page +func (s *streamLister) streamNames(ctx context.Context, streamsReq streamsRequest) ([]string, error) { + if s.pageInfo != nil && s.offset >= s.pageInfo.Total { + return nil, ErrEndOfData + } + + req := streamsRequest{ + apiPagedRequest: apiPagedRequest{ + Offset: s.offset, + }, + Subject: streamsReq.Subject, + } + reqJSON, err := json.Marshal(req) + if err != nil { + return nil, err + } + + var resp streamNamesResponse + _, err = s.js.apiRequestJSON(ctx, apiStreams, &resp, reqJSON) + if err != nil { + return nil, err + } + if resp.Error != nil { + return nil, resp.Error + } + + s.pageInfo = &resp.apiPaged + s.offset += len(resp.Streams) + return resp.Streams, nil +} + +// wrapContextWithoutDeadline wraps context without deadline with default timeout. +// If deadline is already set, it will be returned as is, and cancel() will be nil. +// Caller should check if cancel() is nil before calling it. +func (js *jetStream) wrapContextWithoutDeadline(ctx context.Context) (context.Context, context.CancelFunc) { + if _, ok := ctx.Deadline(); ok { + return ctx, nil + } + return context.WithTimeout(ctx, js.opts.DefaultTimeout) +} + +// CleanupPublisher will cleanup the publishing side of JetStreamContext. +// +// This will unsubscribe from the internal reply subject if needed. +// All pending async publishes will fail with ErrJetStreamContextClosed. +// +// If an error handler was provided, it will be called for each pending async +// publish and PublishAsyncComplete will be closed. +// +// After completing JetStreamContext is still usable - internal subscription +// will be recreated on next publish, but the acks from previous publishes will +// be lost. 
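+//
+// Typical usage is a one-shot call during teardown (editor's illustrative note):
+//
+//	defer js.CleanupPublisher()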
+func (js *jetStream) CleanupPublisher() { + js.cleanupReplySub() + js.publisher.Lock() + errCb := js.publisher.aecb + for id, paf := range js.publisher.acks { + paf.err = ErrJetStreamPublisherClosed + if paf.errCh != nil { + paf.errCh <- paf.err + } + if errCb != nil { + // call error handler after releasing the mutex to avoid contention + defer errCb(js, paf.msg, ErrJetStreamPublisherClosed) + } + delete(js.publisher.acks, id) + } + if js.publisher.doneCh != nil { + close(js.publisher.doneCh) + js.publisher.doneCh = nil + } + js.publisher.Unlock() +} + +func (js *jetStream) cleanupReplySub() { + if js.publisher == nil { + return + } + js.publisher.Lock() + if js.publisher.replySub != nil { + js.publisher.replySub.Unsubscribe() + js.publisher.replySub = nil + } + if js.publisher.connStatusCh != nil { + close(js.publisher.connStatusCh) + js.publisher.connStatusCh = nil + } + js.publisher.Unlock() +} diff --git a/vendor/github.com/nats-io/nats.go/jetstream/jetstream_options.go b/vendor/github.com/nats-io/nats.go/jetstream/jetstream_options.go new file mode 100644 index 000000000..1baa0e42c --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/jetstream/jetstream_options.go @@ -0,0 +1,630 @@ +// Copyright 2022-2025 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jetstream + +import ( + "fmt" + "time" +) + +type pullOptFunc func(*consumeOpts) error + +func (fn pullOptFunc) configureConsume(opts *consumeOpts) error { + return fn(opts) +} + +func (fn pullOptFunc) configureMessages(opts *consumeOpts) error { + return fn(opts) +} + +// WithClientTrace enables request/response API calls tracing. +func WithClientTrace(ct *ClientTrace) JetStreamOpt { + return func(opts *JetStreamOptions) error { + opts.clientTrace = ct + return nil + } +} + +// WithPublishAsyncErrHandler sets error handler for async message publish. +func WithPublishAsyncErrHandler(cb MsgErrHandler) JetStreamOpt { + return func(opts *JetStreamOptions) error { + opts.publisherOpts.aecb = cb + return nil + } +} + +// WithPublishAsyncMaxPending sets the maximum outstanding async publishes that +// can be inflight at one time. +func WithPublishAsyncMaxPending(max int) JetStreamOpt { + return func(opts *JetStreamOptions) error { + if max < 1 { + return fmt.Errorf("%w: max ack pending should be >= 1", ErrInvalidOption) + } + opts.publisherOpts.maxpa = max + return nil + } +} + +// WithPublishAsyncTimeout sets the timeout for async message publish. +// If not provided, timeout is disabled. +func WithPublishAsyncTimeout(dur time.Duration) JetStreamOpt { + return func(opts *JetStreamOptions) error { + opts.publisherOpts.ackTimeout = dur + return nil + } +} + +// WithDefaultTimeout sets the default timeout for JetStream API requests. +// It is used when context used for the request does not have a deadline set. +// If not provided, a default of 5 seconds will be used. 
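+//
+// For example (editor's sketch; nc is a hypothetical *nats.Conn):
+//
+//	js, err := jetstream.New(nc, jetstream.WithDefaultTimeout(10*time.Second))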
+func WithDefaultTimeout(timeout time.Duration) JetStreamOpt { + return func(opts *JetStreamOptions) error { + if timeout <= 0 { + return fmt.Errorf("%w: timeout value must be greater than 0", ErrInvalidOption) + } + opts.DefaultTimeout = timeout + return nil + } +} + +// WithPurgeSubject sets a specific subject for which messages on a stream will +// be purged +func WithPurgeSubject(subject string) StreamPurgeOpt { + return func(req *StreamPurgeRequest) error { + req.Subject = subject + return nil + } +} + +// WithPurgeSequence is used to set a specific sequence number up to which (but +// not including) messages will be purged from a stream Can be combined with +// [WithPurgeSubject] option, but not with [WithPurgeKeep] +func WithPurgeSequence(sequence uint64) StreamPurgeOpt { + return func(req *StreamPurgeRequest) error { + if req.Keep != 0 { + return fmt.Errorf("%w: both 'keep' and 'sequence' cannot be provided in purge request", ErrInvalidOption) + } + req.Sequence = sequence + return nil + } +} + +// WithPurgeKeep sets the number of messages to be kept in the stream after +// purge. Can be combined with [WithPurgeSubject] option, but not with +// [WithPurgeSequence] +func WithPurgeKeep(keep uint64) StreamPurgeOpt { + return func(req *StreamPurgeRequest) error { + if req.Sequence != 0 { + return fmt.Errorf("%w: both 'keep' and 'sequence' cannot be provided in purge request", ErrInvalidOption) + } + req.Keep = keep + return nil + } +} + +// WithGetMsgSubject sets the stream subject from which the message should be +// retrieved. Server will return a first message with a seq >= to the input seq +// that has the specified subject. +func WithGetMsgSubject(subject string) GetMsgOpt { + return func(req *apiMsgGetRequest) error { + req.NextFor = subject + return nil + } +} + +// PullMaxMessages limits the number of messages to be buffered in the client. +// If not provided, a default of 500 messages will be used. +// This option is exclusive with PullMaxBytes. +// +// PullMaxMessages implements both PullConsumeOpt and PullMessagesOpt, allowing +// it to configure Consumer.Consume and Consumer.Messages. +type PullMaxMessages int + +func (max PullMaxMessages) configureConsume(opts *consumeOpts) error { + if max <= 0 { + return fmt.Errorf("%w: maxMessages size must be at least 1", ErrInvalidOption) + } + opts.MaxMessages = int(max) + return nil +} + +func (max PullMaxMessages) configureMessages(opts *consumeOpts) error { + if max <= 0 { + return fmt.Errorf("%w: maxMessages size must be at least 1", ErrInvalidOption) + } + opts.MaxMessages = int(max) + return nil +} + +type pullMaxMessagesWithBytesLimit struct { + maxMessages int + maxBytes int +} + +// PullMaxMessagesWithBytesLimit limits the number of messages to be buffered +// in the client. Additionally, it sets the maximum size a single fetch request +// can have. Note that this will not limit the total size of messages buffered +// in the client, but rather can serve as a way to limit what nats server will +// have to internally buffer for a single fetch request. +// +// The byte limit should never be set to a value lower than the maximum message +// size that can be expected from the server. If the byte limit is lower than +// the maximum message size, the consumer will stall and not be able to consume +// messages. +// +// This is an advanced option and should be used with caution. Most users should +// use [PullMaxMessages] or [PullMaxBytes] instead. 
+//
+// PullMaxMessagesWithBytesLimit implements both PullConsumeOpt and
+// PullMessagesOpt, allowing it to configure Consumer.Consume and Consumer.Messages.
+func PullMaxMessagesWithBytesLimit(maxMessages, byteLimit int) pullMaxMessagesWithBytesLimit {
+	return pullMaxMessagesWithBytesLimit{maxMessages, byteLimit}
+}
+
+func (m pullMaxMessagesWithBytesLimit) configureConsume(opts *consumeOpts) error {
+	if m.maxMessages <= 0 {
+		return fmt.Errorf("%w: maxMessages size must be at least 1", ErrInvalidOption)
+	}
+	if m.maxBytes <= 0 {
+		return fmt.Errorf("%w: maxBytes size must be at least 1", ErrInvalidOption)
+	}
+	if opts.MaxMessages > 0 {
+		return fmt.Errorf("%w: maxMessages already set", ErrInvalidOption)
+	}
+	opts.MaxMessages = m.maxMessages
+	opts.MaxBytes = m.maxBytes
+	opts.LimitSize = true
+
+	return nil
+}
+
+func (m pullMaxMessagesWithBytesLimit) configureMessages(opts *consumeOpts) error {
+	if m.maxMessages <= 0 {
+		return fmt.Errorf("%w: maxMessages size must be at least 1", ErrInvalidOption)
+	}
+	if m.maxBytes <= 0 {
+		return fmt.Errorf("%w: maxBytes size must be at least 1", ErrInvalidOption)
+	}
+	if opts.MaxMessages > 0 {
+		return fmt.Errorf("%w: maxMessages already set", ErrInvalidOption)
+	}
+	opts.MaxMessages = m.maxMessages
+	opts.MaxBytes = m.maxBytes
+	opts.LimitSize = true
+
+	return nil
+}
+
+// PullExpiry sets the timeout on a single pull request, waiting until at least one
+// message is available.
+// If not provided, a default of 30 seconds will be used.
+//
+// PullExpiry implements both PullConsumeOpt and PullMessagesOpt, allowing
+// it to configure Consumer.Consume and Consumer.Messages.
+type PullExpiry time.Duration
+
+func (exp PullExpiry) configureConsume(opts *consumeOpts) error {
+	expiry := time.Duration(exp)
+	if expiry < time.Second {
+		return fmt.Errorf("%w: expires value must be at least 1s", ErrInvalidOption)
+	}
+	opts.Expires = expiry
+	return nil
+}
+
+func (exp PullExpiry) configureMessages(opts *consumeOpts) error {
+	expiry := time.Duration(exp)
+	if expiry < time.Second {
+		return fmt.Errorf("%w: expires value must be at least 1s", ErrInvalidOption)
+	}
+	opts.Expires = expiry
+	return nil
+}
+
+// PullMaxBytes limits the number of bytes to be buffered in the client.
+// If not provided, the limit is not set (max messages will be used instead).
+// This option is exclusive with PullMaxMessages.
+//
+// The value should be set to a high enough value to accommodate the largest
+// message expected from the server. Note that it may not be sufficient to set
+// this value to the maximum message size, as this setting controls the client
+// buffer size, not the max bytes requested from the server within a single pull
+// request. If the value is set too low, the consumer will stall and not be able
+// to consume messages.
+//
+// PullMaxBytes implements both PullConsumeOpt and PullMessagesOpt, allowing
+// it to configure Consumer.Consume and Consumer.Messages.
+type PullMaxBytes int
+
+func (max PullMaxBytes) configureConsume(opts *consumeOpts) error {
+	if max <= 0 {
+		return fmt.Errorf("%w: max bytes must be greater than 0", ErrInvalidOption)
+	}
+	opts.MaxBytes = int(max)
+	return nil
+}
+
+func (max PullMaxBytes) configureMessages(opts *consumeOpts) error {
+	if max <= 0 {
+		return fmt.Errorf("%w: max bytes must be greater than 0", ErrInvalidOption)
+	}
+	opts.MaxBytes = int(max)
+	return nil
+}
+
+// PullThresholdMessages sets the message count on which consuming will trigger a
+// new pull request to the server. 
Defaults to 50% of MaxMessages. +// +// PullThresholdMessages implements both PullConsumeOpt and PullMessagesOpt, +// allowing it to configure Consumer.Consume and Consumer.Messages. +type PullThresholdMessages int + +func (t PullThresholdMessages) configureConsume(opts *consumeOpts) error { + opts.ThresholdMessages = int(t) + return nil +} + +func (t PullThresholdMessages) configureMessages(opts *consumeOpts) error { + opts.ThresholdMessages = int(t) + return nil +} + +// PullThresholdBytes sets the byte count on which consuming will trigger +// new pull request to the server. Defaults to 50% of MaxBytes (if set). +// +// PullThresholdBytes implements both PullConsumeOpt and PullMessagesOpt, +// allowing it to configure Consumer.Consume and Consumer.Messages. +type PullThresholdBytes int + +func (t PullThresholdBytes) configureConsume(opts *consumeOpts) error { + opts.ThresholdBytes = int(t) + return nil +} + +func (t PullThresholdBytes) configureMessages(opts *consumeOpts) error { + opts.ThresholdBytes = int(t) + return nil +} + +// PullMinPending sets the minimum number of messages that should be pending for +// a consumer with PriorityPolicyOverflow to be considered for delivery. +// If provided, PullPriorityGroup must be set as well and the consumer has to have +// PriorityPolicy set to PriorityPolicyOverflow. +// +// PullMinPending implements both PullConsumeOpt and PullMessagesOpt, allowing +// it to configure Consumer.Consume and Consumer.Messages. +type PullMinPending int + +func (min PullMinPending) configureConsume(opts *consumeOpts) error { + if min < 1 { + return fmt.Errorf("%w: min pending should be more than 0", ErrInvalidOption) + } + opts.MinPending = int64(min) + return nil +} + +func (min PullMinPending) configureMessages(opts *consumeOpts) error { + if min < 1 { + return fmt.Errorf("%w: min pending should be more than 0", ErrInvalidOption) + } + opts.MinPending = int64(min) + return nil +} + +// PullMinAckPending sets the minimum number of pending acks that should be +// present for a consumer with PriorityPolicyOverflow to be considered for +// delivery. If provided, PullPriorityGroup must be set as well and the consumer +// has to have PriorityPolicy set to PriorityPolicyOverflow. +// +// PullMinAckPending implements both PullConsumeOpt and PullMessagesOpt, allowing +// it to configure Consumer.Consume and Consumer.Messages. +type PullMinAckPending int + +func (min PullMinAckPending) configureConsume(opts *consumeOpts) error { + if min < 1 { + return fmt.Errorf("%w: min pending should be more than 0", ErrInvalidOption) + } + opts.MinAckPending = int64(min) + return nil +} + +func (min PullMinAckPending) configureMessages(opts *consumeOpts) error { + if min < 1 { + return fmt.Errorf("%w: min pending should be more than 0", ErrInvalidOption) + } + opts.MinAckPending = int64(min) + return nil +} + +// PullPriorityGroup sets the priority group for a consumer. +// It has to match one of the priority groups set on the consumer. +// +// PullPriorityGroup implements both PullConsumeOpt and PullMessagesOpt, allowing +// it to configure Consumer.Consume and Consumer.Messages. 
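+//
+// An illustrative sketch (editor's addition; cons is a hypothetical Consumer
+// and the group name must match one of the consumer's configured
+// PriorityGroups):
+//
+//	msgs, err := cons.Messages(jetstream.PullPriorityGroup("jobs"))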
+type PullPriorityGroup string + +func (g PullPriorityGroup) configureConsume(opts *consumeOpts) error { + opts.Group = string(g) + return nil +} + +func (g PullPriorityGroup) configureMessages(opts *consumeOpts) error { + opts.Group = string(g) + return nil +} + +// PullHeartbeat sets the idle heartbeat duration for a pull subscription +// If a client does not receive a heartbeat message from a stream for more +// than the idle heartbeat setting, the subscription will be removed +// and error will be passed to the message handler. +// If not provided, a default PullExpiry / 2 will be used (capped at 30 seconds) +// +// PullHeartbeat implements both PullConsumeOpt and PullMessagesOpt, allowing +// it to configure Consumer.Consume and Consumer.Messages. +type PullHeartbeat time.Duration + +func (hb PullHeartbeat) configureConsume(opts *consumeOpts) error { + hbTime := time.Duration(hb) + if hbTime < 500*time.Millisecond || hbTime > 30*time.Second { + return fmt.Errorf("%w: idle_heartbeat value must be within 500ms-30s range", ErrInvalidOption) + } + opts.Heartbeat = hbTime + return nil +} + +func (hb PullHeartbeat) configureMessages(opts *consumeOpts) error { + hbTime := time.Duration(hb) + if hbTime < 500*time.Millisecond || hbTime > 30*time.Second { + return fmt.Errorf("%w: idle_heartbeat value must be within 500ms-30s range", ErrInvalidOption) + } + opts.Heartbeat = hbTime + return nil +} + +// StopAfter sets the number of messages after which the consumer is +// automatically stopped and no more messages are pulled from the server. +// +// StopAfter implements both PullConsumeOpt and PullMessagesOpt, allowing +// it to configure Consumer.Consume and Consumer.Messages. +type StopAfter int + +func (nMsgs StopAfter) configureConsume(opts *consumeOpts) error { + if nMsgs <= 0 { + return fmt.Errorf("%w: auto stop after value cannot be less than 1", ErrInvalidOption) + } + opts.StopAfter = int(nMsgs) + return nil +} + +func (nMsgs StopAfter) configureMessages(opts *consumeOpts) error { + if nMsgs <= 0 { + return fmt.Errorf("%w: auto stop after value cannot be less than 1", ErrInvalidOption) + } + opts.StopAfter = int(nMsgs) + return nil +} + +// ConsumeErrHandler sets custom error handler invoked when an error was +// encountered while consuming messages It will be invoked for both terminal +// (Consumer Deleted, invalid request body) and non-terminal (e.g. missing +// heartbeats) errors. +func ConsumeErrHandler(cb ConsumeErrHandlerFunc) PullConsumeOpt { + return pullOptFunc(func(cfg *consumeOpts) error { + cfg.ErrHandler = cb + return nil + }) +} + +// WithMessagesErrOnMissingHeartbeat sets whether a missing heartbeat error +// should be reported when calling [MessagesContext.Next] (Default: true). +func WithMessagesErrOnMissingHeartbeat(hbErr bool) PullMessagesOpt { + return pullOptFunc(func(cfg *consumeOpts) error { + cfg.ReportMissingHeartbeats = hbErr + return nil + }) +} + +// FetchMinPending sets the minimum number of messages that should be pending for +// a consumer with PriorityPolicyOverflow to be considered for delivery. +// If provided, FetchPriorityGroup must be set as well and the consumer has to have +// PriorityPolicy set to PriorityPolicyOverflow. 
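+//
+// For example (editor's sketch; cons and the values are illustrative):
+//
+//	batch, err := cons.Fetch(10,
+//		jetstream.FetchPriorityGroup("jobs"),
+//		jetstream.FetchMinPending(100))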
+func FetchMinPending(min int64) FetchOpt { + return func(req *pullRequest) error { + if min < 1 { + return fmt.Errorf("%w: min pending should be more than 0", ErrInvalidOption) + } + req.MinPending = min + return nil + } +} + +// FetchMinAckPending sets the minimum number of pending acks that should be +// present for a consumer with PriorityPolicyOverflow to be considered for +// delivery. If provided, FetchPriorityGroup must be set as well and the consumer +// has to have PriorityPolicy set to PriorityPolicyOverflow. +func FetchMinAckPending(min int64) FetchOpt { + return func(req *pullRequest) error { + if min < 1 { + return fmt.Errorf("%w: min ack pending should be more than 0", ErrInvalidOption) + } + req.MinAckPending = min + return nil + } +} + +// FetchPriorityGroup sets the priority group for a consumer. +// It has to match one of the priority groups set on the consumer. +func FetchPriorityGroup(group string) FetchOpt { + return func(req *pullRequest) error { + req.Group = group + return nil + } +} + +// FetchMaxWait sets custom timeout for fetching predefined batch of messages. +// +// If not provided, a default of 30 seconds will be used. +func FetchMaxWait(timeout time.Duration) FetchOpt { + return func(req *pullRequest) error { + if timeout <= 0 { + return fmt.Errorf("%w: timeout value must be greater than 0", ErrInvalidOption) + } + req.Expires = timeout + return nil + } +} + +// FetchHeartbeat sets custom heartbeat for individual fetch request. If a +// client does not receive a heartbeat message from a stream for more than 2 +// times the idle heartbeat setting, Fetch will return [ErrNoHeartbeat]. +// +// Heartbeat value has to be lower than FetchMaxWait / 2. +// +// If not provided, heartbeat will is set to 5s for requests with FetchMaxWait > 10s +// and disabled otherwise. +func FetchHeartbeat(hb time.Duration) FetchOpt { + return func(req *pullRequest) error { + if hb <= 0 { + return fmt.Errorf("%w: timeout value must be greater than 0", ErrInvalidOption) + } + req.Heartbeat = hb + return nil + } +} + +// WithDeletedDetails can be used to display the information about messages +// deleted from a stream on a stream info request +func WithDeletedDetails(deletedDetails bool) StreamInfoOpt { + return func(req *streamInfoRequest) error { + req.DeletedDetails = deletedDetails + return nil + } +} + +// WithSubjectFilter can be used to display the information about messages +// stored on given subjects. +// NOTE: if the subject filter matches over 100k +// subjects, this will result in multiple requests to the server to retrieve all +// the information, and all of the returned subjects will be kept in memory. +func WithSubjectFilter(subject string) StreamInfoOpt { + return func(req *streamInfoRequest) error { + req.SubjectFilter = subject + return nil + } +} + +// WithStreamListSubject can be used to filter results of ListStreams and +// StreamNames requests to only streams that have given subject in their +// configuration. +func WithStreamListSubject(subject string) StreamListOpt { + return func(req *streamsRequest) error { + req.Subject = subject + return nil + } +} + +// WithMsgID sets the message ID used for deduplication. +func WithMsgID(id string) PublishOpt { + return func(opts *pubOpts) error { + opts.id = id + return nil + } +} + +// WithMsgTTL sets per msg TTL. +// Requires [StreamConfig.AllowMsgTTL] to be enabled. 
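+//
+// For example (editor's sketch; the subject and payload are hypothetical):
+//
+//	ack, err := js.Publish(ctx, "ORDERS.new", payload, jetstream.WithMsgTTL(time.Hour))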
+func WithMsgTTL(dur time.Duration) PublishOpt { + return func(opts *pubOpts) error { + opts.ttl = dur + return nil + } +} + +// WithExpectStream sets the expected stream the message should be published to. +// If the message is published to a different stream server will reject the +// message and publish will fail. +func WithExpectStream(stream string) PublishOpt { + return func(opts *pubOpts) error { + opts.stream = stream + return nil + } +} + +// WithExpectLastSequence sets the expected sequence number the last message +// on a stream should have. If the last message has a different sequence number +// server will reject the message and publish will fail. +func WithExpectLastSequence(seq uint64) PublishOpt { + return func(opts *pubOpts) error { + opts.lastSeq = &seq + return nil + } +} + +// WithExpectLastSequencePerSubject sets the expected sequence number the last +// message on a subject the message is published to. If the last message on a +// subject has a different sequence number server will reject the message and +// publish will fail. +func WithExpectLastSequencePerSubject(seq uint64) PublishOpt { + return func(opts *pubOpts) error { + opts.lastSubjectSeq = &seq + return nil + } +} + +// WithExpectLastMsgID sets the expected message ID the last message on a stream +// should have. If the last message has a different message ID server will +// reject the message and publish will fail. +func WithExpectLastMsgID(id string) PublishOpt { + return func(opts *pubOpts) error { + opts.lastMsgID = id + return nil + } +} + +// WithRetryWait sets the retry wait time when ErrNoResponders is encountered. +// Defaults to 250ms. +func WithRetryWait(dur time.Duration) PublishOpt { + return func(opts *pubOpts) error { + if dur <= 0 { + return fmt.Errorf("%w: retry wait should be more than 0", ErrInvalidOption) + } + opts.retryWait = dur + return nil + } +} + +// WithRetryAttempts sets the retry number of attempts when ErrNoResponders is +// encountered. Defaults to 2 +func WithRetryAttempts(num int) PublishOpt { + return func(opts *pubOpts) error { + if num < 0 { + return fmt.Errorf("%w: retry attempts cannot be negative", ErrInvalidOption) + } + opts.retryAttempts = num + return nil + } +} + +// WithStallWait sets the max wait when the producer becomes stall producing +// messages. If a publish call is blocked for this long, ErrTooManyStalledMsgs +// is returned. +func WithStallWait(ttl time.Duration) PublishOpt { + return func(opts *pubOpts) error { + if ttl <= 0 { + return fmt.Errorf("%w: stall wait should be more than 0", ErrInvalidOption) + } + opts.stallWait = ttl + return nil + } +} diff --git a/vendor/github.com/nats-io/nats.go/jetstream/kv.go b/vendor/github.com/nats-io/nats.go/jetstream/kv.go new file mode 100644 index 000000000..f2c140502 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/jetstream/kv.go @@ -0,0 +1,1523 @@ +// Copyright 2023-2024 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package jetstream
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/nats-io/nats.go"
+	"github.com/nats-io/nats.go/internal/parser"
+)
+
+type (
+	// KeyValueManager is used to manage KeyValue stores. It provides methods to
+	// create, delete, and retrieve KeyValue stores.
+	KeyValueManager interface {
+		// KeyValue will look up and bind to an existing KeyValue store.
+		//
+		// If the KeyValue store with given name does not exist,
+		// ErrBucketNotFound will be returned.
+		KeyValue(ctx context.Context, bucket string) (KeyValue, error)
+
+		// CreateKeyValue will create a KeyValue store with the given
+		// configuration.
+		//
+		// If a KeyValue store with the same name already exists and the
+		// configuration is different, ErrBucketExists will be returned.
+		CreateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error)
+
+		// UpdateKeyValue will update an existing KeyValue store with the given
+		// configuration.
+		//
+		// If a KeyValue store with the given name does not exist, ErrBucketNotFound
+		// will be returned.
+		UpdateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error)
+
+		// CreateOrUpdateKeyValue will create a KeyValue store if it does not
+		// exist or update an existing KeyValue store with the given
+		// configuration (if possible).
+		CreateOrUpdateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error)
+
+		// DeleteKeyValue will delete this KeyValue store.
+		//
+		// If the KeyValue store with given name does not exist,
+		// ErrBucketNotFound will be returned.
+		DeleteKeyValue(ctx context.Context, bucket string) error
+
+		// KeyValueStoreNames is used to retrieve a list of key value store
+		// names. It returns a KeyValueNamesLister exposing a channel to read
+		// the names from. The lister will always close the channel when done
+		// (either all names have been read or an error occurred) and therefore
+		// can be used in range loops.
+		KeyValueStoreNames(ctx context.Context) KeyValueNamesLister
+
+		// KeyValueStores is used to retrieve a list of key value store
+		// statuses. It returns a KeyValueLister exposing a channel to read the
+		// statuses from. The lister will always close the channel when done
+		// (either all statuses have been read or an error occurred) and
+		// therefore can be used in range loops.
+		KeyValueStores(ctx context.Context) KeyValueLister
+	}
+
+	// KeyValue contains methods to operate on a KeyValue store.
+	// Using the KeyValue interface, it is possible to:
+	//
+	// - Get, Put, Create, Update, Delete and Purge a key
+	// - Watch for updates to keys
+	// - List all keys
+	// - Retrieve historical values for a key
+	// - Retrieve status and configuration of a key value bucket
+	// - Purge all delete markers
+	// - Close the KeyValue store
+	KeyValue interface {
+		// Get returns the latest value for the key. If the key does not exist,
+		// ErrKeyNotFound will be returned.
+		Get(ctx context.Context, key string) (KeyValueEntry, error)
+
+		// GetRevision returns a specific revision value for the key. If the key
+		// does not exist or the provided revision does not exist,
+		// ErrKeyNotFound will be returned.
+		GetRevision(ctx context.Context, key string, revision uint64) (KeyValueEntry, error)
+
+		// Put will place the new value for the key into the store. If the key
+		// does not exist, it will be created. If the key exists, the value will
+		// be updated.
+		//
+		// A key has to consist of alphanumeric characters, dashes, underscores,
+		// equal signs, and dots.
+		Put(ctx context.Context, key string, value []byte) (uint64, error)
+
+		// PutString will place the string for the key into the store. If the
+		// key does not exist, it will be created. If the key exists, the value
+		// will be updated.
+		//
+		// A key has to consist of alphanumeric characters, dashes, underscores,
+		// equal signs, and dots.
+		PutString(ctx context.Context, key string, value string) (uint64, error)
+
+		// Create will add the key/value pair if it does not exist. If the key
+		// already exists, ErrKeyExists will be returned.
+		//
+		// A key has to consist of alphanumeric characters, dashes, underscores,
+		// equal signs, and dots.
+		Create(ctx context.Context, key string, value []byte, opts ...KVCreateOpt) (uint64, error)
+
+		// Update will update the value if the latest revision matches.
+		// If the provided revision is not the latest, Update will return an error.
+		// Update also resets the TTL associated with the key (if any).
+		Update(ctx context.Context, key string, value []byte, revision uint64) (uint64, error)
+
+		// Delete will place a delete marker and leave all revisions. A history
+		// of a deleted key can still be retrieved by using the History method
+		// or a watch on the key. [Delete] is a non-destructive operation and
+		// will not remove any previous revisions from the underlying stream.
+		//
+		// [LastRevision] option can be specified to only perform delete if the
+		// latest revision matches the provided one.
+		Delete(ctx context.Context, key string, opts ...KVDeleteOpt) error
+
+		// Purge will place a delete marker and remove all previous revisions.
+		// Only the latest revision will be preserved (with a delete marker).
+		// Unlike [Delete], Purge is a destructive operation and will remove all
+		// previous revisions from the underlying streams.
+		//
+		// [LastRevision] option can be specified to only perform purge if the
+		// latest revision matches the provided one.
+		Purge(ctx context.Context, key string, opts ...KVDeleteOpt) error
+
+		// Watch for any updates to keys that match the keys argument which
+		// could include wildcards. By default, the watcher will send the latest
+		// value for each key and all future updates. Watch will send a nil
+		// entry when it has received all initial values. There are a few ways
+		// to configure the watcher:
+		//
+		// - IncludeHistory will have the key watcher send all historical values
+		//   for each key (up to KeyValueMaxHistory).
+		// - IgnoreDeletes will have the key watcher not pass any keys with
+		//   delete markers.
+		// - UpdatesOnly will have the key watcher only pass updates on values
+		//   (without latest values when started).
+		// - MetaOnly will have the key watcher retrieve only the entry meta
+		//   data, not the entry value.
+		// - ResumeFromRevision instructs the key watcher to resume from a
+		//   specific revision number.
+		Watch(ctx context.Context, keys string, opts ...WatchOpt) (KeyWatcher, error)
+
+		// WatchAll will watch for any updates to all keys. It can be configured
+		// with the same options as Watch.
+		WatchAll(ctx context.Context, opts ...WatchOpt) (KeyWatcher, error)
+
+		// WatchFiltered will watch for any updates to keys that match the keys
+		// argument. It can be configured with the same options as Watch.
+		WatchFiltered(ctx context.Context, keys []string, opts ...WatchOpt) (KeyWatcher, error)
+
+		// Keys will return all keys.
+		// Deprecated: Use ListKeys instead to avoid memory issues.
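+		//
+		// Preferred alternative (editor's illustrative sketch; kv is a
+		// hypothetical KeyValue handle):
+		//
+		//	lister, _ := kv.ListKeys(ctx)
+		//	for key := range lister.Keys() {
+		//		fmt.Println(key)
+		//	}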
+ Keys(ctx context.Context, opts ...WatchOpt) ([]string, error) + + // ListKeys will return a KeyLister, allowing all keys to be retrieved + // from the key value store in a streaming fashion (on a channel). + ListKeys(ctx context.Context, opts ...WatchOpt) (KeyLister, error) + + // ListKeysFiltered returns a KeyLister for filtered keys in the bucket. + ListKeysFiltered(ctx context.Context, filters ...string) (KeyLister, error) + + // History will return all historical values for the key (up to + // KeyValueMaxHistory). + History(ctx context.Context, key string, opts ...WatchOpt) ([]KeyValueEntry, error) + + // Bucket returns the KV store name. + Bucket() string + + // PurgeDeletes will remove all current delete markers. It can be + // configured using DeleteMarkersOlderThan option to only remove delete + // markers older than a certain duration. + // + // [PurgeDeletes] is a destructive operation and will remove all entries + // with delete markers from the underlying stream. + PurgeDeletes(ctx context.Context, opts ...KVPurgeOpt) error + + // Status retrieves the status and configuration of a bucket. + Status(ctx context.Context) (KeyValueStatus, error) + } + + // KeyValueConfig is the configuration for a KeyValue store. + KeyValueConfig struct { + // Bucket is the name of the KeyValue store. Bucket name has to be + // unique and can only contain alphanumeric characters, dashes, and + // underscores. + Bucket string `json:"bucket"` + + // Description is an optional description for the KeyValue store. + Description string `json:"description,omitempty"` + + // MaxValueSize is the maximum size of a value in bytes. If not + // specified, the default is -1 (unlimited). + MaxValueSize int32 `json:"max_value_size,omitempty"` + + // History is the number of historical values to keep per key. If not + // specified, the default is 1. Max is 64. + History uint8 `json:"history,omitempty"` + + // TTL is the expiry time for keys. By default, keys do not expire. + TTL time.Duration `json:"ttl,omitempty"` + + // MaxBytes is the maximum size in bytes of the KeyValue store. If not + // specified, the default is -1 (unlimited). + MaxBytes int64 `json:"max_bytes,omitempty"` + + // Storage is the type of storage to use for the KeyValue store. If not + // specified, the default is FileStorage. + Storage StorageType `json:"storage,omitempty"` + + // Replicas is the number of replicas to keep for the KeyValue store in + // clustered jetstream. Defaults to 1, maximum is 5. + Replicas int `json:"num_replicas,omitempty"` + + // Placement is used to declare where the stream should be placed via + // tags and/or an explicit cluster name. + Placement *Placement `json:"placement,omitempty"` + + // RePublish allows immediately republishing a message to the configured + // subject after it's stored. + RePublish *RePublish `json:"republish,omitempty"` + + // Mirror defines the configuration for mirroring another KeyValue + // store. + Mirror *StreamSource `json:"mirror,omitempty"` + + // Sources defines the configuration for sources of a KeyValue store. + Sources []*StreamSource `json:"sources,omitempty"` + + // Compression sets the underlying stream compression. + // NOTE: Compression is supported for nats-server 2.10.0+ + Compression bool `json:"compression,omitempty"` + + // LimitMarkerTTL is how long the bucket keeps markers when keys are + // removed by the TTL setting. + LimitMarkerTTL time.Duration + } + + // KeyLister is used to retrieve a list of key value store keys.
It returns + // a channel to read the keys from. The lister will always close the channel + // when done (either all keys have been read or an error occurred) and + // therefore can be used in range loops. Stop can be used to stop the lister + // when not all keys have been read. + KeyLister interface { + Keys() <-chan string + Stop() error + } + + // KeyValueLister is used to retrieve a list of key value stores. It returns + // a channel to read the KV store statuses from. The lister will always + // close the channel when done (either all stores have been retrieved or an + // error occurred) and therefore can be used in range loops. Error can be + // used to check whether an error occurred while the statuses were being + // received. + KeyValueLister interface { + Status() <-chan KeyValueStatus + Error() error + } + + // KeyValueNamesLister is used to retrieve a list of key value store names. + // It returns a channel to read the KV bucket names from. The lister will + // always close the channel when done (either all stores have been retrieved + // or an error occurred) and therefore can be used in range loops. Error can + // be used to check whether an error occurred while the names were being + // received. + KeyValueNamesLister interface { + Name() <-chan string + Error() error + } + + // KeyValueStatus is run-time status about a Key-Value bucket. + KeyValueStatus interface { + // Bucket returns the name of the KeyValue store. + Bucket() string + + // Values is how many messages are in the bucket, including historical values. + Values() uint64 + + // History returns the configured history kept per key. + History() int64 + + // TTL returns the duration for which keys are kept in the bucket. + TTL() time.Duration + + // BackingStore indicates what technology is used for storage of the bucket. + // Currently only JetStream is supported. + BackingStore() string + + // Bytes returns the size of the bucket in bytes. + Bytes() uint64 + + // IsCompressed indicates if the data is compressed on disk. + IsCompressed() bool + + // LimitMarkerTTL is how long the bucket keeps markers when keys are + // removed by the TTL setting, 0 meaning markers are not supported. + LimitMarkerTTL() time.Duration + } + + // KeyWatcher is what is returned when doing a watch. It can be used to + // retrieve updates to keys. If not using UpdatesOnly option, it will also + // send the latest value for each key. After all initial values have been + // sent, a nil entry will be sent. Stop can be used to stop the watcher and + // close the underlying channel. Watcher will not close the channel until + // Stop is called or the connection is closed. + KeyWatcher interface { + Updates() <-chan KeyValueEntry + Stop() error + } + + // KeyValueEntry is a retrieved entry for Get, List or Watch. + KeyValueEntry interface { + // Bucket is the bucket the data was loaded from. + Bucket() string + + // Key is the name of the key that was retrieved. + Key() string + + // Value is the retrieved value. + Value() []byte + + // Revision is a unique sequence for this value. + Revision() uint64 + + // Created is the time the data was put in the bucket. + Created() time.Time + + // Delta is the distance from the latest value (how far the current sequence + // is from the latest). + Delta() uint64 + + // Operation returns Put or Delete or Purge, depending on the manner in + // which the current revision was created.
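+ // + // For instance (a sketch, assuming a received KeyValueEntry named entry): + // + // switch entry.Operation() { + // case KeyValuePut: + // // the value was created or updated + // case KeyValueDelete, KeyValuePurge: + // // a delete marker was observed + // }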
+ Operation() KeyValueOp + } +) + +type ( + WatchOpt interface { + configureWatcher(opts *watchOpts) error + } + + watchOpts struct { + // Do not send delete markers to the update channel. + ignoreDeletes bool + // Include all history per subject, not just last one. + includeHistory bool + // Include only updates for keys. + updatesOnly bool + // retrieve only the meta data of the entry + metaOnly bool + // resumeFromRevision is the revision to resume from. + resumeFromRevision uint64 + } + + // KVDeleteOpt is used to configure delete and purge operations. + KVDeleteOpt interface { + configureDelete(opts *deleteOpts) error + } + + deleteOpts struct { + // Remove all previous revisions. + purge bool + + // Delete only if the latest revision matches. + revision uint64 + + // purge ttl + ttl time.Duration + } + + // KVCreateOpt is used to configure Create. + KVCreateOpt interface { + configureCreate(opts *createOpts) error + } + + createOpts struct { + ttl time.Duration // TTL for the key + } + + // KVPurgeOpt is used to configure PurgeDeletes. + KVPurgeOpt interface { + configurePurge(opts *purgeOpts) error + } + + purgeOpts struct { + dmthr time.Duration // Delete markers threshold + } +) + +// kvs is the implementation of KeyValue +type kvs struct { + name string + streamName string + pre string + putPre string + pushJS nats.JetStreamContext + js *jetStream + stream Stream + // If true, it means that APIPrefix/Domain was set in the context + // and we need to add something to some of our high level protocols + // (such as Put, etc..) + useJSPfx bool + // To know if we can use the stream direct get API + useDirect bool +} + +// KeyValueOp represents the type of KV operation (Put, Delete, Purge). It is a +// part of KeyValueEntry. +type KeyValueOp uint8 + +// Available KeyValueOp values. +const ( + // KeyValuePut is a set on a revision which creates or updates a value for a + // key. + KeyValuePut KeyValueOp = iota + + // KeyValueDelete is a set on a revision which adds a delete marker for a + // key. + KeyValueDelete + + // KeyValuePurge is a set on a revision which removes all previous revisions + // for a key. + KeyValuePurge +) + +func (op KeyValueOp) String() string { + switch op { + case KeyValuePut: + return "KeyValuePutOp" + case KeyValueDelete: + return "KeyValueDeleteOp" + case KeyValuePurge: + return "KeyValuePurgeOp" + default: + return "Unknown Operation" + } +} + +const ( + kvBucketNamePre = "KV_" + kvBucketNameTmpl = "KV_%s" + kvSubjectsTmpl = "$KV.%s.>" + kvSubjectsPreTmpl = "$KV.%s." + kvSubjectsPreDomainTmpl = "%s.$KV.%s." + kvNoPending = "0" +) + +const ( + KeyValueMaxHistory = 64 + AllKeys = ">" + kvLatestRevision = 0 + kvop = "KV-Operation" + kvdel = "DEL" + kvpurge = "PURGE" +) + +// Regex for valid keys and buckets. +var ( + validBucketRe = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`) + validKeyRe = regexp.MustCompile(`^[-/_=\.a-zA-Z0-9]+$`) + validSearchKeyRe = regexp.MustCompile(`^[-/_=\.a-zA-Z0-9*]*[>]?$`) +) + +func (js *jetStream) KeyValue(ctx context.Context, bucket string) (KeyValue, error) { + if !bucketValid(bucket) { + return nil, ErrInvalidBucketName + } + streamName := fmt.Sprintf(kvBucketNameTmpl, bucket) + stream, err := js.Stream(ctx, streamName) + if err != nil { + if errors.Is(err, ErrStreamNotFound) { + err = ErrBucketNotFound + } + return nil, err + } + // Do some quick sanity checks that this is a correctly formed stream for KV. + // Max msgs per subject should be > 0. 
+ if stream.CachedInfo().Config.MaxMsgsPerSubject < 1 { + return nil, ErrBadBucket + } + pushJS, err := js.legacyJetStream() + if err != nil { + return nil, err + } + + return mapStreamToKVS(js, pushJS, stream), nil +} + +func (js *jetStream) CreateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error) { + scfg, err := js.prepareKeyValueConfig(ctx, cfg) + if err != nil { + return nil, err + } + + stream, err := js.CreateStream(ctx, scfg) + if err != nil { + if errors.Is(err, ErrStreamNameAlreadyInUse) { + // Errors are joined so that backwards compatibility is retained + // and previous checks for ErrStreamNameAlreadyInUse will still work. + err = errors.Join(fmt.Errorf("%w: %s", ErrBucketExists, cfg.Bucket), err) + + // If we have a failure to add, it could be because we have + // a config change if the KV was created before a bug fix + // that changed the default value of the discard policy. + // We will check if the stream exists and if the only difference + // is the discard policy, we will update the stream. + // The same logic applies for KVs created pre 2.9.x and + // the AllowDirect setting. + if stream, _ = js.Stream(ctx, scfg.Name); stream != nil { + cfg := stream.CachedInfo().Config + cfg.Discard = scfg.Discard + cfg.AllowDirect = scfg.AllowDirect + if reflect.DeepEqual(cfg, scfg) { + stream, err = js.UpdateStream(ctx, scfg) + } + } + } + if err != nil { + return nil, err + } + } + pushJS, err := js.legacyJetStream() + if err != nil { + return nil, err + } + + return mapStreamToKVS(js, pushJS, stream), nil +} + +func (js *jetStream) UpdateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error) { + scfg, err := js.prepareKeyValueConfig(ctx, cfg) + if err != nil { + return nil, err + } + + stream, err := js.UpdateStream(ctx, scfg) + if err != nil { + if errors.Is(err, ErrStreamNotFound) { + err = fmt.Errorf("%w: %s", ErrBucketNotFound, cfg.Bucket) + } + return nil, err + } + pushJS, err := js.legacyJetStream() + if err != nil { + return nil, err + } + + return mapStreamToKVS(js, pushJS, stream), nil +} + +func (js *jetStream) CreateOrUpdateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error) { + scfg, err := js.prepareKeyValueConfig(ctx, cfg) + if err != nil { + return nil, err + } + + stream, err := js.CreateOrUpdateStream(ctx, scfg) + if err != nil { + return nil, err + } + pushJS, err := js.legacyJetStream() + if err != nil { + return nil, err + } + + return mapStreamToKVS(js, pushJS, stream), nil +} + +func (js *jetStream) prepareKeyValueConfig(ctx context.Context, cfg KeyValueConfig) (StreamConfig, error) { + if !bucketValid(cfg.Bucket) { + return StreamConfig{}, ErrInvalidBucketName + } + if _, err := js.AccountInfo(ctx); err != nil { + return StreamConfig{}, err + } + + // Default to 1 for history. Max is 64 for now. + history := int64(1) + if cfg.History > 0 { + if cfg.History > KeyValueMaxHistory { + return StreamConfig{}, ErrHistoryTooLarge + } + history = int64(cfg.History) + } + + replicas := cfg.Replicas + if replicas == 0 { + replicas = 1 + } + + // We will explicitly set some values so that we can do a comparison + // if we get an "already in use" error and need to check if the config + // is the same. + maxBytes := cfg.MaxBytes + if maxBytes == 0 { + maxBytes = -1 + } + maxMsgSize := cfg.MaxValueSize + if maxMsgSize == 0 { + maxMsgSize = -1 + } + // When the stream's MaxAge is not set, the server uses 2 minutes as the + // default for the duplicate window. If MaxAge is set and lower than 2 + // minutes, then the duplicate window will be set to that.
If MaxAge is greater, + // we will cap the duplicate window to 2 minutes (to be consistent with + // previous behavior). + duplicateWindow := 2 * time.Minute + if cfg.TTL > 0 && cfg.TTL < duplicateWindow { + duplicateWindow = cfg.TTL + } + var compression StoreCompression + if cfg.Compression { + compression = S2Compression + } + var allowMsgTTL bool + var subjectDeleteMarkerTTL time.Duration + if cfg.LimitMarkerTTL != 0 { + info, err := js.AccountInfo(ctx) + if err != nil { + return StreamConfig{}, err + } + if info.API.Level < 1 { + return StreamConfig{}, ErrLimitMarkerTTLNotSupported + } + allowMsgTTL = true + subjectDeleteMarkerTTL = cfg.LimitMarkerTTL + } + scfg := StreamConfig{ + Name: fmt.Sprintf(kvBucketNameTmpl, cfg.Bucket), + Description: cfg.Description, + MaxMsgsPerSubject: history, + MaxBytes: maxBytes, + MaxAge: cfg.TTL, + MaxMsgSize: maxMsgSize, + Storage: cfg.Storage, + Replicas: replicas, + Placement: cfg.Placement, + AllowRollup: true, + DenyDelete: true, + Duplicates: duplicateWindow, + MaxMsgs: -1, + MaxConsumers: -1, + AllowDirect: true, + RePublish: cfg.RePublish, + Compression: compression, + Discard: DiscardNew, + AllowMsgTTL: allowMsgTTL, + SubjectDeleteMarkerTTL: subjectDeleteMarkerTTL, + } + if cfg.Mirror != nil { + // Copy in case we need to make changes so we do not change caller's version. + m := cfg.Mirror.copy() + if !strings.HasPrefix(m.Name, kvBucketNamePre) { + m.Name = fmt.Sprintf(kvBucketNameTmpl, m.Name) + } + scfg.Mirror = m + scfg.MirrorDirect = true + } else if len(cfg.Sources) > 0 { + // For now we do not allow direct subjects for sources. If that is desired a user could use stream API directly. + for _, ss := range cfg.Sources { + var sourceBucketName string + if strings.HasPrefix(ss.Name, kvBucketNamePre) { + sourceBucketName = ss.Name[len(kvBucketNamePre):] + } else { + sourceBucketName = ss.Name + ss.Name = fmt.Sprintf(kvBucketNameTmpl, ss.Name) + } + + if ss.External == nil || sourceBucketName != cfg.Bucket { + ss.SubjectTransforms = []SubjectTransformConfig{{Source: fmt.Sprintf(kvSubjectsTmpl, sourceBucketName), Destination: fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)}} + } + scfg.Sources = append(scfg.Sources, ss) + } + scfg.Subjects = []string{fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)} + } else { + scfg.Subjects = []string{fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)} + } + + return scfg, nil +} + +// DeleteKeyValue will delete this KeyValue store (JetStream stream). 
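+// +// For example (a sketch; the bucket name is hypothetical and js is a bound +// JetStream handle with a context ctx): +// +// if err := js.DeleteKeyValue(ctx, "CACHE"); err != nil { +// if errors.Is(err, ErrBucketNotFound) { +// // the bucket was already gone +// } +// }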
+func (js *jetStream) DeleteKeyValue(ctx context.Context, bucket string) error { + if !bucketValid(bucket) { + return ErrInvalidBucketName + } + stream := fmt.Sprintf(kvBucketNameTmpl, bucket) + if err := js.DeleteStream(ctx, stream); err != nil { + if errors.Is(err, ErrStreamNotFound) { + err = errors.Join(fmt.Errorf("%w: %s", ErrBucketNotFound, bucket), err) + } + return err + } + return nil +} + +// KeyValueStoreNames is used to retrieve a list of key value store names +func (js *jetStream) KeyValueStoreNames(ctx context.Context) KeyValueNamesLister { + res := &kvLister{ + kvNames: make(chan string), + } + l := &streamLister{js: js} + streamsReq := streamsRequest{ + Subject: fmt.Sprintf(kvSubjectsTmpl, "*"), + } + go func() { + defer close(res.kvNames) + for { + page, err := l.streamNames(ctx, streamsReq) + if err != nil && !errors.Is(err, ErrEndOfData) { + res.err = err + return + } + for _, name := range page { + if !strings.HasPrefix(name, kvBucketNamePre) { + continue + } + res.kvNames <- strings.TrimPrefix(name, kvBucketNamePre) + } + if errors.Is(err, ErrEndOfData) { + return + } + } + }() + return res +} + +// KeyValueStores is used to retrieve a list of key value store statuses +func (js *jetStream) KeyValueStores(ctx context.Context) KeyValueLister { + res := &kvLister{ + kvs: make(chan KeyValueStatus), + } + l := &streamLister{js: js} + streamsReq := streamsRequest{ + Subject: fmt.Sprintf(kvSubjectsTmpl, "*"), + } + go func() { + defer close(res.kvs) + for { + page, err := l.streamInfos(ctx, streamsReq) + if err != nil && !errors.Is(err, ErrEndOfData) { + res.err = err + return + } + for _, info := range page { + if !strings.HasPrefix(info.Config.Name, kvBucketNamePre) { + continue + } + res.kvs <- &KeyValueBucketStatus{info: info, bucket: strings.TrimPrefix(info.Config.Name, kvBucketNamePre)} + } + if errors.Is(err, ErrEndOfData) { + return + } + } + }() + return res +} + +// KeyValueBucketStatus represents status of a Bucket, implements KeyValueStatus +type KeyValueBucketStatus struct { + info *StreamInfo + bucket string +} + +// Bucket returns the name of the bucket +func (s *KeyValueBucketStatus) Bucket() string { return s.bucket } + +// Values is how many messages are in the bucket, including historical values +func (s *KeyValueBucketStatus) Values() uint64 { return s.info.State.Msgs } + +// History returns the configured history kept per key +func (s *KeyValueBucketStatus) History() int64 { return s.info.Config.MaxMsgsPerSubject } + +// TTL is how long the bucket keeps values for +func (s *KeyValueBucketStatus) TTL() time.Duration { return s.info.Config.MaxAge } + +// BackingStore indicates what technology is used for storage of the bucket +func (s *KeyValueBucketStatus) BackingStore() string { return "JetStream" } + +// StreamInfo is the stream info retrieved to create the status +func (s *KeyValueBucketStatus) StreamInfo() *StreamInfo { return s.info } + +// Bytes is the size of the stream +func (s *KeyValueBucketStatus) Bytes() uint64 { return s.info.State.Bytes } + +// IsCompressed indicates if the data is compressed on disk +func (s *KeyValueBucketStatus) IsCompressed() bool { return s.info.Config.Compression != NoCompression } + +// LimitMarkerTTL is how long the bucket keeps markers when keys are +// removed by the TTL setting, 0 meaning markers are not supported.
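+// +// A reading sketch (illustrative only; it assumes a bound KeyValue handle kv +// and a context ctx): +// +// st, err := kv.Status(ctx) +// if err == nil && st.LimitMarkerTTL() == 0 { +// // limit markers are not supported on this bucket +// }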
+func (s *KeyValueBucketStatus) LimitMarkerTTL() time.Duration { + return s.info.Config.SubjectDeleteMarkerTTL +} + +type kvLister struct { + kvs chan KeyValueStatus + kvNames chan string + err error +} + +func (kl *kvLister) Status() <-chan KeyValueStatus { + return kl.kvs +} + +func (kl *kvLister) Name() <-chan string { + return kl.kvNames +} + +func (kl *kvLister) Error() error { + return kl.err +} + +func (js *jetStream) legacyJetStream() (nats.JetStreamContext, error) { + opts := make([]nats.JSOpt, 0) + if js.opts.apiPrefix != "" { + opts = append(opts, nats.APIPrefix(js.opts.apiPrefix)) + } + if js.opts.clientTrace != nil { + opts = append(opts, nats.ClientTrace{ + RequestSent: js.opts.clientTrace.RequestSent, + ResponseReceived: js.opts.clientTrace.ResponseReceived, + }) + } + return js.conn.JetStream(opts...) +} + +func bucketValid(bucket string) bool { + if len(bucket) == 0 { + return false + } + return validBucketRe.MatchString(bucket) +} + +func keyValid(key string) bool { + if len(key) == 0 || key[0] == '.' || key[len(key)-1] == '.' { + return false + } + return validKeyRe.MatchString(key) +} + +func searchKeyValid(key string) bool { + if len(key) == 0 || key[0] == '.' || key[len(key)-1] == '.' { + return false + } + return validSearchKeyRe.MatchString(key) +} + +func (kv *kvs) get(ctx context.Context, key string, revision uint64) (KeyValueEntry, error) { + if !keyValid(key) { + return nil, ErrInvalidKey + } + + var b strings.Builder + b.WriteString(kv.pre) + b.WriteString(key) + + var m *RawStreamMsg + var err error + + if revision == kvLatestRevision { + m, err = kv.stream.GetLastMsgForSubject(ctx, b.String()) + } else { + m, err = kv.stream.GetMsg(ctx, revision) + // If a sequence was provided, just make sure that the retrieved + // message subject matches the request. + if err == nil && m.Subject != b.String() { + return nil, ErrKeyNotFound + } + } + if err != nil { + if errors.Is(err, ErrMsgNotFound) { + err = ErrKeyNotFound + } + return nil, err + } + + entry := &kve{ + bucket: kv.name, + key: key, + value: m.Data, + revision: m.Sequence, + created: m.Time, + } + + // Double check here that this is not a DEL Operation marker. + if len(m.Header) > 0 { + if m.Header.Get(kvop) != "" { + switch m.Header.Get(kvop) { + case kvdel: + entry.op = KeyValueDelete + case kvpurge: + entry.op = KeyValuePurge + } + } else if m.Header.Get(MarkerReasonHeader) != "" { + switch m.Header.Get(MarkerReasonHeader) { + case "MaxAge", "Purge": + entry.op = KeyValuePurge + case "Remove": + entry.op = KeyValueDelete + } + } + if entry.op != KeyValuePut { + return entry, ErrKeyDeleted + } + } + + return entry, nil +} + +// kve is the implementation of KeyValueEntry +type kve struct { + bucket string + key string + value []byte + revision uint64 + delta uint64 + created time.Time + op KeyValueOp +} + +func (e *kve) Bucket() string { return e.bucket } +func (e *kve) Key() string { return e.key } +func (e *kve) Value() []byte { return e.value } +func (e *kve) Revision() uint64 { return e.revision } +func (e *kve) Created() time.Time { return e.created } +func (e *kve) Delta() uint64 { return e.delta } +func (e *kve) Operation() KeyValueOp { return e.op } + +// Get returns the latest value for the key. 
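+// +// For example (a sketch, assuming a bound KeyValue handle kv, a context ctx, +// and a hypothetical key name): +// +// entry, err := kv.Get(ctx, "config.color") +// if errors.Is(err, ErrKeyNotFound) { +// // the key was never created, or was deleted +// }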
+func (kv *kvs) Get(ctx context.Context, key string) (KeyValueEntry, error) { + e, err := kv.get(ctx, key, kvLatestRevision) + if err != nil { + if errors.Is(err, ErrKeyDeleted) { + return nil, ErrKeyNotFound + } + return nil, err + } + + return e, nil +} + +// GetRevision returns a specific revision value for the key. +func (kv *kvs) GetRevision(ctx context.Context, key string, revision uint64) (KeyValueEntry, error) { + e, err := kv.get(ctx, key, revision) + if err != nil { + if errors.Is(err, ErrKeyDeleted) { + return nil, ErrKeyNotFound + } + return nil, err + } + + return e, nil +} + +// Put will place the new value for the key into the store. +func (kv *kvs) Put(ctx context.Context, key string, value []byte) (uint64, error) { + if !keyValid(key) { + return 0, ErrInvalidKey + } + + var b strings.Builder + if kv.useJSPfx { + b.WriteString(kv.js.opts.apiPrefix) + } + if kv.putPre != "" { + b.WriteString(kv.putPre) + } else { + b.WriteString(kv.pre) + } + b.WriteString(key) + + pa, err := kv.js.Publish(ctx, b.String(), value) + if err != nil { + return 0, err + } + return pa.Sequence, err +} + +// PutString will place the string for the key into the store. +func (kv *kvs) PutString(ctx context.Context, key string, value string) (uint64, error) { + return kv.Put(ctx, key, []byte(value)) +} + +// Create will add the key/value pair if it does not exist. +func (kv *kvs) Create(ctx context.Context, key string, value []byte, opts ...KVCreateOpt) (revision uint64, err error) { + var o createOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configureCreate(&o); err != nil { + return 0, err + } + } + } + + v, err := kv.updateRevision(ctx, key, value, 0, o.ttl) + if err == nil { + return v, nil + } + + if e, err := kv.get(ctx, key, kvLatestRevision); errors.Is(err, ErrKeyDeleted) { + return kv.updateRevision(ctx, key, value, e.Revision(), o.ttl) + } + + // Check if the expected last subject sequence is not zero which implies + // the key already exists. + if errors.Is(err, ErrKeyExists) { + jserr := ErrKeyExists.(*jsError) + return 0, fmt.Errorf("%w: %s", err, jserr.message) + } + + return 0, err +} + +// Update will update the value if the latest revision matches. +func (kv *kvs) Update(ctx context.Context, key string, value []byte, revision uint64) (uint64, error) { + return kv.updateRevision(ctx, key, value, revision, 0) +} + +func (kv *kvs) updateRevision(ctx context.Context, key string, value []byte, revision uint64, ttl time.Duration) (uint64, error) { + if !keyValid(key) { + return 0, ErrInvalidKey + } + + var b strings.Builder + if kv.useJSPfx { + b.WriteString(kv.js.opts.apiPrefix) + } + b.WriteString(kv.pre) + b.WriteString(key) + + m := nats.Msg{Subject: b.String(), Header: nats.Header{}, Data: value} + opts := []PublishOpt{ + WithExpectLastSequencePerSubject(revision), + } + if ttl > 0 { + opts = append(opts, WithMsgTTL(ttl)) + } + + pa, err := kv.js.PublishMsg(ctx, &m, opts...) + if err != nil { + return 0, err + } + return pa.Sequence, err +} + +// Delete will place a delete marker and leave all revisions. +func (kv *kvs) Delete(ctx context.Context, key string, opts ...KVDeleteOpt) error { + if !keyValid(key) { + return ErrInvalidKey + } + + var b strings.Builder + if kv.useJSPfx { + b.WriteString(kv.js.opts.apiPrefix) + } + if kv.putPre != "" { + b.WriteString(kv.putPre) + } else { + b.WriteString(kv.pre) + } + b.WriteString(key) + + // DEL op marker. For watch functionality. 
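+ // A delete is modeled as publishing a new (empty) message whose headers + // mark the operation, so watchers observe a tombstone for the key rather + // than the key silently disappearing.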
+ m := nats.NewMsg(b.String()) + + var o deleteOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configureDelete(&o); err != nil { + return err + } + } + } + + if o.purge { + m.Header.Set(kvop, kvpurge) + m.Header.Set(MsgRollup, MsgRollupSubject) + } else { + m.Header.Set(kvop, kvdel) + } + pubOpts := make([]PublishOpt, 0) + if o.ttl > 0 && o.purge { + pubOpts = append(pubOpts, WithMsgTTL(o.ttl)) + } else if o.ttl > 0 { + return ErrTTLOnDeleteNotSupported + } + + if o.revision != 0 { + m.Header.Set(ExpectedLastSubjSeqHeader, strconv.FormatUint(o.revision, 10)) + } + + _, err := kv.js.PublishMsg(ctx, m, pubOpts...) + return err +} + +// Purge will place a delete marker and remove all previous revisions. +func (kv *kvs) Purge(ctx context.Context, key string, opts ...KVDeleteOpt) error { + return kv.Delete(ctx, key, append(opts, purge())...) +} + +// purge removes all previous revisions. +func purge() KVDeleteOpt { + return deleteOptFn(func(opts *deleteOpts) error { + opts.purge = true + return nil + }) +} + +// Implementation for Watch +type watcher struct { + mu sync.Mutex + updates chan KeyValueEntry + sub *nats.Subscription + initDone bool + initPending uint64 + received uint64 +} + +// Updates returns the interior channel. +func (w *watcher) Updates() <-chan KeyValueEntry { + if w == nil { + return nil + } + return w.updates +} + +// Stop will unsubscribe from the watcher. +func (w *watcher) Stop() error { + if w == nil { + return nil + } + return w.sub.Unsubscribe() +} + +func (kv *kvs) WatchFiltered(ctx context.Context, keys []string, opts ...WatchOpt) (KeyWatcher, error) { + for _, key := range keys { + if !searchKeyValid(key) { + return nil, fmt.Errorf("%w: %s", ErrInvalidKey, "key cannot be empty and must be a valid NATS subject") + } + } + var o watchOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configureWatcher(&o); err != nil { + return nil, err + } + } + } + + // Could be a pattern so don't check for validity as we normally do. + for i, key := range keys { + var b strings.Builder + b.WriteString(kv.pre) + b.WriteString(key) + keys[i] = b.String() + } + + // if no keys are provided, watch all keys + if len(keys) == 0 { + var b strings.Builder + b.WriteString(kv.pre) + b.WriteString(AllKeys) + keys = []string{b.String()} + } + + // We will block below on placing items on the chan. That is by design. + w := &watcher{updates: make(chan KeyValueEntry, 256)} + + update := func(m *nats.Msg) { + tokens, err := parser.GetMetadataFields(m.Reply) + if err != nil { + return + } + if len(m.Subject) <= len(kv.pre) { + return + } + subj := m.Subject[len(kv.pre):] + + var op KeyValueOp + if len(m.Header) > 0 { + if m.Header.Get(kvop) != "" { + switch m.Header.Get(kvop) { + case kvdel: + op = KeyValueDelete + case kvpurge: + op = KeyValuePurge + } + } else if m.Header.Get(MarkerReasonHeader) != "" { + switch m.Header.Get(MarkerReasonHeader) { + case "MaxAge", "Purge": + op = KeyValuePurge + case "Remove": + op = KeyValueDelete + } + } + } + delta := parser.ParseNum(tokens[parser.AckNumPendingTokenPos]) + w.mu.Lock() + defer w.mu.Unlock() + if !o.ignoreDeletes || (op != KeyValueDelete && op != KeyValuePurge) { + entry := &kve{ + bucket: kv.name, + key: subj, + value: m.Data, + revision: parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]), + created: time.Unix(0, int64(parser.ParseNum(tokens[parser.AckTimestampSeqTokenPos]))), + delta: delta, + op: op, + } + w.updates <- entry + } + // Check if done and initial values. 
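+ // The watcher counts deliveries against the number of messages that were + // pending when the consumer was created; once that many initial values + // have been sent (or nothing was pending), a nil entry is emitted to + // signal that all initial values have been received.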
+ if !w.initDone { + w.received++ + // We set this on the first trip through. + if w.initPending == 0 { + w.initPending = delta + } + if w.received > w.initPending || delta == 0 { + w.initDone = true + w.updates <- nil + } + } + } + + // Use an ordered consumer to deliver results. + subOpts := []nats.SubOpt{nats.BindStream(kv.streamName), nats.OrderedConsumer()} + if !o.includeHistory { + subOpts = append(subOpts, nats.DeliverLastPerSubject()) + } + if o.updatesOnly { + subOpts = append(subOpts, nats.DeliverNew()) + } + if o.metaOnly { + subOpts = append(subOpts, nats.HeadersOnly()) + } + if o.resumeFromRevision > 0 { + subOpts = append(subOpts, nats.StartSequence(o.resumeFromRevision)) + } + subOpts = append(subOpts, nats.Context(ctx)) + // Create the sub and rest of initialization under the lock. + // We want to prevent the race between this code and the + // update() callback. + w.mu.Lock() + defer w.mu.Unlock() + var sub *nats.Subscription + var err error + if len(keys) == 1 { + sub, err = kv.pushJS.Subscribe(keys[0], update, subOpts...) + } else { + subOpts = append(subOpts, nats.ConsumerFilterSubjects(keys...)) + sub, err = kv.pushJS.Subscribe("", update, subOpts...) + } + if err != nil { + return nil, err + } + sub.SetClosedHandler(func(_ string) { + close(w.updates) + }) + // If there were no pending messages at the time of the creation + // of the consumer, send the marker. + // Skip if UpdatesOnly() is set, since there will never be updates initially. + if !o.updatesOnly { + initialPending, err := sub.InitialConsumerPending() + if err == nil && initialPending == 0 { + w.initDone = true + w.updates <- nil + } + } else { + // if UpdatesOnly was used, mark initialization as complete + w.initDone = true + } + w.sub = sub + return w, nil +} + +// Watch for any updates to keys that match the keys argument which could include wildcards. +// Watch will send a nil entry when it has received all initial values. +func (kv *kvs) Watch(ctx context.Context, keys string, opts ...WatchOpt) (KeyWatcher, error) { + return kv.WatchFiltered(ctx, []string{keys}, opts...) +} + +// WatchAll will watch for any updates to all keys. +func (kv *kvs) WatchAll(ctx context.Context, opts ...WatchOpt) (KeyWatcher, error) { + return kv.Watch(ctx, AllKeys, opts...) +} + +// Keys will return all keys. +func (kv *kvs) Keys(ctx context.Context, opts ...WatchOpt) ([]string, error) { + opts = append(opts, IgnoreDeletes(), MetaOnly()) + watcher, err := kv.WatchAll(ctx, opts...) + if err != nil { + return nil, err + } + defer watcher.Stop() + + var keys []string + for entry := range watcher.Updates() { + if entry == nil { + break + } + keys = append(keys, entry.Key()) + } + if len(keys) == 0 { + return nil, ErrNoKeysFound + } + return keys, nil +} + +type keyLister struct { + watcher KeyWatcher + keys chan string +} + +// ListKeys will return a KeyLister over all keys. +func (kv *kvs) ListKeys(ctx context.Context, opts ...WatchOpt) (KeyLister, error) { + opts = append(opts, IgnoreDeletes(), MetaOnly()) + watcher, err := kv.WatchAll(ctx, opts...) + if err != nil { + return nil, err + } + kl := &keyLister{watcher: watcher, keys: make(chan string, 256)} + + go func() { + defer close(kl.keys) + defer watcher.Stop() + for { + select { + case entry := <-watcher.Updates(): + if entry == nil { + return + } + kl.keys <- entry.Key() + case <-ctx.Done(): + return + } + } + }() + return kl, nil +} + +// ListKeysFiltered returns a KeyLister of keys matching the provided filters, using WatchFiltered.
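+// +// For example (a sketch; it assumes a bound KeyValue handle kv, a context ctx, +// and a hypothetical subject filter): +// +// kl, err := kv.ListKeysFiltered(ctx, "config.>") +// if err == nil { +// for key := range kl.Keys() { +// fmt.Println(key) +// } +// }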
+func (kv *kvs) ListKeysFiltered(ctx context.Context, filters ...string) (KeyLister, error) { + watcher, err := kv.WatchFiltered(ctx, filters, IgnoreDeletes(), MetaOnly()) + if err != nil { + return nil, err + } + + // Reuse the existing keyLister implementation + kl := &keyLister{watcher: watcher, keys: make(chan string, 256)} + + go func() { + defer close(kl.keys) + defer watcher.Stop() + + for { + select { + case entry := <-watcher.Updates(): + if entry == nil { // Indicates all initial values are received + return + } + kl.keys <- entry.Key() + case <-ctx.Done(): + return + } + } + }() + + return kl, nil +} + +func (kl *keyLister) Keys() <-chan string { + return kl.keys +} + +func (kl *keyLister) Stop() error { + return kl.watcher.Stop() +} + +// History will return all historical values for the key. +func (kv *kvs) History(ctx context.Context, key string, opts ...WatchOpt) ([]KeyValueEntry, error) { + opts = append(opts, IncludeHistory()) + watcher, err := kv.Watch(ctx, key, opts...) + if err != nil { + return nil, err + } + defer watcher.Stop() + + var entries []KeyValueEntry + for entry := range watcher.Updates() { + if entry == nil { + break + } + entries = append(entries, entry) + } + if len(entries) == 0 { + return nil, ErrKeyNotFound + } + return entries, nil +} + +// Bucket returns the current bucket name. +func (kv *kvs) Bucket() string { + return kv.name +} + +const kvDefaultPurgeDeletesMarkerThreshold = 30 * time.Minute + +// PurgeDeletes will remove all current delete markers. +func (kv *kvs) PurgeDeletes(ctx context.Context, opts ...KVPurgeOpt) error { + var o purgeOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configurePurge(&o); err != nil { + return err + } + } + } + watcher, err := kv.WatchAll(ctx) + if err != nil { + return err + } + defer watcher.Stop() + + var limit time.Time + olderThan := o.dmthr + // A negative value instructs always removing markers, regardless of age. + // If set to 0 (or not set), use our default value. + if olderThan == 0 { + olderThan = kvDefaultPurgeDeletesMarkerThreshold + } + if olderThan > 0 { + limit = time.Now().Add(-olderThan) + } + + var deleteMarkers []KeyValueEntry + for entry := range watcher.Updates() { + if entry == nil { + break + } + if op := entry.Operation(); op == KeyValueDelete || op == KeyValuePurge { + deleteMarkers = append(deleteMarkers, entry) + } + } + // Stop the watcher here so that the system does not continually update + // numPending while we purge. + watcher.Stop() + + var b strings.Builder + // Do actual purges here.
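+ // Each marker's subject is rebuilt from the bucket prefix and the key; + // markers newer than the age limit keep their latest revision + // (WithPurgeKeep(1)), while older markers are removed entirely.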
+ for _, entry := range deleteMarkers { + b.WriteString(kv.pre) + b.WriteString(entry.Key()) + purgeOpts := []StreamPurgeOpt{WithPurgeSubject(b.String())} + if olderThan > 0 && entry.Created().After(limit) { + purgeOpts = append(purgeOpts, WithPurgeKeep(1)) + } + if err := kv.stream.Purge(ctx, purgeOpts...); err != nil { + return err + } + b.Reset() + } + return nil +} + +// Status retrieves the status and configuration of a bucket +func (kv *kvs) Status(ctx context.Context) (KeyValueStatus, error) { + nfo, err := kv.stream.Info(ctx) + if err != nil { + return nil, err + } + + return &KeyValueBucketStatus{info: nfo, bucket: kv.name}, nil +} + +func mapStreamToKVS(js *jetStream, pushJS nats.JetStreamContext, stream Stream) *kvs { + info := stream.CachedInfo() + bucket := strings.TrimPrefix(info.Config.Name, kvBucketNamePre) + kv := &kvs{ + name: bucket, + streamName: info.Config.Name, + pre: fmt.Sprintf(kvSubjectsPreTmpl, bucket), + js: js, + pushJS: pushJS, + stream: stream, + // Determine if we need to use the JS prefix in front of Put and Delete operations + useJSPfx: js.opts.apiPrefix != DefaultAPIPrefix, + useDirect: info.Config.AllowDirect, + } + + // If we are mirroring, we will have mirror direct on, so just use the mirror name + // and override the subject prefixes used for Put and Delete operations + if m := info.Config.Mirror; m != nil { + bucket := strings.TrimPrefix(m.Name, kvBucketNamePre) + if m.External != nil && m.External.APIPrefix != "" { + kv.useJSPfx = false + kv.pre = fmt.Sprintf(kvSubjectsPreTmpl, bucket) + kv.putPre = fmt.Sprintf(kvSubjectsPreDomainTmpl, m.External.APIPrefix, bucket) + } else { + kv.putPre = fmt.Sprintf(kvSubjectsPreTmpl, bucket) + } + } + + return kv +} diff --git a/vendor/github.com/nats-io/nats.go/jetstream/kv_options.go b/vendor/github.com/nats-io/nats.go/jetstream/kv_options.go new file mode 100644 index 000000000..8699c8802 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/jetstream/kv_options.go @@ -0,0 +1,131 @@ +// Copyright 2024 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jetstream + +import ( + "fmt" + "time" +) + +type watchOptFn func(opts *watchOpts) error + +func (opt watchOptFn) configureWatcher(opts *watchOpts) error { + return opt(opts) +} + +// IncludeHistory instructs the key watcher to include historical values as +// well (up to KeyValueMaxHistory). +func IncludeHistory() WatchOpt { + return watchOptFn(func(opts *watchOpts) error { + if opts.updatesOnly { + return fmt.Errorf("%w: include history can not be used with updates only", ErrInvalidOption) + } + opts.includeHistory = true + return nil + }) +} + +// UpdatesOnly instructs the key watcher to only include updates on values +// (without latest values when started). +func UpdatesOnly() WatchOpt { + return watchOptFn(func(opts *watchOpts) error { + if opts.includeHistory { + return fmt.Errorf("%w: updates only can not be used with include history", ErrInvalidOption) + } + opts.updatesOnly = true + return nil + }) +} + +// IgnoreDeletes will have the key watcher not pass any deleted keys.
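+// +// A typical watch loop sketch (illustrative only; it assumes a bound KeyValue +// handle kv and a context ctx): +// +// w, _ := kv.WatchAll(ctx, IgnoreDeletes()) +// defer w.Stop() +// for entry := range w.Updates() { +// if entry == nil { +// break // all initial values have been received +// } +// fmt.Println(entry.Key(), string(entry.Value())) +// }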
+func IgnoreDeletes() WatchOpt { + return watchOptFn(func(opts *watchOpts) error { + opts.ignoreDeletes = true + return nil + }) +} + +// MetaOnly instructs the key watcher to retrieve only the entry meta data, not +// the entry value. +func MetaOnly() WatchOpt { + return watchOptFn(func(opts *watchOpts) error { + opts.metaOnly = true + return nil + }) +} + +// ResumeFromRevision instructs the key watcher to resume from a specific +// revision number. +func ResumeFromRevision(revision uint64) WatchOpt { + return watchOptFn(func(opts *watchOpts) error { + opts.resumeFromRevision = revision + return nil + }) +} + +// DeleteMarkersOlderThan indicates that delete or purge markers older than the +// given duration will be deleted as part of the [KeyValue.PurgeDeletes] +// operation; for more recent markers only the data will be removed while the +// markers themselves are kept. +// Note that if no option is specified, the default is 30 minutes. You can set +// this option to a negative value to instruct to always remove the markers, +// regardless of their age. +type DeleteMarkersOlderThan time.Duration + +func (ttl DeleteMarkersOlderThan) configurePurge(opts *purgeOpts) error { + opts.dmthr = time.Duration(ttl) + return nil +} + +type deleteOptFn func(opts *deleteOpts) error + +func (opt deleteOptFn) configureDelete(opts *deleteOpts) error { + return opt(opts) +} + +// LastRevision deletes if the latest revision matches the provided one. If the +// provided revision is not the latest, the delete will return an error. +func LastRevision(revision uint64) KVDeleteOpt { + return deleteOptFn(func(opts *deleteOpts) error { + opts.revision = revision + return nil + }) +} + +// PurgeTTL sets the TTL for the purge operation. +// After the TTL expires, the delete markers will be removed. +// This requires LimitMarkerTTL to be enabled on the bucket. +// Note that this is not the same as the TTL for the key itself, which is set +// using the KeyTTL option when creating the key. +func PurgeTTL(ttl time.Duration) KVDeleteOpt { + return deleteOptFn(func(opts *deleteOpts) error { + opts.ttl = ttl + return nil + }) +} + +type createOptFn func(opts *createOpts) error + +func (opt createOptFn) configureCreate(opts *createOpts) error { + return opt(opts) +} + +// KeyTTL sets the TTL for the key. This is the time after which the key will be +// automatically deleted. The TTL is set when the key is created and cannot be +// changed later. This requires LimitMarkerTTL to be enabled on the bucket. +func KeyTTL(ttl time.Duration) KVCreateOpt { + return createOptFn(func(opts *createOpts) error { + opts.ttl = ttl + return nil + }) +} diff --git a/vendor/github.com/nats-io/nats.go/jetstream/message.go b/vendor/github.com/nats-io/nats.go/jetstream/message.go new file mode 100644 index 000000000..c21091b59 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/jetstream/message.go @@ -0,0 +1,471 @@ +// Copyright 2022-2025 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package jetstream + +import ( + "bytes" + "context" + "errors" + "fmt" + "strconv" + "strings" + "sync" + "time" + + "github.com/nats-io/nats.go" + "github.com/nats-io/nats.go/internal/parser" +) + +type ( + // Msg contains methods to operate on a JetStream message. Metadata, Data, + // Headers, Subject and Reply can be used to retrieve the specific parts of + // the underlying message. Ack, DoubleAck, Nak, NakWithDelay, InProgress and + // Term are various flavors of ack requests. + Msg interface { + // Metadata returns [MsgMetadata] for a JetStream message. + Metadata() (*MsgMetadata, error) + + // Data returns the message body. + Data() []byte + + // Headers returns a map of headers for a message. + Headers() nats.Header + + // Subject returns a subject on which a message was published/received. + Subject() string + + // Reply returns a reply subject for a message. + Reply() string + + // Ack acknowledges a message. This tells the server that the message was + // successfully processed and it can move on to the next message. + Ack() error + + // DoubleAck acknowledges a message and waits for ack reply from the server. + // While it impacts performance, it is useful for scenarios where + // message loss is not acceptable. + DoubleAck(context.Context) error + + // Nak negatively acknowledges a message. This tells the server to + // redeliver the message. + // + // Nak does not adhere to AckWait or Backoff configured on the consumer + // and triggers instant redelivery. For a delayed redelivery, use + // NakWithDelay. + Nak() error + + // NakWithDelay negatively acknowledges a message. This tells the server + // to redeliver the message after the given delay. + NakWithDelay(delay time.Duration) error + + // InProgress tells the server that this message is being worked on. It + // resets the redelivery timer on the server. + InProgress() error + + // Term tells the server to not redeliver this message, regardless of + // the value of MaxDeliver. + Term() error + + // TermWithReason tells the server to not redeliver this message, regardless of + // the value of MaxDeliver. The provided reason will be included in JetStream + // advisory event sent by the server. + // + // Note: This will only work with JetStream servers >= 2.10.4. + // For older servers, TermWithReason will be ignored by the server and the message + // will not be terminated. + TermWithReason(reason string) error + } + + // MsgMetadata is the JetStream metadata associated with received messages. + MsgMetadata struct { + // Sequence is the sequence information for the message. + Sequence SequencePair + + // NumDelivered is the number of times this message was delivered to the + // consumer. + NumDelivered uint64 + + // NumPending is the number of messages that match the consumer's + // filter, but have not been delivered yet. + NumPending uint64 + + // Timestamp is the time the message was originally stored on a stream. + Timestamp time.Time + + // Stream is the stream name this message is stored on. + Stream string + + // Consumer is the consumer name this message was delivered to. + Consumer string + + // Domain is the domain this message was received on. + Domain string + } + + // SequencePair includes the consumer and stream sequence numbers for a + // message. + SequencePair struct { + // Consumer is the consumer sequence number for message deliveries. This + // is the total number of messages the consumer has seen (including + // redeliveries). 
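+ // + // For example (a sketch, assuming a received Msg named msg): + // + // md, err := msg.Metadata() + // if err == nil { + // fmt.Println(md.Sequence.Stream, md.Sequence.Consumer) + // }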
+ Consumer uint64 `json:"consumer_seq"` + + // Stream is the stream sequence number for a message. + Stream uint64 `json:"stream_seq"` + } + + jetStreamMsg struct { + msg *nats.Msg + ackd bool + js *jetStream + sync.Mutex + } + + ackOpts struct { + nakDelay time.Duration + termReason string + } + + ackType []byte +) + +const ( + controlMsg = "100" + badRequest = "400" + noMessages = "404" + reqTimeout = "408" + maxBytesExceeded = "409" + noResponders = "503" + pinIdMismatch = "423" +) + +// Headers used when publishing messages. +const ( + // MsgIDHeader is used to specify a user-defined message ID. It can be used + // e.g. for deduplication in conjunction with the Duplicates duration on + // ConsumerConfig or to provide optimistic concurrency safety together with + // [ExpectedLastMsgIDHeader]. + // + // This can be set when publishing messages using [WithMsgID] option. + MsgIDHeader = "Nats-Msg-Id" + + // ExpectedStreamHeader contains stream name and is used to assure that the + // published message is received by expected stream. Server will reject the + // message if it is not the case. + // + // This can be set when publishing messages using [WithExpectStream] option. + ExpectedStreamHeader = "Nats-Expected-Stream" + + // ExpectedLastSeqHeader contains the expected last sequence number of the + // stream and can be used to apply optimistic concurrency control at stream + // level. Server will reject the message if it is not the case. + // + // This can be set when publishing messages using [WithExpectLastSequence] + // option. + ExpectedLastSeqHeader = "Nats-Expected-Last-Sequence" + + // ExpectedLastSubjSeqHeader contains the expected last sequence number on + // the subject and can be used to apply optimistic concurrency control at + // subject level. Server will reject the message if it is not the case. + // + // This can be set when publishing messages using + // [WithExpectLastSequencePerSubject] option. + ExpectedLastSubjSeqHeader = "Nats-Expected-Last-Subject-Sequence" + + // ExpectedLastMsgIDHeader contains the expected last message ID on the + // subject and can be used to apply optimistic concurrency control at + // stream level. Server will reject the message if it is not the case. + // + // This can be set when publishing messages using [WithExpectLastMsgID] + // option. + ExpectedLastMsgIDHeader = "Nats-Expected-Last-Msg-Id" + + // MsgTTLHeader is used to specify the TTL for a specific message. This will + // override the default TTL for the stream. + MsgTTLHeader = "Nats-TTL" + + // MsgRollup is used to apply a purge of all prior messages in the stream + // ("all") or at the subject ("sub") before this message. + MsgRollup = "Nats-Rollup" + + // MarkerReasonHeader is used to specify a reason for message deletion. + MarkerReasonHeader = "Nats-Marker-Reason" +) + +// Headers for republished messages and direct gets. Those headers are set by +// the server and should not be set by the client. +const ( + // StreamHeader contains the stream name the message was republished from or + // the stream name the message was retrieved from using direct get. + StreamHeader = "Nats-Stream" + + // SequenceHeader contains the original sequence number of the message. + SequenceHeader = "Nats-Sequence" + + // TimeStampHeader contains the original timestamp of the message. + TimeStampHeaer = "Nats-Time-Stamp" + + // SubjectHeader contains the original subject the message was published to.
+ SubjectHeader = "Nats-Subject" + + // LastSequenceHeader contains the last sequence of the message having the + // same subject, otherwise zero if this is the first message for the + // subject. + LastSequenceHeader = "Nats-Last-Sequence" +) + +// Rollups, can be subject only or all messages. +const ( + // MsgRollupSubject is used to purge all messages before this message on the + // message subject. + MsgRollupSubject = "sub" + + // MsgRollupAll is used to purge all messages before this message on the + // stream. + MsgRollupAll = "all" +) + +var ( + ackAck ackType = []byte("+ACK") + ackNak ackType = []byte("-NAK") + ackProgress ackType = []byte("+WPI") + ackTerm ackType = []byte("+TERM") +) + +// Metadata returns [MsgMetadata] for a JetStream message. +func (m *jetStreamMsg) Metadata() (*MsgMetadata, error) { + if err := m.checkReply(); err != nil { + return nil, err + } + + tokens, err := parser.GetMetadataFields(m.msg.Reply) + if err != nil { + return nil, fmt.Errorf("%w: %s", ErrNotJSMessage, err) + } + + meta := &MsgMetadata{ + Domain: tokens[parser.AckDomainTokenPos], + NumDelivered: parser.ParseNum(tokens[parser.AckNumDeliveredTokenPos]), + NumPending: parser.ParseNum(tokens[parser.AckNumPendingTokenPos]), + Timestamp: time.Unix(0, int64(parser.ParseNum(tokens[parser.AckTimestampSeqTokenPos]))), + Stream: tokens[parser.AckStreamTokenPos], + Consumer: tokens[parser.AckConsumerTokenPos], + } + meta.Sequence.Stream = parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]) + meta.Sequence.Consumer = parser.ParseNum(tokens[parser.AckConsumerSeqTokenPos]) + return meta, nil +} + +// Data returns the message body. +func (m *jetStreamMsg) Data() []byte { + return m.msg.Data +} + +// Headers returns a map of headers for a message. +func (m *jetStreamMsg) Headers() nats.Header { + return m.msg.Header +} + +// Subject returns a subject on which a message is published. +func (m *jetStreamMsg) Subject() string { + return m.msg.Subject +} + +// Reply returns a reply subject for a JetStream message. +func (m *jetStreamMsg) Reply() string { + return m.msg.Reply +} + +// Ack acknowledges a message. This tells the server that the message was +// successfully processed and it can move on to the next message. +func (m *jetStreamMsg) Ack() error { + return m.ackReply(context.Background(), ackAck, false, ackOpts{}) +} + +// DoubleAck acknowledges a message and waits for ack reply from the server. +// While it impacts performance, it is useful for scenarios where +// message loss is not acceptable. +func (m *jetStreamMsg) DoubleAck(ctx context.Context) error { + return m.ackReply(ctx, ackAck, true, ackOpts{}) +} + +// Nak negatively acknowledges a message. This tells the server to +// redeliver the message. +func (m *jetStreamMsg) Nak() error { + return m.ackReply(context.Background(), ackNak, false, ackOpts{}) +} + +// NakWithDelay negatively acknowledges a message. This tells the server +// to redeliver the message after the given delay. +func (m *jetStreamMsg) NakWithDelay(delay time.Duration) error { + return m.ackReply(context.Background(), ackNak, false, ackOpts{nakDelay: delay}) +} + +// InProgress tells the server that this message is being worked on. It +// resets the redelivery timer on the server. +func (m *jetStreamMsg) InProgress() error { + return m.ackReply(context.Background(), ackProgress, false, ackOpts{}) +} + +// Term tells the server to not redeliver this message, regardless of +// the value of MaxDeliver. 
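+ // + // A sketch of choosing an ack flavor (handleable is a hypothetical helper; + // msg is a received Msg): + // + // if handleable(msg) { + // _ = msg.Ack() + // } else { + // _ = msg.Term() // poison message: do not redeliver + // }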
+func (m *jetStreamMsg) Term() error { + return m.ackReply(context.Background(), ackTerm, false, ackOpts{}) +} + +// TermWithReason tells the server to not redeliver this message, regardless of +// the value of MaxDeliver. The provided reason will be included in JetStream +// advisory event sent by the server. +// +// Note: This will only work with JetStream servers >= 2.10.4. +// For older servers, TermWithReason will be ignored by the server and the message +// will not be terminated. +func (m *jetStreamMsg) TermWithReason(reason string) error { + return m.ackReply(context.Background(), ackTerm, false, ackOpts{termReason: reason}) +} + +func (m *jetStreamMsg) ackReply(ctx context.Context, ackType ackType, sync bool, opts ackOpts) error { + err := m.checkReply() + if err != nil { + return err + } + + m.Lock() + if m.ackd { + m.Unlock() + return ErrMsgAlreadyAckd + } + m.Unlock() + + if sync { + var cancel context.CancelFunc + ctx, cancel = m.js.wrapContextWithoutDeadline(ctx) + if cancel != nil { + defer cancel() + } + } + + var body []byte + if opts.nakDelay > 0 { + body = []byte(fmt.Sprintf("%s {\"delay\": %d}", ackType, opts.nakDelay.Nanoseconds())) + } else if opts.termReason != "" { + body = []byte(fmt.Sprintf("%s %s", ackType, opts.termReason)) + } else { + body = ackType + } + + if sync { + _, err = m.js.conn.RequestWithContext(ctx, m.msg.Reply, body) + } else { + err = m.js.conn.Publish(m.msg.Reply, body) + } + if err != nil { + return err + } + + // Mark that the message has been acked unless it is ackProgress + // which can be sent many times. + if !bytes.Equal(ackType, ackProgress) { + m.Lock() + m.ackd = true + m.Unlock() + } + return nil +} + +func (m *jetStreamMsg) checkReply() error { + if m == nil || m.msg.Sub == nil { + return ErrMsgNotBound + } + if m.msg.Reply == "" { + return ErrMsgNoReply + } + return nil +} + +// checkMsg reports whether the given message is a user message. For status +// messages, it returns an appropriate error based on the content of the +// status header (404, etc.). +func checkMsg(msg *nats.Msg) (bool, error) { + // If there is a payload or no header, consider this a user message + if len(msg.Data) > 0 || len(msg.Header) == 0 { + return true, nil + } + // Look for status header + val := msg.Header.Get("Status") + descr := msg.Header.Get("Description") + // If not present, then this is considered a user message + if val == "" { + return true, nil + } + + switch val { + case badRequest: + return false, ErrBadRequest + case noResponders: + return false, nats.ErrNoResponders + case noMessages: + // 404 indicates that there are no messages.
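+ // It is mapped to ErrNoMessages so that callers can treat an empty + // request as a normal end-of-data condition rather than a failure.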
+ return false, ErrNoMessages + case reqTimeout: + return false, nats.ErrTimeout + case controlMsg: + return false, nil + case pinIdMismatch: + return false, ErrPinIDMismatch + case maxBytesExceeded: + if strings.Contains(strings.ToLower(descr), "message size exceeds maxbytes") { + return false, ErrMaxBytesExceeded + } + if strings.Contains(strings.ToLower(descr), "batch completed") { + return false, ErrBatchCompleted + } + if strings.Contains(strings.ToLower(descr), "consumer deleted") { + return false, ErrConsumerDeleted + } + if strings.Contains(strings.ToLower(descr), "leadership change") { + return false, ErrConsumerLeadershipChanged + } + } + return false, fmt.Errorf("nats: %s", msg.Header.Get("Description")) +} + +func parsePending(msg *nats.Msg) (int, int, error) { + msgsLeftStr := msg.Header.Get("Nats-Pending-Messages") + var msgsLeft int + var err error + if msgsLeftStr != "" { + msgsLeft, err = strconv.Atoi(msgsLeftStr) + if err != nil { + return 0, 0, errors.New("nats: invalid format of Nats-Pending-Messages") + } + } + bytesLeftStr := msg.Header.Get("Nats-Pending-Bytes") + var bytesLeft int + if bytesLeftStr != "" { + bytesLeft, err = strconv.Atoi(bytesLeftStr) + if err != nil { + return 0, 0, errors.New("nats: invalid format of Nats-Pending-Bytes") + } + } + return msgsLeft, bytesLeft, nil +} + +// toJSMsg converts core [nats.Msg] to [jetStreamMsg], exposing JetStream-specific operations +func (js *jetStream) toJSMsg(msg *nats.Msg) *jetStreamMsg { + return &jetStreamMsg{ + msg: msg, + js: js, + } +} diff --git a/vendor/github.com/nats-io/nats.go/jetstream/object.go b/vendor/github.com/nats-io/nats.go/jetstream/object.go new file mode 100644 index 000000000..3ccd39797 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/jetstream/object.go @@ -0,0 +1,1625 @@ +// Copyright 2023-2025 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jetstream + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "hash" + "io" + "net" + "os" + "strings" + "sync" + "time" + + "github.com/nats-io/nats.go" + "github.com/nats-io/nats.go/internal/parser" + "github.com/nats-io/nuid" +) + +type ( + // ObjectStoreManager is used to manage object stores. It provides methods + // for CRUD operations on object stores. + ObjectStoreManager interface { + // ObjectStore will look up and bind to an existing object store + // instance. + // + // If the object store with given name does not exist, ErrBucketNotFound + // will be returned. + ObjectStore(ctx context.Context, bucket string) (ObjectStore, error) + + // CreateObjectStore will create a new object store with the given + // configuration. + // + // If the object store with given name already exists, ErrBucketExists + // will be returned. + CreateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error) + + // UpdateObjectStore will update an existing object store with the given + // configuration.
+ // + // If the object store with given name does not exist, ErrBucketNotFound + // will be returned. + UpdateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error) + + // CreateOrUpdateObjectStore will create a new object store with the given + // configuration if it does not exist, or update an existing object store + // with the given configuration. + CreateOrUpdateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error) + + // DeleteObjectStore will delete the provided object store. + // + // If the object store with given name does not exist, ErrBucketNotFound + // will be returned. + DeleteObjectStore(ctx context.Context, bucket string) error + + // ObjectStoreNames is used to retrieve a list of bucket names. + // It returns an ObjectStoreNamesLister exposing a channel to receive + // the names of the object stores. + // + // The lister will always close the channel when done (either all names + // have been read or an error occurred) and therefore can be used in a + // for-range loop. + ObjectStoreNames(ctx context.Context) ObjectStoreNamesLister + + // ObjectStores is used to retrieve a list of bucket statuses. + // It returns an ObjectStoresLister exposing a channel to receive + // the statuses of the object stores. + // + // The lister will always close the channel when done (either all statuses + // have been read or an error occurred) and therefore can be used in a + // for-range loop. + ObjectStores(ctx context.Context) ObjectStoresLister + } + + // ObjectStore contains methods to operate on an object store. + // Using the ObjectStore interface, it is possible to: + // + // - Perform CRUD operations on objects (Get, Put, Delete). + // Get and put expose convenience methods to work with + // byte slices, strings and files, in addition to streaming [io.Reader] + // - Get information about an object without retrieving it. + // - Update the metadata of an object. + // - Add links to other objects or object stores. + // - Watch for updates to a store + // - List information about objects in a store + // - Retrieve status and configuration of an object store. + ObjectStore interface { + // Put will place the contents from the reader into a new object. If the + // object already exists, it will be overwritten. The object name is + // required and is taken from the ObjectMeta.Name field. + // + // The reader will be read until EOF. ObjectInfo will be returned, containing + // the object's metadata, digest and instance information. + Put(ctx context.Context, obj ObjectMeta, reader io.Reader) (*ObjectInfo, error) + + // PutBytes is convenience function to put a byte slice into this object + // store under the given name. + // + // ObjectInfo will be returned, containing the object's metadata, digest + // and instance information. + PutBytes(ctx context.Context, name string, data []byte) (*ObjectInfo, error) + + // PutString is convenience function to put a string into this object + // store under the given name. + // + // ObjectInfo will be returned, containing the object's metadata, digest + // and instance information. + PutString(ctx context.Context, name string, data string) (*ObjectInfo, error) + + // PutFile is convenience function to put a file contents into this + // object store. The name of the object will be the path of the file. + // + // ObjectInfo will be returned, containing the object's metadata, digest + // and instance information. 
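+		//
+		// A hedged usage sketch (store, ctx and the file path are
+		// illustrative, not part of this API):
+		//
+		//	info, err := store.PutFile(ctx, "reports/2024.csv")
+		//	if err != nil {
+		//		// handle error
+		//	}
+		//	fmt.Println(info.Digest)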
+ PutFile(ctx context.Context, file string) (*ObjectInfo, error) + + // Get will pull the named object from the object store. If the object + // does not exist, ErrObjectNotFound will be returned. + // + // The returned ObjectResult will contain the object's metadata and a + // reader to read the object's contents. The reader will be closed when + // all data has been read or an error occurs. + // + // A GetObjectShowDeleted option can be supplied to return an object + // even if it was marked as deleted. + Get(ctx context.Context, name string, opts ...GetObjectOpt) (ObjectResult, error) + + // GetBytes is a convenience function to pull an object from this object + // store and return it as a byte slice. + // + // If the object does not exist, ErrObjectNotFound will be returned. + // + // A GetObjectShowDeleted option can be supplied to return an object + // even if it was marked as deleted. + GetBytes(ctx context.Context, name string, opts ...GetObjectOpt) ([]byte, error) + + // GetString is a convenience function to pull an object from this + // object store and return it as a string. + // + // If the object does not exist, ErrObjectNotFound will be returned. + // + // A GetObjectShowDeleted option can be supplied to return an object + // even if it was marked as deleted. + GetString(ctx context.Context, name string, opts ...GetObjectOpt) (string, error) + + // GetFile is a convenience function to pull an object from this object + // store and place it in a file. If the file already exists, it will be + // overwritten, otherwise it will be created. + // + // If the object does not exist, ErrObjectNotFound will be returned. + // A GetObjectShowDeleted option can be supplied to return an object + // even if it was marked as deleted. + GetFile(ctx context.Context, name, file string, opts ...GetObjectOpt) error + + // GetInfo will retrieve the current information for the object, containing + // the object's metadata and instance information. + // + // If the object does not exist, ErrObjectNotFound will be returned. + // + // A GetObjectInfoShowDeleted option can be supplied to return an object + // even if it was marked as deleted. + GetInfo(ctx context.Context, name string, opts ...GetObjectInfoOpt) (*ObjectInfo, error) + + // UpdateMeta will update the metadata for the object. + // + // If the object does not exist, ErrUpdateMetaDeleted will be returned. + // If the new name is different from the old name, and an object with the + // new name already exists, ErrObjectAlreadyExists will be returned. + UpdateMeta(ctx context.Context, name string, meta ObjectMeta) error + + // Delete will delete the named object from the object store. If the object + // does not exist, ErrObjectNotFound will be returned. If the object is + // already deleted, no error will be returned. + // + // All chunks for the object will be purged, and the object will be marked + // as deleted. + Delete(ctx context.Context, name string) error + + // AddLink will add a link to another object. A link is a reference to + // another object. The provided name is the name of the link object. + // The provided ObjectInfo is the info of the object being linked to. + // + // If an object with given name already exists, ErrObjectAlreadyExists + // will be returned. + // If object being linked to is deleted, ErrNoLinkToDeleted will be + // returned. + // If the provided object is a link, ErrNoLinkToLink will be returned. + // If the provided object is nil or the name is empty, ErrObjectRequired + // will be returned. 
+ AddLink(ctx context.Context, name string, obj *ObjectInfo) (*ObjectInfo, error) + + // AddBucketLink will add a link to another object store. A link is a + // reference to another object store. The provided name is the name of + // the link object. + // The provided ObjectStore is the object store being linked to. + // + // If an object with given name already exists, ErrObjectAlreadyExists + // will be returned. + // If the provided object store is nil ErrBucketRequired will be returned. + AddBucketLink(ctx context.Context, name string, bucket ObjectStore) (*ObjectInfo, error) + + // Seal will seal the object store, no further modifications will be allowed. + Seal(ctx context.Context) error + + // Watch for any updates to objects in the store. By default, the watcher will send the latest + // info for each object and all future updates. Watch will send a nil + // entry when it has received all initial values. There are a few ways + // to configure the watcher: + // + // - IncludeHistory will have the watcher send all historical information + // for each object. + // - IgnoreDeletes will have the watcher not pass any objects with + // delete markers. + // - UpdatesOnly will have the watcher only pass updates on objects + // (without latest info when started). + Watch(ctx context.Context, opts ...WatchOpt) (ObjectWatcher, error) + + // List will list information about objects in the store. + // + // If the object store is empty, ErrNoObjectsFound will be returned. + List(ctx context.Context, opts ...ListObjectsOpt) ([]*ObjectInfo, error) + + // Status retrieves the status and configuration of the bucket. + Status(ctx context.Context) (ObjectStoreStatus, error) + } + + // ObjectWatcher is what is returned when doing a watch. It can be used to + // retrieve updates to objects in a bucket. If not using UpdatesOnly option, + // it will also send the latest value for each key. After all initial values + // have been sent, a nil entry will be sent. Stop can be used to stop the + // watcher and close the underlying channel. Watcher will not close the + // channel until Stop is called or connection is closed. + ObjectWatcher interface { + Updates() <-chan *ObjectInfo + Stop() error + } + + // ObjectStoreConfig is the configuration for the object store. + ObjectStoreConfig struct { + // Bucket is the name of the object store. Bucket name has to be + // unique and can only contain alphanumeric characters, dashes, and + // underscores. + Bucket string `json:"bucket"` + + // Description is an optional description for the object store. + Description string `json:"description,omitempty"` + + // TTL is the maximum age of objects in the store. If an object is not + // updated within this time, it will be removed from the store. + // By default, objects do not expire. + TTL time.Duration `json:"max_age,omitempty"` + + // MaxBytes is the maximum size of the object store. If not specified, + // the default is -1 (unlimited). + MaxBytes int64 `json:"max_bytes,omitempty"` + + // Storage is the type of storage to use for the object store. If not + // specified, the default is FileStorage. + Storage StorageType `json:"storage,omitempty"` + + // Replicas is the number of replicas to keep for the object store in + // clustered jetstream. Defaults to 1, maximum is 5. + Replicas int `json:"num_replicas,omitempty"` + + // Placement is used to declare where the object store should be placed via + // tags and/or an explicit cluster name. 
+ Placement *Placement `json:"placement,omitempty"` + + // Compression enables the underlying stream compression. + // NOTE: Compression is supported for nats-server 2.10.0+ + Compression bool `json:"compression,omitempty"` + + // Bucket-specific metadata + // NOTE: Metadata requires nats-server v2.10.0+ + Metadata map[string]string `json:"metadata,omitempty"` + } + + // ObjectStoresLister is used to retrieve a list of object stores. It returns + // a channel to read the bucket store statuses from. The lister will always + // close the channel when done (either all stores have been retrieved or an + // error occurred) and therefore can be used in range loops. Stop can be + // used to stop the lister when not all object stores have been read. + ObjectStoresLister interface { + Status() <-chan ObjectStoreStatus + Error() error + } + + // ObjectStoreNamesLister is used to retrieve a list of object store names. + // It returns a channel to read the bucket names from. The lister will + // always close the channel when done (either all stores have been retrieved + // or an error occurred) and therefore can be used in range loops. Stop can + // be used to stop the lister when not all bucket names have been read. + ObjectStoreNamesLister interface { + Name() <-chan string + Error() error + } + + // ObjectStoreStatus is run-time status about a bucket. + ObjectStoreStatus interface { + // Bucket returns the name of the object store. + Bucket() string + + // Description is the description supplied when creating the bucket. + Description() string + + // TTL indicates how long objects are kept in the bucket. + TTL() time.Duration + + // Storage indicates the underlying JetStream storage technology used to + // store data. + Storage() StorageType + + // Replicas indicates how many storage replicas are kept for the data in + // the bucket. + Replicas() int + + // Sealed indicates the stream is sealed and cannot be modified in any + // way. + Sealed() bool + + // Size is the combined size of all data in the bucket including + // metadata, in bytes. + Size() uint64 + + // BackingStore indicates what technology is used for storage of the + // bucket. Currently only JetStream is supported. + BackingStore() string + + // Metadata is the user supplied metadata for the bucket. + Metadata() map[string]string + + // IsCompressed indicates if the data is compressed on disk. + IsCompressed() bool + } + + // ObjectMetaOptions is used to set additional options when creating an object. + ObjectMetaOptions struct { + // Link contains information about a link to another object or object store. + // It should not be set manually, but rather by using the AddLink or + // AddBucketLink methods. + Link *ObjectLink `json:"link,omitempty"` + + // ChunkSize is the maximum size of each chunk in bytes. If not specified, + // the default is 128k. + ChunkSize uint32 `json:"max_chunk_size,omitempty"` + } + + // ObjectMeta is high level information about an object. + ObjectMeta struct { + // Name is the name of the object. The name is required when adding an + // object and has to be unique within the object store. + Name string `json:"name"` + + // Description is an optional description for the object. + Description string `json:"description,omitempty"` + + // Headers is an optional set of user-defined headers for the object. + Headers nats.Header `json:"headers,omitempty"` + + // Metadata is the user supplied metadata for the object. + Metadata map[string]string `json:"metadata,omitempty"` + + // Additional options for the object. 
+ Opts *ObjectMetaOptions `json:"options,omitempty"` + } + + // ObjectInfo contains ObjectMeta and additional information about an + // object. + ObjectInfo struct { + // ObjectMeta contains high level information about the object. + ObjectMeta + + // Bucket is the name of the object store. + Bucket string `json:"bucket"` + + // NUID is the unique identifier for the object set when putting the + // object into the store. + NUID string `json:"nuid"` + + // Size is the size of the object in bytes. It only includes the size of + // the object itself, not the metadata. + Size uint64 `json:"size"` + + // ModTime is the last modification time of the object. + ModTime time.Time `json:"mtime"` + + // Chunks is the number of chunks the object is split into. Maximum size + // of each chunk can be specified in ObjectMetaOptions. + Chunks uint32 `json:"chunks"` + + // Digest is the SHA-256 digest of the object. It is used to verify the + // integrity of the object. + Digest string `json:"digest,omitempty"` + + // Deleted indicates if the object is marked as deleted. + Deleted bool `json:"deleted,omitempty"` + } + + // ObjectLink is used to embed links to other buckets and objects. + ObjectLink struct { + // Bucket is the name of the object store the link is pointing to. + Bucket string `json:"bucket"` + + // Name can be used to link to a single object. + // If empty means this is a link to the whole store, like a directory. + Name string `json:"name,omitempty"` + } + + // ObjectResult will return the object info and a reader to read the object's + // contents. The reader will be closed when all data has been read or an + // error occurs. + ObjectResult interface { + io.ReadCloser + Info() (*ObjectInfo, error) + Error() error + } + + // GetObjectOpt is used to set additional options when getting an object. + GetObjectOpt func(opts *getObjectOpts) error + + // GetObjectInfoOpt is used to set additional options when getting object info. + GetObjectInfoOpt func(opts *getObjectInfoOpts) error + + // ListObjectsOpt is used to set additional options when listing objects. + ListObjectsOpt func(opts *listObjectOpts) error + + getObjectOpts struct { + // Include deleted object in the result. + showDeleted bool + } + + getObjectInfoOpts struct { + // Include deleted object in the result. + showDeleted bool + } + + listObjectOpts struct { + // Include deleted objects in the result channel. + showDeleted bool + } + + obs struct { + name string + streamName string + stream Stream + pushJS nats.JetStreamContext + js *jetStream + } + + // ObjectResult impl. + objResult struct { + sync.Mutex + info *ObjectInfo + r io.ReadCloser + err error + ctx context.Context + digest hash.Hash + } +) + +const ( + objNameTmpl = "OBJ_%s" // OBJ_ // stream name + objAllChunksPreTmpl = "$O.%s.C.>" // $O..C.> // chunk stream subject + objAllMetaPreTmpl = "$O.%s.M.>" // $O..M.> // meta stream subject + objChunksPreTmpl = "$O.%s.C.%s" // $O..C. // chunk message subject + objMetaPreTmpl = "$O.%s.M.%s" // $O..M. 
// meta message subject + objNoPending = "0" + objDefaultChunkSize = uint32(128 * 1024) // 128k + objDigestType = "SHA-256=" + objDigestTmpl = objDigestType + "%s" +) + +func (js *jetStream) CreateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error) { + scfg, err := js.prepareObjectStoreConfig(cfg) + if err != nil { + return nil, err + } + + stream, err := js.CreateStream(ctx, scfg) + if err != nil { + if errors.Is(err, ErrStreamNameAlreadyInUse) { + // errors are joined so that backwards compatibility is retained + // and previous checks for ErrStreamNameAlreadyInUse will still work. + err = errors.Join(fmt.Errorf("%w: %s", ErrBucketExists, cfg.Bucket), err) + } + return nil, err + } + pushJS, err := js.legacyJetStream() + if err != nil { + return nil, err + } + + return mapStreamToObjectStore(js, pushJS, cfg.Bucket, stream), nil +} + +func (js *jetStream) UpdateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error) { + scfg, err := js.prepareObjectStoreConfig(cfg) + if err != nil { + return nil, err + } + + // Attempt to update the stream. + stream, err := js.UpdateStream(ctx, scfg) + if err != nil { + if errors.Is(err, ErrStreamNotFound) { + return nil, fmt.Errorf("%w: %s", ErrBucketNotFound, cfg.Bucket) + } + return nil, err + } + pushJS, err := js.legacyJetStream() + if err != nil { + return nil, err + } + + return mapStreamToObjectStore(js, pushJS, cfg.Bucket, stream), nil +} + +func (js *jetStream) CreateOrUpdateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error) { + scfg, err := js.prepareObjectStoreConfig(cfg) + if err != nil { + return nil, err + } + + stream, err := js.CreateOrUpdateStream(ctx, scfg) + if err != nil { + return nil, err + } + pushJS, err := js.legacyJetStream() + if err != nil { + return nil, err + } + + return mapStreamToObjectStore(js, pushJS, cfg.Bucket, stream), nil +} + +func (js *jetStream) prepareObjectStoreConfig(cfg ObjectStoreConfig) (StreamConfig, error) { + if !validBucketRe.MatchString(cfg.Bucket) { + return StreamConfig{}, ErrInvalidStoreName + } + + name := cfg.Bucket + chunks := fmt.Sprintf(objAllChunksPreTmpl, name) + meta := fmt.Sprintf(objAllMetaPreTmpl, name) + + // We will set explicitly some values so that we can do comparison + // if we get an "already in use" error and need to check if it is same. + // See kv + replicas := cfg.Replicas + if replicas == 0 { + replicas = 1 + } + maxBytes := cfg.MaxBytes + if maxBytes == 0 { + maxBytes = -1 + } + var compression StoreCompression + if cfg.Compression { + compression = S2Compression + } + scfg := StreamConfig{ + Name: fmt.Sprintf(objNameTmpl, name), + Description: cfg.Description, + Subjects: []string{chunks, meta}, + MaxAge: cfg.TTL, + MaxBytes: maxBytes, + Storage: cfg.Storage, + Replicas: replicas, + Placement: cfg.Placement, + Discard: DiscardNew, + AllowRollup: true, + AllowDirect: true, + Metadata: cfg.Metadata, + Compression: compression, + } + + return scfg, nil +} + +// ObjectStore will look up and bind to an existing object store instance. 
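+//
+// A hedged usage sketch (nc and ctx are assumed to exist; the bucket name
+// is illustrative):
+//
+//	js, _ := jetstream.New(nc)
+//	store, err := js.ObjectStore(ctx, "configs")
+//	if errors.Is(err, jetstream.ErrBucketNotFound) {
+//		store, err = js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "configs"})
+//	}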
+func (js *jetStream) ObjectStore(ctx context.Context, bucket string) (ObjectStore, error) { + if !validBucketRe.MatchString(bucket) { + return nil, ErrInvalidStoreName + } + + streamName := fmt.Sprintf(objNameTmpl, bucket) + stream, err := js.Stream(ctx, streamName) + if err != nil { + if errors.Is(err, ErrStreamNotFound) { + err = ErrBucketNotFound + } + return nil, err + } + pushJS, err := js.legacyJetStream() + if err != nil { + return nil, err + } + return mapStreamToObjectStore(js, pushJS, bucket, stream), nil +} + +// DeleteObjectStore will delete the underlying stream for the named object. +func (js *jetStream) DeleteObjectStore(ctx context.Context, bucket string) error { + if !validBucketRe.MatchString(bucket) { + return ErrInvalidStoreName + } + stream := fmt.Sprintf(objNameTmpl, bucket) + if err := js.DeleteStream(ctx, stream); err != nil { + if errors.Is(err, ErrStreamNotFound) { + err = errors.Join(fmt.Errorf("%w: %s", ErrBucketNotFound, bucket), err) + } + return err + } + return nil +} + +func encodeName(name string) string { + return base64.URLEncoding.EncodeToString([]byte(name)) +} + +// Put will place the contents from the reader into this object-store. +func (obs *obs) Put(ctx context.Context, meta ObjectMeta, r io.Reader) (*ObjectInfo, error) { + if meta.Name == "" { + return nil, ErrBadObjectMeta + } + + if meta.Opts == nil { + meta.Opts = &ObjectMetaOptions{ChunkSize: objDefaultChunkSize} + } else if meta.Opts.Link != nil { + return nil, ErrLinkNotAllowed + } else if meta.Opts.ChunkSize == 0 { + meta.Opts.ChunkSize = objDefaultChunkSize + } + + // Create the new nuid so chunks go on a new subject if the name is re-used + newnuid := nuid.Next() + + // These will be used in more than one place + chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, newnuid) + + // Grab existing meta info (einfo). Ok to be found or not found, any other error is a problem + // Chunks on the old nuid can be cleaned up at the end + einfo, err := obs.GetInfo(ctx, meta.Name, GetObjectInfoShowDeleted()) // GetInfo will encode the name + if err != nil && err != ErrObjectNotFound { + return nil, err + } + + // For async error handling + var perr error + var mu sync.Mutex + setErr := func(err error) { + mu.Lock() + defer mu.Unlock() + perr = err + } + getErr := func() error { + mu.Lock() + defer mu.Unlock() + return perr + } + + // Create our own JS context to handle errors etc. + pubJS, err := New(obs.js.conn, WithPublishAsyncErrHandler(func(js JetStream, _ *nats.Msg, err error) { setErr(err) })) + if err != nil { + return nil, err + } + + defer pubJS.(*jetStream).cleanupReplySub() + + purgePartial := func() { + // wait until all pubs are complete or up to default timeout before attempting purge + select { + case <-pubJS.PublishAsyncComplete(): + case <-ctx.Done(): + } + _ = obs.stream.Purge(ctx, WithPurgeSubject(chunkSubj)) + } + + m, h := nats.NewMsg(chunkSubj), sha256.New() + chunk, sent, total := make([]byte, meta.Opts.ChunkSize), 0, uint64(0) + + // set up the info object. The chunk upload sets the size and digest + info := &ObjectInfo{Bucket: obs.name, NUID: newnuid, ObjectMeta: meta} + + for r != nil { + if ctx != nil { + select { + case <-ctx.Done(): + if ctx.Err() == context.Canceled { + err = ctx.Err() + } else { + err = nats.ErrTimeout + } + default: + } + if err != nil { + purgePartial() + return nil, err + } + } + + // Actual read. + // TODO(dlc) - Deadline? 
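+		// Each iteration below reads up to ChunkSize bytes, publishes the
+		// chunk to the per-NUID chunk subject and feeds the running
+		// SHA-256 digest; on EOF the accumulated size, chunk count and
+		// digest are recorded in the ObjectInfo.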
+ n, readErr := r.Read(chunk) + + // Handle all non EOF errors + if readErr != nil && readErr != io.EOF { + purgePartial() + return nil, readErr + } + + // Add chunk only if we received data + if n > 0 { + // Chunk processing. + m.Data = chunk[:n] + h.Write(m.Data) + + // Send msg itself. + if _, err := pubJS.PublishMsgAsync(m); err != nil { + purgePartial() + return nil, err + } + if err := getErr(); err != nil { + purgePartial() + return nil, err + } + // Update totals. + sent++ + total += uint64(n) + } + + // EOF Processing. + if readErr == io.EOF { + // Place meta info. + info.Size, info.Chunks = uint64(total), uint32(sent) + info.Digest = GetObjectDigestValue(h) + break + } + } + + // Prepare the meta message + metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(meta.Name)) + mm := nats.NewMsg(metaSubj) + mm.Header.Set(MsgRollup, MsgRollupSubject) + mm.Data, err = json.Marshal(info) + if err != nil { + if r != nil { + purgePartial() + } + return nil, err + } + + // Publish the meta message. + _, err = pubJS.PublishMsgAsync(mm) + if err != nil { + if r != nil { + purgePartial() + } + return nil, err + } + + // Wait for all to be processed. + select { + case <-pubJS.PublishAsyncComplete(): + if err := getErr(); err != nil { + if r != nil { + purgePartial() + } + return nil, err + } + case <-ctx.Done(): + return nil, nats.ErrTimeout + } + + info.ModTime = time.Now().UTC() // This time is not actually the correct time + + // Delete any original chunks. + if einfo != nil && !einfo.Deleted { + echunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, einfo.NUID) + _ = obs.stream.Purge(ctx, WithPurgeSubject(echunkSubj)) + } + + // TODO would it be okay to do this to return the info with the correct time? + // With the understanding that it is an extra call to the server. + // Otherwise the time the user gets back is the client time, not the server time. + // return obs.GetInfo(info.Name) + + return info, nil +} + +// GetObjectDigestValue calculates the base64 value of hashed data +func GetObjectDigestValue(data hash.Hash) string { + sha := data.Sum(nil) + return fmt.Sprintf(objDigestTmpl, base64.URLEncoding.EncodeToString(sha[:])) +} + +// DecodeObjectDigest decodes base64 hash +func DecodeObjectDigest(data string) ([]byte, error) { + digest := strings.SplitN(data, "=", 2) + if len(digest) != 2 { + return nil, ErrInvalidDigestFormat + } + return base64.URLEncoding.DecodeString(digest[1]) +} + +func (info *ObjectInfo) isLink() bool { + return info.ObjectMeta.Opts != nil && info.ObjectMeta.Opts.Link != nil +} + +// Get will pull the object from the underlying stream. +func (obs *obs) Get(ctx context.Context, name string, opts ...GetObjectOpt) (ObjectResult, error) { + ctx, cancel := obs.js.wrapContextWithoutDeadline(ctx) + var o getObjectOpts + for _, opt := range opts { + if opt != nil { + if err := opt(&o); err != nil { + return nil, err + } + } + } + infoOpts := make([]GetObjectInfoOpt, 0) + if o.showDeleted { + infoOpts = append(infoOpts, GetObjectInfoShowDeleted()) + } + + // Grab meta info. + info, err := obs.GetInfo(ctx, name, infoOpts...) + if err != nil { + return nil, err + } + if info.NUID == "" { + return nil, ErrBadObjectMeta + } + + // Check for object links. If single objects we do a pass through. + if info.isLink() { + if info.ObjectMeta.Opts.Link.Name == "" { + return nil, ErrCantGetBucket + } + + // is the link in the same bucket? 
+ lbuck := info.ObjectMeta.Opts.Link.Bucket + if lbuck == obs.name { + return obs.Get(ctx, info.ObjectMeta.Opts.Link.Name) + } + + // different bucket + lobs, err := obs.js.ObjectStore(ctx, lbuck) + if err != nil { + return nil, err + } + return lobs.Get(ctx, info.ObjectMeta.Opts.Link.Name) + } + + result := &objResult{info: info, ctx: ctx} + if info.Size == 0 { + return result, nil + } + + pr, pw := net.Pipe() + result.r = pr + + gotErr := func(m *nats.Msg, err error) { + pw.Close() + m.Sub.Unsubscribe() + result.setErr(err) + } + + // For calculating sum256 + result.digest = sha256.New() + + processChunk := func(m *nats.Msg) { + var err error + if ctx != nil { + select { + case <-ctx.Done(): + if ctx.Err() == context.Canceled { + err = ctx.Err() + } else { + err = nats.ErrTimeout + } + default: + } + if err != nil { + gotErr(m, err) + return + } + } + + tokens, err := parser.GetMetadataFields(m.Reply) + if err != nil { + gotErr(m, err) + return + } + + // Write to our pipe. + for b := m.Data; len(b) > 0; { + n, err := pw.Write(b) + if err != nil { + gotErr(m, err) + return + } + b = b[n:] + } + // Update sha256 + result.digest.Write(m.Data) + + // Check if we are done. + if tokens[parser.AckNumPendingTokenPos] == objNoPending { + pw.Close() + m.Sub.Unsubscribe() + } + } + + chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, info.NUID) + streamName := fmt.Sprintf(objNameTmpl, obs.name) + subscribeOpts := []nats.SubOpt{ + nats.OrderedConsumer(), + nats.Context(ctx), + nats.BindStream(streamName), + } + sub, err := obs.pushJS.Subscribe(chunkSubj, processChunk, subscribeOpts...) + if err != nil { + return nil, err + } + sub.SetClosedHandler(func(subject string) { + if cancel != nil { + cancel() + } + }) + + return result, nil +} + +// Delete will delete the object. +func (obs *obs) Delete(ctx context.Context, name string) error { + // Grab meta info. + info, err := obs.GetInfo(ctx, name, GetObjectInfoShowDeleted()) + if err != nil { + return err + } + if info.NUID == "" { + return ErrBadObjectMeta + } + + // Place a rollup delete marker and publish the info + info.Deleted = true + info.Size, info.Chunks, info.Digest = 0, 0, "" + + if err = publishMeta(ctx, info, obs.js); err != nil { + return err + } + + // Purge chunks for the object. + chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, info.NUID) + return obs.stream.Purge(ctx, WithPurgeSubject(chunkSubj)) +} + +func publishMeta(ctx context.Context, info *ObjectInfo, js *jetStream) error { + // marshal the object into json, don't store an actual time + info.ModTime = time.Time{} + data, err := json.Marshal(info) + if err != nil { + return err + } + + // Prepare and publish the message. + mm := nats.NewMsg(fmt.Sprintf(objMetaPreTmpl, info.Bucket, encodeName(info.ObjectMeta.Name))) + mm.Header.Set(MsgRollup, MsgRollupSubject) + mm.Data = data + if _, err := js.PublishMsg(ctx, mm); err != nil { + return err + } + + // set the ModTime in case it's returned to the user, even though it's not the correct time. 
+ info.ModTime = time.Now().UTC() + return nil +} + +// AddLink will add a link to another object if it's not deleted and not another link +// name is the name of this link object +// obj is what is being linked too +func (obs *obs) AddLink(ctx context.Context, name string, obj *ObjectInfo) (*ObjectInfo, error) { + if name == "" { + return nil, ErrNameRequired + } + + // TODO Handle stale info + + if obj == nil || obj.Name == "" { + return nil, ErrObjectRequired + } + if obj.Deleted { + return nil, ErrNoLinkToDeleted + } + if obj.isLink() { + return nil, ErrNoLinkToLink + } + + // If object with link's name is found, error. + // If link with link's name is found, that's okay to overwrite. + // If there was an error that was not ErrObjectNotFound, error. + einfo, err := obs.GetInfo(ctx, name, GetObjectInfoShowDeleted()) + if einfo != nil { + if !einfo.isLink() { + return nil, ErrObjectAlreadyExists + } + } else if err != ErrObjectNotFound { + return nil, err + } + + // create the meta for the link + meta := &ObjectMeta{ + Name: name, + Opts: &ObjectMetaOptions{Link: &ObjectLink{Bucket: obj.Bucket, Name: obj.Name}}, + } + info := &ObjectInfo{Bucket: obs.name, NUID: nuid.Next(), ModTime: time.Now().UTC(), ObjectMeta: *meta} + + // put the link object + if err = publishMeta(ctx, info, obs.js); err != nil { + return nil, err + } + + return info, nil +} + +// AddBucketLink will add a link to another object store. +func (ob *obs) AddBucketLink(ctx context.Context, name string, bucket ObjectStore) (*ObjectInfo, error) { + if name == "" { + return nil, ErrNameRequired + } + if bucket == nil { + return nil, ErrBucketRequired + } + bos, ok := bucket.(*obs) + if !ok { + return nil, ErrBucketMalformed + } + + // If object with link's name is found, error. + // If link with link's name is found, that's okay to overwrite. + // If there was an error that was not ErrObjectNotFound, error. + einfo, err := ob.GetInfo(ctx, name, GetObjectInfoShowDeleted()) + if einfo != nil { + if !einfo.isLink() { + return nil, ErrObjectAlreadyExists + } + } else if err != ErrObjectNotFound { + return nil, err + } + + // create the meta for the link + meta := &ObjectMeta{ + Name: name, + Opts: &ObjectMetaOptions{Link: &ObjectLink{Bucket: bos.name}}, + } + info := &ObjectInfo{Bucket: ob.name, NUID: nuid.Next(), ObjectMeta: *meta} + + // put the link object + err = publishMeta(ctx, info, ob.js) + if err != nil { + return nil, err + } + + return info, nil +} + +// PutBytes is convenience function to put a byte slice into this object store. +func (obs *obs) PutBytes(ctx context.Context, name string, data []byte) (*ObjectInfo, error) { + return obs.Put(ctx, ObjectMeta{Name: name}, bytes.NewReader(data)) +} + +// GetBytes is a convenience function to pull an object from this object store and return it as a byte slice. +func (obs *obs) GetBytes(ctx context.Context, name string, opts ...GetObjectOpt) ([]byte, error) { + result, err := obs.Get(ctx, name, opts...) + if err != nil { + return nil, err + } + defer result.Close() + + var b bytes.Buffer + if _, err := b.ReadFrom(result); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// PutString is convenience function to put a string into this object store. +func (obs *obs) PutString(ctx context.Context, name string, data string) (*ObjectInfo, error) { + return obs.Put(ctx, ObjectMeta{Name: name}, strings.NewReader(data)) +} + +// GetString is a convenience function to pull an object from this object store and return it as a string. 
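+//
+// A hedged round-trip sketch (store and ctx are illustrative):
+//
+//	_, _ = store.PutString(ctx, "greeting", "hello")
+//	s, err := store.GetString(ctx, "greeting") // s == "hello" on success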
+func (obs *obs) GetString(ctx context.Context, name string, opts ...GetObjectOpt) (string, error) {
+	result, err := obs.Get(ctx, name, opts...)
+	if err != nil {
+		return "", err
+	}
+	defer result.Close()
+
+	var b bytes.Buffer
+	if _, err := b.ReadFrom(result); err != nil {
+		return "", err
+	}
+	return b.String(), nil
+}
+
+// PutFile is a convenience function to put a file into an object store.
+func (obs *obs) PutFile(ctx context.Context, file string) (*ObjectInfo, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return obs.Put(ctx, ObjectMeta{Name: file}, f)
+}
+
+// GetFile is a convenience function to pull an object and place it in a file.
+func (obs *obs) GetFile(ctx context.Context, name, file string, opts ...GetObjectOpt) error {
+	// Expect file to be new.
+	f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE, 0600)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	result, err := obs.Get(ctx, name, opts...)
+	if err != nil {
+		os.Remove(f.Name())
+		return err
+	}
+	defer result.Close()
+
+	// Stream copy to the file.
+	_, err = io.Copy(f, result)
+	return err
+}
+
+// GetInfo will retrieve the current information for the object.
+func (obs *obs) GetInfo(ctx context.Context, name string, opts ...GetObjectInfoOpt) (*ObjectInfo, error) {
+	// Grab last meta value we have.
+	if name == "" {
+		return nil, ErrNameRequired
+	}
+	var o getObjectInfoOpts
+	for _, opt := range opts {
+		if opt != nil {
+			if err := opt(&o); err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(name)) // used as data in a JS API call
+
+	m, err := obs.stream.GetLastMsgForSubject(ctx, metaSubj)
+	if err != nil {
+		if errors.Is(err, ErrMsgNotFound) {
+			err = ErrObjectNotFound
+		}
+		if errors.Is(err, ErrStreamNotFound) {
+			err = ErrBucketNotFound
+		}
+		return nil, err
+	}
+	var info ObjectInfo
+	if err := json.Unmarshal(m.Data, &info); err != nil {
+		return nil, ErrBadObjectMeta
+	}
+	if !o.showDeleted && info.Deleted {
+		return nil, ErrObjectNotFound
+	}
+	info.ModTime = m.Time
+	return &info, nil
+}
+
+// UpdateMeta will update the meta for the object.
+func (obs *obs) UpdateMeta(ctx context.Context, name string, meta ObjectMeta) error {
+	// Grab the current meta.
+	info, err := obs.GetInfo(ctx, name)
+	if err != nil {
+		if errors.Is(err, ErrObjectNotFound) {
+			return ErrUpdateMetaDeleted
+		}
+		return err
+	}
+
+	// If the new name is different from the old, and an object with the new name exists, error.
+	// If there was an error that was not ErrObjectNotFound, error.
+	if name != meta.Name {
+		existingInfo, err := obs.GetInfo(ctx, meta.Name, GetObjectInfoShowDeleted())
+		if err != nil && !errors.Is(err, ErrObjectNotFound) {
+			return err
+		}
+		if err == nil && !existingInfo.Deleted {
+			return ErrObjectAlreadyExists
+		}
+	}
+
+	// UpdateMeta prevents updates of ObjectMetaOptions (Link, ChunkSize).
+	// These should only be updated internally when appropriate.
+	info.Name = meta.Name
+	info.Description = meta.Description
+	info.Headers = meta.Headers
+	info.Metadata = meta.Metadata
+
+	// Prepare the meta message
+	if err = publishMeta(ctx, info, obs.js); err != nil {
+		return err
+	}
+
+	// Did the name of this object change? We just stored the meta under the new name,
+	// so delete the meta from the old name via purge stream for subject.
+	if name != meta.Name {
+		metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(name))
+		return obs.stream.Purge(ctx, WithPurgeSubject(metaSubj))
+	}
+
+	return nil
+}
+
+// Seal will seal the object store, no further modifications will be allowed.
+func (obs *obs) Seal(ctx context.Context) error {
+	si, err := obs.stream.Info(ctx)
+	if err != nil {
+		return err
+	}
+	// Seal the stream from being able to take on more messages.
+	cfg := si.Config
+	cfg.Sealed = true
+	_, err = obs.js.UpdateStream(ctx, cfg)
+	return err
+}
+
+// Implementation for Watch
+type objWatcher struct {
+	updates chan *ObjectInfo
+	sub     *nats.Subscription
+}
+
+// Updates returns the interior channel.
+func (w *objWatcher) Updates() <-chan *ObjectInfo {
+	if w == nil {
+		return nil
+	}
+	return w.updates
+}
+
+// Stop will unsubscribe from the watcher.
+func (w *objWatcher) Stop() error {
+	if w == nil {
+		return nil
+	}
+	return w.sub.Unsubscribe()
+}
+
+// Watch for changes in the underlying store and receive meta information updates.
+func (obs *obs) Watch(ctx context.Context, opts ...WatchOpt) (ObjectWatcher, error) {
+	var o watchOpts
+	for _, opt := range opts {
+		if opt != nil {
+			if err := opt.configureWatcher(&o); err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	var initDoneMarker bool
+
+	w := &objWatcher{updates: make(chan *ObjectInfo, 32)}
+
+	update := func(m *nats.Msg) {
+		var info ObjectInfo
+		if err := json.Unmarshal(m.Data, &info); err != nil {
+			return // TODO(dlc) - Communicate this upwards?
+		}
+		meta, err := m.Metadata()
+		if err != nil {
+			return
+		}
+
+		if !o.ignoreDeletes || !info.Deleted {
+			info.ModTime = meta.Timestamp
+			w.updates <- &info
+		}
+
+		// if UpdatesOnly is set, do not send nil to the channel
+		// as it would always be triggered after initializing the watcher
+		if !initDoneMarker && meta.NumPending == 0 {
+			initDoneMarker = true
+			w.updates <- nil
+		}
+	}
+
+	allMeta := fmt.Sprintf(objAllMetaPreTmpl, obs.name)
+	_, err := obs.stream.GetLastMsgForSubject(ctx, allMeta)
+	// if there are no messages on the stream and we are not watching
+	// updates only, send nil to the channel to indicate that the initial
+	// watch is done
+	if !o.updatesOnly {
+		if errors.Is(err, ErrMsgNotFound) {
+			initDoneMarker = true
+			w.updates <- nil
+		}
+	} else {
+		// if UpdatesOnly was used, mark initialization as complete
+		initDoneMarker = true
+	}
+
+	// Use an ordered consumer to deliver results.
+	streamName := fmt.Sprintf(objNameTmpl, obs.name)
+	subOpts := []nats.SubOpt{nats.OrderedConsumer(), nats.BindStream(streamName)}
+	if !o.includeHistory {
+		subOpts = append(subOpts, nats.DeliverLastPerSubject())
+	}
+	if o.updatesOnly {
+		subOpts = append(subOpts, nats.DeliverNew())
+	}
+	subOpts = append(subOpts, nats.Context(ctx))
+	sub, err := obs.pushJS.Subscribe(allMeta, update, subOpts...)
+	if err != nil {
+		return nil, err
+	}
+	sub.SetClosedHandler(func(_ string) {
+		close(w.updates)
+	})
+	w.sub = sub
+	return w, nil
+}
+
+// List will list all the objects in this store.
+func (obs *obs) List(ctx context.Context, opts ...ListObjectsOpt) ([]*ObjectInfo, error) {
+	var o listObjectOpts
+	for _, opt := range opts {
+		if opt != nil {
+			if err := opt(&o); err != nil {
+				return nil, err
+			}
+		}
+	}
+	watchOpts := make([]WatchOpt, 0)
+	if !o.showDeleted {
+		watchOpts = append(watchOpts, IgnoreDeletes())
+	}
+	watcher, err := obs.Watch(ctx, watchOpts...)
+ if err != nil { + return nil, err + } + defer watcher.Stop() + + var objs []*ObjectInfo + updates := watcher.Updates() +Updates: + for { + select { + case entry := <-updates: + if entry == nil { + break Updates + } + objs = append(objs, entry) + case <-ctx.Done(): + return nil, ctx.Err() + } + } + if len(objs) == 0 { + return nil, ErrNoObjectsFound + } + return objs, nil +} + +// ObjectBucketStatus represents status of a Bucket, implements ObjectStoreStatus +type ObjectBucketStatus struct { + nfo *StreamInfo + bucket string +} + +// Bucket is the name of the bucket +func (s *ObjectBucketStatus) Bucket() string { return s.bucket } + +// Description is the description supplied when creating the bucket +func (s *ObjectBucketStatus) Description() string { return s.nfo.Config.Description } + +// TTL indicates how long objects are kept in the bucket +func (s *ObjectBucketStatus) TTL() time.Duration { return s.nfo.Config.MaxAge } + +// Storage indicates the underlying JetStream storage technology used to store data +func (s *ObjectBucketStatus) Storage() StorageType { return s.nfo.Config.Storage } + +// Replicas indicates how many storage replicas are kept for the data in the bucket +func (s *ObjectBucketStatus) Replicas() int { return s.nfo.Config.Replicas } + +// Sealed indicates the stream is sealed and cannot be modified in any way +func (s *ObjectBucketStatus) Sealed() bool { return s.nfo.Config.Sealed } + +// Size is the combined size of all data in the bucket including metadata, in bytes +func (s *ObjectBucketStatus) Size() uint64 { return s.nfo.State.Bytes } + +// BackingStore indicates what technology is used for storage of the bucket +func (s *ObjectBucketStatus) BackingStore() string { return "JetStream" } + +// Metadata is the metadata supplied when creating the bucket +func (s *ObjectBucketStatus) Metadata() map[string]string { return s.nfo.Config.Metadata } + +// StreamInfo is the stream info retrieved to create the status +func (s *ObjectBucketStatus) StreamInfo() *StreamInfo { return s.nfo } + +// IsCompressed indicates if the data is compressed on disk +func (s *ObjectBucketStatus) IsCompressed() bool { return s.nfo.Config.Compression != NoCompression } + +// Status retrieves run-time status about a bucket +func (obs *obs) Status(ctx context.Context) (ObjectStoreStatus, error) { + nfo, err := obs.stream.Info(ctx) + if err != nil { + return nil, err + } + + status := &ObjectBucketStatus{ + nfo: nfo, + bucket: obs.name, + } + + return status, nil +} + +// Read impl. +func (o *objResult) Read(p []byte) (n int, err error) { + o.Lock() + defer o.Unlock() + readDeadline := time.Now().Add(defaultAPITimeout) + if ctx := o.ctx; ctx != nil { + if deadline, ok := ctx.Deadline(); ok { + readDeadline = deadline + } + select { + case <-ctx.Done(): + if ctx.Err() == context.Canceled { + o.err = ctx.Err() + } else { + o.err = nats.ErrTimeout + } + default: + } + } + if o.err != nil { + return 0, o.err + } + if o.r == nil { + return 0, io.EOF + } + + r := o.r.(net.Conn) + _ = r.SetReadDeadline(readDeadline) + n, err = r.Read(p) + if err, ok := err.(net.Error); ok && err.Timeout() { + if ctx := o.ctx; ctx != nil { + select { + case <-ctx.Done(): + if ctx.Err() == context.Canceled { + return 0, ctx.Err() + } else { + return 0, nats.ErrTimeout + } + default: + err = nil + } + } + } + if err == io.EOF { + // Make sure the digest matches. 
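+		// The digest accumulated while streaming chunks is compared with
+		// the SHA-256 recorded in the object's metadata; a mismatch is
+		// reported as ErrDigestMismatch instead of a clean EOF.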
+ sha := o.digest.Sum(nil) + rsha, decodeErr := DecodeObjectDigest(o.info.Digest) + if decodeErr != nil { + o.err = decodeErr + return 0, o.err + } + if !bytes.Equal(sha[:], rsha) { + o.err = ErrDigestMismatch + return 0, o.err + } + } + return n, err +} + +// Close impl. +func (o *objResult) Close() error { + o.Lock() + defer o.Unlock() + if o.r == nil { + return nil + } + return o.r.Close() +} + +func (o *objResult) setErr(err error) { + o.Lock() + defer o.Unlock() + o.err = err +} + +func (o *objResult) Info() (*ObjectInfo, error) { + o.Lock() + defer o.Unlock() + return o.info, o.err +} + +func (o *objResult) Error() error { + o.Lock() + defer o.Unlock() + return o.err +} + +// ObjectStoreNames is used to retrieve a list of bucket names +func (js *jetStream) ObjectStoreNames(ctx context.Context) ObjectStoreNamesLister { + res := &obsLister{ + obsNames: make(chan string), + } + l := &streamLister{js: js} + streamsReq := streamsRequest{ + Subject: fmt.Sprintf(objAllChunksPreTmpl, "*"), + } + + go func() { + defer close(res.obsNames) + for { + page, err := l.streamNames(ctx, streamsReq) + if err != nil && !errors.Is(err, ErrEndOfData) { + res.err = err + return + } + for _, name := range page { + if !strings.HasPrefix(name, "OBJ_") { + continue + } + res.obsNames <- strings.TrimPrefix(name, "OBJ_") + } + if errors.Is(err, ErrEndOfData) { + return + } + } + }() + + return res +} + +// ObjectStores is used to retrieve a list of bucket statuses +func (js *jetStream) ObjectStores(ctx context.Context) ObjectStoresLister { + res := &obsLister{ + obs: make(chan ObjectStoreStatus), + } + l := &streamLister{js: js} + streamsReq := streamsRequest{ + Subject: fmt.Sprintf(objAllChunksPreTmpl, "*"), + } + go func() { + defer close(res.obs) + for { + page, err := l.streamInfos(ctx, streamsReq) + if err != nil && !errors.Is(err, ErrEndOfData) { + res.err = err + return + } + for _, info := range page { + if !strings.HasPrefix(info.Config.Name, "OBJ_") { + continue + } + res.obs <- &ObjectBucketStatus{ + nfo: info, + bucket: strings.TrimPrefix(info.Config.Name, "OBJ_"), + } + } + if errors.Is(err, ErrEndOfData) { + return + } + } + }() + + return res +} + +type obsLister struct { + obs chan ObjectStoreStatus + obsNames chan string + err error +} + +func (ol *obsLister) Status() <-chan ObjectStoreStatus { + return ol.obs +} + +func (ol *obsLister) Name() <-chan string { + return ol.obsNames +} + +func (ol *obsLister) Error() error { + return ol.err +} + +func mapStreamToObjectStore(js *jetStream, pushJS nats.JetStreamContext, bucket string, stream Stream) *obs { + info := stream.CachedInfo() + + obs := &obs{ + name: bucket, + js: js, + pushJS: pushJS, + streamName: info.Config.Name, + stream: stream, + } + + return obs +} diff --git a/vendor/github.com/nats-io/nats.go/jetstream/object_options.go b/vendor/github.com/nats-io/nats.go/jetstream/object_options.go new file mode 100644 index 000000000..df583646c --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/jetstream/object_options.go @@ -0,0 +1,41 @@ +// Copyright 2024 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jetstream
+
+// GetObjectShowDeleted makes [ObjectStore.Get] return an object even if it was
+// marked as deleted.
+func GetObjectShowDeleted() GetObjectOpt {
+	return func(opts *getObjectOpts) error {
+		opts.showDeleted = true
+		return nil
+	}
+}
+
+// GetObjectInfoShowDeleted makes [ObjectStore.GetInfo] return object info even
+// if it was marked as deleted.
+func GetObjectInfoShowDeleted() GetObjectInfoOpt {
+	return func(opts *getObjectInfoOpts) error {
+		opts.showDeleted = true
+		return nil
+	}
+}
+
+// ListObjectsShowDeleted makes [ObjectStore.List] also return deleted
+// objects.
+func ListObjectsShowDeleted() ListObjectsOpt {
+	return func(opts *listObjectOpts) error {
+		opts.showDeleted = true
+		return nil
+	}
+}
diff --git a/vendor/github.com/nats-io/nats.go/jetstream/ordered.go b/vendor/github.com/nats-io/nats.go/jetstream/ordered.go
new file mode 100644
index 000000000..8d79bc42d
--- /dev/null
+++ b/vendor/github.com/nats-io/nats.go/jetstream/ordered.go
@@ -0,0 +1,810 @@
+// Copyright 2022-2024 The NATS Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jetstream
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/nats-io/nats.go"
+)
+
+type (
+	orderedConsumer struct {
+		js                *jetStream
+		cfg               *OrderedConsumerConfig
+		stream            string
+		currentConsumer   *pullConsumer
+		currentSub        *pullSubscription
+		cursor            cursor
+		namePrefix        string
+		serial            int
+		consumerType      consumerType
+		doReset           chan struct{}
+		resetInProgress   atomic.Uint32
+		userErrHandler    ConsumeErrHandlerFunc
+		stopAfter         int
+		stopAfterMsgsLeft chan int
+		withStopAfter     bool
+		runningFetch      *fetchResult
+		subscription      *orderedSubscription
+		sync.Mutex
+	}
+
+	orderedSubscription struct {
+		consumer *orderedConsumer
+		opts     []PullMessagesOpt
+		done     chan struct{}
+		closed   atomic.Uint32
+	}
+
+	cursor struct {
+		streamSeq  uint64
+		deliverSeq uint64
+	}
+
+	consumerType int
+)
+
+const (
+	consumerTypeNotSet consumerType = iota
+	consumerTypeConsume
+	consumerTypeFetch
+)
+
+var (
+	errOrderedSequenceMismatch = errors.New("sequence mismatch")
+	errOrderedConsumerClosed   = errors.New("ordered consumer closed")
+)
+
+// Consume can be used to continuously receive messages and handle them
+// with the provided callback function. Consume cannot be used concurrently
+// when using an ordered consumer.
+//
+// See [Consumer.Consume] for more details.
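+//
+// A hedged usage sketch (js, ctx and the stream name are illustrative):
+//
+//	cons, _ := js.OrderedConsumer(ctx, "EVENTS", jetstream.OrderedConsumerConfig{})
+//	cc, err := cons.Consume(func(msg jetstream.Msg) {
+//		fmt.Println(string(msg.Data()))
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	defer cc.Stop()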
+func (c *orderedConsumer) Consume(handler MessageHandler, opts ...PullConsumeOpt) (ConsumeContext, error) { + if (c.consumerType == consumerTypeNotSet || c.consumerType == consumerTypeConsume) && c.currentConsumer == nil { + err := c.reset() + if err != nil { + return nil, err + } + } else if c.consumerType == consumerTypeConsume && c.currentConsumer != nil { + return nil, ErrOrderedConsumerConcurrentRequests + } + if c.consumerType == consumerTypeFetch { + return nil, ErrOrderConsumerUsedAsFetch + } + c.consumerType = consumerTypeConsume + consumeOpts, err := parseConsumeOpts(true, opts...) + if err != nil { + return nil, fmt.Errorf("%w: %s", ErrInvalidOption, err) + } + c.userErrHandler = consumeOpts.ErrHandler + opts = append(opts, consumeReconnectNotify(), + ConsumeErrHandler(c.errHandler(c.serial))) + if consumeOpts.StopAfter > 0 { + c.withStopAfter = true + c.stopAfter = consumeOpts.StopAfter + } + c.stopAfterMsgsLeft = make(chan int, 1) + if c.stopAfter > 0 { + opts = append(opts, consumeStopAfterNotify(c.stopAfter, c.stopAfterMsgsLeft)) + } + sub := &orderedSubscription{ + consumer: c, + done: make(chan struct{}, 1), + } + c.subscription = sub + internalHandler := func(serial int) func(msg Msg) { + return func(msg Msg) { + // handler is a noop if message was delivered for a consumer with different serial + if serial != c.serial { + return + } + meta, err := msg.Metadata() + if err != nil { + c.errHandler(serial)(c.currentSub, err) + return + } + dseq := meta.Sequence.Consumer + if dseq != c.cursor.deliverSeq+1 { + c.errHandler(serial)(sub, errOrderedSequenceMismatch) + return + } + c.cursor.deliverSeq = dseq + c.cursor.streamSeq = meta.Sequence.Stream + handler(msg) + } + } + + cc, err := c.currentConsumer.Consume(internalHandler(c.serial), opts...) 
+ if err != nil { + return nil, err + } + c.currentSub = cc.(*pullSubscription) + + go func() { + for { + select { + case <-c.doReset: + if err := c.reset(); err != nil { + if errors.Is(err, errOrderedConsumerClosed) { + continue + } + c.errHandler(c.serial)(c.currentSub, err) + } + if c.withStopAfter { + select { + case c.stopAfter = <-c.stopAfterMsgsLeft: + default: + } + if c.stopAfter <= 0 { + sub.Stop() + return + } + } + if c.stopAfter > 0 { + opts = opts[:len(opts)-2] + } else { + opts = opts[:len(opts)-1] + } + + // overwrite the previous err handler to use the new serial + opts = append(opts, ConsumeErrHandler(c.errHandler(c.serial))) + if c.withStopAfter { + opts = append(opts, consumeStopAfterNotify(c.stopAfter, c.stopAfterMsgsLeft)) + } + if cc, err := c.currentConsumer.Consume(internalHandler(c.serial), opts...); err != nil { + c.errHandler(c.serial)(cc, err) + } else { + c.Lock() + c.currentSub = cc.(*pullSubscription) + c.Unlock() + } + case <-sub.done: + s := sub.consumer.currentSub + if s != nil { + sub.consumer.Lock() + s.Stop() + sub.consumer.Unlock() + } + return + case msgsLeft, ok := <-c.stopAfterMsgsLeft: + if !ok { + close(sub.done) + } + c.stopAfter = msgsLeft + return + } + } + }() + return sub, nil +} + +func (c *orderedConsumer) errHandler(serial int) func(cc ConsumeContext, err error) { + return func(cc ConsumeContext, err error) { + c.Lock() + defer c.Unlock() + if c.userErrHandler != nil && !errors.Is(err, errOrderedSequenceMismatch) && !errors.Is(err, errConnected) { + c.userErrHandler(cc, err) + } + if errors.Is(err, ErrNoHeartbeat) || + errors.Is(err, errOrderedSequenceMismatch) || + errors.Is(err, ErrConsumerDeleted) || + errors.Is(err, errConnected) || + errors.Is(err, nats.ErrNoResponders) { + // only reset if serial matches the current consumer serial and there is no reset in progress + if serial == c.serial && c.resetInProgress.Load() == 0 { + c.resetInProgress.Store(1) + c.doReset <- struct{}{} + } + } + } +} + +// Messages returns MessagesContext, allowing continuously iterating +// over messages on a stream. Messages cannot be used concurrently +// when using ordered consumer. +// +// See [Consumer.Messages] for more details. +func (c *orderedConsumer) Messages(opts ...PullMessagesOpt) (MessagesContext, error) { + if (c.consumerType == consumerTypeNotSet || c.consumerType == consumerTypeConsume) && c.currentConsumer == nil { + err := c.reset() + if err != nil { + return nil, err + } + } else if c.consumerType == consumerTypeConsume && c.currentConsumer != nil { + return nil, ErrOrderedConsumerConcurrentRequests + } + if c.consumerType == consumerTypeFetch { + return nil, ErrOrderConsumerUsedAsFetch + } + c.consumerType = consumerTypeConsume + consumeOpts, err := parseMessagesOpts(true, opts...) + if err != nil { + return nil, fmt.Errorf("%w: %s", ErrInvalidOption, err) + } + opts = append(opts, + WithMessagesErrOnMissingHeartbeat(true), + messagesReconnectNotify()) + c.stopAfterMsgsLeft = make(chan int, 1) + if consumeOpts.StopAfter > 0 { + c.withStopAfter = true + c.stopAfter = consumeOpts.StopAfter + } + c.userErrHandler = consumeOpts.ErrHandler + if c.stopAfter > 0 { + opts = append(opts, messagesStopAfterNotify(c.stopAfter, c.stopAfterMsgsLeft)) + } + cc, err := c.currentConsumer.Messages(opts...) 
+ if err != nil { + return nil, err + } + c.currentSub = cc.(*pullSubscription) + + sub := &orderedSubscription{ + consumer: c, + opts: opts, + done: make(chan struct{}, 1), + } + c.subscription = sub + + return sub, nil +} + +func (s *orderedSubscription) Next() (Msg, error) { + for { + msg, err := s.consumer.currentSub.Next() + if err != nil { + if errors.Is(err, ErrMsgIteratorClosed) { + s.Stop() + return nil, err + } + if s.consumer.withStopAfter { + select { + case s.consumer.stopAfter = <-s.consumer.stopAfterMsgsLeft: + default: + } + if s.consumer.stopAfter <= 0 { + s.Stop() + return nil, ErrMsgIteratorClosed + } + s.opts[len(s.opts)-1] = StopAfter(s.consumer.stopAfter) + } + if err := s.consumer.reset(); err != nil { + if errors.Is(err, errOrderedConsumerClosed) { + return nil, ErrMsgIteratorClosed + } + return nil, err + } + cc, err := s.consumer.currentConsumer.Messages(s.opts...) + if err != nil { + return nil, err + } + s.consumer.currentSub = cc.(*pullSubscription) + continue + } + + meta, err := msg.Metadata() + if err != nil { + return nil, err + } + serial := serialNumberFromConsumer(meta.Consumer) + if serial != s.consumer.serial { + continue + } + dseq := meta.Sequence.Consumer + if dseq != s.consumer.cursor.deliverSeq+1 { + if err := s.consumer.reset(); err != nil { + if errors.Is(err, errOrderedConsumerClosed) { + return nil, ErrMsgIteratorClosed + } + return nil, err + } + cc, err := s.consumer.currentConsumer.Messages(s.opts...) + if err != nil { + return nil, err + } + s.consumer.currentSub = cc.(*pullSubscription) + continue + } + s.consumer.cursor.deliverSeq = dseq + s.consumer.cursor.streamSeq = meta.Sequence.Stream + return msg, nil + } +} + +func (s *orderedSubscription) Stop() { + if !s.closed.CompareAndSwap(0, 1) { + return + } + s.consumer.Lock() + defer s.consumer.Unlock() + if s.consumer.currentSub != nil { + s.consumer.currentSub.Stop() + } + close(s.done) +} + +func (s *orderedSubscription) Drain() { + if !s.closed.CompareAndSwap(0, 1) { + return + } + if s.consumer.currentSub != nil { + s.consumer.currentConsumer.Lock() + s.consumer.currentSub.Drain() + s.consumer.currentConsumer.Unlock() + } + close(s.done) +} + +// Closed returns a channel that is closed when the consuming is +// fully stopped/drained. When the channel is closed, no more messages +// will be received and processing is complete. +func (s *orderedSubscription) Closed() <-chan struct{} { + s.consumer.Lock() + defer s.consumer.Unlock() + closedCh := make(chan struct{}) + + go func() { + for { + s.consumer.Lock() + if s.consumer.currentSub == nil { + return + } + + closed := s.consumer.currentSub.Closed() + s.consumer.Unlock() + + // wait until the underlying pull consumer is closed + <-closed + // if the subscription is closed and ordered consumer is closed as well, + // send a signal that the Consume() is fully stopped + if s.closed.Load() == 1 { + close(closedCh) + return + } + } + }() + return closedCh +} + +// Fetch is used to retrieve up to a provided number of messages from a +// stream. This method will always send a single request and wait until +// either all messages are retrieved or request times out. +// +// It is not efficient to use Fetch with on an ordered consumer, as it will +// reset the consumer for each subsequent Fetch call. +// Consider using [Consumer.Consume] or [Consumer.Messages] instead. 
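+//
+// A hedged sketch (cons is illustrative):
+//
+//	batch, err := cons.Fetch(10)
+//	if err != nil {
+//		// handle error
+//	}
+//	for msg := range batch.Messages() {
+//		fmt.Println(string(msg.Data()))
+//	}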
+func (c *orderedConsumer) Fetch(batch int, opts ...FetchOpt) (MessageBatch, error) {
+	c.Lock()
+	if c.consumerType == consumerTypeConsume {
+		c.Unlock()
+		return nil, ErrOrderConsumerUsedAsConsume
+	}
+	if c.runningFetch != nil {
+		if !c.runningFetch.closed() {
+			c.Unlock()
+			return nil, ErrOrderedConsumerConcurrentRequests
+		}
+		if c.runningFetch.sseq != 0 {
+			c.cursor.streamSeq = c.runningFetch.sseq
+		}
+	}
+	c.consumerType = consumerTypeFetch
+	sub := orderedSubscription{
+		consumer: c,
+		done:     make(chan struct{}),
+	}
+	c.subscription = &sub
+	c.Unlock()
+	err := c.reset()
+	if err != nil {
+		return nil, err
+	}
+	msgs, err := c.currentConsumer.Fetch(batch, opts...)
+	if err != nil {
+		return nil, err
+	}
+	c.runningFetch = msgs.(*fetchResult)
+	return msgs, nil
+}
+
+// FetchBytes is used to retrieve up to a provided number of bytes from the
+// stream. This method will always send a single request and wait until the
+// provided number of bytes is exceeded or request times out.
+//
+// It is not efficient to use FetchBytes on an ordered consumer, as it will
+// reset the consumer for each subsequent Fetch call.
+// Consider using [Consumer.Consume] or [Consumer.Messages] instead.
+func (c *orderedConsumer) FetchBytes(maxBytes int, opts ...FetchOpt) (MessageBatch, error) {
+	c.Lock()
+	if c.consumerType == consumerTypeConsume {
+		c.Unlock()
+		return nil, ErrOrderConsumerUsedAsConsume
+	}
+	if c.runningFetch != nil {
+		if !c.runningFetch.closed() {
+			c.Unlock()
+			return nil, ErrOrderedConsumerConcurrentRequests
+		}
+		if c.runningFetch.sseq != 0 {
+			c.cursor.streamSeq = c.runningFetch.sseq
+		}
+	}
+	c.consumerType = consumerTypeFetch
+	sub := orderedSubscription{
+		consumer: c,
+		done:     make(chan struct{}),
+	}
+	c.subscription = &sub
+	c.Unlock()
+	err := c.reset()
+	if err != nil {
+		return nil, err
+	}
+	msgs, err := c.currentConsumer.FetchBytes(maxBytes, opts...)
+	if err != nil {
+		return nil, err
+	}
+	c.runningFetch = msgs.(*fetchResult)
+	return msgs, nil
+}
+
+// FetchNoWait is used to retrieve up to a provided number of messages
+// from a stream. This method will always send a single request and
+// immediately return up to the provided number of messages that are
+// available at the time of the request; it will not wait for more
+// messages to arrive.
+//
+// It is not efficient to use FetchNoWait on an ordered consumer, as it will
+// reset the consumer for each subsequent Fetch call.
+// Consider using [Consumer.Consume] or [Consumer.Messages] instead.
+func (c *orderedConsumer) FetchNoWait(batch int) (MessageBatch, error) {
+	if c.consumerType == consumerTypeConsume {
+		return nil, ErrOrderConsumerUsedAsConsume
+	}
+	if c.runningFetch != nil && !c.runningFetch.done {
+		return nil, ErrOrderedConsumerConcurrentRequests
+	}
+	c.consumerType = consumerTypeFetch
+	sub := orderedSubscription{
+		consumer: c,
+		done:     make(chan struct{}),
+	}
+	c.subscription = &sub
+	err := c.reset()
+	if err != nil {
+		return nil, err
+	}
+	return c.currentConsumer.FetchNoWait(batch)
+}
+
+// Next is used to retrieve the next message from the stream. This
+// method will block until the message is retrieved or timeout is
+// reached.
+//
+// It is not efficient to use Next on an ordered consumer, as it will
+// reset the consumer for each subsequent Fetch call.
+// Consider using [Consumer.Consume] or [Consumer.Messages] instead.
+func (c *orderedConsumer) Next(opts ...FetchOpt) (Msg, error) {
+	res, err := c.Fetch(1, opts...)
+ if err != nil { + return nil, err + } + msg := <-res.Messages() + if msg != nil { + return msg, nil + } + if res.Error() == nil { + return nil, nats.ErrTimeout + } + return nil, res.Error() +} + +func serialNumberFromConsumer(name string) int { + if len(name) == 0 { + return 0 + } + parts := strings.Split(name, "_") + if len(parts) < 2 { + return 0 + } + serial, err := strconv.Atoi(parts[len(parts)-1]) + if err != nil { + return 0 + } + return serial +} + +func (c *orderedConsumer) reset() error { + c.Lock() + defer c.Unlock() + defer c.resetInProgress.Store(0) + if c.currentConsumer != nil { + c.currentConsumer.Lock() + if c.currentSub != nil { + c.currentSub.Stop() + } + consName := c.currentConsumer.CachedInfo().Name + c.currentConsumer.Unlock() + go func() { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + _ = c.js.DeleteConsumer(ctx, c.stream, consName) + cancel() + }() + } + + c.cursor.deliverSeq = 0 + consumerConfig := c.getConsumerConfig() + + var err error + var cons Consumer + + backoffOpts := backoffOpts{ + attempts: c.cfg.MaxResetAttempts, + initialInterval: time.Second, + factor: 2, + maxInterval: 10 * time.Second, + cancel: c.subscription.done, + } + err = retryWithBackoff(func(attempt int) (bool, error) { + isClosed := c.subscription.closed.Load() == 1 + if isClosed { + return false, errOrderedConsumerClosed + } + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + cons, err = c.js.CreateOrUpdateConsumer(ctx, c.stream, *consumerConfig) + if err != nil { + return true, err + } + return false, nil + }, backoffOpts) + if err != nil { + return err + } + c.currentConsumer = cons.(*pullConsumer) + return nil +} + +func (c *orderedConsumer) getConsumerConfig() *ConsumerConfig { + c.serial++ + var nextSeq uint64 + + // if stream sequence is not initialized, no message was consumed yet + // therefore, start from the beginning (either from 1 or from the provided sequence) + if c.cursor.streamSeq == 0 { + if c.cfg.OptStartSeq != 0 { + nextSeq = c.cfg.OptStartSeq + } else { + nextSeq = 1 + } + } else { + // otherwise, start from the next sequence + nextSeq = c.cursor.streamSeq + 1 + } + + if c.cfg.MaxResetAttempts == 0 { + c.cfg.MaxResetAttempts = -1 + } + name := fmt.Sprintf("%s_%d", c.namePrefix, c.serial) + cfg := &ConsumerConfig{ + Name: name, + DeliverPolicy: DeliverByStartSequencePolicy, + OptStartSeq: nextSeq, + AckPolicy: AckNonePolicy, + InactiveThreshold: 5 * time.Minute, + Replicas: 1, + HeadersOnly: c.cfg.HeadersOnly, + MemoryStorage: true, + Metadata: c.cfg.Metadata, + } + if len(c.cfg.FilterSubjects) == 1 { + cfg.FilterSubject = c.cfg.FilterSubjects[0] + } else { + cfg.FilterSubjects = c.cfg.FilterSubjects + } + if c.cfg.InactiveThreshold != 0 { + cfg.InactiveThreshold = c.cfg.InactiveThreshold + } + + // if the cursor is not yet set, use the provided deliver policy + if c.cursor.streamSeq != 0 { + return cfg + } + + // initial request, some options may be modified at that point + cfg.DeliverPolicy = c.cfg.DeliverPolicy + if c.cfg.DeliverPolicy == DeliverLastPerSubjectPolicy || + c.cfg.DeliverPolicy == DeliverLastPolicy || + c.cfg.DeliverPolicy == DeliverNewPolicy || + c.cfg.DeliverPolicy == DeliverAllPolicy { + + cfg.OptStartSeq = 0 + } else if c.cfg.DeliverPolicy == DeliverByStartTimePolicy { + cfg.OptStartSeq = 0 + cfg.OptStartTime = c.cfg.OptStartTime + } else { + cfg.OptStartSeq = c.cfg.OptStartSeq + } + + if cfg.DeliverPolicy == DeliverLastPerSubjectPolicy && len(c.cfg.FilterSubjects) == 0 { + 
cfg.FilterSubjects = []string{">"} + } + + return cfg +} + +func consumeStopAfterNotify(numMsgs int, msgsLeftAfterStop chan int) PullConsumeOpt { + return pullOptFunc(func(opts *consumeOpts) error { + opts.StopAfter = numMsgs + opts.stopAfterMsgsLeft = msgsLeftAfterStop + return nil + }) +} + +func messagesStopAfterNotify(numMsgs int, msgsLeftAfterStop chan int) PullMessagesOpt { + return pullOptFunc(func(opts *consumeOpts) error { + opts.StopAfter = numMsgs + opts.stopAfterMsgsLeft = msgsLeftAfterStop + return nil + }) +} + +func consumeReconnectNotify() PullConsumeOpt { + return pullOptFunc(func(opts *consumeOpts) error { + opts.notifyOnReconnect = true + return nil + }) +} + +func messagesReconnectNotify() PullMessagesOpt { + return pullOptFunc(func(opts *consumeOpts) error { + opts.notifyOnReconnect = true + return nil + }) +} + +// Info returns information about the ordered consumer. +// Note that this method will fetch the latest instance of the +// consumer from the server, which can be deleted by the library at any time. +func (c *orderedConsumer) Info(ctx context.Context) (*ConsumerInfo, error) { + c.Lock() + defer c.Unlock() + if c.currentConsumer == nil { + return nil, ErrOrderedConsumerNotCreated + } + infoSubject := fmt.Sprintf(apiConsumerInfoT, c.stream, c.currentConsumer.name) + var resp consumerInfoResponse + + if _, err := c.js.apiRequestJSON(ctx, infoSubject, &resp); err != nil { + return nil, err + } + if resp.Error != nil { + if resp.Error.ErrorCode == JSErrCodeConsumerNotFound { + return nil, ErrConsumerNotFound + } + return nil, resp.Error + } + if resp.Error == nil && resp.ConsumerInfo == nil { + return nil, ErrConsumerNotFound + } + + c.currentConsumer.info = resp.ConsumerInfo + return resp.ConsumerInfo, nil +} + +// CachedInfo returns cached information about the consumer currently +// used by the ordered consumer. Cached info will be updated on every call +// to [Consumer.Info] or on consumer reset. 
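+//
+// A small sketch of the Info/CachedInfo pairing (cons is a hypothetical
+// ordered consumer handle; names are illustrative):
+//
+//	info, err := cons.Info(ctx) // server round trip, refreshes the cache
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	cached := cons.CachedInfo() // served from the cache, may be stale
+//	fmt.Println(info.Name == cached.Name)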
+func (c *orderedConsumer) CachedInfo() *ConsumerInfo { + c.Lock() + defer c.Unlock() + if c.currentConsumer == nil { + return nil + } + return c.currentConsumer.info +} + +type backoffOpts struct { + // total retry attempts + // -1 for unlimited + attempts int + // initial interval after which first retry will be performed + // defaults to 1s + initialInterval time.Duration + // determines whether first function execution should be performed immediately + disableInitialExecution bool + // multiplier on each attempt + // defaults to 2 + factor float64 + // max interval between retries + // after reaching this value, all subsequent + // retries will be performed with this interval + // defaults to 1 minute + maxInterval time.Duration + // custom backoff intervals + // if set, overrides all other options except attempts + // if attempts are set, then the last interval will be used + // for all subsequent retries after reaching the limit + customBackoff []time.Duration + // cancel channel + // if set, retry will be canceled when this channel is closed + cancel <-chan struct{} +} + +func retryWithBackoff(f func(int) (bool, error), opts backoffOpts) error { + var err error + var shouldContinue bool + // if custom backoff is set, use it instead of other options + if len(opts.customBackoff) > 0 { + if opts.attempts != 0 { + return errors.New("cannot use custom backoff intervals when attempts are set") + } + for i, interval := range opts.customBackoff { + select { + case <-opts.cancel: + return nil + case <-time.After(interval): + } + shouldContinue, err = f(i) + if !shouldContinue { + return err + } + } + return err + } + + // set default options + if opts.initialInterval == 0 { + opts.initialInterval = 1 * time.Second + } + if opts.factor == 0 { + opts.factor = 2 + } + if opts.maxInterval == 0 { + opts.maxInterval = 1 * time.Minute + } + if opts.attempts == 0 { + return errors.New("retry attempts have to be set when not using custom backoff intervals") + } + interval := opts.initialInterval + for i := 0; ; i++ { + if i == 0 && opts.disableInitialExecution { + time.Sleep(interval) + continue + } + shouldContinue, err = f(i) + if !shouldContinue { + return err + } + if opts.attempts > 0 && i >= opts.attempts-1 { + break + } + select { + case <-opts.cancel: + return nil + case <-time.After(interval): + } + interval = time.Duration(float64(interval) * opts.factor) + if interval >= opts.maxInterval { + interval = opts.maxInterval + } + } + return err +} diff --git a/vendor/github.com/nats-io/nats.go/jetstream/publish.go b/vendor/github.com/nats-io/nats.go/jetstream/publish.go new file mode 100644 index 000000000..2b8513143 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/jetstream/publish.go @@ -0,0 +1,661 @@ +// Copyright 2022-2025 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jetstream + +import ( + "context" + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + "math/rand" + "strconv" + "strings" + "sync" + "time" + + "github.com/nats-io/nats.go" + "github.com/nats-io/nuid" +) + +type ( + asyncPublisherOpts struct { + // For async publish error handling. + aecb MsgErrHandler + // Max async pub ack in flight + maxpa int + // ackTimeout is the max time to wait for an ack. + ackTimeout time.Duration + } + + // PublishOpt are the options that can be passed to Publish methods. + PublishOpt func(*pubOpts) error + + pubOpts struct { + id string + lastMsgID string // Expected last msgId + stream string // Expected stream name + lastSeq *uint64 // Expected last sequence + lastSubjectSeq *uint64 // Expected last sequence per subject + ttl time.Duration // Message TTL + + // Publish retries for NoResponders err. + retryWait time.Duration // Retry wait between attempts + retryAttempts int // Retry attempts + + // stallWait is the max wait of a async pub ack. + stallWait time.Duration + + // internal option to re-use existing paf in case of retry. + pafRetry *pubAckFuture + } + + // PubAckFuture is a future for a PubAck. + // It can be used to wait for a PubAck or an error after an async publish. + PubAckFuture interface { + // Ok returns a receive only channel that can be used to get a PubAck. + Ok() <-chan *PubAck + + // Err returns a receive only channel that can be used to get the error from an async publish. + Err() <-chan error + + // Msg returns the message that was sent to the server. + Msg() *nats.Msg + } + + pubAckFuture struct { + jsClient *jetStreamClient + msg *nats.Msg + retries int + maxRetries int + retryWait time.Duration + ack *PubAck + err error + errCh chan error + doneCh chan *PubAck + reply string + timeout *time.Timer + } + + jetStreamClient struct { + asyncPublishContext + asyncPublisherOpts + } + + // MsgErrHandler is used to process asynchronous errors from JetStream + // PublishAsync. It will return the original message sent to the server for + // possible retransmitting and the error encountered. + MsgErrHandler func(JetStream, *nats.Msg, error) + + asyncPublishContext struct { + sync.RWMutex + replyPrefix string + replySub *nats.Subscription + acks map[string]*pubAckFuture + stallCh chan struct{} + doneCh chan struct{} + rr *rand.Rand + // channel to signal when server is disconnected or conn is closed + connStatusCh chan (nats.Status) + } + + pubAckResponse struct { + apiResponse + *PubAck + } + + // PubAck is an ack received after successfully publishing a message. + PubAck struct { + // Stream is the stream name the message was published to. + Stream string `json:"stream"` + + // Sequence is the stream sequence number of the message. + Sequence uint64 `json:"seq"` + + // Duplicate indicates whether the message was a duplicate. + // Duplicate can be detected using the [MsgIDHeader] and [StreamConfig.Duplicates]. + Duplicate bool `json:"duplicate,omitempty"` + + // Domain is the domain the message was published to. + Domain string `json:"domain,omitempty"` + } +) + +const ( + // Default time wait between retries on Publish if err is ErrNoResponders. + DefaultPubRetryWait = 250 * time.Millisecond + + // Default number of retries + DefaultPubRetryAttempts = 2 +) + +const ( + statusHdr = "Status" + + rdigits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + base = 62 +) + +// Publish performs a synchronous publish to a stream and waits for ack +// from server. 
It accepts subject name (which must be bound to a stream) +// and message payload. +func (js *jetStream) Publish(ctx context.Context, subj string, data []byte, opts ...PublishOpt) (*PubAck, error) { + return js.PublishMsg(ctx, &nats.Msg{Subject: subj, Data: data}, opts...) +} + +// PublishMsg performs a synchronous publish to a stream and waits for +// ack from server. It accepts subject name (which must be bound to a +// stream) and nats.Message. +func (js *jetStream) PublishMsg(ctx context.Context, m *nats.Msg, opts ...PublishOpt) (*PubAck, error) { + ctx, cancel := js.wrapContextWithoutDeadline(ctx) + if cancel != nil { + defer cancel() + } + o := pubOpts{ + retryWait: DefaultPubRetryWait, + retryAttempts: DefaultPubRetryAttempts, + } + if len(opts) > 0 { + if m.Header == nil { + m.Header = nats.Header{} + } + for _, opt := range opts { + if err := opt(&o); err != nil { + return nil, err + } + } + } + if o.stallWait > 0 { + return nil, fmt.Errorf("%w: stall wait cannot be set to sync publish", ErrInvalidOption) + } + + if o.id != "" { + m.Header.Set(MsgIDHeader, o.id) + } + if o.lastMsgID != "" { + m.Header.Set(ExpectedLastMsgIDHeader, o.lastMsgID) + } + if o.stream != "" { + m.Header.Set(ExpectedStreamHeader, o.stream) + } + if o.lastSeq != nil { + m.Header.Set(ExpectedLastSeqHeader, strconv.FormatUint(*o.lastSeq, 10)) + } + if o.lastSubjectSeq != nil { + m.Header.Set(ExpectedLastSubjSeqHeader, strconv.FormatUint(*o.lastSubjectSeq, 10)) + } + if o.ttl > 0 { + m.Header.Set(MsgTTLHeader, o.ttl.String()) + } + + var resp *nats.Msg + var err error + + resp, err = js.conn.RequestMsgWithContext(ctx, m) + + if err != nil { + for r := 0; errors.Is(err, nats.ErrNoResponders) && (r < o.retryAttempts || o.retryAttempts < 0); r++ { + // To protect against small blips in leadership changes etc, if we get a no responders here retry. + select { + case <-ctx.Done(): + case <-time.After(o.retryWait): + } + resp, err = js.conn.RequestMsgWithContext(ctx, m) + } + if err != nil { + if errors.Is(err, nats.ErrNoResponders) { + return nil, ErrNoStreamResponse + } + return nil, err + } + } + + var ackResp pubAckResponse + if err := json.Unmarshal(resp.Data, &ackResp); err != nil { + return nil, ErrInvalidJSAck + } + if ackResp.Error != nil { + return nil, fmt.Errorf("nats: %w", ackResp.Error) + } + if ackResp.PubAck == nil || ackResp.PubAck.Stream == "" { + return nil, ErrInvalidJSAck + } + return ackResp.PubAck, nil +} + +// PublishAsync performs an asynchronous publish to a stream and returns +// [PubAckFuture] interface. It accepts subject name (which must be bound +// to a stream) and message payload. +func (js *jetStream) PublishAsync(subj string, data []byte, opts ...PublishOpt) (PubAckFuture, error) { + return js.PublishMsgAsync(&nats.Msg{Subject: subj, Data: data}, opts...) +} + +// PublishMsgAsync performs an asynchronous publish to a stream and +// returns [PubAckFuture] interface. It accepts subject name (which must +// be bound to a stream) and nats.Message. 
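+//
+// A minimal sketch of the async publish flow (js is a hypothetical
+// JetStream handle; subject and payload are illustrative):
+//
+//	ackF, err := js.PublishAsync("ORDERS.new", []byte("hello"))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	select {
+//	case ack := <-ackF.Ok():
+//		fmt.Println("stored at stream sequence", ack.Sequence)
+//	case err := <-ackF.Err():
+//		log.Println("async publish failed:", err)
+//	}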
+func (js *jetStream) PublishMsgAsync(m *nats.Msg, opts ...PublishOpt) (PubAckFuture, error) { + o := pubOpts{ + retryWait: DefaultPubRetryWait, + retryAttempts: DefaultPubRetryAttempts, + } + if len(opts) > 0 { + if m.Header == nil { + m.Header = nats.Header{} + } + for _, opt := range opts { + if err := opt(&o); err != nil { + return nil, err + } + } + } + defaultStallWait := 200 * time.Millisecond + + stallWait := defaultStallWait + if o.stallWait > 0 { + stallWait = o.stallWait + } + + if o.id != "" { + m.Header.Set(MsgIDHeader, o.id) + } + if o.lastMsgID != "" { + m.Header.Set(ExpectedLastMsgIDHeader, o.lastMsgID) + } + if o.stream != "" { + m.Header.Set(ExpectedStreamHeader, o.stream) + } + if o.lastSeq != nil { + m.Header.Set(ExpectedLastSeqHeader, strconv.FormatUint(*o.lastSeq, 10)) + } + if o.lastSubjectSeq != nil { + m.Header.Set(ExpectedLastSubjSeqHeader, strconv.FormatUint(*o.lastSubjectSeq, 10)) + } + if o.ttl > 0 { + m.Header.Set(MsgTTLHeader, o.ttl.String()) + } + + paf := o.pafRetry + if paf == nil && m.Reply != "" { + return nil, ErrAsyncPublishReplySubjectSet + } + + var id string + var reply string + + // register new paf if not retrying + if paf == nil { + var err error + reply, err = js.newAsyncReply() + if err != nil { + return nil, fmt.Errorf("nats: error creating async reply handler: %s", err) + } + id = reply[js.opts.replyPrefixLen:] + paf = &pubAckFuture{msg: m, jsClient: js.publisher, maxRetries: o.retryAttempts, retryWait: o.retryWait, reply: reply} + numPending, maxPending := js.registerPAF(id, paf) + + if maxPending > 0 && numPending > maxPending { + select { + case <-js.asyncStall(): + case <-time.After(stallWait): + js.clearPAF(id) + return nil, ErrTooManyStalledMsgs + } + } + if js.publisher.ackTimeout > 0 { + paf.timeout = time.AfterFunc(js.publisher.ackTimeout, func() { + js.publisher.Lock() + defer js.publisher.Unlock() + + if _, ok := js.publisher.acks[id]; !ok { + // paf has already been resolved + // while waiting for the lock + return + } + + // ack timed out, remove from pending acks + delete(js.publisher.acks, id) + + // check on anyone stalled and waiting. + if js.publisher.stallCh != nil && len(js.publisher.acks) < js.publisher.maxpa { + close(js.publisher.stallCh) + js.publisher.stallCh = nil + } + + // send error to user + paf.err = ErrAsyncPublishTimeout + if paf.errCh != nil { + paf.errCh <- paf.err + } + + // call error callback if set + if js.publisher.asyncPublisherOpts.aecb != nil { + js.publisher.asyncPublisherOpts.aecb(js, paf.msg, ErrAsyncPublishTimeout) + } + + // check on anyone one waiting on done status. + if js.publisher.doneCh != nil && len(js.publisher.acks) == 0 { + close(js.publisher.doneCh) + js.publisher.doneCh = nil + } + }) + } + } else { + // when retrying, get the ID from existing reply subject + reply = paf.reply + if paf.timeout != nil { + paf.timeout.Reset(js.publisher.ackTimeout) + } + id = reply[js.opts.replyPrefixLen:] + } + + pubMsg := &nats.Msg{ + Subject: m.Subject, + Reply: reply, + Data: m.Data, + Header: m.Header, + } + if err := js.conn.PublishMsg(pubMsg); err != nil { + js.clearPAF(id) + return nil, err + } + + return paf, nil +} + +// For quick token lookup etc. +const ( + aReplyTokensize = 6 +) + +func (js *jetStream) newAsyncReply() (string, error) { + js.publisher.Lock() + if js.publisher.replySub == nil { + // Create our wildcard reply subject. 
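+		// (The prefix is derived from a SHA-256 hash of a fresh NUID,
+		// re-encoded into base-62 tokens, so separate contexts get distinct
+		// reply subjects; each in-flight publish later appends its own
+		// random token under this prefix.)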
+ sha := sha256.New() + sha.Write([]byte(nuid.Next())) + b := sha.Sum(nil) + for i := 0; i < aReplyTokensize; i++ { + b[i] = rdigits[int(b[i]%base)] + } + js.publisher.replyPrefix = fmt.Sprintf("%s%s.", js.opts.replyPrefix, b[:aReplyTokensize]) + sub, err := js.conn.Subscribe(fmt.Sprintf("%s*", js.publisher.replyPrefix), js.handleAsyncReply) + if err != nil { + js.publisher.Unlock() + return "", err + } + js.publisher.replySub = sub + js.publisher.rr = rand.New(rand.NewSource(time.Now().UnixNano())) + } + if js.publisher.connStatusCh == nil { + js.publisher.connStatusCh = js.conn.StatusChanged(nats.RECONNECTING, nats.CLOSED) + go js.resetPendingAcksOnReconnect() + } + var sb strings.Builder + sb.WriteString(js.publisher.replyPrefix) + for { + rn := js.publisher.rr.Int63() + var b [aReplyTokensize]byte + for i, l := 0, rn; i < len(b); i++ { + b[i] = rdigits[l%base] + l /= base + } + if _, ok := js.publisher.acks[string(b[:])]; ok { + continue + } + sb.Write(b[:]) + break + } + + js.publisher.Unlock() + return sb.String(), nil +} + +// Handle an async reply from PublishAsync. +func (js *jetStream) handleAsyncReply(m *nats.Msg) { + if len(m.Subject) <= js.opts.replyPrefixLen { + return + } + id := m.Subject[js.opts.replyPrefixLen:] + + js.publisher.Lock() + + paf := js.getPAF(id) + if paf == nil { + js.publisher.Unlock() + return + } + + closeStc := func() { + // Check on anyone stalled and waiting. + if js.publisher.stallCh != nil && len(js.publisher.acks) < js.publisher.maxpa { + close(js.publisher.stallCh) + js.publisher.stallCh = nil + } + } + + closeDchFn := func() func() { + var dch chan struct{} + // Check on anyone one waiting on done status. + if js.publisher.doneCh != nil && len(js.publisher.acks) == 0 { + dch = js.publisher.doneCh + js.publisher.doneCh = nil + } + // Return function to close done channel which + // should be deferred so that error is processed and + // can be checked. + return func() { + if dch != nil { + close(dch) + } + } + } + + doErr := func(err error) { + paf.err = err + if paf.errCh != nil { + paf.errCh <- paf.err + } + cb := js.publisher.asyncPublisherOpts.aecb + js.publisher.Unlock() + if cb != nil { + cb(js, paf.msg, err) + } + } + + if paf.timeout != nil { + paf.timeout.Stop() + } + + // Process no responders etc. + if len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders { + if paf.retries < paf.maxRetries { + paf.retries++ + time.AfterFunc(paf.retryWait, func() { + js.publisher.Lock() + paf := js.getPAF(id) + js.publisher.Unlock() + if paf == nil { + return + } + _, err := js.PublishMsgAsync(paf.msg, func(po *pubOpts) error { + po.pafRetry = paf + return nil + }) + if err != nil { + js.publisher.Lock() + doErr(err) + } + }) + js.publisher.Unlock() + return + } + delete(js.publisher.acks, id) + closeStc() + defer closeDchFn()() + doErr(ErrNoStreamResponse) + return + } + + // Remove + delete(js.publisher.acks, id) + closeStc() + defer closeDchFn()() + + var pa pubAckResponse + if err := json.Unmarshal(m.Data, &pa); err != nil { + doErr(ErrInvalidJSAck) + return + } + if pa.Error != nil { + doErr(pa.Error) + return + } + if pa.PubAck == nil || pa.PubAck.Stream == "" { + doErr(ErrInvalidJSAck) + return + } + + // So here we have received a proper puback. 
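+	// Store it on the future and, if the caller is already waiting on the
+	// Ok channel, deliver the ack there as well.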
+ paf.ack = pa.PubAck + if paf.doneCh != nil { + paf.doneCh <- paf.ack + } + js.publisher.Unlock() +} + +func (js *jetStream) resetPendingAcksOnReconnect() { + js.publisher.Lock() + connStatusCh := js.publisher.connStatusCh + js.publisher.Unlock() + for { + newStatus, ok := <-connStatusCh + if !ok || newStatus == nats.CLOSED { + return + } + js.publisher.Lock() + errCb := js.publisher.asyncPublisherOpts.aecb + for id, paf := range js.publisher.acks { + paf.err = nats.ErrDisconnected + if paf.errCh != nil { + paf.errCh <- paf.err + } + if errCb != nil { + defer errCb(js, paf.msg, nats.ErrDisconnected) + } + delete(js.publisher.acks, id) + } + if js.publisher.doneCh != nil { + close(js.publisher.doneCh) + js.publisher.doneCh = nil + } + js.publisher.Unlock() + } +} + +// registerPAF will register for a PubAckFuture. +func (js *jetStream) registerPAF(id string, paf *pubAckFuture) (int, int) { + js.publisher.Lock() + if js.publisher.acks == nil { + js.publisher.acks = make(map[string]*pubAckFuture) + } + js.publisher.acks[id] = paf + np := len(js.publisher.acks) + maxpa := js.publisher.asyncPublisherOpts.maxpa + js.publisher.Unlock() + return np, maxpa +} + +// Lock should be held. +func (js *jetStream) getPAF(id string) *pubAckFuture { + if js.publisher.acks == nil { + return nil + } + return js.publisher.acks[id] +} + +// clearPAF will remove a PubAckFuture that was registered. +func (js *jetStream) clearPAF(id string) { + js.publisher.Lock() + delete(js.publisher.acks, id) + js.publisher.Unlock() +} + +func (js *jetStream) asyncStall() <-chan struct{} { + js.publisher.Lock() + if js.publisher.stallCh == nil { + js.publisher.stallCh = make(chan struct{}) + } + stc := js.publisher.stallCh + js.publisher.Unlock() + return stc +} + +func (paf *pubAckFuture) Ok() <-chan *PubAck { + paf.jsClient.Lock() + defer paf.jsClient.Unlock() + + if paf.doneCh == nil { + paf.doneCh = make(chan *PubAck, 1) + if paf.ack != nil { + paf.doneCh <- paf.ack + } + } + + return paf.doneCh +} + +func (paf *pubAckFuture) Err() <-chan error { + paf.jsClient.Lock() + defer paf.jsClient.Unlock() + + if paf.errCh == nil { + paf.errCh = make(chan error, 1) + if paf.err != nil { + paf.errCh <- paf.err + } + } + + return paf.errCh +} + +func (paf *pubAckFuture) Msg() *nats.Msg { + paf.jsClient.RLock() + defer paf.jsClient.RUnlock() + return paf.msg +} + +// PublishAsyncPending returns the number of async publishes outstanding +// for this context. +func (js *jetStream) PublishAsyncPending() int { + js.publisher.RLock() + defer js.publisher.RUnlock() + return len(js.publisher.acks) +} + +// PublishAsyncComplete returns a channel that will be closed when all +// outstanding asynchronously published messages are acknowledged by the +// server. +func (js *jetStream) PublishAsyncComplete() <-chan struct{} { + js.publisher.Lock() + defer js.publisher.Unlock() + if js.publisher.doneCh == nil { + js.publisher.doneCh = make(chan struct{}) + } + dch := js.publisher.doneCh + if len(js.publisher.acks) == 0 { + close(js.publisher.doneCh) + js.publisher.doneCh = nil + } + return dch +} diff --git a/vendor/github.com/nats-io/nats.go/jetstream/pull.go b/vendor/github.com/nats-io/nats.go/jetstream/pull.go new file mode 100644 index 000000000..2b37397d9 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/jetstream/pull.go @@ -0,0 +1,1127 @@ +// Copyright 2022-2025 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jetstream
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"slices"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/nats-io/nats.go"
+	"github.com/nats-io/nats.go/internal/syncx"
+	"github.com/nats-io/nuid"
+)
+
+type (
+	// MessagesContext supports iterating over messages on a stream.
+	// It is returned by [Consumer.Messages] method.
+	MessagesContext interface {
+		// Next retrieves next message on a stream. It will block until the next
+		// message is available. If the context is canceled, Next will return
+		// ErrMsgIteratorClosed error.
+		Next() (Msg, error)
+
+		// Stop unsubscribes from the stream and cancels subscription. Calling
+		// Next after calling Stop will return ErrMsgIteratorClosed error.
+		// All messages that are already in the buffer are discarded.
+		Stop()
+
+		// Drain unsubscribes from the stream and cancels subscription. All
+		// messages that are already in the buffer will be available on
+		// subsequent calls to Next. After the buffer is drained, Next will
+		// return ErrMsgIteratorClosed error.
+		Drain()
+	}
+
+	// ConsumeContext supports processing incoming messages from a stream.
+	// It is returned by [Consumer.Consume] method.
+	ConsumeContext interface {
+		// Stop unsubscribes from the stream and cancels subscription.
+		// No more messages will be received after calling this method.
+		// All messages that are already in the buffer are discarded.
+		Stop()
+
+		// Drain unsubscribes from the stream and cancels subscription.
+		// All messages that are already in the buffer will be processed in callback function.
+		Drain()
+
+		// Closed returns a channel that is closed when the consuming is
+		// fully stopped/drained. When the channel is closed, no more messages
+		// will be received and processing is complete.
+		Closed() <-chan struct{}
+	}
+
+	// MessageHandler is a handler function used as callback in [Consume].
+	MessageHandler func(msg Msg)
+
+	// PullConsumeOpt represents additional options used in [Consume] for pull consumers.
+	PullConsumeOpt interface {
+		configureConsume(*consumeOpts) error
+	}
+
+	// PullMessagesOpt represents additional options used in [Messages] for pull consumers.
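+	//
+	// A hypothetical sketch of passing such options (the option
+	// constructors are defined elsewhere in this package):
+	//
+	//	it, err := cons.Messages(
+	//		jetstream.PullMaxMessages(100),       // buffer up to 100 messages
+	//		jetstream.PullExpiry(20*time.Second), // each pull request expires after 20s
+	//	)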
+ PullMessagesOpt interface { + configureMessages(*consumeOpts) error + } + + pullConsumer struct { + sync.Mutex + js *jetStream + stream string + durable bool + name string + info *ConsumerInfo + subs syncx.Map[string, *pullSubscription] + pinID string + } + + pullRequest struct { + Expires time.Duration `json:"expires,omitempty"` + Batch int `json:"batch,omitempty"` + MaxBytes int `json:"max_bytes,omitempty"` + NoWait bool `json:"no_wait,omitempty"` + Heartbeat time.Duration `json:"idle_heartbeat,omitempty"` + MinPending int64 `json:"min_pending,omitempty"` + MinAckPending int64 `json:"min_ack_pending,omitempty"` + PinID string `json:"id,omitempty"` + Group string `json:"group,omitempty"` + } + + consumeOpts struct { + Expires time.Duration + MaxMessages int + MaxBytes int + LimitSize bool + MinPending int64 + MinAckPending int64 + Group string + Heartbeat time.Duration + ErrHandler ConsumeErrHandlerFunc + ReportMissingHeartbeats bool + ThresholdMessages int + ThresholdBytes int + StopAfter int + stopAfterMsgsLeft chan int + notifyOnReconnect bool + } + + ConsumeErrHandlerFunc func(consumeCtx ConsumeContext, err error) + + pullSubscription struct { + sync.Mutex + id string + consumer *pullConsumer + subscription *nats.Subscription + msgs chan *nats.Msg + errs chan error + pending pendingMsgs + hbMonitor *hbMonitor + fetchInProgress atomic.Uint32 + closed atomic.Uint32 + draining atomic.Uint32 + done chan struct{} + connStatusChanged chan nats.Status + fetchNext chan *pullRequest + consumeOpts *consumeOpts + delivered int + closedCh chan struct{} + } + + pendingMsgs struct { + msgCount int + byteCount int + } + + MessageBatch interface { + Messages() <-chan Msg + Error() error + } + + fetchResult struct { + sync.Mutex + msgs chan Msg + err error + done bool + sseq uint64 + } + + FetchOpt func(*pullRequest) error + + hbMonitor struct { + timer *time.Timer + sync.Mutex + } +) + +const ( + DefaultMaxMessages = 500 + DefaultExpires = 30 * time.Second + defaultBatchMaxBytesOnly = 1_000_000 + unset = -1 +) + +func min(x, y int) int { + if x < y { + return x + } + return y +} + +// Consume can be used to continuously receive messages and handle them +// with the provided callback function. Consume cannot be used concurrently +// when using ordered consumer. +// +// See [Consumer.Consume] for more details. +func (p *pullConsumer) Consume(handler MessageHandler, opts ...PullConsumeOpt) (ConsumeContext, error) { + if handler == nil { + return nil, ErrHandlerRequired + } + consumeOpts, err := parseConsumeOpts(false, opts...) 
+ if err != nil { + return nil, fmt.Errorf("%w: %s", ErrInvalidOption, err) + } + + if len(p.info.Config.PriorityGroups) != 0 { + if consumeOpts.Group == "" { + return nil, fmt.Errorf("%w: %s", ErrInvalidOption, "priority group is required for priority consumer") + } + + if !slices.Contains(p.info.Config.PriorityGroups, consumeOpts.Group) { + return nil, fmt.Errorf("%w: %s", ErrInvalidOption, "invalid priority group") + } + } else if consumeOpts.Group != "" { + return nil, fmt.Errorf("%w: %s", ErrInvalidOption, "priority group is not supported for this consumer") + } + + p.Lock() + + subject := p.js.apiSubject(fmt.Sprintf(apiRequestNextT, p.stream, p.name)) + + consumeID := nuid.Next() + sub := &pullSubscription{ + id: consumeID, + consumer: p, + errs: make(chan error, 10), + done: make(chan struct{}, 1), + fetchNext: make(chan *pullRequest, 1), + consumeOpts: consumeOpts, + } + sub.connStatusChanged = p.js.conn.StatusChanged(nats.CONNECTED, nats.RECONNECTING) + + sub.hbMonitor = sub.scheduleHeartbeatCheck(consumeOpts.Heartbeat) + + p.subs.Store(sub.id, sub) + p.Unlock() + + internalHandler := func(msg *nats.Msg) { + if sub.hbMonitor != nil { + sub.hbMonitor.Stop() + } + userMsg, msgErr := checkMsg(msg) + if !userMsg && msgErr == nil { + if sub.hbMonitor != nil { + sub.hbMonitor.Reset(2 * consumeOpts.Heartbeat) + } + return + } + defer func() { + sub.Lock() + sub.checkPending() + if sub.hbMonitor != nil { + sub.hbMonitor.Reset(2 * consumeOpts.Heartbeat) + } + sub.Unlock() + }() + if !userMsg { + // heartbeat message + if msgErr == nil { + return + } + + sub.Lock() + err := sub.handleStatusMsg(msg, msgErr) + sub.Unlock() + + if err != nil { + if sub.closed.Load() == 1 { + return + } + if sub.consumeOpts.ErrHandler != nil { + sub.consumeOpts.ErrHandler(sub, err) + } + sub.Stop() + } + return + } + if pinId := msg.Header.Get("Nats-Pin-Id"); pinId != "" { + p.setPinID(pinId) + } + handler(p.js.toJSMsg(msg)) + sub.Lock() + sub.decrementPendingMsgs(msg) + sub.incrementDeliveredMsgs() + sub.Unlock() + + if sub.consumeOpts.StopAfter > 0 && sub.consumeOpts.StopAfter == sub.delivered { + sub.Stop() + } + } + inbox := p.js.conn.NewInbox() + sub.subscription, err = p.js.conn.Subscribe(inbox, internalHandler) + if err != nil { + return nil, err + } + sub.subscription.SetClosedHandler(func(sid string) func(string) { + return func(subject string) { + p.subs.Delete(sid) + sub.draining.CompareAndSwap(1, 0) + sub.Lock() + if sub.closedCh != nil { + close(sub.closedCh) + sub.closedCh = nil + } + sub.Unlock() + } + }(sub.id)) + + sub.Lock() + // initial pull + sub.resetPendingMsgs() + batchSize := sub.consumeOpts.MaxMessages + if sub.consumeOpts.StopAfter > 0 { + batchSize = min(batchSize, sub.consumeOpts.StopAfter-sub.delivered) + } + if err := sub.pull(&pullRequest{ + Expires: consumeOpts.Expires, + Batch: batchSize, + MaxBytes: consumeOpts.MaxBytes, + Heartbeat: consumeOpts.Heartbeat, + MinPending: consumeOpts.MinPending, + MinAckPending: consumeOpts.MinAckPending, + Group: consumeOpts.Group, + PinID: p.getPinID(), + }, subject); err != nil { + sub.errs <- err + } + sub.Unlock() + + go func() { + isConnected := true + for { + if sub.closed.Load() == 1 { + return + } + select { + case status, ok := <-sub.connStatusChanged: + if !ok { + continue + } + if status == nats.RECONNECTING { + if sub.hbMonitor != nil { + sub.hbMonitor.Stop() + } + isConnected = false + } + if status == nats.CONNECTED { + sub.Lock() + if !isConnected { + isConnected = true + if sub.consumeOpts.notifyOnReconnect { + sub.errs <- 
errConnected + } + + sub.fetchNext <- &pullRequest{ + Expires: sub.consumeOpts.Expires, + Batch: sub.consumeOpts.MaxMessages, + MaxBytes: sub.consumeOpts.MaxBytes, + Heartbeat: sub.consumeOpts.Heartbeat, + MinPending: sub.consumeOpts.MinPending, + MinAckPending: sub.consumeOpts.MinAckPending, + Group: sub.consumeOpts.Group, + PinID: p.getPinID(), + } + if sub.hbMonitor != nil { + sub.hbMonitor.Reset(2 * sub.consumeOpts.Heartbeat) + } + sub.resetPendingMsgs() + } + sub.Unlock() + } + case err := <-sub.errs: + sub.Lock() + if sub.consumeOpts.ErrHandler != nil { + sub.consumeOpts.ErrHandler(sub, err) + } + if errors.Is(err, ErrNoHeartbeat) { + batchSize := sub.consumeOpts.MaxMessages + if sub.consumeOpts.StopAfter > 0 { + batchSize = min(batchSize, sub.consumeOpts.StopAfter-sub.delivered) + } + sub.fetchNext <- &pullRequest{ + Expires: sub.consumeOpts.Expires, + Batch: batchSize, + MaxBytes: sub.consumeOpts.MaxBytes, + Heartbeat: sub.consumeOpts.Heartbeat, + MinPending: sub.consumeOpts.MinPending, + MinAckPending: sub.consumeOpts.MinAckPending, + Group: sub.consumeOpts.Group, + PinID: p.getPinID(), + } + if sub.hbMonitor != nil { + sub.hbMonitor.Reset(2 * sub.consumeOpts.Heartbeat) + } + sub.resetPendingMsgs() + } + sub.Unlock() + case <-sub.done: + return + } + } + }() + + go sub.pullMessages(subject) + + return sub, nil +} + +// resetPendingMsgs resets pending message count and byte count +// to the values set in consumeOpts +// lock should be held before calling this method +func (s *pullSubscription) resetPendingMsgs() { + s.pending.msgCount = s.consumeOpts.MaxMessages + s.pending.byteCount = s.consumeOpts.MaxBytes +} + +// decrementPendingMsgs decrements pending message count and byte count +// lock should be held before calling this method +func (s *pullSubscription) decrementPendingMsgs(msg *nats.Msg) { + s.pending.msgCount-- + if s.consumeOpts.MaxBytes != 0 && !s.consumeOpts.LimitSize { + s.pending.byteCount -= msg.Size() + } +} + +// incrementDeliveredMsgs increments delivered message count +// lock should be held before calling this method +func (s *pullSubscription) incrementDeliveredMsgs() { + s.delivered++ +} + +// checkPending verifies whether there are enough messages in +// the buffer to trigger a new pull request. 
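+// For example, with the default MaxMessages of 500 (threshold 250), a new
+// pull request for the missing messages is sent as soon as fewer than 250
+// messages remain outstanding (an illustrative reading of the accounting
+// below, assuming default options).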
+// lock should be held before calling this method +func (s *pullSubscription) checkPending() { + // check if we went below any threshold + // we don't want to track bytes threshold if either it's not set or we used + // PullMaxMessagesWithBytesLimit + if (s.pending.msgCount < s.consumeOpts.ThresholdMessages || + (s.pending.byteCount < s.consumeOpts.ThresholdBytes && s.consumeOpts.MaxBytes != 0 && !s.consumeOpts.LimitSize)) && + s.fetchInProgress.Load() == 0 { + + var batchSize, maxBytes int + batchSize = s.consumeOpts.MaxMessages - s.pending.msgCount + if s.consumeOpts.MaxBytes != 0 { + if s.consumeOpts.LimitSize { + maxBytes = s.consumeOpts.MaxBytes + } else { + maxBytes = s.consumeOpts.MaxBytes - s.pending.byteCount + // when working with max bytes only, always ask for full batch + batchSize = s.consumeOpts.MaxMessages + } + } + if s.consumeOpts.StopAfter > 0 { + batchSize = min(batchSize, s.consumeOpts.StopAfter-s.delivered-s.pending.msgCount) + } + if batchSize > 0 { + pinID := "" + if s.consumer != nil { + pinID = s.consumer.getPinID() + } + s.fetchNext <- &pullRequest{ + Expires: s.consumeOpts.Expires, + Batch: batchSize, + MaxBytes: maxBytes, + Heartbeat: s.consumeOpts.Heartbeat, + PinID: pinID, + Group: s.consumeOpts.Group, + MinPending: s.consumeOpts.MinPending, + MinAckPending: s.consumeOpts.MinAckPending, + } + + s.pending.msgCount = s.consumeOpts.MaxMessages + s.pending.byteCount = s.consumeOpts.MaxBytes + } + } +} + +// Messages returns MessagesContext, allowing continuously iterating +// over messages on a stream. Messages cannot be used concurrently +// when using ordered consumer. +// +// See [Consumer.Messages] for more details. +func (p *pullConsumer) Messages(opts ...PullMessagesOpt) (MessagesContext, error) { + consumeOpts, err := parseMessagesOpts(false, opts...) 
+ if err != nil { + return nil, fmt.Errorf("%w: %s", ErrInvalidOption, err) + } + + if len(p.info.Config.PriorityGroups) != 0 { + if consumeOpts.Group == "" { + return nil, fmt.Errorf("%w: %s", ErrInvalidOption, "priority group is required for priority consumer") + } + + if !slices.Contains(p.info.Config.PriorityGroups, consumeOpts.Group) { + return nil, fmt.Errorf("%w: %s", ErrInvalidOption, "invalid priority group") + } + } else if consumeOpts.Group != "" { + return nil, fmt.Errorf("%w: %s", ErrInvalidOption, "priority group is not supported for this consumer") + } + + p.Lock() + subject := p.js.apiSubject(fmt.Sprintf(apiRequestNextT, p.stream, p.name)) + + msgs := make(chan *nats.Msg, consumeOpts.MaxMessages) + + consumeID := nuid.Next() + sub := &pullSubscription{ + id: consumeID, + consumer: p, + done: make(chan struct{}, 1), + msgs: msgs, + errs: make(chan error, 10), + fetchNext: make(chan *pullRequest, 1), + consumeOpts: consumeOpts, + } + sub.connStatusChanged = p.js.conn.StatusChanged(nats.CONNECTED, nats.RECONNECTING) + inbox := p.js.conn.NewInbox() + sub.subscription, err = p.js.conn.ChanSubscribe(inbox, sub.msgs) + if err != nil { + p.Unlock() + return nil, err + } + sub.subscription.SetClosedHandler(func(sid string) func(string) { + return func(subject string) { + if sub.draining.Load() != 1 { + // if we're not draining, subscription can be closed as soon + // as closed handler is called + // otherwise, we need to wait until all messages are drained + // in Next + p.subs.Delete(sid) + } + close(msgs) + } + }(sub.id)) + + p.subs.Store(sub.id, sub) + p.Unlock() + + go sub.pullMessages(subject) + + go func() { + for { + select { + case status, ok := <-sub.connStatusChanged: + if !ok { + return + } + if status == nats.CONNECTED { + sub.errs <- errConnected + } + if status == nats.RECONNECTING { + sub.errs <- errDisconnected + } + case <-sub.done: + return + } + } + }() + + return sub, nil +} + +var ( + errConnected = errors.New("connected") + errDisconnected = errors.New("disconnected") +) + +// Next retrieves next message on a stream. It will block until the next +// message is available. If the context is canceled, Next will return +// ErrMsgIteratorClosed error. 
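+//
+// A typical iteration loop (sketch; it is a MessagesContext obtained
+// from Consumer.Messages):
+//
+//	for {
+//		msg, err := it.Next()
+//		if err != nil {
+//			break // e.g. ErrMsgIteratorClosed after Stop or Drain
+//		}
+//		msg.Ack()
+//	}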
+func (s *pullSubscription) Next() (Msg, error) { + s.Lock() + defer s.Unlock() + drainMode := s.draining.Load() == 1 + closed := s.closed.Load() == 1 + if closed && !drainMode { + return nil, ErrMsgIteratorClosed + } + hbMonitor := s.scheduleHeartbeatCheck(s.consumeOpts.Heartbeat) + defer func() { + if hbMonitor != nil { + hbMonitor.Stop() + } + }() + + isConnected := true + if s.consumeOpts.StopAfter > 0 && s.delivered >= s.consumeOpts.StopAfter { + s.Stop() + return nil, ErrMsgIteratorClosed + } + + for { + s.checkPending() + select { + case msg, ok := <-s.msgs: + if !ok { + // if msgs channel is closed, it means that subscription was either drained or stopped + s.consumer.subs.Delete(s.id) + s.draining.CompareAndSwap(1, 0) + return nil, ErrMsgIteratorClosed + } + if hbMonitor != nil { + hbMonitor.Reset(2 * s.consumeOpts.Heartbeat) + } + userMsg, msgErr := checkMsg(msg) + if !userMsg { + // heartbeat message + if msgErr == nil { + continue + } + if err := s.handleStatusMsg(msg, msgErr); err != nil { + s.Stop() + return nil, err + } + continue + } + if pinId := msg.Header.Get("Nats-Pin-Id"); pinId != "" { + s.consumer.setPinID(pinId) + } + s.decrementPendingMsgs(msg) + s.incrementDeliveredMsgs() + return s.consumer.js.toJSMsg(msg), nil + case err := <-s.errs: + if errors.Is(err, ErrNoHeartbeat) { + s.pending.msgCount = 0 + s.pending.byteCount = 0 + if s.consumeOpts.ReportMissingHeartbeats { + return nil, err + } + if hbMonitor != nil { + hbMonitor.Reset(2 * s.consumeOpts.Heartbeat) + } + } + if errors.Is(err, errConnected) { + if !isConnected { + isConnected = true + + if s.consumeOpts.notifyOnReconnect { + return nil, errConnected + } + s.pending.msgCount = 0 + s.pending.byteCount = 0 + if hbMonitor != nil { + hbMonitor.Reset(2 * s.consumeOpts.Heartbeat) + } + } + } + if errors.Is(err, errDisconnected) { + if hbMonitor != nil { + hbMonitor.Stop() + } + isConnected = false + } + } + } +} + +func (s *pullSubscription) handleStatusMsg(msg *nats.Msg, msgErr error) error { + if !errors.Is(msgErr, nats.ErrTimeout) && !errors.Is(msgErr, ErrMaxBytesExceeded) && !errors.Is(msgErr, ErrBatchCompleted) { + if errors.Is(msgErr, ErrConsumerDeleted) || errors.Is(msgErr, ErrBadRequest) { + return msgErr + } + if errors.Is(msgErr, ErrPinIDMismatch) { + s.consumer.setPinID("") + s.pending.msgCount = 0 + s.pending.byteCount = 0 + } + if s.consumeOpts.ErrHandler != nil { + s.consumeOpts.ErrHandler(s, msgErr) + } + if errors.Is(msgErr, ErrConsumerLeadershipChanged) { + s.pending.msgCount = 0 + s.pending.byteCount = 0 + } + return nil + } + msgsLeft, bytesLeft, err := parsePending(msg) + if err != nil { + return err + } + s.pending.msgCount -= msgsLeft + if s.pending.msgCount < 0 { + s.pending.msgCount = 0 + } + if s.consumeOpts.MaxBytes > 0 && !s.consumeOpts.LimitSize { + s.pending.byteCount -= bytesLeft + if s.pending.byteCount < 0 { + s.pending.byteCount = 0 + } + } + return nil +} + +func (hb *hbMonitor) Stop() { + hb.Mutex.Lock() + hb.timer.Stop() + hb.Mutex.Unlock() +} + +func (hb *hbMonitor) Reset(dur time.Duration) { + hb.Mutex.Lock() + hb.timer.Reset(dur) + hb.Mutex.Unlock() +} + +// Stop unsubscribes from the stream and cancels subscription. Calling +// Next after calling Stop will return ErrMsgIteratorClosed error. +// All messages that are already in the buffer are discarded. 
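+//
+// Use Drain instead when messages already buffered should still be
+// delivered by Next before the iterator reports ErrMsgIteratorClosed.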
+func (s *pullSubscription) Stop() { + if !s.closed.CompareAndSwap(0, 1) { + return + } + close(s.done) + if s.consumeOpts.stopAfterMsgsLeft != nil { + if s.delivered >= s.consumeOpts.StopAfter { + close(s.consumeOpts.stopAfterMsgsLeft) + } else { + s.consumeOpts.stopAfterMsgsLeft <- s.consumeOpts.StopAfter - s.delivered + } + } +} + +// Drain unsubscribes from the stream and cancels subscription. All +// messages that are already in the buffer will be available on +// subsequent calls to Next. After the buffer is drained, Next will +// return ErrMsgIteratorClosed error. +func (s *pullSubscription) Drain() { + if !s.closed.CompareAndSwap(0, 1) { + return + } + s.draining.Store(1) + close(s.done) + if s.consumeOpts.stopAfterMsgsLeft != nil { + if s.delivered >= s.consumeOpts.StopAfter { + close(s.consumeOpts.stopAfterMsgsLeft) + } else { + s.consumeOpts.stopAfterMsgsLeft <- s.consumeOpts.StopAfter - s.delivered + } + } +} + +// Closed returns a channel that is closed when consuming is +// fully stopped/drained. When the channel is closed, no more messages +// will be received and processing is complete. +func (s *pullSubscription) Closed() <-chan struct{} { + s.Lock() + defer s.Unlock() + closedCh := s.closedCh + if closedCh == nil { + closedCh = make(chan struct{}) + s.closedCh = closedCh + } + if !s.subscription.IsValid() { + close(s.closedCh) + s.closedCh = nil + } + return closedCh +} + +// Fetch sends a single request to retrieve given number of messages. +// It will wait up to provided expiry time if not all messages are available. +func (p *pullConsumer) Fetch(batch int, opts ...FetchOpt) (MessageBatch, error) { + req := &pullRequest{ + Batch: batch, + Expires: DefaultExpires, + Heartbeat: unset, + } + for _, opt := range opts { + if err := opt(req); err != nil { + return nil, err + } + } + // if heartbeat was not explicitly set, set it to 5 seconds for longer pulls + // and disable it for shorter pulls + if req.Heartbeat == unset { + if req.Expires >= 10*time.Second { + req.Heartbeat = 5 * time.Second + } else { + req.Heartbeat = 0 + } + } + if req.Expires < 2*req.Heartbeat { + return nil, fmt.Errorf("%w: expiry time should be at least 2 times the heartbeat", ErrInvalidOption) + } + + return p.fetch(req) +} + +// FetchBytes is used to retrieve up to a provided bytes from the stream. +func (p *pullConsumer) FetchBytes(maxBytes int, opts ...FetchOpt) (MessageBatch, error) { + req := &pullRequest{ + Batch: defaultBatchMaxBytesOnly, + MaxBytes: maxBytes, + Expires: DefaultExpires, + Heartbeat: unset, + } + for _, opt := range opts { + if err := opt(req); err != nil { + return nil, err + } + } + // if heartbeat was not explicitly set, set it to 5 seconds for longer pulls + // and disable it for shorter pulls + if req.Heartbeat == unset { + if req.Expires >= 10*time.Second { + req.Heartbeat = 5 * time.Second + } else { + req.Heartbeat = 0 + } + } + if req.Expires < 2*req.Heartbeat { + return nil, fmt.Errorf("%w: expiry time should be at least 2 times the heartbeat", ErrInvalidOption) + } + + return p.fetch(req) +} + +// FetchNoWait sends a single request to retrieve given number of messages. +// FetchNoWait will only return messages that are available at the time of the +// request. It will not wait for more messages to arrive. 
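+//
+// A sketch (cons is a hypothetical pull consumer handle):
+//
+//	batch, err := cons.FetchNoWait(50)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for msg := range batch.Messages() {
+//		msg.Ack()
+//	}
+//	// batch.Error() reports a terminal error of the request, if any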
+func (p *pullConsumer) FetchNoWait(batch int) (MessageBatch, error) { + req := &pullRequest{ + Batch: batch, + NoWait: true, + } + + return p.fetch(req) +} + +func (p *pullConsumer) fetch(req *pullRequest) (MessageBatch, error) { + res := &fetchResult{ + msgs: make(chan Msg, req.Batch), + } + msgs := make(chan *nats.Msg, 2*req.Batch) + subject := p.js.apiSubject(fmt.Sprintf(apiRequestNextT, p.stream, p.name)) + + sub := &pullSubscription{ + consumer: p, + done: make(chan struct{}, 1), + msgs: msgs, + errs: make(chan error, 10), + } + inbox := p.js.conn.NewInbox() + var err error + sub.subscription, err = p.js.conn.ChanSubscribe(inbox, sub.msgs) + if err != nil { + return nil, err + } + req.PinID = p.getPinID() + if err := sub.pull(req, subject); err != nil { + return nil, err + } + + var receivedMsgs, receivedBytes int + hbTimer := sub.scheduleHeartbeatCheck(req.Heartbeat) + go func(res *fetchResult) { + defer sub.subscription.Unsubscribe() + defer close(res.msgs) + for { + select { + case msg := <-msgs: + res.Lock() + if hbTimer != nil { + hbTimer.Reset(2 * req.Heartbeat) + } + userMsg, err := checkMsg(msg) + if err != nil { + errNotTimeoutOrNoMsgs := !errors.Is(err, nats.ErrTimeout) && !errors.Is(err, ErrNoMessages) + if errNotTimeoutOrNoMsgs && !errors.Is(err, ErrMaxBytesExceeded) { + res.err = err + } + if errors.Is(err, ErrPinIDMismatch) { + p.setPinID("") + } + res.done = true + res.Unlock() + return + } + if !userMsg { + res.Unlock() + continue + } + if pinId := msg.Header.Get("Nats-Pin-Id"); pinId != "" { + p.setPinID(pinId) + } + res.msgs <- p.js.toJSMsg(msg) + meta, err := msg.Metadata() + if err != nil { + res.err = fmt.Errorf("parsing message metadata: %s", err) + res.done = true + res.Unlock() + return + } + res.sseq = meta.Sequence.Stream + receivedMsgs++ + if req.MaxBytes != 0 { + receivedBytes += msg.Size() + } + if receivedMsgs == req.Batch || (req.MaxBytes != 0 && receivedBytes >= req.MaxBytes) { + res.done = true + res.Unlock() + return + } + res.Unlock() + case err := <-sub.errs: + res.Lock() + res.err = err + res.done = true + res.Unlock() + return + case <-time.After(req.Expires + 1*time.Second): + res.Lock() + res.done = true + res.Unlock() + return + } + } + }(res) + return res, nil +} + +func (fr *fetchResult) Messages() <-chan Msg { + fr.Lock() + defer fr.Unlock() + return fr.msgs +} + +func (fr *fetchResult) Error() error { + fr.Lock() + defer fr.Unlock() + return fr.err +} + +func (fr *fetchResult) closed() bool { + fr.Lock() + defer fr.Unlock() + return fr.done +} + +// Next is used to retrieve the next message from the stream. This +// method will block until the message is retrieved or timeout is +// reached. +func (p *pullConsumer) Next(opts ...FetchOpt) (Msg, error) { + res, err := p.Fetch(1, opts...) 
+ if err != nil { + return nil, err + } + msg := <-res.Messages() + if msg != nil { + return msg, nil + } + if res.Error() == nil { + return nil, nats.ErrTimeout + } + return nil, res.Error() +} + +func (s *pullSubscription) pullMessages(subject string) { + for { + select { + case req := <-s.fetchNext: + s.fetchInProgress.Store(1) + + if err := s.pull(req, subject); err != nil { + if errors.Is(err, ErrMsgIteratorClosed) { + s.cleanup() + return + } + s.errs <- err + } + s.fetchInProgress.Store(0) + case <-s.done: + s.cleanup() + return + } + } +} + +func (s *pullSubscription) scheduleHeartbeatCheck(dur time.Duration) *hbMonitor { + if dur == 0 { + return nil + } + return &hbMonitor{ + timer: time.AfterFunc(2*dur, func() { + s.errs <- ErrNoHeartbeat + }), + } +} + +func (s *pullSubscription) cleanup() { + // For now this function does not need to hold the lock. + // Holding the lock here might cause a deadlock if Next() + // is already holding the lock and waiting. + // The fields that are read (subscription, hbMonitor) + // are read only (Only written on creation of pullSubscription). + if s.subscription == nil || !s.subscription.IsValid() { + return + } + if s.hbMonitor != nil { + s.hbMonitor.Stop() + } + drainMode := s.draining.Load() == 1 + if drainMode { + s.subscription.Drain() + } else { + s.subscription.Unsubscribe() + } + s.closed.Store(1) +} + +// pull sends a pull request to the server and waits for messages using a subscription from [pullSubscription]. +// Messages will be fetched up to given batch_size or until there are no more messages or timeout is returned +func (s *pullSubscription) pull(req *pullRequest, subject string) error { + s.consumer.Lock() + defer s.consumer.Unlock() + if s.closed.Load() == 1 { + return ErrMsgIteratorClosed + } + if req.Batch < 1 { + return fmt.Errorf("%w: batch size must be at least 1", nats.ErrInvalidArg) + } + reqJSON, err := json.Marshal(req) + if err != nil { + return err + } + reply := s.subscription.Subject + + if err := s.consumer.js.conn.PublishRequest(subject, reply, reqJSON); err != nil { + return err + } + return nil +} + +func parseConsumeOpts(ordered bool, opts ...PullConsumeOpt) (*consumeOpts, error) { + consumeOpts := &consumeOpts{ + MaxMessages: unset, + MaxBytes: unset, + Expires: DefaultExpires, + Heartbeat: unset, + ReportMissingHeartbeats: true, + StopAfter: unset, + } + for _, opt := range opts { + if err := opt.configureConsume(consumeOpts); err != nil { + return nil, err + } + } + if err := consumeOpts.setDefaults(ordered); err != nil { + return nil, err + } + return consumeOpts, nil +} + +func parseMessagesOpts(ordered bool, opts ...PullMessagesOpt) (*consumeOpts, error) { + consumeOpts := &consumeOpts{ + MaxMessages: unset, + MaxBytes: unset, + Expires: DefaultExpires, + Heartbeat: unset, + ReportMissingHeartbeats: true, + StopAfter: unset, + } + for _, opt := range opts { + if err := opt.configureMessages(consumeOpts); err != nil { + return nil, err + } + } + if err := consumeOpts.setDefaults(ordered); err != nil { + return nil, err + } + return consumeOpts, nil +} + +func (consumeOpts *consumeOpts) setDefaults(ordered bool) error { + // we cannot use both max messages and max bytes unless we're using max bytes as fetch size limiter + if consumeOpts.MaxBytes != unset && consumeOpts.MaxMessages != unset && !consumeOpts.LimitSize { + return errors.New("only one of MaxMessages and MaxBytes can be specified") + } + if consumeOpts.MaxBytes != unset && !consumeOpts.LimitSize { + // we used PullMaxBytes setting, set MaxMessages 
+func (consumeOpts *consumeOpts) setDefaults(ordered bool) error {
+	// we cannot use both max messages and max bytes unless we're using max bytes as the fetch size limiter
+	if consumeOpts.MaxBytes != unset && consumeOpts.MaxMessages != unset && !consumeOpts.LimitSize {
+		return errors.New("only one of MaxMessages and MaxBytes can be specified")
+	}
+	if consumeOpts.MaxBytes != unset && !consumeOpts.LimitSize {
+		// we used the PullMaxBytes setting, so set MaxMessages to a high value
+		consumeOpts.MaxMessages = defaultBatchMaxBytesOnly
+	} else if consumeOpts.MaxMessages == unset {
+		// otherwise, if max messages is not set, set it to the default value
+		consumeOpts.MaxMessages = DefaultMaxMessages
+	}
+	// if the user did not set max bytes, set it to 0
+	if consumeOpts.MaxBytes == unset {
+		consumeOpts.MaxBytes = 0
+	}
+
+	if consumeOpts.ThresholdMessages == 0 {
+		// half of the max messages, rounded up
+		consumeOpts.ThresholdMessages = int(math.Ceil(float64(consumeOpts.MaxMessages) / 2))
+	}
+	if consumeOpts.ThresholdBytes == 0 {
+		// half of the max bytes, rounded up
+		consumeOpts.ThresholdBytes = int(math.Ceil(float64(consumeOpts.MaxBytes) / 2))
+	}
+
+	// set default heartbeats
+	if consumeOpts.Heartbeat == unset {
+		// by default, use 50% of the expiry time
+		consumeOpts.Heartbeat = consumeOpts.Expires / 2
+		if ordered {
+			// for ordered consumers, the default heartbeat is 5 seconds
+			if consumeOpts.Expires < 10*time.Second {
+				consumeOpts.Heartbeat = consumeOpts.Expires / 2
+			} else {
+				consumeOpts.Heartbeat = 5 * time.Second
+			}
+		} else if consumeOpts.Heartbeat > 30*time.Second {
+			// cap the heartbeat at 30 seconds
+			consumeOpts.Heartbeat = 30 * time.Second
+		}
+	}
+	if consumeOpts.Heartbeat > consumeOpts.Expires/2 {
+		return errors.New("the value of Heartbeat must be less than 50% of expiry")
+	}
+	return nil
+}
+
+func (c *pullConsumer) getPinID() string {
+	c.Lock()
+	defer c.Unlock()
+	return c.pinID
+}
+
+func (c *pullConsumer) setPinID(pinID string) {
+	c.Lock()
+	defer c.Unlock()
+	c.pinID = pinID
+}
diff --git a/vendor/github.com/nats-io/nats.go/jetstream/stream.go b/vendor/github.com/nats-io/nats.go/jetstream/stream.go
new file mode 100644
index 000000000..42a5f11c1
--- /dev/null
+++ b/vendor/github.com/nats-io/nats.go/jetstream/stream.go
@@ -0,0 +1,766 @@
+// Copyright 2022-2025 The NATS Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jetstream
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+	"time"
+
+	"github.com/nats-io/nats.go"
+	"github.com/nats-io/nuid"
+)
+
+type (
+	// Stream contains CRUD methods on a consumer via [ConsumerManager], as well
+	// as operations on an existing stream. It allows fetching and removing
+	// messages from a stream, as well as purging a stream.
+	Stream interface {
+		ConsumerManager
+
+		// Info returns StreamInfo from the server.
+		Info(ctx context.Context, opts ...StreamInfoOpt) (*StreamInfo, error)
+
+		// CachedInfo returns the StreamInfo currently cached on this stream.
+		// This method does not perform any network requests. The cached
+		// StreamInfo is updated on every call to Info and Update.
+		CachedInfo() *StreamInfo
+
+		// Purge removes messages from a stream. It is a destructive operation.
+		// Use with caution. See StreamPurgeOpt for available options.
+		Purge(ctx context.Context, opts ...StreamPurgeOpt) error
+
+		// GetMsg retrieves a raw stream message stored in JetStream by sequence number.
+		GetMsg(ctx context.Context, seq uint64, opts ...GetMsgOpt) (*RawStreamMsg, error)
+
+		// GetLastMsgForSubject retrieves the last raw stream message stored in
+		// JetStream on a given subject.
+		GetLastMsgForSubject(ctx context.Context, subject string) (*RawStreamMsg, error)
+
+		// DeleteMsg deletes a message from a stream.
+		// On the server, the message is marked as erased, but not overwritten.
+		DeleteMsg(ctx context.Context, seq uint64) error
+
+		// SecureDeleteMsg deletes a message from a stream. The deleted message
+		// is overwritten with random data. As a result, this operation is slower
+		// than DeleteMsg.
+		SecureDeleteMsg(ctx context.Context, seq uint64) error
+	}
+
+	// ConsumerManager provides a CRUD API for managing consumers. It is
+	// available as a part of the [Stream] interface. CreateConsumer,
+	// UpdateConsumer, CreateOrUpdateConsumer and Consumer methods return a
+	// [Consumer] interface, allowing the caller to operate on a consumer
+	// (e.g. consume messages).
+	ConsumerManager interface {
+		// CreateOrUpdateConsumer creates a consumer on a given stream with
+		// the given config. If the consumer already exists, it will be updated
+		// (if possible). A Consumer interface is returned, allowing the caller
+		// to operate on the consumer (e.g. fetch messages).
+		CreateOrUpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error)
+
+		// CreateConsumer creates a consumer on a given stream with the given
+		// config. If the consumer already exists and the provided configuration
+		// differs from its configuration, ErrConsumerExists is returned. If the
+		// provided configuration is the same as the existing consumer's, the
+		// existing consumer is returned. A Consumer interface is returned,
+		// allowing the caller to operate on the consumer (e.g. fetch messages).
+		CreateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error)
+
+		// UpdateConsumer updates an existing consumer. If the consumer does not
+		// exist, ErrConsumerDoesNotExist is returned. A Consumer interface is
+		// returned, allowing the caller to operate on the consumer (e.g. fetch
+		// messages).
+		UpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error)
+
+		// OrderedConsumer returns an OrderedConsumer instance. Ordered
+		// consumers are managed by the library and provide a simple way to
+		// consume messages from a stream. Ordered consumers are ephemeral
+		// in-memory pull consumers and are resilient to deletes and restarts.
+		OrderedConsumer(ctx context.Context, cfg OrderedConsumerConfig) (Consumer, error)
+
+		// Consumer returns an interface to an existing consumer, allowing processing
+		// of messages. If the consumer does not exist, ErrConsumerNotFound is
+		// returned.
+		Consumer(ctx context.Context, consumer string) (Consumer, error)
+
+		// DeleteConsumer removes a consumer with the given name from a stream.
+		// If the consumer does not exist, ErrConsumerNotFound is returned.
+		DeleteConsumer(ctx context.Context, consumer string) error
+
+		// PauseConsumer pauses a consumer.
+		PauseConsumer(ctx context.Context, consumer string, pauseUntil time.Time) (*ConsumerPauseResponse, error)
+
+		// ResumeConsumer resumes a consumer.
+		ResumeConsumer(ctx context.Context, consumer string) (*ConsumerPauseResponse, error)
+
+		// ListConsumers returns ConsumerInfoLister enabling iterating over a
+		// channel of consumer infos.
+		ListConsumers(context.Context) ConsumerInfoLister
+
+		// ConsumerNames returns a ConsumerNameLister enabling iterating over a
+		// channel of consumer names.
+ ConsumerNames(context.Context) ConsumerNameLister + + // UnpinConsumer unpins the currently pinned client for a consumer for the given group name. + // If consumer does not exist, ErrConsumerNotFound is returned. + UnpinConsumer(ctx context.Context, consumer string, group string) error + } + + RawStreamMsg struct { + Subject string + Sequence uint64 + Header nats.Header + Data []byte + Time time.Time + } + + stream struct { + name string + info *StreamInfo + js *jetStream + } + + // StreamInfoOpt is a function setting options for [Stream.Info] + StreamInfoOpt func(*streamInfoRequest) error + + streamInfoRequest struct { + apiPaged + DeletedDetails bool `json:"deleted_details,omitempty"` + SubjectFilter string `json:"subjects_filter,omitempty"` + } + + consumerInfoResponse struct { + apiResponse + *ConsumerInfo + } + + // StreamPurgeOpt is a function setting options for [Stream.Purge] + StreamPurgeOpt func(*StreamPurgeRequest) error + + // StreamPurgeRequest is an API request body to purge a stream. + + StreamPurgeRequest struct { + // Purge up to but not including sequence. + Sequence uint64 `json:"seq,omitempty"` + // Subject to match against messages for the purge command. + Subject string `json:"filter,omitempty"` + // Number of messages to keep. + Keep uint64 `json:"keep,omitempty"` + } + + streamPurgeResponse struct { + apiResponse + Success bool `json:"success,omitempty"` + Purged uint64 `json:"purged"` + } + + consumerDeleteResponse struct { + apiResponse + Success bool `json:"success,omitempty"` + } + + consumerPauseRequest struct { + PauseUntil *time.Time `json:"pause_until,omitempty"` + } + + ConsumerPauseResponse struct { + // Paused is true if the consumer is paused. + Paused bool `json:"paused"` + // PauseUntil is the time until the consumer is paused. + PauseUntil time.Time `json:"pause_until"` + // PauseRemaining is the time remaining until the consumer is paused. + PauseRemaining time.Duration `json:"pause_remaining,omitempty"` + } + + consumerPauseApiResponse struct { + apiResponse + ConsumerPauseResponse + } + + // GetMsgOpt is a function setting options for [Stream.GetMsg] + GetMsgOpt func(*apiMsgGetRequest) error + + apiMsgGetRequest struct { + Seq uint64 `json:"seq,omitempty"` + LastFor string `json:"last_by_subj,omitempty"` + NextFor string `json:"next_by_subj,omitempty"` + } + + // apiMsgGetResponse is the response for a Stream get request. + apiMsgGetResponse struct { + apiResponse + Message *storedMsg `json:"message,omitempty"` + } + + // storedMsg is a raw message stored in JetStream. + storedMsg struct { + Subject string `json:"subject"` + Sequence uint64 `json:"seq"` + Header []byte `json:"hdrs,omitempty"` + Data []byte `json:"data,omitempty"` + Time time.Time `json:"time"` + } + + msgDeleteRequest struct { + Seq uint64 `json:"seq"` + NoErase bool `json:"no_erase,omitempty"` + } + + msgDeleteResponse struct { + apiResponse + Success bool `json:"success,omitempty"` + } + + // ConsumerInfoLister is used to iterate over a channel of consumer infos. + // Err method can be used to check for errors encountered during iteration. + // Info channel is always closed and therefore can be used in a range loop. + ConsumerInfoLister interface { + Info() <-chan *ConsumerInfo + Err() error + } + + // ConsumerNameLister is used to iterate over a channel of consumer names. + // Err method can be used to check for errors encountered during iteration. + // Name channel is always closed and therefore can be used in a range loop. 
+	ConsumerNameLister interface {
+		Name() <-chan string
+		Err() error
+	}
+
+	consumerLister struct {
+		js       *jetStream
+		offset   int
+		pageInfo *apiPaged
+
+		consumers chan *ConsumerInfo
+		names     chan string
+		err       error
+	}
+
+	consumerListResponse struct {
+		apiResponse
+		apiPaged
+		Consumers []*ConsumerInfo `json:"consumers"`
+	}
+
+	consumerNamesResponse struct {
+		apiResponse
+		apiPaged
+		Consumers []string `json:"consumers"`
+	}
+
+	consumerUnpinRequest struct {
+		Group string `json:"group"`
+	}
+)
+
+// CreateOrUpdateConsumer creates a consumer on a given stream with
+// the given config. If the consumer already exists, it will be updated
+// (if possible). A Consumer interface is returned, allowing the caller
+// to operate on the consumer (e.g. fetch messages).
+func (s *stream) CreateOrUpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) {
+	return upsertConsumer(ctx, s.js, s.name, cfg, consumerActionCreateOrUpdate)
+}
+
+// CreateConsumer creates a consumer on a given stream with the given
+// config. If the consumer already exists and the provided configuration
+// differs from its configuration, ErrConsumerExists is returned. If the
+// provided configuration is the same as the existing consumer's, the
+// existing consumer is returned. A Consumer interface is returned,
+// allowing the caller to operate on the consumer (e.g. fetch messages).
+func (s *stream) CreateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) {
+	return upsertConsumer(ctx, s.js, s.name, cfg, consumerActionCreate)
+}
+
+// UpdateConsumer updates an existing consumer. If the consumer does not
+// exist, ErrConsumerDoesNotExist is returned. A Consumer interface is
+// returned, allowing the caller to operate on the consumer (e.g. fetch
+// messages).
+func (s *stream) UpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) {
+	return upsertConsumer(ctx, s.js, s.name, cfg, consumerActionUpdate)
+}
+
+// OrderedConsumer returns an OrderedConsumer instance. Ordered consumers
+// are managed by the library and provide a simple way to consume
+// messages from a stream. Ordered consumers are ephemeral in-memory
+// pull consumers and are resilient to deletes and restarts.
+func (s *stream) OrderedConsumer(ctx context.Context, cfg OrderedConsumerConfig) (Consumer, error) {
+	oc := &orderedConsumer{
+		js:         s.js,
+		cfg:        &cfg,
+		stream:     s.name,
+		namePrefix: nuid.Next(),
+		doReset:    make(chan struct{}, 1),
+	}
+	consCfg := oc.getConsumerConfig()
+	cons, err := s.CreateOrUpdateConsumer(ctx, *consCfg)
+	if err != nil {
+		return nil, err
+	}
+	oc.currentConsumer = cons.(*pullConsumer)
+
+	return oc, nil
+}
+
+// Consumer returns an interface to an existing consumer, allowing processing
+// of messages. If the consumer does not exist, ErrConsumerNotFound is
+// returned.
+func (s *stream) Consumer(ctx context.Context, name string) (Consumer, error) {
+	return getConsumer(ctx, s.js, s.name, name)
+}
+
+// DeleteConsumer removes a consumer with the given name from a stream.
+// If the consumer does not exist, ErrConsumerNotFound is returned.
+func (s *stream) DeleteConsumer(ctx context.Context, name string) error {
+	return deleteConsumer(ctx, s.js, s.name, name)
+}
+
+// PauseConsumer pauses a consumer.
+func (s *stream) PauseConsumer(ctx context.Context, name string, pauseUntil time.Time) (*ConsumerPauseResponse, error) {
+	return pauseConsumer(ctx, s.js, s.name, name, &pauseUntil)
+}
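+// examplePauseResume is an illustrative sketch added for this review (it is
+// not part of upstream nats.go). It pauses a hypothetical consumer named
+// "worker" for one hour and then resumes it before the deadline expires.
+func examplePauseResume(ctx context.Context, s Stream) error {
+	resp, err := s.PauseConsumer(ctx, "worker", time.Now().Add(time.Hour))
+	if err != nil {
+		return err
+	}
+	if resp.Paused {
+		// resume early; the server clears the pause deadline
+		_, err = s.ResumeConsumer(ctx, "worker")
+	}
+	return err
+}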
+// ResumeConsumer resumes a consumer.
+func (s *stream) ResumeConsumer(ctx context.Context, name string) (*ConsumerPauseResponse, error) {
+	return resumeConsumer(ctx, s.js, s.name, name)
+}
+
+// Info returns StreamInfo from the server.
+func (s *stream) Info(ctx context.Context, opts ...StreamInfoOpt) (*StreamInfo, error) {
+	ctx, cancel := s.js.wrapContextWithoutDeadline(ctx)
+	if cancel != nil {
+		defer cancel()
+	}
+	var infoReq *streamInfoRequest
+	for _, opt := range opts {
+		if infoReq == nil {
+			infoReq = &streamInfoRequest{}
+		}
+		if err := opt(infoReq); err != nil {
+			return nil, err
+		}
+	}
+	var req []byte
+	var err error
+	var subjectMap map[string]uint64
+	var offset int
+
+	infoSubject := fmt.Sprintf(apiStreamInfoT, s.name)
+	var info *StreamInfo
+	for {
+		if infoReq != nil {
+			if infoReq.SubjectFilter != "" {
+				if subjectMap == nil {
+					subjectMap = make(map[string]uint64)
+				}
+				infoReq.Offset = offset
+			}
+			req, err = json.Marshal(infoReq)
+			if err != nil {
+				return nil, err
+			}
+		}
+		var resp streamInfoResponse
+		if _, err = s.js.apiRequestJSON(ctx, infoSubject, &resp, req); err != nil {
+			return nil, err
+		}
+		if resp.Error != nil {
+			if resp.Error.ErrorCode == JSErrCodeStreamNotFound {
+				return nil, ErrStreamNotFound
+			}
+			return nil, resp.Error
+		}
+		info = resp.StreamInfo
+		var total int
+		if resp.Total != 0 {
+			total = resp.Total
+		}
+		if len(resp.StreamInfo.State.Subjects) > 0 {
+			for subj, msgs := range resp.StreamInfo.State.Subjects {
+				subjectMap[subj] = msgs
+			}
+			offset = len(subjectMap)
+		}
+		if total == 0 || total <= offset {
+			info.State.Subjects = nil
+			// we don't want to store subjects in the cache
+			cached := *info
+			s.info = &cached
+			info.State.Subjects = subjectMap
+			break
+		}
+	}
+
+	return info, nil
+}
+
+// CachedInfo returns the StreamInfo currently cached on this stream.
+// This method does not perform any network requests. The cached
+// StreamInfo is updated on every call to Info and Update.
+func (s *stream) CachedInfo() *StreamInfo {
+	return s.info
+}
+
+// Purge removes messages from a stream. It is a destructive operation.
+// Use with caution. See StreamPurgeOpt for available options.
+func (s *stream) Purge(ctx context.Context, opts ...StreamPurgeOpt) error {
+	ctx, cancel := s.js.wrapContextWithoutDeadline(ctx)
+	if cancel != nil {
+		defer cancel()
+	}
+	var purgeReq StreamPurgeRequest
+	for _, opt := range opts {
+		if err := opt(&purgeReq); err != nil {
+			return err
+		}
+	}
+	var req []byte
+	var err error
+	req, err = json.Marshal(purgeReq)
+	if err != nil {
+		return err
+	}
+
+	purgeSubject := fmt.Sprintf(apiStreamPurgeT, s.name)
+
+	var resp streamPurgeResponse
+	if _, err = s.js.apiRequestJSON(ctx, purgeSubject, &resp, req); err != nil {
+		return err
+	}
+	if resp.Error != nil {
+		return resp.Error
+	}
+
+	return nil
+}
+
+// GetMsg retrieves a raw stream message stored in JetStream by sequence number.
+func (s *stream) GetMsg(ctx context.Context, seq uint64, opts ...GetMsgOpt) (*RawStreamMsg, error) {
+	req := &apiMsgGetRequest{Seq: seq}
+	for _, opt := range opts {
+		if err := opt(req); err != nil {
+			return nil, err
+		}
+	}
+	return s.getMsg(ctx, req)
+}
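+// exampleGetMsg is an illustrative sketch added for this review (it is not
+// part of upstream nats.go). It reads one message by sequence number and the
+// newest message on an assumed subject "orders.created".
+func exampleGetMsg(ctx context.Context, s Stream) error {
+	bySeq, err := s.GetMsg(ctx, 42)
+	if err != nil {
+		return err
+	}
+	last, err := s.GetLastMsgForSubject(ctx, "orders.created")
+	if err != nil {
+		return err
+	}
+	fmt.Printf("seq %d; latest on %s has %d bytes\n", bySeq.Sequence, last.Subject, len(last.Data))
+	return nil
+}
+
+// GetLastMsgForSubject retrieves the last raw stream message stored in
+// JetStream on a given subject.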
+func (s *stream) GetLastMsgForSubject(ctx context.Context, subject string) (*RawStreamMsg, error) { + return s.getMsg(ctx, &apiMsgGetRequest{LastFor: subject}) +} + +func (s *stream) getMsg(ctx context.Context, mreq *apiMsgGetRequest) (*RawStreamMsg, error) { + ctx, cancel := s.js.wrapContextWithoutDeadline(ctx) + if cancel != nil { + defer cancel() + } + req, err := json.Marshal(mreq) + if err != nil { + return nil, err + } + var gmSubj string + + // handle direct gets + if s.info.Config.AllowDirect { + if mreq.LastFor != "" { + gmSubj = fmt.Sprintf(apiDirectMsgGetLastBySubjectT, s.name, mreq.LastFor) + r, err := s.js.apiRequest(ctx, gmSubj, nil) + if err != nil { + return nil, err + } + return convertDirectGetMsgResponseToMsg(r.msg) + } + gmSubj = fmt.Sprintf(apiDirectMsgGetT, s.name) + r, err := s.js.apiRequest(ctx, gmSubj, req) + if err != nil { + return nil, err + } + return convertDirectGetMsgResponseToMsg(r.msg) + } + + var resp apiMsgGetResponse + dsSubj := fmt.Sprintf(apiMsgGetT, s.name) + _, err = s.js.apiRequestJSON(ctx, dsSubj, &resp, req) + if err != nil { + return nil, err + } + + if resp.Error != nil { + if resp.Error.ErrorCode == JSErrCodeMessageNotFound { + return nil, ErrMsgNotFound + } + return nil, resp.Error + } + + msg := resp.Message + + var hdr nats.Header + if len(msg.Header) > 0 { + hdr, err = nats.DecodeHeadersMsg(msg.Header) + if err != nil { + return nil, err + } + } + + return &RawStreamMsg{ + Subject: msg.Subject, + Sequence: msg.Sequence, + Header: hdr, + Data: msg.Data, + Time: msg.Time, + }, nil +} + +func convertDirectGetMsgResponseToMsg(r *nats.Msg) (*RawStreamMsg, error) { + // Check for 404/408. We would get a no-payload message and a "Status" header + if len(r.Data) == 0 { + val := r.Header.Get(statusHdr) + if val != "" { + switch val { + case noMessages: + return nil, ErrMsgNotFound + default: + desc := r.Header.Get("Description") + if desc == "" { + desc = "unable to get message" + } + return nil, fmt.Errorf("nats: %s", desc) + } + } + } + // Check for headers that give us the required information to + // reconstruct the message. + if len(r.Header) == 0 { + return nil, errors.New("nats: response should have headers") + } + stream := r.Header.Get(StreamHeader) + if stream == "" { + return nil, errors.New("nats: missing stream header") + } + + seqStr := r.Header.Get(SequenceHeader) + if seqStr == "" { + return nil, errors.New("nats: missing sequence header") + } + seq, err := strconv.ParseUint(seqStr, 10, 64) + if err != nil { + return nil, fmt.Errorf("nats: invalid sequence header '%s': %v", seqStr, err) + } + timeStr := r.Header.Get(TimeStampHeaer) + if timeStr == "" { + return nil, errors.New("nats: missing timestamp header") + } + + tm, err := time.Parse(time.RFC3339Nano, timeStr) + if err != nil { + return nil, fmt.Errorf("nats: invalid timestamp header '%s': %v", timeStr, err) + } + subj := r.Header.Get(SubjectHeader) + if subj == "" { + return nil, errors.New("nats: missing subject header") + } + return &RawStreamMsg{ + Subject: subj, + Sequence: seq, + Header: r.Header, + Data: r.Data, + Time: tm, + }, nil +} + +// DeleteMsg deletes a message from a stream. +// On the server, the message is marked as erased, but not overwritten. +func (s *stream) DeleteMsg(ctx context.Context, seq uint64) error { + return s.deleteMsg(ctx, &msgDeleteRequest{Seq: seq, NoErase: true}) +} + +// SecureDeleteMsg deletes a message from a stream. The deleted message +// is overwritten with random data. As a result, this operation is slower +// than DeleteMsg. 
+func (s *stream) SecureDeleteMsg(ctx context.Context, seq uint64) error { + return s.deleteMsg(ctx, &msgDeleteRequest{Seq: seq}) +} + +func (s *stream) deleteMsg(ctx context.Context, req *msgDeleteRequest) error { + ctx, cancel := s.js.wrapContextWithoutDeadline(ctx) + if cancel != nil { + defer cancel() + } + r, err := json.Marshal(req) + if err != nil { + return err + } + subj := fmt.Sprintf(apiMsgDeleteT, s.name) + var resp msgDeleteResponse + if _, err = s.js.apiRequestJSON(ctx, subj, &resp, r); err != nil { + return err + } + if !resp.Success { + return fmt.Errorf("%w: %s", ErrMsgDeleteUnsuccessful, resp.Error.Error()) + } + return nil +} + +// ListConsumers returns ConsumerInfoLister enabling iterating over a +// channel of consumer infos. +func (s *stream) ListConsumers(ctx context.Context) ConsumerInfoLister { + l := &consumerLister{ + js: s.js, + consumers: make(chan *ConsumerInfo), + } + go func() { + defer close(l.consumers) + ctx, cancel := s.js.wrapContextWithoutDeadline(ctx) + if cancel != nil { + defer cancel() + } + for { + page, err := l.consumerInfos(ctx, s.name) + if err != nil && !errors.Is(err, ErrEndOfData) { + l.err = err + return + } + for _, info := range page { + select { + case <-ctx.Done(): + l.err = ctx.Err() + return + default: + } + if info != nil { + l.consumers <- info + } + } + if errors.Is(err, ErrEndOfData) { + return + } + } + }() + + return l +} + +func (s *consumerLister) Info() <-chan *ConsumerInfo { + return s.consumers +} + +func (s *consumerLister) Err() error { + return s.err +} + +// ConsumerNames returns a ConsumerNameLister enabling iterating over a +// channel of consumer names. +func (s *stream) ConsumerNames(ctx context.Context) ConsumerNameLister { + l := &consumerLister{ + js: s.js, + names: make(chan string), + } + go func() { + defer close(l.names) + ctx, cancel := s.js.wrapContextWithoutDeadline(ctx) + if cancel != nil { + defer cancel() + } + for { + page, err := l.consumerNames(ctx, s.name) + if err != nil && !errors.Is(err, ErrEndOfData) { + l.err = err + return + } + for _, info := range page { + select { + case l.names <- info: + case <-ctx.Done(): + l.err = ctx.Err() + return + } + } + if errors.Is(err, ErrEndOfData) { + return + } + } + }() + + return l +} + +func (s *consumerLister) Name() <-chan string { + return s.names +} + +// consumerInfos fetches the next ConsumerInfo page +func (s *consumerLister) consumerInfos(ctx context.Context, stream string) ([]*ConsumerInfo, error) { + if s.pageInfo != nil && s.offset >= s.pageInfo.Total { + return nil, ErrEndOfData + } + + req, err := json.Marshal( + apiPagedRequest{Offset: s.offset}, + ) + if err != nil { + return nil, err + } + + slSubj := fmt.Sprintf(apiConsumerListT, stream) + var resp consumerListResponse + _, err = s.js.apiRequestJSON(ctx, slSubj, &resp, req) + if err != nil { + return nil, err + } + if resp.Error != nil { + return nil, resp.Error + } + + s.pageInfo = &resp.apiPaged + s.offset += len(resp.Consumers) + return resp.Consumers, nil +} + +// consumerNames fetches the next consumer names page +func (s *consumerLister) consumerNames(ctx context.Context, stream string) ([]string, error) { + if s.pageInfo != nil && s.offset >= s.pageInfo.Total { + return nil, ErrEndOfData + } + + req, err := json.Marshal( + apiPagedRequest{Offset: s.offset}, + ) + if err != nil { + return nil, err + } + + slSubj := fmt.Sprintf(apiConsumerNamesT, stream) + var resp consumerNamesResponse + _, err = s.js.apiRequestJSON(ctx, slSubj, &resp, req) + if err != nil { + return nil, err + } + 
if resp.Error != nil {
+		return nil, resp.Error
+	}
+
+	s.pageInfo = &resp.apiPaged
+	s.offset += len(resp.Consumers)
+	return resp.Consumers, nil
+}
+
+// UnpinConsumer unpins the currently pinned client for a consumer for the given group name.
+// If consumer does not exist, ErrConsumerNotFound is returned.
+func (s *stream) UnpinConsumer(ctx context.Context, consumer string, group string) error {
+	return unpinConsumer(ctx, s.js, s.name, consumer, group)
+}
diff --git a/vendor/github.com/nats-io/nats.go/jetstream/stream_config.go b/vendor/github.com/nats-io/nats.go/jetstream/stream_config.go
new file mode 100644
index 000000000..28184a391
--- /dev/null
+++ b/vendor/github.com/nats-io/nats.go/jetstream/stream_config.go
@@ -0,0 +1,611 @@
+// Copyright 2022-2024 The NATS Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jetstream
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+)
+
+type (
+	// StreamInfo shows config and current state for this stream.
+	StreamInfo struct {
+		// Config contains the configuration settings of the stream, set when
+		// creating or updating the stream.
+		Config StreamConfig `json:"config"`
+
+		// Created is the timestamp when the stream was created.
+		Created time.Time `json:"created"`
+
+		// State provides the state of the stream at the time of request,
+		// including metrics like the number of messages in the stream, total
+		// bytes, etc.
+		State StreamState `json:"state"`
+
+		// Cluster contains information about the cluster to which this stream
+		// belongs (if applicable).
+		Cluster *ClusterInfo `json:"cluster,omitempty"`
+
+		// Mirror contains information about another stream this one is
+		// mirroring. Mirroring is used to create replicas of another stream's
+		// data. This field is omitted if the stream is not mirroring another
+		// stream.
+		Mirror *StreamSourceInfo `json:"mirror,omitempty"`
+
+		// Sources is a list of source streams from which this stream collects
+		// data.
+		Sources []*StreamSourceInfo `json:"sources,omitempty"`
+
+		// TimeStamp indicates when the info was gathered by the server.
+		TimeStamp time.Time `json:"ts"`
+	}
+
+	// StreamConfig is the configuration of a JetStream stream.
+	StreamConfig struct {
+		// Name is the name of the stream. It is required and must be unique
+		// across the JetStream account.
+		//
+		// Names cannot contain whitespace, ., *, >, path separators
+		// (forward or backward slash), or non-printable characters.
+		Name string `json:"name"`
+
+		// Description is an optional description of the stream.
+		Description string `json:"description,omitempty"`
+
+		// Subjects is a list of subjects that the stream is listening on.
+		// Wildcards are supported. Subjects cannot be set if the stream is
+		// created as a mirror.
+		Subjects []string `json:"subjects,omitempty"`
+
+		// Retention defines the message retention policy for the stream.
+		// Defaults to LimitsPolicy.
+		Retention RetentionPolicy `json:"retention"`
+
+		// MaxConsumers specifies the maximum number of consumers allowed for
+		// the stream.
+		MaxConsumers int `json:"max_consumers"`
+
+		// MaxMsgs is the maximum number of messages the stream will store.
+		// After reaching the limit, the stream adheres to the discard policy.
+		// If not set, the server default is -1 (unlimited).
+		MaxMsgs int64 `json:"max_msgs"`
+
+		// MaxBytes is the maximum total size of messages the stream will store.
+		// After reaching the limit, the stream adheres to the discard policy.
+		// If not set, the server default is -1 (unlimited).
+		MaxBytes int64 `json:"max_bytes"`
+
+		// Discard defines the policy for handling messages when the stream
+		// reaches its limits in terms of number of messages or total bytes.
+		Discard DiscardPolicy `json:"discard"`
+
+		// DiscardNewPerSubject is a flag to enable discarding new messages per
+		// subject when limits are reached. Requires DiscardPolicy to be
+		// DiscardNew and MaxMsgsPerSubject to be set.
+		DiscardNewPerSubject bool `json:"discard_new_per_subject,omitempty"`
+
+		// MaxAge is the maximum age of messages that the stream will retain.
+		MaxAge time.Duration `json:"max_age"`
+
+		// MaxMsgsPerSubject is the maximum number of messages per subject that
+		// the stream will retain.
+		MaxMsgsPerSubject int64 `json:"max_msgs_per_subject"`
+
+		// MaxMsgSize is the maximum size of any single message in the stream.
+		MaxMsgSize int32 `json:"max_msg_size,omitempty"`
+
+		// Storage specifies the type of storage backend used for the stream
+		// (file or memory).
+		Storage StorageType `json:"storage"`
+
+		// Replicas is the number of stream replicas in clustered JetStream.
+		// Defaults to 1, maximum is 5.
+		Replicas int `json:"num_replicas"`
+
+		// NoAck is a flag to disable acknowledging messages received by this
+		// stream.
+		//
+		// If set to true, publish methods from the JetStream client will not
+		// work as expected, since they rely on acknowledgements. Core NATS
+		// publish methods should be used instead. Note that this will make
+		// message delivery less reliable.
+		NoAck bool `json:"no_ack,omitempty"`
+
+		// Duplicates is the window within which to track duplicate messages.
+		// If not set, the server default is 2 minutes.
+		Duplicates time.Duration `json:"duplicate_window,omitempty"`
+
+		// Placement is used to declare where the stream should be placed via
+		// tags and/or an explicit cluster name.
+		Placement *Placement `json:"placement,omitempty"`
+
+		// Mirror defines the configuration for mirroring another stream.
+		Mirror *StreamSource `json:"mirror,omitempty"`
+
+		// Sources is a list of other streams this stream sources messages from.
+		Sources []*StreamSource `json:"sources,omitempty"`
+
+		// Sealed streams do not allow messages to be published or deleted via
+		// limits or the API; sealed streams cannot be unsealed via a
+		// configuration update. Sealed can only be set on already created
+		// streams via the Update API.
+		Sealed bool `json:"sealed,omitempty"`
+
+		// DenyDelete restricts the ability to delete messages from a stream via
+		// the API. Defaults to false.
+		DenyDelete bool `json:"deny_delete,omitempty"`
+
+		// DenyPurge restricts the ability to purge messages from a stream via
+		// the API. Defaults to false.
+		DenyPurge bool `json:"deny_purge,omitempty"`
+
+		// AllowRollup allows the use of the Nats-Rollup header to replace all
+		// contents of a stream, or subject in a stream, with a single new
+		// message.
+		AllowRollup bool `json:"allow_rollup_hdrs,omitempty"`
+
+		// Compression specifies the message storage compression algorithm.
+		// Defaults to NoCompression.
+		Compression StoreCompression `json:"compression"`
+
+		// FirstSeq is the initial sequence number of the first message in the
+		// stream.
+		FirstSeq uint64 `json:"first_seq,omitempty"`
+
+		// SubjectTransform allows applying a transformation to matching
+		// messages' subjects.
+		SubjectTransform *SubjectTransformConfig `json:"subject_transform,omitempty"`
+
+		// RePublish allows immediately republishing a message to the configured
+		// subject after it is stored.
+		RePublish *RePublish `json:"republish,omitempty"`
+
+		// AllowDirect enables direct access to individual messages using the
+		// direct get API. Defaults to false.
+		AllowDirect bool `json:"allow_direct"`
+
+		// MirrorDirect enables direct access to individual messages from the
+		// origin stream using the direct get API. Defaults to false.
+		MirrorDirect bool `json:"mirror_direct"`
+
+		// ConsumerLimits defines limits of certain values that consumers can
+		// set; these act as defaults for consumers that do not set them.
+		ConsumerLimits StreamConsumerLimits `json:"consumer_limits,omitempty"`
+
+		// Metadata is a set of application-defined key-value pairs for
+		// associating metadata on the stream. This feature requires nats-server
+		// v2.10.0 or later.
+		Metadata map[string]string `json:"metadata,omitempty"`
+
+		// Template identifies the template that manages the Stream.
+		// Deprecated: This feature is no longer supported.
+		Template string `json:"template_owner,omitempty"`
+
+		// AllowMsgTTL allows header-initiated per-message TTLs.
+		// This feature requires nats-server v2.11.0 or later.
+		AllowMsgTTL bool `json:"allow_msg_ttl"`
+
+		// SubjectDeleteMarkerTTL enables and sets a duration for adding server
+		// markers for delete, purge and max age limits.
+		// This feature requires nats-server v2.11.0 or later.
+		SubjectDeleteMarkerTTL time.Duration `json:"subject_delete_marker_ttl,omitempty"`
+	}
+
+	// StreamSourceInfo shows information about an upstream stream
+	// source/mirror.
+	StreamSourceInfo struct {
+		// Name is the name of the stream that is being replicated.
+		Name string `json:"name"`
+
+		// Lag informs how many messages the source/mirror operation is behind.
+		// This will only be reported correctly if there is active communication
+		// with the stream/mirror.
+		Lag uint64 `json:"lag"`
+
+		// Active is the duration since the mirror or sourced stream last had
+		// activity. The value will be -1 when there has been no activity.
+		Active time.Duration `json:"active"`
+
+		// FilterSubject is the subject filter defined for this source/mirror.
+		FilterSubject string `json:"filter_subject,omitempty"`
+
+		// SubjectTransforms is a list of subject transforms defined for this
+		// source/mirror.
+		SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"`
+	}
+
+	// StreamState is the state of a JetStream stream at the time of request.
+	StreamState struct {
+		// Msgs is the number of messages stored in the stream.
+		Msgs uint64 `json:"messages"`
+
+		// Bytes is the number of bytes stored in the stream.
+		Bytes uint64 `json:"bytes"`
+
+		// FirstSeq is the sequence number of the first message in the stream.
+		FirstSeq uint64 `json:"first_seq"`
+
+		// FirstTime is the timestamp of the first message in the stream.
+		FirstTime time.Time `json:"first_ts"`
+
+		// LastSeq is the sequence number of the last message in the stream.
+		LastSeq uint64 `json:"last_seq"`
+
+		// LastTime is the timestamp of the last message in the stream.
+		LastTime time.Time `json:"last_ts"`
+
+		// Consumers is the number of consumers on the stream.
+		Consumers int `json:"consumer_count"`
+
+		// Deleted is a list of sequence numbers that have been removed from the
+		// stream. This field will only be returned if the stream has been
+		// fetched with the DeletedDetails option.
+		Deleted []uint64 `json:"deleted"`
+
+		// NumDeleted is the number of messages that have been removed from the
+		// stream. Only deleted messages causing a gap in stream sequence numbers
+		// are counted. Messages deleted at the beginning or end of the stream
+		// are not counted.
+		NumDeleted int `json:"num_deleted"`
+
+		// NumSubjects is the number of unique subjects the stream has received
+		// messages on.
+		NumSubjects uint64 `json:"num_subjects"`
+
+		// Subjects is a map of subjects the stream has received messages on
+		// with message count per subject. This field will only be returned if
+		// the stream has been fetched with the SubjectFilter option.
+		Subjects map[string]uint64 `json:"subjects"`
+	}
+
+	// ClusterInfo shows information about the underlying set of servers that
+	// make up the stream or consumer.
+	ClusterInfo struct {
+		// Name is the name of the cluster.
+		Name string `json:"name,omitempty"`
+
+		// Leader is the server name of the RAFT leader.
+		Leader string `json:"leader,omitempty"`
+
+		// Replicas is the list of members of the RAFT cluster.
+		Replicas []*PeerInfo `json:"replicas,omitempty"`
+	}
+
+	// PeerInfo shows information about the peers in the cluster that are
+	// supporting the stream or consumer.
+	PeerInfo struct {
+		// Name is the server name of the peer.
+		Name string `json:"name"`
+
+		// Current indicates if the peer is up to date and synchronized with the
+		// leader.
+		Current bool `json:"current"`
+
+		// Offline indicates if the peer is considered offline by the group.
+		Offline bool `json:"offline,omitempty"`
+
+		// Active is the duration since this peer was last seen.
+		Active time.Duration `json:"active"`
+
+		// Lag is the number of uncommitted operations this peer is behind the
+		// leader.
+		Lag uint64 `json:"lag,omitempty"`
+	}
+
+	// SubjectTransformConfig is for applying a subject transform (to matching
+	// messages) before doing anything else when a new message is received.
+	SubjectTransformConfig struct {
+		// Source is the subject pattern to match incoming messages against.
+		Source string `json:"src"`
+
+		// Destination is the subject pattern to remap the subject to.
+		Destination string `json:"dest"`
+	}
+
+	// RePublish is for republishing messages once committed to a stream. The
+	// original subject is remapped from the subject pattern to the destination
+	// pattern.
+	RePublish struct {
+		// Source is the subject pattern to match incoming messages against.
+		Source string `json:"src,omitempty"`
+
+		// Destination is the subject pattern to republish the subject to.
+		Destination string `json:"dest"`
+
+		// HeadersOnly is a flag to indicate that only the headers should be
+		// republished.
+		HeadersOnly bool `json:"headers_only,omitempty"`
+	}
+
+	// Placement is used to guide placement of streams in clustered JetStream.
+	Placement struct {
+		// Cluster is the name of the cluster to which the stream should be
+		// assigned.
+		Cluster string `json:"cluster"`
+
+		// Tags are used to match streams to servers in the cluster. A stream
+		// will be assigned to a server with a matching tag.
+ Tags []string `json:"tags,omitempty"` + } + + // StreamSource dictates how streams can source from other streams. + StreamSource struct { + // Name is the name of the stream to source from. + Name string `json:"name"` + + // OptStartSeq is the sequence number to start sourcing from. + OptStartSeq uint64 `json:"opt_start_seq,omitempty"` + + // OptStartTime is the timestamp of messages to start sourcing from. + OptStartTime *time.Time `json:"opt_start_time,omitempty"` + + // FilterSubject is the subject filter used to only replicate messages + // with matching subjects. + FilterSubject string `json:"filter_subject,omitempty"` + + // SubjectTransforms is a list of subject transforms to apply to + // matching messages. + // + // Subject transforms on sources and mirrors are also used as subject + // filters with optional transformations. + SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"` + + // External is a configuration referencing a stream source in another + // account or JetStream domain. + External *ExternalStream `json:"external,omitempty"` + + // Domain is used to configure a stream source in another JetStream + // domain. This setting will set the External field with the appropriate + // APIPrefix. + Domain string `json:"-"` + } + + // ExternalStream allows you to qualify access to a stream source in another + // account. + ExternalStream struct { + // APIPrefix is the subject prefix that imports the other account/domain + // $JS.API.CONSUMER.> subjects. + APIPrefix string `json:"api"` + + // DeliverPrefix is the delivery subject to use for the push consumer. + DeliverPrefix string `json:"deliver"` + } + + // StreamConsumerLimits are the limits for a consumer on a stream. These can + // be overridden on a per consumer basis. + StreamConsumerLimits struct { + // InactiveThreshold is a duration which instructs the server to clean + // up the consumer if it has been inactive for the specified duration. + InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"` + + // MaxAckPending is a maximum number of outstanding unacknowledged + // messages for a consumer. + MaxAckPending int `json:"max_ack_pending,omitempty"` + } + + // DiscardPolicy determines how to proceed when limits of messages or bytes + // are reached. + DiscardPolicy int + + // RetentionPolicy determines how messages in a stream are retained. + RetentionPolicy int + + // StorageType determines how messages are stored for retention. + StorageType int + + // StoreCompression determines how messages are compressed. + StoreCompression uint8 +) + +const ( + // LimitsPolicy (default) means that messages are retained until any given + // limit is reached. This could be one of MaxMsgs, MaxBytes, or MaxAge. + LimitsPolicy RetentionPolicy = iota + + // InterestPolicy specifies that when all known observables have + // acknowledged a message it can be removed. + InterestPolicy + + // WorkQueuePolicy specifies that when the first worker or subscriber + // acknowledges the message it can be removed. + WorkQueuePolicy +) + +const ( + // DiscardOld will remove older messages to return to the limits. This is + // the default. + DiscardOld DiscardPolicy = iota + + // DiscardNew will fail to store new messages once the limits are reached. 
+ DiscardNew +) + +const ( + limitsPolicyString = "limits" + interestPolicyString = "interest" + workQueuePolicyString = "workqueue" +) + +func (rp RetentionPolicy) String() string { + switch rp { + case LimitsPolicy: + return "Limits" + case InterestPolicy: + return "Interest" + case WorkQueuePolicy: + return "WorkQueue" + default: + return "Unknown Retention Policy" + } +} + +func (rp RetentionPolicy) MarshalJSON() ([]byte, error) { + switch rp { + case LimitsPolicy: + return json.Marshal(limitsPolicyString) + case InterestPolicy: + return json.Marshal(interestPolicyString) + case WorkQueuePolicy: + return json.Marshal(workQueuePolicyString) + default: + return nil, fmt.Errorf("nats: can not marshal %v", rp) + } +} + +func (rp *RetentionPolicy) UnmarshalJSON(data []byte) error { + switch string(data) { + case jsonString(limitsPolicyString): + *rp = LimitsPolicy + case jsonString(interestPolicyString): + *rp = InterestPolicy + case jsonString(workQueuePolicyString): + *rp = WorkQueuePolicy + default: + return fmt.Errorf("nats: can not unmarshal %q", data) + } + return nil +} + +func (dp DiscardPolicy) String() string { + switch dp { + case DiscardOld: + return "DiscardOld" + case DiscardNew: + return "DiscardNew" + default: + return "Unknown Discard Policy" + } +} + +func (dp DiscardPolicy) MarshalJSON() ([]byte, error) { + switch dp { + case DiscardOld: + return json.Marshal("old") + case DiscardNew: + return json.Marshal("new") + default: + return nil, fmt.Errorf("nats: can not marshal %v", dp) + } +} + +func (dp *DiscardPolicy) UnmarshalJSON(data []byte) error { + switch strings.ToLower(string(data)) { + case jsonString("old"): + *dp = DiscardOld + case jsonString("new"): + *dp = DiscardNew + default: + return fmt.Errorf("nats: can not unmarshal %q", data) + } + return nil +} + +const ( + // FileStorage specifies on disk storage. It's the default. + FileStorage StorageType = iota + // MemoryStorage specifies in memory only. + MemoryStorage +) + +const ( + memoryStorageString = "memory" + fileStorageString = "file" +) + +func (st StorageType) String() string { + switch st { + case MemoryStorage: + return "Memory" + case FileStorage: + return "File" + default: + return "Unknown Storage Type" + } +} + +func (st StorageType) MarshalJSON() ([]byte, error) { + switch st { + case MemoryStorage: + return json.Marshal(memoryStorageString) + case FileStorage: + return json.Marshal(fileStorageString) + default: + return nil, fmt.Errorf("nats: can not marshal %v", st) + } +} + +func (st *StorageType) UnmarshalJSON(data []byte) error { + switch string(data) { + case jsonString(memoryStorageString): + *st = MemoryStorage + case jsonString(fileStorageString): + *st = FileStorage + default: + return fmt.Errorf("nats: can not unmarshal %q", data) + } + return nil +} + +func jsonString(s string) string { + return "\"" + s + "\"" +} + +const ( + // NoCompression disables compression on the stream. This is the default. + NoCompression StoreCompression = iota + + // S2Compression enables S2 compression on the stream. 
+ S2Compression +) + +func (alg StoreCompression) String() string { + switch alg { + case NoCompression: + return "None" + case S2Compression: + return "S2" + default: + return "Unknown StoreCompression" + } +} + +func (alg StoreCompression) MarshalJSON() ([]byte, error) { + var str string + switch alg { + case S2Compression: + str = "s2" + case NoCompression: + str = "none" + default: + return nil, errors.New("unknown compression algorithm") + } + return json.Marshal(str) +} + +func (alg *StoreCompression) UnmarshalJSON(b []byte) error { + var str string + if err := json.Unmarshal(b, &str); err != nil { + return err + } + switch str { + case "s2": + *alg = S2Compression + case "none": + *alg = NoCompression + default: + return errors.New("unknown compression algorithm") + } + return nil +} diff --git a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/config/config.go b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/config/config.go index 4d6de02c5..130383adb 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/config/config.go +++ b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/config/config.go @@ -33,10 +33,11 @@ type Config struct { ProductVersion string `mapstructure:"product_version"` AllowPropfindDepthInfinitiy bool `mapstructure:"allow_depth_infinity"` - TransferSharedSecret string `mapstructure:"transfer_shared_secret"` - NameValidation NameValidation `mapstructure:"validation"` + // SharedSecret used to sign the 'oc:download' URLs + URLSigningSharedSecret string `mapstructure:"url_signing_shared_secret"` + MachineAuthAPIKey string `mapstructure:"machine_auth_apikey"` } diff --git a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/ocdav.go b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/ocdav.go index 9f358a0bf..8a892d3ad 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/ocdav.go +++ b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/ocdav.go @@ -20,6 +20,7 @@ package ocdav import ( "context" + "fmt" "io" "net/http" "path" @@ -41,6 +42,7 @@ import ( "github.com/opencloud-eu/reva/v2/pkg/rhttp" "github.com/opencloud-eu/reva/v2/pkg/rhttp/global" "github.com/opencloud-eu/reva/v2/pkg/rhttp/router" + "github.com/opencloud-eu/reva/v2/pkg/signedurl" "github.com/opencloud-eu/reva/v2/pkg/storage/favorite" "github.com/opencloud-eu/reva/v2/pkg/storage/favorite/registry" "github.com/opencloud-eu/reva/v2/pkg/storage/utils/templates" @@ -69,6 +71,7 @@ type svc struct { LockSystem LockSystem userIdentifierCache *ttlcache.Cache nameValidators []Validator + urlSigner signedurl.Signer } func (s *svc) Config() *config.Config { @@ -116,6 +119,15 @@ func NewWith(conf *config.Config, fm favorite.Manager, ls LockSystem, _ *zerolog // be safe - init the conf again conf.Init() + var signer signedurl.Signer + if conf.URLSigningSharedSecret != "" { + var err error + signer, err = signedurl.NewJWTSignedURL(signedurl.WithSecret(conf.URLSigningSharedSecret)) + if err != nil { + return nil, fmt.Errorf("failed to initialize URL signer: %w", err) + } + } + s := &svc{ c: conf, webDavHandler: new(WebDavHandler), @@ -129,6 +141,7 @@ func NewWith(conf *config.Config, fm favorite.Manager, ls LockSystem, _ *zerolog LockSystem: ls, userIdentifierCache: ttlcache.NewCache(), nameValidators: ValidatorsFromConfig(conf), + urlSigner: signer, } _ = s.userIdentifierCache.SetTTL(60 
* time.Second) diff --git a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/propfind/propfind.go b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/propfind/propfind.go index 58a8eeaaf..14a62069a 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/propfind/propfind.go +++ b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/propfind/propfind.go @@ -52,6 +52,7 @@ import ( rstatus "github.com/opencloud-eu/reva/v2/pkg/rgrpc/status" "github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool" "github.com/opencloud-eu/reva/v2/pkg/rhttp/router" + "github.com/opencloud-eu/reva/v2/pkg/signedurl" "github.com/opencloud-eu/reva/v2/pkg/storagespace" "github.com/opencloud-eu/reva/v2/pkg/utils" "github.com/rs/zerolog" @@ -214,14 +215,16 @@ type Handler struct { PublicURL string selector pool.Selectable[gateway.GatewayAPIClient] c *config.Config + urlSigner signedurl.Signer } // NewHandler returns a new PropfindHandler instance -func NewHandler(publicURL string, selector pool.Selectable[gateway.GatewayAPIClient], c *config.Config) *Handler { +func NewHandler(publicURL string, selector pool.Selectable[gateway.GatewayAPIClient], signer signedurl.Signer, c *config.Config) *Handler { return &Handler{ PublicURL: publicURL, selector: selector, c: c, + urlSigner: signer, } } @@ -494,7 +497,7 @@ func (p *Handler) propfindResponse(ctx context.Context, w http.ResponseWriter, r prefer := net.ParsePrefer(r.Header.Get(net.HeaderPrefer)) returnMinimal := prefer[net.HeaderPreferReturn] == "minimal" - propRes, err := MultistatusResponse(ctx, &pf, resourceInfos, p.PublicURL, namespace, linkshares, returnMinimal) + propRes, err := MultistatusResponse(ctx, &pf, resourceInfos, p.PublicURL, namespace, linkshares, returnMinimal, p.urlSigner) if err != nil { log.Error().Err(err).Msg("error formatting propfind") w.WriteHeader(http.StatusInternalServerError) @@ -985,7 +988,7 @@ func ReadPropfind(r io.Reader) (pf XML, status int, err error) { } // MultistatusResponse converts a list of resource infos into a multistatus response string -func MultistatusResponse(ctx context.Context, pf *XML, mds []*provider.ResourceInfo, publicURL, ns string, linkshares map[string]struct{}, returnMinimal bool) ([]byte, error) { +func MultistatusResponse(ctx context.Context, pf *XML, mds []*provider.ResourceInfo, publicURL, ns string, linkshares map[string]struct{}, returnMinimal bool, downloadURLSigner signedurl.Signer) ([]byte, error) { g, ctx := errgroup.WithContext(ctx) type work struct { @@ -1020,7 +1023,7 @@ func MultistatusResponse(ctx context.Context, pf *XML, mds []*provider.ResourceI for i := 0; i < numWorkers; i++ { g.Go(func() error { for work := range workChan { - res, err := mdToPropResponse(ctx, pf, work.info, publicURL, ns, linkshares, returnMinimal) + res, err := mdToPropResponse(ctx, pf, work.info, publicURL, ns, linkshares, returnMinimal, downloadURLSigner) if err != nil { return err } @@ -1061,7 +1064,7 @@ func MultistatusResponse(ctx context.Context, pf *XML, mds []*provider.ResourceI // mdToPropResponse converts the CS3 metadata into a webdav PropResponse // ns is the CS3 namespace that needs to be removed from the CS3 path before // prefixing it with the baseURI -func mdToPropResponse(ctx context.Context, pf *XML, md *provider.ResourceInfo, publicURL, ns string, linkshares map[string]struct{}, returnMinimal bool) (*ResponseXML, error) { +func mdToPropResponse(ctx context.Context, pf *XML, md *provider.ResourceInfo, 
publicURL, ns string, linkshares map[string]struct{}, returnMinimal bool, urlSigner signedurl.Signer) (*ResponseXML, error) { ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "md_to_prop_response") span.SetAttributes(attribute.KeyValue{Key: "publicURL", Value: attribute.StringValue(publicURL)}) span.SetAttributes(attribute.KeyValue{Key: "ns", Value: attribute.StringValue(ns)}) @@ -1516,23 +1519,14 @@ func mdToPropResponse(ctx context.Context, pf *XML, md *provider.ResourceInfo, p appendToNotFound(prop.NotFound("oc:owner-display-name")) } case "downloadURL": // desktop - if isPublic && md.Type == provider.ResourceType_RESOURCE_TYPE_FILE { - var path string - if !ls.PasswordProtected { - path = p + if md.Type == provider.ResourceType_RESOURCE_TYPE_FILE { + url := downloadURL(ctx, sublog, isPublic, p, ls, publicURL, baseURI, urlSigner) + if url != "" { + appendToOK(prop.Escaped("oc:downloadURL", url)) } else { - expiration := time.Unix(int64(ls.Signature.SignatureExpiration.Seconds), int64(ls.Signature.SignatureExpiration.Nanos)) - var sb strings.Builder - - sb.WriteString(p) - sb.WriteString("?signature=") - sb.WriteString(ls.Signature.Signature) - sb.WriteString("&expiration=") - sb.WriteString(url.QueryEscape(expiration.Format(time.RFC3339))) - - path = sb.String() + appendToNotFound(prop.NotFound("oc:" + pf.Prop[i].Local)) } - appendToOK(prop.Escaped("oc:downloadURL", publicURL+baseURI+path)) + } else { appendToNotFound(prop.NotFound("oc:" + pf.Prop[i].Local)) } @@ -1738,6 +1732,42 @@ func hasPreview(md *provider.ResourceInfo, appendToOK func(p ...prop.PropertyXML } } +func downloadURL(ctx context.Context, log zerolog.Logger, isPublic bool, path string, ls *link.PublicShare, publicURL string, baseURI string, urlSigner signedurl.Signer) string { + switch { + case isPublic: + var queryString string + if !ls.PasswordProtected { + queryString = path + } else { + expiration := time.Unix(int64(ls.Signature.SignatureExpiration.Seconds), int64(ls.Signature.SignatureExpiration.Nanos)) + var sb strings.Builder + + sb.WriteString(path) + sb.WriteString("?signature=") + sb.WriteString(ls.Signature.Signature) + sb.WriteString("&expiration=") + sb.WriteString(url.QueryEscape(expiration.Format(time.RFC3339))) + + queryString = sb.String() + } + return publicURL + baseURI + queryString + case urlSigner != nil: + u, ok := ctxpkg.ContextGetUser(ctx) + if !ok { + log.Error().Msg("could not get user from context for download URL signing") + return "" + } + signedURL, err := urlSigner.Sign(publicURL+baseURI+path, u.Id.OpaqueId, 30*time.Minute) + if err != nil { + log.Error().Err(err).Msg("failed to sign download URL") + return "" + } else { + return signedURL + } + } + return "" +} + func activeLocks(log *zerolog.Logger, lock *provider.Lock) string { if lock == nil || lock.Type == provider.LockType_LOCK_TYPE_INVALID { return "" diff --git a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/publicfile.go b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/publicfile.go index 970c68e64..0f995b219 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/publicfile.go +++ b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/publicfile.go @@ -147,7 +147,7 @@ func (s *svc) handlePropfindOnToken(w http.ResponseWriter, r *http.Request, ns s prefer := net.ParsePrefer(r.Header.Get("prefer")) returnMinimal := prefer[net.HeaderPreferReturn] == "minimal" - propRes, err := 
propfind.MultistatusResponse(ctx, &pf, infos, s.c.PublicURL, ns, nil, returnMinimal) + propRes, err := propfind.MultistatusResponse(ctx, &pf, infos, s.c.PublicURL, ns, nil, returnMinimal, nil) if err != nil { sublog.Error().Err(err).Msg("error formatting propfind") w.WriteHeader(http.StatusInternalServerError) diff --git a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/report.go b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/report.go index 904e2ebd0..3ae6602dd 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/report.go +++ b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/report.go @@ -117,7 +117,7 @@ func (s *svc) doFilterFiles(w http.ResponseWriter, r *http.Request, ff *reportFi prefer := net.ParsePrefer(r.Header.Get("prefer")) returnMinimal := prefer[net.HeaderPreferReturn] == "minimal" - responsesXML, err := propfind.MultistatusResponse(ctx, &propfind.XML{Prop: ff.Prop}, infos, s.c.PublicURL, namespace, nil, returnMinimal) + responsesXML, err := propfind.MultistatusResponse(ctx, &propfind.XML{Prop: ff.Prop}, infos, s.c.PublicURL, namespace, nil, returnMinimal, nil) if err != nil { log.Error().Err(err).Msg("error formatting propfind") w.WriteHeader(http.StatusInternalServerError) diff --git a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/spaces.go b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/spaces.go index 92e9587c3..eb995323f 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/spaces.go +++ b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/spaces.go @@ -82,7 +82,7 @@ func (h *SpacesHandler) Handler(s *svc, trashbinHandler *TrashbinHandler) http.H var err error switch r.Method { case MethodPropfind: - p := propfind.NewHandler(config.PublicURL, s.gatewaySelector, config) + p := propfind.NewHandler(config.PublicURL, s.gatewaySelector, s.urlSigner, config) p.HandleSpacesPropfind(w, r, spaceID) case MethodProppatch: status, err = s.handleSpacesProppatch(w, r, spaceID) diff --git a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/versions.go b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/versions.go index 75a1f6330..a621957f8 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/versions.go +++ b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/versions.go @@ -200,7 +200,7 @@ func (h *VersionsHandler) doListVersions(w http.ResponseWriter, r *http.Request, prefer := net.ParsePrefer(r.Header.Get("prefer")) returnMinimal := prefer[net.HeaderPreferReturn] == "minimal" - propRes, err := propfind.MultistatusResponse(ctx, &pf, infos, s.c.PublicURL, "", nil, returnMinimal) + propRes, err := propfind.MultistatusResponse(ctx, &pf, infos, s.c.PublicURL, "", nil, returnMinimal, nil) if err != nil { sublog.Error().Err(err).Msg("error formatting propfind") w.WriteHeader(http.StatusInternalServerError) diff --git a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/webdav.go b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/webdav.go index 00b12ba99..eaaf5b9a4 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/webdav.go +++ b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/webdav.go @@ -72,7 +72,7 @@ 
func (h *WebDavHandler) Handler(s *svc) http.Handler { var status int // status 0 means the handler already sent the response switch r.Method { case MethodPropfind: - p := propfind.NewHandler(config.PublicURL, s.gatewaySelector, config) + p := propfind.NewHandler(config.PublicURL, s.gatewaySelector, s.urlSigner, config) p.HandlePathPropfind(w, r, ns) case MethodLock: status, err = s.handleLock(w, r, ns) diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/events/mocks/Stream.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/events/mocks/Stream.go index 7e6e2e166..3279b30c3 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/events/mocks/Stream.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/events/mocks/Stream.go @@ -40,14 +40,13 @@ func (_m *Stream) EXPECT() *Stream_Expecter { // Consume provides a mock function with given fields: _a0, _a1 func (_m *Stream) Consume(_a0 string, _a1 ...events.ConsumeOption) (<-chan events.Event, error) { - _va := make([]interface{}, len(_a1)) - for _i := range _a1 { - _va[_i] = _a1[_i] + var tmpRet mock.Arguments + if len(_a1) > 0 { + tmpRet = _m.Called(_a0, _a1) + } else { + tmpRet = _m.Called(_a0) } - var _ca []interface{} - _ca = append(_ca, _a0) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for Consume") @@ -113,14 +112,13 @@ func (_c *Stream_Consume_Call) RunAndReturn(run func(string, ...events.ConsumeOp // Publish provides a mock function with given fields: _a0, _a1, _a2 func (_m *Stream) Publish(_a0 string, _a1 interface{}, _a2 ...events.PublishOption) error { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] + var tmpRet mock.Arguments + if len(_a2) > 0 { + tmpRet = _m.Called(_a0, _a1, _a2) + } else { + tmpRet = _m.Called(_a0, _a1) } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for Publish") diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/events/raw/certs.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/events/raw/certs.go new file mode 100644 index 000000000..5f74c9960 --- /dev/null +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/events/raw/certs.go @@ -0,0 +1,27 @@ +package raw + +import ( + "bytes" + "crypto/x509" + "errors" + "io" +) + +// newCertPoolFromPEM reads certificates from io.Reader and returns a x509.CertPool +// containing those certificates. +func newCertPoolFromPEM(crts ...io.Reader) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + var buf bytes.Buffer + for _, c := range crts { + if _, err := io.Copy(&buf, c); err != nil { + return nil, err + } + if !certPool.AppendCertsFromPEM(buf.Bytes()) { + return nil, errors.New("failed to append cert from PEM") + } + buf.Reset() + } + + return certPool, nil +} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/events/raw/mocks/Stream.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/events/raw/mocks/Stream.go new file mode 100644 index 000000000..b83bb7b6b --- /dev/null +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/events/raw/mocks/Stream.go @@ -0,0 +1,127 @@ +// Copyright 2018-2022 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +// Code generated by mockery v2.53.2. DO NOT EDIT. + +package mocks + +import ( + events "github.com/opencloud-eu/reva/v2/pkg/events" + mock "github.com/stretchr/testify/mock" + + raw "github.com/opencloud-eu/reva/v2/pkg/events/raw" +) + +// Stream is an autogenerated mock type for the Stream type +type Stream struct { + mock.Mock +} + +type Stream_Expecter struct { + mock *mock.Mock +} + +func (_m *Stream) EXPECT() *Stream_Expecter { + return &Stream_Expecter{mock: &_m.Mock} +} + +// Consume provides a mock function with given fields: group, evs +func (_m *Stream) Consume(group string, evs ...events.Unmarshaller) (<-chan raw.Event, error) { + var tmpRet mock.Arguments + if len(evs) > 0 { + tmpRet = _m.Called(group, evs) + } else { + tmpRet = _m.Called(group) + } + ret := tmpRet + + if len(ret) == 0 { + panic("no return value specified for Consume") + } + + var r0 <-chan raw.Event + var r1 error + if rf, ok := ret.Get(0).(func(string, ...events.Unmarshaller) (<-chan raw.Event, error)); ok { + return rf(group, evs...) + } + if rf, ok := ret.Get(0).(func(string, ...events.Unmarshaller) <-chan raw.Event); ok { + r0 = rf(group, evs...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan raw.Event) + } + } + + if rf, ok := ret.Get(1).(func(string, ...events.Unmarshaller) error); ok { + r1 = rf(group, evs...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Stream_Consume_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Consume' +type Stream_Consume_Call struct { + *mock.Call +} + +// Consume is a helper method to define mock.On call +// - group string +// - evs ...events.Unmarshaller +func (_e *Stream_Expecter) Consume(group interface{}, evs ...interface{}) *Stream_Consume_Call { + return &Stream_Consume_Call{Call: _e.mock.On("Consume", + append([]interface{}{group}, evs...)...)} +} + +func (_c *Stream_Consume_Call) Run(run func(group string, evs ...events.Unmarshaller)) *Stream_Consume_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]events.Unmarshaller, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(events.Unmarshaller) + } + } + run(args[0].(string), variadicArgs...) + }) + return _c +} + +func (_c *Stream_Consume_Call) Return(_a0 <-chan raw.Event, _a1 error) *Stream_Consume_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Stream_Consume_Call) RunAndReturn(run func(string, ...events.Unmarshaller) (<-chan raw.Event, error)) *Stream_Consume_Call { + _c.Call.Return(run) + return _c +} + +// NewStream creates a new instance of Stream. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewStream(t interface { + mock.TestingT + Cleanup(func()) +}) *Stream { + mock := &Stream{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/events/raw/raw.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/events/raw/raw.go new file mode 100644 index 000000000..1d74d8105 --- /dev/null +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/events/raw/raw.go @@ -0,0 +1,216 @@ +package raw + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "os" + "reflect" + "time" + + "github.com/cenkalti/backoff" + "github.com/nats-io/nats.go" + "github.com/nats-io/nats.go/jetstream" + "github.com/opencloud-eu/reva/v2/pkg/events" + "github.com/pkg/errors" +) + +// Config is the configuration needed for a NATS event stream +type Config struct { + Endpoint string `mapstructure:"address"` // Endpoint of the nats server + Cluster string `mapstructure:"clusterID"` // ClusterID of the nats cluster + TLSInsecure bool `mapstructure:"tls-insecure"` // Whether to skip verification of TLS certificates + TLSRootCACertificate string `mapstructure:"tls-root-ca-cert"` // The root CA certificate used to validate the TLS certificate + EnableTLS bool `mapstructure:"enable-tls"` // Enable TLS + AuthUsername string `mapstructure:"username"` // Username for authentication + AuthPassword string `mapstructure:"password"` // Password for authentication + MaxAckPending int `mapstructure:"max-ack-pending"` // Maximum number of unacknowledged messages + AckWait time.Duration `mapstructure:"ack-wait"` // Time to wait for an ack +} + +type RawEvent struct { + Timestamp time.Time + Metadata map[string]string + ID string + Topic string + Payload []byte + + msg jetstream.Msg +} + +type Event struct { + events.Event + + msg jetstream.Msg +} + +func (re *Event) Ack() error { + if re.msg == nil { + return errors.New("cannot ack event without message") + } + return re.msg.Ack() +} + +func (re *Event) InProgress() error { + if re.msg == nil { + return errors.New("cannot mark event as in progress without message") + } + return re.msg.InProgress() +} + +type Stream interface { + Consume(group string, evs ...events.Unmarshaller) (<-chan Event, error) +} + +type RawStream struct { + Js jetstream.Stream + + c Config +} + +func FromConfig(ctx context.Context, name string, cfg Config) (Stream, error) { + var s Stream + b := backoff.NewExponentialBackOff() + + connect := func() error { + var tlsConf *tls.Config + if cfg.EnableTLS { + var rootCAPool *x509.CertPool + if cfg.TLSRootCACertificate != "" { + rootCrtFile, err := os.Open(cfg.TLSRootCACertificate) + if err != nil { + return err + } + + rootCAPool, err = newCertPoolFromPEM(rootCrtFile) + if err != nil { + return err + } + cfg.TLSInsecure = false + } + + tlsConf = &tls.Config{ + MinVersion: tls.VersionTLS12, + InsecureSkipVerify: cfg.TLSInsecure, + RootCAs: rootCAPool, + } + } + + nopts := nats.GetDefaultOptions() + nopts.Name = name + if tlsConf != nil { + nopts.Secure = true + nopts.TLSConfig = tlsConf + } + + if len(cfg.Endpoint) > 0 { + nopts.Servers = []string{cfg.Endpoint} + } + + if cfg.AuthUsername != "" && cfg.AuthPassword != "" { + nopts.User = cfg.AuthUsername + nopts.Password = cfg.AuthPassword + } + + conn, err := nopts.Connect() + if err != nil { + return err + } + + jsConn, err := jetstream.New(conn) + if err != nil { + return err + } + + js, err := jsConn.Stream(ctx, events.MainQueueName) + if err != nil { + return err + } + + s = &RawStream{ + Js: js, + c: cfg, + } + 
return nil + } + err := backoff.Retry(connect, b) + if err != nil { + return s, errors.Wrap(err, "could not connect to nats jetstream") + } + return s, nil +} + +func (s *RawStream) Consume(group string, evs ...events.Unmarshaller) (<-chan Event, error) { + c, err := s.consumeRaw(group) + if err != nil { + return nil, err + } + + registeredEvents := map[string]events.Unmarshaller{} + for _, e := range evs { + typ := reflect.TypeOf(e) + registeredEvents[typ.String()] = e + } + + outchan := make(chan Event) + go func() { + for { + e := <-c + eventType := e.Metadata[events.MetadatakeyEventType] + ev, ok := registeredEvents[eventType] + if !ok { + _ = e.msg.Ack() // Discard. We are not interested in this event type + continue + } + + event, err := ev.Unmarshal(e.Payload) + if err != nil { + continue + } + + outchan <- Event{ + Event: events.Event{ + Type: eventType, + ID: e.Metadata[events.MetadatakeyEventID], + TraceParent: e.Metadata[events.MetadatakeyTraceParent], + InitiatorID: e.Metadata[events.MetadatakeyInitiatorID], + Event: event, + }, + msg: e.msg, + } + } + }() + return outchan, nil +} + +func (s *RawStream) consumeRaw(group string) (<-chan RawEvent, error) { + consumer, err := s.Js.CreateOrUpdateConsumer(context.Background(), jetstream.ConsumerConfig{ + Durable: group, + DeliverPolicy: jetstream.DeliverNewPolicy, + AckPolicy: jetstream.AckExplicitPolicy, // Require manual acknowledgment + MaxAckPending: s.c.MaxAckPending, // Maximum number of unacknowledged messages + AckWait: s.c.AckWait, // Time to wait for an ack + }) + if err != nil { + return nil, err + } + + channel := make(chan RawEvent) + callback := func(msg jetstream.Msg) { + var rawEvent RawEvent + if err := json.Unmarshal(msg.Data(), &rawEvent); err != nil { + fmt.Printf("error unmarshalling event: %v\n", err) + return + } + rawEvent.msg = msg + channel <- rawEvent + } + _, err = consumer.Consume(callback) + if err != nil { + return nil, err + } + + return channel, nil +}
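The raw stream above trades the fire-and-forget delivery of the existing events package for explicit, per-message acknowledgment: anything that is not acked within AckWait is redelivered by JetStream. A minimal consumer sketch, not part of this patch; the config values are made up, and events.UploadReady stands in for any type implementing events.Unmarshaller:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/opencloud-eu/reva/v2/pkg/events"
	"github.com/opencloud-eu/reva/v2/pkg/events/raw"
)

func main() {
	// Hypothetical values; the field names are taken from raw.Config above.
	cfg := raw.Config{
		Endpoint:      "nats://localhost:4222",
		MaxAckPending: 1000,
		AckWait:       30 * time.Second,
	}

	stream, err := raw.FromConfig(context.Background(), "my-service", cfg)
	if err != nil {
		panic(err)
	}

	// Events whose type is not registered here are acked and discarded
	// by the consumer goroutine inside Consume.
	ch, err := stream.Consume("my-group", events.UploadReady{})
	if err != nil {
		panic(err)
	}

	for ev := range ch {
		fmt.Println("got event", ev.Type)
		// Ack only after the event has been handled; unacked messages
		// are redelivered once AckWait expires.
		if err := ev.Ack(); err != nil {
			fmt.Println("ack failed:", err)
		}
	}
}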
diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/micro/ocdav/option.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/micro/ocdav/option.go index 72ed1c9b7..c294239fb 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/micro/ocdav/option.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/micro/ocdav/option.go @@ -401,3 +401,10 @@ func RegisterInterval(interval time.Duration) Option { o.RegisterInterval = interval } } + +// URLSigningSharedSecret provides a function to set the URLSigningSharedSecret config option. +func URLSigningSharedSecret(secret string) Option { + return func(o *Options) { + o.config.URLSigningSharedSecret = secret + } +} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/signedurl/jwt.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/signedurl/jwt.go new file mode 100644 index 000000000..1568e0fb1 --- /dev/null +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/signedurl/jwt.go @@ -0,0 +1,116 @@ +package signedurl + +import ( + "errors" + "fmt" + "net/url" + "time" + + "github.com/golang-jwt/jwt/v5" +) + +// JWTSignedURL implements the Signer and Verifier interfaces using JWT for signing URLs. +type JWTSignedURL struct { + JWTOptions +} + +type claims struct { + TargetURL string `json:"target_url"` + jwt.RegisteredClaims +} + +// JWTOption defines a single option function. +type JWTOption func(o *JWTOptions) + +// JWTOptions defines the available options for this package. +type JWTOptions struct { + secret string // Secret key used for signing and verifying JWTs + queryParam string // Name of the query parameter for the signature +} + +func NewJWTSignedURL(opts ...JWTOption) (*JWTSignedURL, error) { + opt := JWTOptions{} + for _, o := range opts { + o(&opt) + } + + if opt.secret == "" { + return nil, ErrInvalidKey + } + + if opt.queryParam == "" { + opt.queryParam = "oc-jwt-sig" + } + + return &JWTSignedURL{opt}, nil +} + +func WithSecret(secret string) JWTOption { + return func(o *JWTOptions) { + o.secret = secret + } +} + +func WithQueryParam(queryParam string) JWTOption { + return func(o *JWTOptions) { + o.queryParam = queryParam + } +} + +// Sign signs a URL using JWT with a specified time-to-live (ttl). +func (j *JWTSignedURL) Sign(unsignedURL, subject string, ttl time.Duration) (string, error) { + // Re-encode the query parameters to ensure they are "normalized" (Values.Encode() returns them in alphabetical order). + u, err := url.Parse(unsignedURL) + if err != nil { + return "", NewSignedURLError(err, "failed to parse url") + } + query := u.Query() + u.RawQuery = query.Encode() + c := claims{ + TargetURL: u.String(), + RegisteredClaims: jwt.RegisteredClaims{ + ExpiresAt: jwt.NewNumericDate(time.Now().Add(ttl)), + Issuer: "reva", + IssuedAt: jwt.NewNumericDate(time.Now()), + Subject: subject, + }, + } + token := jwt.NewWithClaims(jwt.SigningMethodHS256, c) + signedToken, err := token.SignedString([]byte(j.secret)) + if err != nil { + return "", fmt.Errorf("signing failed: %w", err) + } + query.Set(j.queryParam, signedToken) + u.RawQuery = query.Encode() + return u.String(), nil +} + +// Verify verifies a signed URL using a JWT. Returns the subject of the JWT if verification is successful. +func (j *JWTSignedURL) Verify(signedURL string) (string, error) { + u, err := url.Parse(signedURL) + if err != nil { + return "", NewSignatureVerificationError(fmt.Errorf("could not parse URL: %w", err)) + } + query := u.Query() + tokenString := query.Get(j.queryParam) + if tokenString == "" { + return "", NewSignatureVerificationError(errors.New("no signature in url")) + } + token, err := jwt.ParseWithClaims(tokenString, &claims{}, func(token *jwt.Token) (any, error) { return []byte(j.secret), nil }) + if err != nil { + return "", NewSignatureVerificationError(err) + } + c, ok := token.Claims.(*claims) + if !ok { + return "", NewSignatureVerificationError(errors.New("invalid JWT claims")) + } + + query.Del(j.queryParam) + u.RawQuery = query.Encode() + + if c.TargetURL != u.String() { + return "", NewSignatureVerificationError(errors.New("url mismatch")) + } + + return c.Subject, nil +}
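Together with the ocdav URLSigningSharedSecret option above, this lets ocdav sign per-user download URLs and validate them later. A minimal sketch of the round trip, not part of this patch; the secret, URL and subject are made up:

package main

import (
	"fmt"
	"time"

	"github.com/opencloud-eu/reva/v2/pkg/signedurl"
)

func main() {
	signer, err := signedurl.NewJWTSignedURL(
		signedurl.WithSecret("a-shared-secret"), // required, otherwise ErrInvalidKey
		signedurl.WithQueryParam("oc-jwt-sig"),  // optional, this is the default
	)
	if err != nil {
		panic(err)
	}

	// Sign normalizes the query string, embeds an HS256 JWT carrying the
	// target URL and the subject, and appends it as a query parameter.
	signed, err := signer.Sign("https://cloud.example.org/dav/files/alice/report.pdf", "alice", 30*time.Minute)
	if err != nil {
		panic(err)
	}

	// Verify strips the signature parameter, checks the expiry and that the
	// remaining URL matches the signed target, and returns the JWT subject.
	subject, err := signer.Verify(signed)
	fmt.Println(subject, err) // alice <nil>
}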
diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/signedurl/signedurl.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/signedurl/signedurl.go new file mode 100644 index 000000000..506b572db --- /dev/null +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/signedurl/signedurl.go @@ -0,0 +1,64 @@ +// Package signedurl provides interfaces and implementations for signing and verifying URLs. +package signedurl + +import ( + "time" +) + +type Signer interface { + // Sign signs a URL + Sign(url, principal string, ttl time.Duration) (string, error) +} + +type Verifier interface { + // Verify verifies a signed URL + Verify(signedURL string) (string, error) +} + +type SignedURLError struct { + innerErr error + message string +} + +// NewSignedURLError creates a new SignedURLError with the provided inner error and message. +func NewSignedURLError(innerErr error, message string) SignedURLError { + return SignedURLError{ + innerErr: innerErr, + message: message, + } +} + +var ErrInvalidKey = NewSignedURLError(nil, "invalid key provided") + +type SignatureVerificationError struct { + SignedURLError +} + +func NewSignatureVerificationError(innerErr error) SignatureVerificationError { + return SignatureVerificationError{ + SignedURLError: SignedURLError{ + innerErr: innerErr, + message: "signature verification failed", + }, + } +} + +func (e SignatureVerificationError) Is(tgt error) bool { + // Check if the target error is of type SignatureVerificationError + if _, ok := tgt.(SignatureVerificationError); ok { + return true + } + return false +} + +// Error implements the error interface for SignedURLError. +func (e SignedURLError) Error() string { + if e.innerErr != nil { + return e.message + ": " + e.innerErr.Error() + } + return e.message +} + +func (e SignedURLError) Unwrap() error { + return e.innerErr +} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/cephfswatcher.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/cephfswatcher.go index 7b20fe5b8..6c278dd10 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/cephfswatcher.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/cephfswatcher.go @@ -98,11 +98,13 @@ func (w *CephFSWatcher) Watch(topic string) { switch { case mask&CEPH_MDS_NOTIFY_DELETE > 0: err = w.tree.Scan(path, ActionDelete, isDir) - case mask&CEPH_MDS_NOTIFY_CREATE > 0 || mask&CEPH_MDS_NOTIFY_MOVED_TO > 0: + case mask&CEPH_MDS_NOTIFY_MOVED_TO > 0: if ev.SrcMask > 0 { // This is a move, clean up the old path err = w.tree.Scan(filepath.Join(w.tree.options.WatchRoot, ev.SrcPath), ActionMoveFrom, isDir) } + err = w.tree.Scan(path, ActionMove, isDir) + case mask&CEPH_MDS_NOTIFY_CREATE > 0: err = w.tree.Scan(path, ActionCreate, isDir) case mask&CEPH_MDS_NOTIFY_CLOSE_WRITE > 0: err = w.tree.Scan(path, ActionUpdate, isDir) diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/inotifywatcher.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/inotifywatcher.go index eb2c965fb..cbf146d1c 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/inotifywatcher.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/inotifywatcher.go @@ -92,7 +92,9 @@ func (iw *InotifyWatcher) Watch(path string) { err = iw.tree.Scan(event.Filename, ActionDelete, event.IsDir) case inotifywaitgo.MOVED_FROM: err = iw.tree.Scan(event.Filename, ActionMoveFrom, event.IsDir) - case inotifywaitgo.CREATE, inotifywaitgo.MOVED_TO: + case inotifywaitgo.MOVED_TO: + err = iw.tree.Scan(event.Filename, ActionMove, event.IsDir) + case inotifywaitgo.CREATE: err = iw.tree.Scan(event.Filename, ActionCreate, event.IsDir) case inotifywaitgo.CLOSE_WRITE: err = iw.tree.Scan(event.Filename, ActionUpdate, event.IsDir) diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/node.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/node.go index 11e21a7d1..6ec4ce86b 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/node.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/node.go @@ -1117,22 +1117,22 @@ func (n *Node) ReadUserPermissions(ctx context.Context, u *userpb.User) (ap *pro continue } - if isGrantExpired(g) { - continue - } - switch { case err == nil: + 
if isGrantExpired(g) { + continue + } + // If all permissions are set to false we have a deny grant if grants.PermissionsEqual(g.Permissions, &provider.ResourcePermissions{}) { return NoPermissions(), true, nil } AddPermissions(ap, g.GetPermissions()) case metadata.IsAttrUnset(err): - appctx.GetLogger(ctx).Error().Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("grant", grantees[i]).Interface("grantees", grantees).Msg("grant vanished from node after listing") + appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("path", n.InternalPath()).Str("grant", grantees[i]).Interface("grantees", grantees).Msg("grant vanished from node after listing") // continue with next segment default: - appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("grant", grantees[i]).Msg("error reading permissions") + appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("path", n.InternalPath()).Str("grant", grantees[i]).Msg("error reading permissions") // continue with next segment } } diff --git a/vendor/github.com/opencloud-eu/reva/v2/tests/cs3mocks/mocks/CollaborationAPIClient.go b/vendor/github.com/opencloud-eu/reva/v2/tests/cs3mocks/mocks/CollaborationAPIClient.go index a8048cd67..ce2a45210 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/tests/cs3mocks/mocks/CollaborationAPIClient.go +++ b/vendor/github.com/opencloud-eu/reva/v2/tests/cs3mocks/mocks/CollaborationAPIClient.go @@ -45,14 +45,13 @@ func (_m *CollaborationAPIClient) EXPECT() *CollaborationAPIClient_Expecter { // CreateShare provides a mock function with given fields: ctx, in, opts func (_m *CollaborationAPIClient) CreateShare(ctx context.Context, in *collaborationv1beta1.CreateShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.CreateShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for CreateShare") @@ -119,14 +118,13 @@ func (_c *CollaborationAPIClient_CreateShare_Call) RunAndReturn(run func(context // GetReceivedShare provides a mock function with given fields: ctx, in, opts func (_m *CollaborationAPIClient) GetReceivedShare(ctx context.Context, in *collaborationv1beta1.GetReceivedShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.GetReceivedShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetReceivedShare") @@ -193,14 +191,13 @@ func (_c *CollaborationAPIClient_GetReceivedShare_Call) RunAndReturn(run func(co // GetShare provides a mock function with given fields: ctx, in, opts func (_m *CollaborationAPIClient) GetShare(ctx context.Context, in *collaborationv1beta1.GetShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.GetShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetShare") @@ -267,14 +264,13 @@ func (_c *CollaborationAPIClient_GetShare_Call) RunAndReturn(run func(context.Co // ListReceivedShares provides a mock function with given fields: ctx, in, opts func (_m *CollaborationAPIClient) ListReceivedShares(ctx context.Context, in *collaborationv1beta1.ListReceivedSharesRequest, opts ...grpc.CallOption) (*collaborationv1beta1.ListReceivedSharesResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListReceivedShares") @@ -341,14 +337,13 @@ func (_c *CollaborationAPIClient_ListReceivedShares_Call) RunAndReturn(run func( // ListShares provides a mock function with given fields: ctx, in, opts func (_m *CollaborationAPIClient) ListShares(ctx context.Context, in *collaborationv1beta1.ListSharesRequest, opts ...grpc.CallOption) (*collaborationv1beta1.ListSharesResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListShares") @@ -415,14 +410,13 @@ func (_c *CollaborationAPIClient_ListShares_Call) RunAndReturn(run func(context. // RemoveShare provides a mock function with given fields: ctx, in, opts func (_m *CollaborationAPIClient) RemoveShare(ctx context.Context, in *collaborationv1beta1.RemoveShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.RemoveShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for RemoveShare") @@ -489,14 +483,13 @@ func (_c *CollaborationAPIClient_RemoveShare_Call) RunAndReturn(run func(context // UpdateReceivedShare provides a mock function with given fields: ctx, in, opts func (_m *CollaborationAPIClient) UpdateReceivedShare(ctx context.Context, in *collaborationv1beta1.UpdateReceivedShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.UpdateReceivedShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for UpdateReceivedShare") @@ -563,14 +556,13 @@ func (_c *CollaborationAPIClient_UpdateReceivedShare_Call) RunAndReturn(run func // UpdateShare provides a mock function with given fields: ctx, in, opts func (_m *CollaborationAPIClient) UpdateShare(ctx context.Context, in *collaborationv1beta1.UpdateShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.UpdateShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for UpdateShare") diff --git a/vendor/github.com/opencloud-eu/reva/v2/tests/cs3mocks/mocks/GatewayAPIClient.go b/vendor/github.com/opencloud-eu/reva/v2/tests/cs3mocks/mocks/GatewayAPIClient.go index 530274674..b3a335693 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/tests/cs3mocks/mocks/GatewayAPIClient.go +++ b/vendor/github.com/opencloud-eu/reva/v2/tests/cs3mocks/mocks/GatewayAPIClient.go @@ -76,14 +76,13 @@ func (_m *GatewayAPIClient) EXPECT() *GatewayAPIClient_Expecter { // AcceptInvite provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) AcceptInvite(ctx context.Context, in *invitev1beta1.AcceptInviteRequest, opts ...grpc.CallOption) (*invitev1beta1.AcceptInviteResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for AcceptInvite") @@ -150,14 +149,13 @@ func (_c *GatewayAPIClient_AcceptInvite_Call) RunAndReturn(run func(context.Cont // AddAppProvider provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) AddAppProvider(ctx context.Context, in *registryv1beta1.AddAppProviderRequest, opts ...grpc.CallOption) (*registryv1beta1.AddAppProviderResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for AddAppProvider") @@ -224,14 +222,13 @@ func (_c *GatewayAPIClient_AddAppProvider_Call) RunAndReturn(run func(context.Co // Authenticate provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) Authenticate(ctx context.Context, in *gatewayv1beta1.AuthenticateRequest, opts ...grpc.CallOption) (*gatewayv1beta1.AuthenticateResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for Authenticate") @@ -298,14 +295,13 @@ func (_c *GatewayAPIClient_Authenticate_Call) RunAndReturn(run func(context.Cont // CancelTransfer provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) CancelTransfer(ctx context.Context, in *txv1beta1.CancelTransferRequest, opts ...grpc.CallOption) (*txv1beta1.CancelTransferResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for CancelTransfer") @@ -372,14 +368,13 @@ func (_c *GatewayAPIClient_CancelTransfer_Call) RunAndReturn(run func(context.Co // CheckPermission provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) CheckPermission(ctx context.Context, in *permissionsv1beta1.CheckPermissionRequest, opts ...grpc.CallOption) (*permissionsv1beta1.CheckPermissionResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for CheckPermission") @@ -446,14 +441,13 @@ func (_c *GatewayAPIClient_CheckPermission_Call) RunAndReturn(run func(context.C // CreateContainer provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) CreateContainer(ctx context.Context, in *providerv1beta1.CreateContainerRequest, opts ...grpc.CallOption) (*providerv1beta1.CreateContainerResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for CreateContainer") @@ -520,14 +514,13 @@ func (_c *GatewayAPIClient_CreateContainer_Call) RunAndReturn(run func(context.C // CreateHome provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) CreateHome(ctx context.Context, in *providerv1beta1.CreateHomeRequest, opts ...grpc.CallOption) (*providerv1beta1.CreateHomeResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for CreateHome") @@ -594,14 +587,13 @@ func (_c *GatewayAPIClient_CreateHome_Call) RunAndReturn(run func(context.Contex // CreateOCMCoreShare provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) CreateOCMCoreShare(ctx context.Context, in *corev1beta1.CreateOCMCoreShareRequest, opts ...grpc.CallOption) (*corev1beta1.CreateOCMCoreShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for CreateOCMCoreShare") @@ -668,14 +660,13 @@ func (_c *GatewayAPIClient_CreateOCMCoreShare_Call) RunAndReturn(run func(contex // CreateOCMShare provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) CreateOCMShare(ctx context.Context, in *ocmv1beta1.CreateOCMShareRequest, opts ...grpc.CallOption) (*ocmv1beta1.CreateOCMShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for CreateOCMShare") @@ -742,14 +733,13 @@ func (_c *GatewayAPIClient_CreateOCMShare_Call) RunAndReturn(run func(context.Co // CreatePublicShare provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) CreatePublicShare(ctx context.Context, in *linkv1beta1.CreatePublicShareRequest, opts ...grpc.CallOption) (*linkv1beta1.CreatePublicShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for CreatePublicShare") @@ -816,14 +806,13 @@ func (_c *GatewayAPIClient_CreatePublicShare_Call) RunAndReturn(run func(context // CreateShare provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) CreateShare(ctx context.Context, in *collaborationv1beta1.CreateShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.CreateShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for CreateShare") @@ -890,14 +879,13 @@ func (_c *GatewayAPIClient_CreateShare_Call) RunAndReturn(run func(context.Conte // CreateStorageSpace provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) CreateStorageSpace(ctx context.Context, in *providerv1beta1.CreateStorageSpaceRequest, opts ...grpc.CallOption) (*providerv1beta1.CreateStorageSpaceResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for CreateStorageSpace") @@ -964,14 +952,13 @@ func (_c *GatewayAPIClient_CreateStorageSpace_Call) RunAndReturn(run func(contex // CreateSymlink provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) CreateSymlink(ctx context.Context, in *providerv1beta1.CreateSymlinkRequest, opts ...grpc.CallOption) (*providerv1beta1.CreateSymlinkResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for CreateSymlink") @@ -1038,14 +1025,13 @@ func (_c *GatewayAPIClient_CreateSymlink_Call) RunAndReturn(run func(context.Con // CreateTransfer provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) CreateTransfer(ctx context.Context, in *txv1beta1.CreateTransferRequest, opts ...grpc.CallOption) (*txv1beta1.CreateTransferResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for CreateTransfer") @@ -1112,14 +1098,13 @@ func (_c *GatewayAPIClient_CreateTransfer_Call) RunAndReturn(run func(context.Co // Delete provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) Delete(ctx context.Context, in *providerv1beta1.DeleteRequest, opts ...grpc.CallOption) (*providerv1beta1.DeleteResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for Delete") @@ -1186,14 +1171,13 @@ func (_c *GatewayAPIClient_Delete_Call) RunAndReturn(run func(context.Context, * // DeleteAcceptedUser provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) DeleteAcceptedUser(ctx context.Context, in *invitev1beta1.DeleteAcceptedUserRequest, opts ...grpc.CallOption) (*invitev1beta1.DeleteAcceptedUserResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for DeleteAcceptedUser") @@ -1260,14 +1244,13 @@ func (_c *GatewayAPIClient_DeleteAcceptedUser_Call) RunAndReturn(run func(contex // DeleteOCMCoreShare provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) DeleteOCMCoreShare(ctx context.Context, in *corev1beta1.DeleteOCMCoreShareRequest, opts ...grpc.CallOption) (*corev1beta1.DeleteOCMCoreShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for DeleteOCMCoreShare") @@ -1334,14 +1317,13 @@ func (_c *GatewayAPIClient_DeleteOCMCoreShare_Call) RunAndReturn(run func(contex // DeleteStorageSpace provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) DeleteStorageSpace(ctx context.Context, in *providerv1beta1.DeleteStorageSpaceRequest, opts ...grpc.CallOption) (*providerv1beta1.DeleteStorageSpaceResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for DeleteStorageSpace") @@ -1408,14 +1390,13 @@ func (_c *GatewayAPIClient_DeleteStorageSpace_Call) RunAndReturn(run func(contex // FindAcceptedUsers provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) FindAcceptedUsers(ctx context.Context, in *invitev1beta1.FindAcceptedUsersRequest, opts ...grpc.CallOption) (*invitev1beta1.FindAcceptedUsersResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for FindAcceptedUsers") @@ -1482,14 +1463,13 @@ func (_c *GatewayAPIClient_FindAcceptedUsers_Call) RunAndReturn(run func(context // FindGroups provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) FindGroups(ctx context.Context, in *groupv1beta1.FindGroupsRequest, opts ...grpc.CallOption) (*groupv1beta1.FindGroupsResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for FindGroups") @@ -1556,14 +1536,13 @@ func (_c *GatewayAPIClient_FindGroups_Call) RunAndReturn(run func(context.Contex // FindUsers provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) FindUsers(ctx context.Context, in *userv1beta1.FindUsersRequest, opts ...grpc.CallOption) (*userv1beta1.FindUsersResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for FindUsers") @@ -1630,14 +1609,13 @@ func (_c *GatewayAPIClient_FindUsers_Call) RunAndReturn(run func(context.Context // ForwardInvite provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ForwardInvite(ctx context.Context, in *invitev1beta1.ForwardInviteRequest, opts ...grpc.CallOption) (*invitev1beta1.ForwardInviteResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for ForwardInvite") @@ -1704,14 +1682,13 @@ func (_c *GatewayAPIClient_ForwardInvite_Call) RunAndReturn(run func(context.Con // GenerateAppPassword provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GenerateAppPassword(ctx context.Context, in *applicationsv1beta1.GenerateAppPasswordRequest, opts ...grpc.CallOption) (*applicationsv1beta1.GenerateAppPasswordResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for GenerateAppPassword") @@ -1778,14 +1755,13 @@ func (_c *GatewayAPIClient_GenerateAppPassword_Call) RunAndReturn(run func(conte // GenerateInviteToken provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GenerateInviteToken(ctx context.Context, in *invitev1beta1.GenerateInviteTokenRequest, opts ...grpc.CallOption) (*invitev1beta1.GenerateInviteTokenResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for GenerateInviteToken") @@ -1852,14 +1828,13 @@ func (_c *GatewayAPIClient_GenerateInviteToken_Call) RunAndReturn(run func(conte // GetAcceptedUser provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetAcceptedUser(ctx context.Context, in *invitev1beta1.GetAcceptedUserRequest, opts ...grpc.CallOption) (*invitev1beta1.GetAcceptedUserResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetAcceptedUser") @@ -1926,14 +1901,13 @@ func (_c *GatewayAPIClient_GetAcceptedUser_Call) RunAndReturn(run func(context.C // GetAppPassword provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetAppPassword(ctx context.Context, in *applicationsv1beta1.GetAppPasswordRequest, opts ...grpc.CallOption) (*applicationsv1beta1.GetAppPasswordResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetAppPassword") @@ -2000,14 +1974,13 @@ func (_c *GatewayAPIClient_GetAppPassword_Call) RunAndReturn(run func(context.Co // GetAppProviders provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetAppProviders(ctx context.Context, in *registryv1beta1.GetAppProvidersRequest, opts ...grpc.CallOption) (*registryv1beta1.GetAppProvidersResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetAppProviders") @@ -2074,14 +2047,13 @@ func (_c *GatewayAPIClient_GetAppProviders_Call) RunAndReturn(run func(context.C // GetDefaultAppProviderForMimeType provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetDefaultAppProviderForMimeType(ctx context.Context, in *registryv1beta1.GetDefaultAppProviderForMimeTypeRequest, opts ...grpc.CallOption) (*registryv1beta1.GetDefaultAppProviderForMimeTypeResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetDefaultAppProviderForMimeType") @@ -2148,14 +2120,13 @@ func (_c *GatewayAPIClient_GetDefaultAppProviderForMimeType_Call) RunAndReturn(r // GetGroup provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetGroup(ctx context.Context, in *groupv1beta1.GetGroupRequest, opts ...grpc.CallOption) (*groupv1beta1.GetGroupResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetGroup") @@ -2222,14 +2193,13 @@ func (_c *GatewayAPIClient_GetGroup_Call) RunAndReturn(run func(context.Context, // GetGroupByClaim provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetGroupByClaim(ctx context.Context, in *groupv1beta1.GetGroupByClaimRequest, opts ...grpc.CallOption) (*groupv1beta1.GetGroupByClaimResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetGroupByClaim") @@ -2296,14 +2266,13 @@ func (_c *GatewayAPIClient_GetGroupByClaim_Call) RunAndReturn(run func(context.C // GetHome provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetHome(ctx context.Context, in *providerv1beta1.GetHomeRequest, opts ...grpc.CallOption) (*providerv1beta1.GetHomeResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetHome") @@ -2370,14 +2339,13 @@ func (_c *GatewayAPIClient_GetHome_Call) RunAndReturn(run func(context.Context, // GetInfoByDomain provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetInfoByDomain(ctx context.Context, in *v1beta1.GetInfoByDomainRequest, opts ...grpc.CallOption) (*v1beta1.GetInfoByDomainResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetInfoByDomain") @@ -2444,14 +2412,13 @@ func (_c *GatewayAPIClient_GetInfoByDomain_Call) RunAndReturn(run func(context.C // GetKey provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetKey(ctx context.Context, in *preferencesv1beta1.GetKeyRequest, opts ...grpc.CallOption) (*preferencesv1beta1.GetKeyResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetKey") @@ -2518,14 +2485,13 @@ func (_c *GatewayAPIClient_GetKey_Call) RunAndReturn(run func(context.Context, * // GetLock provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetLock(ctx context.Context, in *providerv1beta1.GetLockRequest, opts ...grpc.CallOption) (*providerv1beta1.GetLockResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetLock") @@ -2592,14 +2558,13 @@ func (_c *GatewayAPIClient_GetLock_Call) RunAndReturn(run func(context.Context, // GetMembers provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetMembers(ctx context.Context, in *groupv1beta1.GetMembersRequest, opts ...grpc.CallOption) (*groupv1beta1.GetMembersResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetMembers") @@ -2666,14 +2631,13 @@ func (_c *GatewayAPIClient_GetMembers_Call) RunAndReturn(run func(context.Contex // GetOCMShare provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetOCMShare(ctx context.Context, in *ocmv1beta1.GetOCMShareRequest, opts ...grpc.CallOption) (*ocmv1beta1.GetOCMShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetOCMShare") @@ -2740,14 +2704,13 @@ func (_c *GatewayAPIClient_GetOCMShare_Call) RunAndReturn(run func(context.Conte // GetOCMShareByToken provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetOCMShareByToken(ctx context.Context, in *ocmv1beta1.GetOCMShareByTokenRequest, opts ...grpc.CallOption) (*ocmv1beta1.GetOCMShareByTokenResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetOCMShareByToken") @@ -2814,14 +2777,13 @@ func (_c *GatewayAPIClient_GetOCMShareByToken_Call) RunAndReturn(run func(contex // GetPath provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetPath(ctx context.Context, in *providerv1beta1.GetPathRequest, opts ...grpc.CallOption) (*providerv1beta1.GetPathResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetPath") @@ -2888,14 +2850,13 @@ func (_c *GatewayAPIClient_GetPath_Call) RunAndReturn(run func(context.Context, // GetPublicShare provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetPublicShare(ctx context.Context, in *linkv1beta1.GetPublicShareRequest, opts ...grpc.CallOption) (*linkv1beta1.GetPublicShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetPublicShare") @@ -2962,14 +2923,13 @@ func (_c *GatewayAPIClient_GetPublicShare_Call) RunAndReturn(run func(context.Co // GetPublicShareByToken provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetPublicShareByToken(ctx context.Context, in *linkv1beta1.GetPublicShareByTokenRequest, opts ...grpc.CallOption) (*linkv1beta1.GetPublicShareByTokenResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetPublicShareByToken") @@ -3036,14 +2996,13 @@ func (_c *GatewayAPIClient_GetPublicShareByToken_Call) RunAndReturn(run func(con // GetQuota provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetQuota(ctx context.Context, in *gatewayv1beta1.GetQuotaRequest, opts ...grpc.CallOption) (*providerv1beta1.GetQuotaResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetQuota") @@ -3110,14 +3069,13 @@ func (_c *GatewayAPIClient_GetQuota_Call) RunAndReturn(run func(context.Context, // GetReceivedOCMShare provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetReceivedOCMShare(ctx context.Context, in *ocmv1beta1.GetReceivedOCMShareRequest, opts ...grpc.CallOption) (*ocmv1beta1.GetReceivedOCMShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetReceivedOCMShare") @@ -3184,14 +3142,13 @@ func (_c *GatewayAPIClient_GetReceivedOCMShare_Call) RunAndReturn(run func(conte // GetReceivedShare provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetReceivedShare(ctx context.Context, in *collaborationv1beta1.GetReceivedShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.GetReceivedShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetReceivedShare") @@ -3258,14 +3215,13 @@ func (_c *GatewayAPIClient_GetReceivedShare_Call) RunAndReturn(run func(context. // GetShare provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetShare(ctx context.Context, in *collaborationv1beta1.GetShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.GetShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetShare") @@ -3332,14 +3288,13 @@ func (_c *GatewayAPIClient_GetShare_Call) RunAndReturn(run func(context.Context, // GetTransferStatus provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetTransferStatus(ctx context.Context, in *txv1beta1.GetTransferStatusRequest, opts ...grpc.CallOption) (*txv1beta1.GetTransferStatusResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetTransferStatus") @@ -3406,14 +3361,13 @@ func (_c *GatewayAPIClient_GetTransferStatus_Call) RunAndReturn(run func(context // GetUser provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetUser(ctx context.Context, in *userv1beta1.GetUserRequest, opts ...grpc.CallOption) (*userv1beta1.GetUserResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetUser") @@ -3480,14 +3434,13 @@ func (_c *GatewayAPIClient_GetUser_Call) RunAndReturn(run func(context.Context, // GetUserByClaim provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetUserByClaim(ctx context.Context, in *userv1beta1.GetUserByClaimRequest, opts ...grpc.CallOption) (*userv1beta1.GetUserByClaimResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetUserByClaim") @@ -3554,14 +3507,13 @@ func (_c *GatewayAPIClient_GetUserByClaim_Call) RunAndReturn(run func(context.Co // GetUserGroups provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) GetUserGroups(ctx context.Context, in *userv1beta1.GetUserGroupsRequest, opts ...grpc.CallOption) (*userv1beta1.GetUserGroupsResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for GetUserGroups") @@ -3628,14 +3580,13 @@ func (_c *GatewayAPIClient_GetUserGroups_Call) RunAndReturn(run func(context.Con // HasMember provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) HasMember(ctx context.Context, in *groupv1beta1.HasMemberRequest, opts ...grpc.CallOption) (*groupv1beta1.HasMemberResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for HasMember") @@ -3702,14 +3653,13 @@ func (_c *GatewayAPIClient_HasMember_Call) RunAndReturn(run func(context.Context // InitiateFileDownload provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) InitiateFileDownload(ctx context.Context, in *providerv1beta1.InitiateFileDownloadRequest, opts ...grpc.CallOption) (*gatewayv1beta1.InitiateFileDownloadResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for InitiateFileDownload") @@ -3776,14 +3726,13 @@ func (_c *GatewayAPIClient_InitiateFileDownload_Call) RunAndReturn(run func(cont // InitiateFileUpload provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) InitiateFileUpload(ctx context.Context, in *providerv1beta1.InitiateFileUploadRequest, opts ...grpc.CallOption) (*gatewayv1beta1.InitiateFileUploadResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for InitiateFileUpload") @@ -3850,14 +3799,13 @@ func (_c *GatewayAPIClient_InitiateFileUpload_Call) RunAndReturn(run func(contex // InvalidateAppPassword provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) InvalidateAppPassword(ctx context.Context, in *applicationsv1beta1.InvalidateAppPasswordRequest, opts ...grpc.CallOption) (*applicationsv1beta1.InvalidateAppPasswordResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for InvalidateAppPassword") @@ -3924,14 +3872,13 @@ func (_c *GatewayAPIClient_InvalidateAppPassword_Call) RunAndReturn(run func(con // IsProviderAllowed provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) IsProviderAllowed(ctx context.Context, in *v1beta1.IsProviderAllowedRequest, opts ...grpc.CallOption) (*v1beta1.IsProviderAllowedResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for IsProviderAllowed") @@ -3998,14 +3945,13 @@ func (_c *GatewayAPIClient_IsProviderAllowed_Call) RunAndReturn(run func(context // ListAllProviders provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ListAllProviders(ctx context.Context, in *v1beta1.ListAllProvidersRequest, opts ...grpc.CallOption) (*v1beta1.ListAllProvidersResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListAllProviders") @@ -4072,14 +4018,13 @@ func (_c *GatewayAPIClient_ListAllProviders_Call) RunAndReturn(run func(context. 
// ListAppPasswords provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ListAppPasswords(ctx context.Context, in *applicationsv1beta1.ListAppPasswordsRequest, opts ...grpc.CallOption) (*applicationsv1beta1.ListAppPasswordsResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListAppPasswords") @@ -4146,14 +4091,13 @@ func (_c *GatewayAPIClient_ListAppPasswords_Call) RunAndReturn(run func(context. // ListAppProviders provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ListAppProviders(ctx context.Context, in *registryv1beta1.ListAppProvidersRequest, opts ...grpc.CallOption) (*registryv1beta1.ListAppProvidersResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListAppProviders") @@ -4220,14 +4164,13 @@ func (_c *GatewayAPIClient_ListAppProviders_Call) RunAndReturn(run func(context. // ListAuthProviders provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ListAuthProviders(ctx context.Context, in *authregistryv1beta1.ListAuthProvidersRequest, opts ...grpc.CallOption) (*gatewayv1beta1.ListAuthProvidersResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListAuthProviders") @@ -4294,14 +4237,13 @@ func (_c *GatewayAPIClient_ListAuthProviders_Call) RunAndReturn(run func(context // ListContainer provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ListContainer(ctx context.Context, in *providerv1beta1.ListContainerRequest, opts ...grpc.CallOption) (*providerv1beta1.ListContainerResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListContainer") @@ -4368,14 +4310,13 @@ func (_c *GatewayAPIClient_ListContainer_Call) RunAndReturn(run func(context.Con // ListContainerStream provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ListContainerStream(ctx context.Context, in *providerv1beta1.ListContainerStreamRequest, opts ...grpc.CallOption) (gatewayv1beta1.GatewayAPI_ListContainerStreamClient, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListContainerStream") @@ -4442,14 +4383,13 @@ func (_c *GatewayAPIClient_ListContainerStream_Call) RunAndReturn(run func(conte // ListExistingPublicShares provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ListExistingPublicShares(ctx context.Context, in *linkv1beta1.ListPublicSharesRequest, opts ...grpc.CallOption) (*gatewayv1beta1.ListExistingPublicSharesResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListExistingPublicShares") @@ -4516,14 +4456,13 @@ func (_c *GatewayAPIClient_ListExistingPublicShares_Call) RunAndReturn(run func( // ListExistingReceivedShares provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ListExistingReceivedShares(ctx context.Context, in *collaborationv1beta1.ListReceivedSharesRequest, opts ...grpc.CallOption) (*gatewayv1beta1.ListExistingReceivedSharesResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListExistingReceivedShares") @@ -4590,14 +4529,13 @@ func (_c *GatewayAPIClient_ListExistingReceivedShares_Call) RunAndReturn(run fun // ListExistingShares provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ListExistingShares(ctx context.Context, in *collaborationv1beta1.ListSharesRequest, opts ...grpc.CallOption) (*gatewayv1beta1.ListExistingSharesResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListExistingShares") @@ -4664,14 +4602,13 @@ func (_c *GatewayAPIClient_ListExistingShares_Call) RunAndReturn(run func(contex // ListFileVersions provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ListFileVersions(ctx context.Context, in *providerv1beta1.ListFileVersionsRequest, opts ...grpc.CallOption) (*providerv1beta1.ListFileVersionsResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListFileVersions") @@ -4738,14 +4675,13 @@ func (_c *GatewayAPIClient_ListFileVersions_Call) RunAndReturn(run func(context. // ListInviteTokens provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ListInviteTokens(ctx context.Context, in *invitev1beta1.ListInviteTokensRequest, opts ...grpc.CallOption) (*invitev1beta1.ListInviteTokensResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListInviteTokens") @@ -4812,14 +4748,13 @@ func (_c *GatewayAPIClient_ListInviteTokens_Call) RunAndReturn(run func(context. // ListOCMShares provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ListOCMShares(ctx context.Context, in *ocmv1beta1.ListOCMSharesRequest, opts ...grpc.CallOption) (*ocmv1beta1.ListOCMSharesResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListOCMShares") @@ -4886,14 +4821,13 @@ func (_c *GatewayAPIClient_ListOCMShares_Call) RunAndReturn(run func(context.Con // ListPublicShares provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ListPublicShares(ctx context.Context, in *linkv1beta1.ListPublicSharesRequest, opts ...grpc.CallOption) (*linkv1beta1.ListPublicSharesResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListPublicShares") @@ -4960,14 +4894,13 @@ func (_c *GatewayAPIClient_ListPublicShares_Call) RunAndReturn(run func(context. 
// ListReceivedOCMShares provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ListReceivedOCMShares(ctx context.Context, in *ocmv1beta1.ListReceivedOCMSharesRequest, opts ...grpc.CallOption) (*ocmv1beta1.ListReceivedOCMSharesResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListReceivedOCMShares") @@ -5034,14 +4967,13 @@ func (_c *GatewayAPIClient_ListReceivedOCMShares_Call) RunAndReturn(run func(con // ListReceivedShares provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ListReceivedShares(ctx context.Context, in *collaborationv1beta1.ListReceivedSharesRequest, opts ...grpc.CallOption) (*collaborationv1beta1.ListReceivedSharesResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListReceivedShares") @@ -5108,14 +5040,13 @@ func (_c *GatewayAPIClient_ListReceivedShares_Call) RunAndReturn(run func(contex // ListRecycle provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ListRecycle(ctx context.Context, in *providerv1beta1.ListRecycleRequest, opts ...grpc.CallOption) (*providerv1beta1.ListRecycleResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListRecycle") @@ -5182,14 +5113,13 @@ func (_c *GatewayAPIClient_ListRecycle_Call) RunAndReturn(run func(context.Conte // ListRecycleStream provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ListRecycleStream(ctx context.Context, in *providerv1beta1.ListRecycleStreamRequest, opts ...grpc.CallOption) (gatewayv1beta1.GatewayAPI_ListRecycleStreamClient, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListRecycleStream") @@ -5256,14 +5186,13 @@ func (_c *GatewayAPIClient_ListRecycleStream_Call) RunAndReturn(run func(context // ListShares provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ListShares(ctx context.Context, in *collaborationv1beta1.ListSharesRequest, opts ...grpc.CallOption) (*collaborationv1beta1.ListSharesResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListShares") @@ -5330,14 +5259,13 @@ func (_c *GatewayAPIClient_ListShares_Call) RunAndReturn(run func(context.Contex // ListStorageSpaces provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ListStorageSpaces(ctx context.Context, in *providerv1beta1.ListStorageSpacesRequest, opts ...grpc.CallOption) (*providerv1beta1.ListStorageSpacesResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListStorageSpaces") @@ -5404,14 +5332,13 @@ func (_c *GatewayAPIClient_ListStorageSpaces_Call) RunAndReturn(run func(context // ListSupportedMimeTypes provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ListSupportedMimeTypes(ctx context.Context, in *registryv1beta1.ListSupportedMimeTypesRequest, opts ...grpc.CallOption) (*registryv1beta1.ListSupportedMimeTypesResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListSupportedMimeTypes") @@ -5478,14 +5405,13 @@ func (_c *GatewayAPIClient_ListSupportedMimeTypes_Call) RunAndReturn(run func(co // ListTransfers provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) ListTransfers(ctx context.Context, in *txv1beta1.ListTransfersRequest, opts ...grpc.CallOption) (*txv1beta1.ListTransfersResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for ListTransfers") @@ -5552,14 +5478,13 @@ func (_c *GatewayAPIClient_ListTransfers_Call) RunAndReturn(run func(context.Con // Move provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) Move(ctx context.Context, in *providerv1beta1.MoveRequest, opts ...grpc.CallOption) (*providerv1beta1.MoveResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for Move") @@ -5626,14 +5551,13 @@ func (_c *GatewayAPIClient_Move_Call) RunAndReturn(run func(context.Context, *pr // OpenInApp provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) OpenInApp(ctx context.Context, in *gatewayv1beta1.OpenInAppRequest, opts ...grpc.CallOption) (*appproviderv1beta1.OpenInAppResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for OpenInApp") @@ -5700,14 +5624,13 @@ func (_c *GatewayAPIClient_OpenInApp_Call) RunAndReturn(run func(context.Context // PurgeRecycle provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) PurgeRecycle(ctx context.Context, in *providerv1beta1.PurgeRecycleRequest, opts ...grpc.CallOption) (*providerv1beta1.PurgeRecycleResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for PurgeRecycle") @@ -5774,14 +5697,13 @@ func (_c *GatewayAPIClient_PurgeRecycle_Call) RunAndReturn(run func(context.Cont // RefreshLock provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) RefreshLock(ctx context.Context, in *providerv1beta1.RefreshLockRequest, opts ...grpc.CallOption) (*providerv1beta1.RefreshLockResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for RefreshLock") @@ -5848,14 +5770,13 @@ func (_c *GatewayAPIClient_RefreshLock_Call) RunAndReturn(run func(context.Conte // RemoveOCMShare provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) RemoveOCMShare(ctx context.Context, in *ocmv1beta1.RemoveOCMShareRequest, opts ...grpc.CallOption) (*ocmv1beta1.RemoveOCMShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for RemoveOCMShare") @@ -5922,14 +5843,13 @@ func (_c *GatewayAPIClient_RemoveOCMShare_Call) RunAndReturn(run func(context.Co // RemovePublicShare provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) RemovePublicShare(ctx context.Context, in *linkv1beta1.RemovePublicShareRequest, opts ...grpc.CallOption) (*linkv1beta1.RemovePublicShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for RemovePublicShare") @@ -5996,14 +5916,13 @@ func (_c *GatewayAPIClient_RemovePublicShare_Call) RunAndReturn(run func(context // RemoveShare provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) RemoveShare(ctx context.Context, in *collaborationv1beta1.RemoveShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.RemoveShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for RemoveShare") @@ -6070,14 +5989,13 @@ func (_c *GatewayAPIClient_RemoveShare_Call) RunAndReturn(run func(context.Conte // RestoreFileVersion provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) RestoreFileVersion(ctx context.Context, in *providerv1beta1.RestoreFileVersionRequest, opts ...grpc.CallOption) (*providerv1beta1.RestoreFileVersionResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for RestoreFileVersion") @@ -6144,14 +6062,13 @@ func (_c *GatewayAPIClient_RestoreFileVersion_Call) RunAndReturn(run func(contex // RestoreRecycleItem provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) RestoreRecycleItem(ctx context.Context, in *providerv1beta1.RestoreRecycleItemRequest, opts ...grpc.CallOption) (*providerv1beta1.RestoreRecycleItemResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for RestoreRecycleItem") @@ -6218,14 +6135,13 @@ func (_c *GatewayAPIClient_RestoreRecycleItem_Call) RunAndReturn(run func(contex // RetryTransfer provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) RetryTransfer(ctx context.Context, in *txv1beta1.RetryTransferRequest, opts ...grpc.CallOption) (*txv1beta1.RetryTransferResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for RetryTransfer") @@ -6292,14 +6208,13 @@ func (_c *GatewayAPIClient_RetryTransfer_Call) RunAndReturn(run func(context.Con // SetArbitraryMetadata provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) SetArbitraryMetadata(ctx context.Context, in *providerv1beta1.SetArbitraryMetadataRequest, opts ...grpc.CallOption) (*providerv1beta1.SetArbitraryMetadataResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for SetArbitraryMetadata") @@ -6366,14 +6281,13 @@ func (_c *GatewayAPIClient_SetArbitraryMetadata_Call) RunAndReturn(run func(cont // SetDefaultAppProviderForMimeType provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) SetDefaultAppProviderForMimeType(ctx context.Context, in *registryv1beta1.SetDefaultAppProviderForMimeTypeRequest, opts ...grpc.CallOption) (*registryv1beta1.SetDefaultAppProviderForMimeTypeResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for SetDefaultAppProviderForMimeType") @@ -6440,14 +6354,13 @@ func (_c *GatewayAPIClient_SetDefaultAppProviderForMimeType_Call) RunAndReturn(r // SetKey provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) SetKey(ctx context.Context, in *preferencesv1beta1.SetKeyRequest, opts ...grpc.CallOption) (*preferencesv1beta1.SetKeyResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for SetKey") @@ -6514,14 +6427,13 @@ func (_c *GatewayAPIClient_SetKey_Call) RunAndReturn(run func(context.Context, * // SetLock provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) SetLock(ctx context.Context, in *providerv1beta1.SetLockRequest, opts ...grpc.CallOption) (*providerv1beta1.SetLockResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for SetLock") @@ -6588,14 +6500,13 @@ func (_c *GatewayAPIClient_SetLock_Call) RunAndReturn(run func(context.Context, // Stat provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) Stat(ctx context.Context, in *providerv1beta1.StatRequest, opts ...grpc.CallOption) (*providerv1beta1.StatResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for Stat") @@ -6662,14 +6573,13 @@ func (_c *GatewayAPIClient_Stat_Call) RunAndReturn(run func(context.Context, *pr // TouchFile provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) TouchFile(ctx context.Context, in *providerv1beta1.TouchFileRequest, opts ...grpc.CallOption) (*providerv1beta1.TouchFileResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for TouchFile") @@ -6736,14 +6646,13 @@ func (_c *GatewayAPIClient_TouchFile_Call) RunAndReturn(run func(context.Context // Unlock provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) Unlock(ctx context.Context, in *providerv1beta1.UnlockRequest, opts ...grpc.CallOption) (*providerv1beta1.UnlockResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for Unlock") @@ -6810,14 +6719,13 @@ func (_c *GatewayAPIClient_Unlock_Call) RunAndReturn(run func(context.Context, * // UnsetArbitraryMetadata provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) UnsetArbitraryMetadata(ctx context.Context, in *providerv1beta1.UnsetArbitraryMetadataRequest, opts ...grpc.CallOption) (*providerv1beta1.UnsetArbitraryMetadataResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for UnsetArbitraryMetadata") @@ -6884,14 +6792,13 @@ func (_c *GatewayAPIClient_UnsetArbitraryMetadata_Call) RunAndReturn(run func(co // UpdateOCMCoreShare provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) UpdateOCMCoreShare(ctx context.Context, in *corev1beta1.UpdateOCMCoreShareRequest, opts ...grpc.CallOption) (*corev1beta1.UpdateOCMCoreShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for UpdateOCMCoreShare") @@ -6958,14 +6865,13 @@ func (_c *GatewayAPIClient_UpdateOCMCoreShare_Call) RunAndReturn(run func(contex // UpdateOCMShare provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) UpdateOCMShare(ctx context.Context, in *ocmv1beta1.UpdateOCMShareRequest, opts ...grpc.CallOption) (*ocmv1beta1.UpdateOCMShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for UpdateOCMShare") @@ -7032,14 +6938,13 @@ func (_c *GatewayAPIClient_UpdateOCMShare_Call) RunAndReturn(run func(context.Co // UpdatePublicShare provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) UpdatePublicShare(ctx context.Context, in *linkv1beta1.UpdatePublicShareRequest, opts ...grpc.CallOption) (*linkv1beta1.UpdatePublicShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for UpdatePublicShare") @@ -7106,14 +7011,13 @@ func (_c *GatewayAPIClient_UpdatePublicShare_Call) RunAndReturn(run func(context // UpdateReceivedOCMShare provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) UpdateReceivedOCMShare(ctx context.Context, in *ocmv1beta1.UpdateReceivedOCMShareRequest, opts ...grpc.CallOption) (*ocmv1beta1.UpdateReceivedOCMShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for UpdateReceivedOCMShare") @@ -7180,14 +7084,13 @@ func (_c *GatewayAPIClient_UpdateReceivedOCMShare_Call) RunAndReturn(run func(co // UpdateReceivedShare provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) UpdateReceivedShare(ctx context.Context, in *collaborationv1beta1.UpdateReceivedShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.UpdateReceivedShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for UpdateReceivedShare") @@ -7254,14 +7157,13 @@ func (_c *GatewayAPIClient_UpdateReceivedShare_Call) RunAndReturn(run func(conte // UpdateShare provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) UpdateShare(ctx context.Context, in *collaborationv1beta1.UpdateShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.UpdateShareResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+ ret := tmpRet if len(ret) == 0 { panic("no return value specified for UpdateShare") @@ -7328,14 +7230,13 @@ func (_c *GatewayAPIClient_UpdateShare_Call) RunAndReturn(run func(context.Conte // UpdateStorageSpace provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) UpdateStorageSpace(ctx context.Context, in *providerv1beta1.UpdateStorageSpaceRequest, opts ...grpc.CallOption) (*providerv1beta1.UpdateStorageSpaceResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for UpdateStorageSpace") @@ -7402,14 +7303,13 @@ func (_c *GatewayAPIClient_UpdateStorageSpace_Call) RunAndReturn(run func(contex // WhoAmI provides a mock function with given fields: ctx, in, opts func (_m *GatewayAPIClient) WhoAmI(ctx context.Context, in *gatewayv1beta1.WhoAmIRequest, opts ...grpc.CallOption) (*gatewayv1beta1.WhoAmIResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] + var tmpRet mock.Arguments + if len(opts) > 0 { + tmpRet = _m.Called(ctx, in, opts) + } else { + tmpRet = _m.Called(ctx, in) } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) + ret := tmpRet if len(ret) == 0 { panic("no return value specified for WhoAmI") diff --git a/vendor/go.etcd.io/etcd/api/v3/version/version.go b/vendor/go.etcd.io/etcd/api/v3/version/version.go index f0141f6bf..2eaf0e58a 100644 --- a/vendor/go.etcd.io/etcd/api/v3/version/version.go +++ b/vendor/go.etcd.io/etcd/api/v3/version/version.go @@ -26,7 +26,7 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is compatible with. MinClusterVersion = "3.0.0" - Version = "3.6.1" + Version = "3.6.2" APIVersion = "unknown" // Git SHA Value will be set during build diff --git a/vendor/golang.org/x/crypto/blake2b/blake2x.go b/vendor/golang.org/x/crypto/blake2b/blake2x.go index 52c414db0..7692bb346 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2x.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2x.go @@ -12,6 +12,8 @@ import ( // XOF defines the interface to hash functions that // support arbitrary-length output. +// +// New callers should prefer the standard library [hash.XOF]. type XOF interface { // Write absorbs more data into the hash's state. It panics if called // after Read. @@ -47,6 +49,8 @@ const maxOutputLength = (1 << 32) * 64 // // A non-nil key turns the hash into a MAC. The key must between // zero and 32 bytes long. +// +// The result can be safely interface-upgraded to [hash.XOF]. func NewXOF(size uint32, key []byte) (XOF, error) { if len(key) > Size { return nil, errKeySize @@ -93,6 +97,10 @@ func (x *xof) Clone() XOF { return &clone } +func (x *xof) BlockSize() int { + return x.d.BlockSize() +} + func (x *xof) Reset() { x.cfg[0] = byte(Size) binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length diff --git a/vendor/golang.org/x/crypto/blake2b/go125.go b/vendor/golang.org/x/crypto/blake2b/go125.go new file mode 100644 index 000000000..67e990b7e --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/go125.go @@ -0,0 +1,11 @@ +// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.25 + +package blake2b + +import "hash" + +var _ hash.XOF = (*xof)(nil) diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go index a3dc629c6..139fa31e1 100644 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ b/vendor/golang.org/x/crypto/ssh/certs.go @@ -233,7 +233,11 @@ func parseCert(in []byte, privAlgo string) (*Certificate, error) { if err != nil { return nil, err } - + // The Type() function is intended to return only certificate key types, but + // we use certKeyAlgoNames anyway for safety, to match [Certificate.Type]. + if _, ok := certKeyAlgoNames[k.Type()]; ok { + return nil, fmt.Errorf("ssh: the signature key type %q is invalid for certificates", k.Type()) + } c.SignatureKey = k c.Signature, rest, ok = parseSignatureBody(g.Signature) if !ok || len(rest) > 0 { @@ -301,16 +305,13 @@ type CertChecker struct { SupportedCriticalOptions []string // IsUserAuthority should return true if the key is recognized as an - // authority for the given user certificate. This allows for - // certificates to be signed by other certificates. This must be set - // if this CertChecker will be checking user certificates. + // authority for user certificate. This must be set if this CertChecker + // will be checking user certificates. IsUserAuthority func(auth PublicKey) bool // IsHostAuthority should report whether the key is recognized as - // an authority for this host. This allows for certificates to be - // signed by other keys, and for those other keys to only be valid - // signers for particular hostnames. This must be set if this - // CertChecker will be checking host certificates. + // an authority for this host. This must be set if this CertChecker + // will be checking host certificates. IsHostAuthority func(auth PublicKey, address string) bool // Clock is used for verifying time stamps. If nil, time.Now @@ -447,12 +448,19 @@ func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { // SignCert signs the certificate with an authority, setting the Nonce, // SignatureKey, and Signature fields. If the authority implements the // MultiAlgorithmSigner interface the first algorithm in the list is used. This -// is useful if you want to sign with a specific algorithm. +// is useful if you want to sign with a specific algorithm. As specified in +// [SSH-CERTS], Section 2.1.1, authority can't be a [Certificate]. func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { c.Nonce = make([]byte, 32) if _, err := io.ReadFull(rand, c.Nonce); err != nil { return err } + // The Type() function is intended to return only certificate key types, but + // we use certKeyAlgoNames anyway for safety, to match [Certificate.Type]. 
+	if _, ok := certKeyAlgoNames[authority.PublicKey().Type()]; ok {
+		return fmt.Errorf("ssh: certificates cannot be used as authority (public key type %q)",
+			authority.PublicKey().Type())
+	}
 	c.SignatureKey = authority.PublicKey()
 
 	if v, ok := authority.(MultiAlgorithmSigner); ok {
diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go
index b86dde151..c12818fdc 100644
--- a/vendor/golang.org/x/crypto/ssh/client_auth.go
+++ b/vendor/golang.org/x/crypto/ssh/client_auth.go
@@ -289,7 +289,7 @@ func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiA
 		}
 	}
 
-	algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos)
+	algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos, true)
 	if err != nil {
 		// If there is no overlap, return the fallback algorithm to support
 		// servers that fail to list all supported algorithms.
diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go
index 0415d3396..f2ec0896c 100644
--- a/vendor/golang.org/x/crypto/ssh/common.go
+++ b/vendor/golang.org/x/crypto/ssh/common.go
@@ -336,7 +336,7 @@ func parseError(tag uint8) error {
 	return fmt.Errorf("ssh: parse error in message type %d", tag)
 }
 
-func findCommon(what string, client []string, server []string) (common string, err error) {
+func findCommon(what string, client []string, server []string, isClient bool) (string, error) {
 	for _, c := range client {
 		for _, s := range server {
 			if c == s {
@@ -344,7 +344,32 @@ func findCommon(what string, client []string, server []string) (common string, e
 			}
 		}
 	}
-	return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server)
+	err := &AlgorithmNegotiationError{
+		What: what,
+	}
+	if isClient {
+		err.SupportedAlgorithms = client
+		err.RequestedAlgorithms = server
+	} else {
+		err.SupportedAlgorithms = server
+		err.RequestedAlgorithms = client
+	}
+	return "", err
+}
+
+// AlgorithmNegotiationError defines the error returned if the client and the
+// server cannot agree on an algorithm for key exchange, host key, cipher, MAC.
+type AlgorithmNegotiationError struct {
+	What string
+	// RequestedAlgorithms lists the algorithms supported by the peer.
+	RequestedAlgorithms []string
+	// SupportedAlgorithms lists the algorithms supported on our side.
+	SupportedAlgorithms []string
+}
+
+func (a *AlgorithmNegotiationError) Error() string {
+	return fmt.Sprintf("ssh: no common algorithm for %s; we offered: %v, peer offered: %v",
+		a.What, a.SupportedAlgorithms, a.RequestedAlgorithms)
 }
 
 // DirectionAlgorithms defines the algorithms negotiated in one direction
@@ -379,12 +404,12 @@ var aeadCiphers = map[string]bool{
 
 func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMsg) (algs *NegotiatedAlgorithms, err error) {
 	result := &NegotiatedAlgorithms{}
 
-	result.KeyExchange, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos)
+	result.KeyExchange, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos, isClient)
 	if err != nil {
 		return
 	}
 
-	result.HostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos)
+	result.HostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos, isClient)
 	if err != nil {
 		return
 	}
@@ -394,36 +419,36 @@ func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMs
 		ctos, stoc = stoc, ctos
 	}
 
-	ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer)
+	ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer, isClient)
 	if err != nil {
 		return
 	}
 
-	stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient)
+	stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient, isClient)
 	if err != nil {
 		return
 	}
 
 	if !aeadCiphers[ctos.Cipher] {
-		ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
+		ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer, isClient)
 		if err != nil {
 			return
 		}
 	}
 
 	if !aeadCiphers[stoc.Cipher] {
-		stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
+		stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient, isClient)
 		if err != nil {
 			return
 		}
 	}
 
-	ctos.compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer)
+	ctos.compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer, isClient)
 	if err != nil {
 		return
 	}
 
-	stoc.compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient)
+	stoc.compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient, isClient)
 	if err != nil {
 		return
 	}
diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go
index f5d352fe3..04ccce346 100644
--- a/vendor/golang.org/x/crypto/ssh/doc.go
+++ b/vendor/golang.org/x/crypto/ssh/doc.go
@@ -16,6 +16,7 @@ References:
 	[PROTOCOL]: https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=HEAD
 	[PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
 	[SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
+	[SSH-CERTS]: https://datatracker.ietf.org/doc/html/draft-miller-ssh-cert-01
 
 This package does not fall under the stability promise of the Go language itself,
 so its API may be changed when pressing needs arise.
diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go
index 566e09d5a..a28c0de50 100644
--- a/vendor/golang.org/x/crypto/ssh/keys.go
+++ b/vendor/golang.org/x/crypto/ssh/keys.go
@@ -273,7 +273,7 @@ func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []str
 	return nil, "", nil, nil, errors.New("ssh: no key found")
 }
 
-// ParsePublicKey parses an SSH public key formatted for use in
+// ParsePublicKey parses an SSH public key or certificate formatted for use in
 // the SSH wire protocol according to RFC 4253, section 6.6.
 func ParsePublicKey(in []byte) (out PublicKey, err error) {
 	algo, in, ok := parseString(in)
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 433141903..e441a7e10 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1018,6 +1018,8 @@ github.com/nats-io/nats-server/v2/server/tpm
 github.com/nats-io/nats.go
 github.com/nats-io/nats.go/encoders/builtin
 github.com/nats-io/nats.go/internal/parser
+github.com/nats-io/nats.go/internal/syncx
+github.com/nats-io/nats.go/jetstream
 github.com/nats-io/nats.go/util
 # github.com/nats-io/nkeys v0.4.11
 ## explicit; go 1.23.0
@@ -1211,7 +1213,7 @@ github.com/open-policy-agent/opa/v1/version
 # github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250707143759-32eaae12b2ce
 ## explicit; go 1.18
 github.com/opencloud-eu/libre-graph-api-go
-# github.com/opencloud-eu/reva/v2 v2.34.1-0.20250704134423-74abc5f04717
+# github.com/opencloud-eu/reva/v2 v2.34.1-0.20250716074813-cfe225225b23
 ## explicit; go 1.24.1
 github.com/opencloud-eu/reva/v2/cmd/revad/internal/grace
 github.com/opencloud-eu/reva/v2/cmd/revad/runtime
@@ -1363,6 +1365,8 @@ github.com/opencloud-eu/reva/v2/pkg/eosclient/eosgrpc/eos_grpc
 github.com/opencloud-eu/reva/v2/pkg/errtypes
 github.com/opencloud-eu/reva/v2/pkg/events
 github.com/opencloud-eu/reva/v2/pkg/events/mocks
+github.com/opencloud-eu/reva/v2/pkg/events/raw
+github.com/opencloud-eu/reva/v2/pkg/events/raw/mocks
 github.com/opencloud-eu/reva/v2/pkg/events/stream
 github.com/opencloud-eu/reva/v2/pkg/group
 github.com/opencloud-eu/reva/v2/pkg/group/manager/json
@@ -1470,6 +1474,7 @@ github.com/opencloud-eu/reva/v2/pkg/share/manager/memory
 github.com/opencloud-eu/reva/v2/pkg/share/manager/owncloudsql
 github.com/opencloud-eu/reva/v2/pkg/share/manager/registry
 github.com/opencloud-eu/reva/v2/pkg/sharedconf
+github.com/opencloud-eu/reva/v2/pkg/signedurl
 github.com/opencloud-eu/reva/v2/pkg/siteacc
 github.com/opencloud-eu/reva/v2/pkg/siteacc/account
 github.com/opencloud-eu/reva/v2/pkg/siteacc/account/contact
@@ -2010,7 +2015,7 @@ go.etcd.io/bbolt
 go.etcd.io/bbolt/errors
 go.etcd.io/bbolt/internal/common
 go.etcd.io/bbolt/internal/freelist
-# go.etcd.io/etcd/api/v3 v3.6.1
+# go.etcd.io/etcd/api/v3 v3.6.2
 ## explicit; go 1.23.0
 go.etcd.io/etcd/api/v3/authpb
 go.etcd.io/etcd/api/v3/etcdserverpb
@@ -2019,7 +2024,7 @@ go.etcd.io/etcd/api/v3/mvccpb
 go.etcd.io/etcd/api/v3/v3rpc/rpctypes
 go.etcd.io/etcd/api/v3/version
 go.etcd.io/etcd/api/v3/versionpb
-# go.etcd.io/etcd/client/pkg/v3 v3.6.1
+# go.etcd.io/etcd/client/pkg/v3 v3.6.2
 ## explicit; go 1.23.0
 go.etcd.io/etcd/client/pkg/v3/fileutil
 go.etcd.io/etcd/client/pkg/v3/logutil
@@ -2028,7 +2033,7 @@ go.etcd.io/etcd/client/pkg/v3/tlsutil
 go.etcd.io/etcd/client/pkg/v3/transport
 go.etcd.io/etcd/client/pkg/v3/types
 go.etcd.io/etcd/client/pkg/v3/verify
-# go.etcd.io/etcd/client/v3 v3.6.1
+# go.etcd.io/etcd/client/v3 v3.6.2
 ## explicit; go 1.23.0
 go.etcd.io/etcd/client/v3
 go.etcd.io/etcd/client/v3/credentials
@@ -2157,7 +2162,7 @@ go.uber.org/zap/internal/pool
 go.uber.org/zap/internal/stacktrace
 go.uber.org/zap/zapcore
 go.uber.org/zap/zapgrpc
-# golang.org/x/crypto v0.39.0
+# golang.org/x/crypto v0.40.0
 ## explicit; go 1.23.0
 golang.org/x/crypto/argon2
 golang.org/x/crypto/bcrypt
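
Note for consumers of this bump: the vendored x/crypto/ssh now returns the exported *AlgorithmNegotiationError from findCommon instead of a plain fmt.Errorf value, so callers can programmatically tell a negotiation mismatch apart from other handshake failures. A minimal sketch of client-side handling; the host, credentials, and the deliberately restricted kex list are illustrative placeholders, not part of this patch:

	package main

	import (
		"errors"
		"fmt"
		"log"

		"golang.org/x/crypto/ssh"
	)

	func main() {
		cfg := &ssh.ClientConfig{
			User: "demo",
			Auth: []ssh.AuthMethod{ssh.Password("secret")},
			// For illustration only; never skip host key checks in production.
			HostKeyCallback: ssh.InsecureIgnoreHostKey(),
		}
		// Restrict our key-exchange offer so negotiation is likely to fail.
		cfg.KeyExchanges = []string{"diffie-hellman-group14-sha256"}

		_, err := ssh.Dial("tcp", "example.com:22", cfg)
		var negoErr *ssh.AlgorithmNegotiationError
		if errors.As(err, &negoErr) {
			// The fields come straight from the new type in common.go above.
			fmt.Printf("no common %s: we offered %v, peer offered %v\n",
				negoErr.What, negoErr.SupportedAlgorithms, negoErr.RequestedAlgorithms)
			return
		}
		if err != nil {
			log.Fatal(err)
		}
	}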
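
The keys.go hunk only corrects the doc comment: ParsePublicKey already returns an *ssh.Certificate when the wire blob encodes a certificate, while the certs.go hunk above rejects such a certificate as a signing authority. A small sketch under the same caveat (the base64 blob is a hypothetical stand-in for the second field of an authorized_keys-style line):

	package main

	import (
		"encoding/base64"
		"fmt"
		"log"

		"golang.org/x/crypto/ssh"
	)

	func main() {
		// "AAAA..." is a hypothetical placeholder for a real key or
		// *-cert-v01@openssh.com certificate blob.
		wire, err := base64.StdEncoding.DecodeString("AAAA...")
		if err != nil {
			log.Fatal(err)
		}
		pub, err := ssh.ParsePublicKey(wire)
		if err != nil {
			log.Fatal(err)
		}
		if cert, ok := pub.(*ssh.Certificate); ok {
			// Certificates parse fine, but per the new certs.go check they
			// cannot themselves act as the authority signing another cert.
			fmt.Println("certificate signed by a", cert.SignatureKey.Type(), "key")
		} else {
			fmt.Println("plain public key:", pub.Type())
		}
	}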