From 77bb3d8bcd9e3d398c444487d0a0a55676719e43 Mon Sep 17 00:00:00 2001 From: Andre Duffeck Date: Mon, 24 Apr 2023 15:13:35 +0200 Subject: [PATCH] [full-ci] Refactor stores (#6019) * Streamline the store implementation with and into reva * Adapt to the cache/store refactoring in reva * Streamline config options and their env vars * Apply suggestions from code review Co-authored-by: Martin * Use the same database for all stores * Bump reva * Configure stat and filemetadata cache separately * Fix default config --------- Co-authored-by: Martin --- go.mod | 14 +- go.sum | 8 +- ocis-pkg/roles/manager.go | 2 +- ocis-pkg/shared/shared_types.go | 8 +- ocis-pkg/store/memory/memstore_test.go | 1506 ----------------- ocis-pkg/store/memory/multimemstore_test.go | 172 -- ocis/pkg/command/decomposedfs.go | 3 +- services/eventhistory/pkg/command/server.go | 4 +- services/eventhistory/pkg/config/config.go | 12 +- .../pkg/config/defaults/defaultconfig.go | 8 +- services/eventhistory/pkg/service/service.go | 2 +- .../eventhistory/pkg/service/service_test.go | 2 +- services/frontend/pkg/config/config.go | 21 +- .../pkg/config/defaults/defaultconfig.go | 13 +- services/frontend/pkg/revaconfig/config.go | 32 +- services/gateway/pkg/config/config.go | 16 +- services/gateway/pkg/revaconfig/config.go | 15 +- services/graph/pkg/config/cache.go | 8 +- services/graph/pkg/service/v0/service.go | 2 +- services/ocs/pkg/config/cachestore.go | 8 +- services/proxy/pkg/command/server.go | 2 +- services/proxy/pkg/config/config.go | 10 +- services/storage-system/pkg/config/config.go | 9 +- .../storage-system/pkg/revaconfig/config.go | 6 + services/storage-users/pkg/config/config.go | 46 +- .../pkg/config/defaults/defaultconfig.go | 8 +- .../storage-users/pkg/revaconfig/config.go | 24 +- .../storage-users/pkg/revaconfig/drivers.go | 64 +- services/userlog/pkg/command/server.go | 2 +- services/userlog/pkg/config/config.go | 4 +- services/userlog/pkg/service/service_test.go | 2 +- .../reva/v2/cmd/revad/runtime/loader.go | 1 - .../internal/grpc/services/gateway/gateway.go | 11 +- .../http/services/owncloud/ocdav/copy.go | 25 +- .../http/services/owncloud/ocdav/mkcol.go | 6 +- .../http/services/owncloud/ocdav/move.go | 25 +- .../http/services/owncloud/ocdav/ocdav.go | 46 +- .../http/services/owncloud/ocdav/put.go | 9 + .../http/services/owncloud/ocdav/tus.go | 16 +- .../services/owncloud/ocdav/validation.go | 63 + .../services/owncloud/ocs/config/config.go | 7 +- .../handlers/apps/sharing/shares/shares.go | 109 +- .../ocs/handlers/apps/sharing/shares/user.go | 3 + .../cs3org/reva/v2/pkg/micro/ocdav/option.go | 14 + .../pkg/rhttp/datatx/manager/simple/simple.go | 26 +- .../pkg/rhttp/datatx/manager/spaces/spaces.go | 27 +- .../v2/pkg/rhttp/datatx/manager/tus/tus.go | 29 +- .../v2/pkg/rhttp/datatx/metrics/metrics.go | 20 + .../rhttp/datatx/utils/download/download.go | 1 - .../cs3org/reva/v2/pkg/share/cache/cache.go | 10 - .../reva/v2/pkg/share/cache/loader/loader.go | 26 - .../reva/v2/pkg/share/cache/memory/memory.go | 83 - .../reva/v2/pkg/share/cache/redis/redis.go | 153 -- .../v2/pkg/share/cache/registry/registry.go | 34 - .../cs3org/reva/v2/pkg/storage/cache/cache.go | 130 +- .../reva/v2/pkg/storage/cache/createhome.go | 4 +- .../pkg/storage/cache/createpersonalspace.go | 4 +- .../reva/v2/pkg/storage/cache/filemetadata.go | 4 +- .../reva/v2/pkg/storage/cache/provider.go | 4 +- .../cs3org/reva/v2/pkg/storage/cache/stat.go | 4 +- .../utils/decomposedfs/decomposedfs.go | 2 +- .../metadata/messagepack_backend.go | 5 +- 
.../0003_switch_to_messagepack_metadata.go | 4 +- .../utils/decomposedfs/options/options.go | 12 +- .../pkg/storage/utils/decomposedfs/spaces.go | 3 + .../cs3org/reva/v2/pkg}/store/etcd/etcd.go | 43 +- .../cs3org/reva/v2/pkg}/store/etcd/utils.go | 0 .../reva/v2/pkg}/store/memory/memstore.go | 41 +- .../v2/pkg}/store/memory/multimemstore.go | 12 +- .../cs3org/reva/v2/pkg}/store/memory/utils.go | 0 .../cs3org/reva/v2/pkg}/store/options.go | 18 + .../cs3org/reva/v2/pkg/store/store.go | 75 +- .../plugins/v4/store/nats-js/context.go | 2 +- .../go-micro/plugins/v4/store/nats-js/nats.go | 8 +- .../plugins/v4/store/nats-js/options.go | 30 +- vendor/modules.txt | 12 +- 76 files changed, 665 insertions(+), 2529 deletions(-) delete mode 100644 ocis-pkg/store/memory/memstore_test.go delete mode 100644 ocis-pkg/store/memory/multimemstore_test.go create mode 100644 vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/validation.go create mode 100644 vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/metrics/metrics.go delete mode 100644 vendor/github.com/cs3org/reva/v2/pkg/share/cache/loader/loader.go delete mode 100644 vendor/github.com/cs3org/reva/v2/pkg/share/cache/memory/memory.go delete mode 100644 vendor/github.com/cs3org/reva/v2/pkg/share/cache/redis/redis.go delete mode 100644 vendor/github.com/cs3org/reva/v2/pkg/share/cache/registry/registry.go rename {ocis-pkg => vendor/github.com/cs3org/reva/v2/pkg}/store/etcd/etcd.go (91%) rename {ocis-pkg => vendor/github.com/cs3org/reva/v2/pkg}/store/etcd/utils.go (100%) rename {ocis-pkg => vendor/github.com/cs3org/reva/v2/pkg}/store/memory/memstore.go (94%) rename {ocis-pkg => vendor/github.com/cs3org/reva/v2/pkg}/store/memory/multimemstore.go (90%) rename {ocis-pkg => vendor/github.com/cs3org/reva/v2/pkg}/store/memory/utils.go (100%) rename {ocis-pkg => vendor/github.com/cs3org/reva/v2/pkg}/store/options.go (67%) rename ocis-pkg/store/cache.go => vendor/github.com/cs3org/reva/v2/pkg/store/store.go (55%) diff --git a/go.mod b/go.mod index d9dc8c4602..45a17b9883 100644 --- a/go.mod +++ b/go.mod @@ -8,13 +8,12 @@ require ( github.com/Masterminds/semver v1.5.0 github.com/MicahParks/keyfunc v1.5.1 github.com/Nerzal/gocloak/v13 v13.1.0 - github.com/armon/go-radix v1.0.0 github.com/bbalet/stopwords v1.0.0 github.com/blevesearch/bleve/v2 v2.3.7 github.com/coreos/go-oidc v2.2.1+incompatible github.com/coreos/go-oidc/v3 v3.4.0 github.com/cs3org/go-cs3apis v0.0.0-20221012090518-ef2996678965 - github.com/cs3org/reva/v2 v2.12.1-0.20230420073005-11edad1f09fe + github.com/cs3org/reva/v2 v2.12.1-0.20230424091007-8d8b567179b1 github.com/disintegration/imaging v1.6.2 github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e github.com/egirna/icap-client v0.1.1 @@ -36,13 +35,10 @@ require ( github.com/go-micro/plugins/v4/registry/nats v1.2.1 github.com/go-micro/plugins/v4/server/grpc v1.2.0 github.com/go-micro/plugins/v4/server/http v1.2.1 - github.com/go-micro/plugins/v4/store/nats-js v1.2.0 - github.com/go-micro/plugins/v4/store/redis v1.2.0 github.com/go-micro/plugins/v4/wrapper/breaker/gobreaker v1.2.0 github.com/go-micro/plugins/v4/wrapper/monitoring/prometheus v1.2.0 github.com/go-micro/plugins/v4/wrapper/trace/opencensus v1.1.0 github.com/go-ozzo/ozzo-validation/v4 v4.3.0 - github.com/go-redis/redis/v8 v8.11.5 github.com/gofrs/uuid v4.4.0+incompatible github.com/golang-jwt/jwt/v4 v4.5.0 github.com/golang/protobuf v1.5.3 @@ -60,7 +56,6 @@ require ( github.com/mitchellh/mapstructure v1.5.0 github.com/mohae/deepcopy 
v0.0.0-20170929034955-c48cc78d4826 github.com/nats-io/nats-server/v2 v2.9.4 - github.com/nats-io/nats.go v1.19.0 github.com/oklog/run v1.1.0 github.com/olekukonko/tablewriter v0.0.5 github.com/onsi/ginkgo v1.16.5 @@ -85,7 +80,6 @@ require ( github.com/xhit/go-simple-mail/v2 v2.13.0 go-micro.dev/v4 v4.9.0 go.etcd.io/bbolt v1.3.7 - go.etcd.io/etcd/client/v3 v3.5.7 go.opencensus.io v0.24.0 go.opentelemetry.io/otel v1.14.0 go.opentelemetry.io/otel/exporters/jaeger v1.14.0 @@ -124,6 +118,7 @@ require ( github.com/alexedwards/argon2id v0.0.0-20211130144151-3585854a6387 // indirect github.com/amoghe/go-crypt v0.0.0-20220222110647-20eada5f5964 // indirect github.com/armon/go-metrics v0.4.1 // indirect + github.com/armon/go-radix v1.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect github.com/aws/aws-sdk-go v1.44.181 // indirect github.com/beevik/etree v1.1.0 // indirect @@ -187,6 +182,9 @@ require ( github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-micro/plugins/v4/store/nats-js v1.1.0 // indirect + github.com/go-micro/plugins/v4/store/redis v1.2.0 // indirect + github.com/go-redis/redis/v8 v8.11.5 // indirect github.com/go-resty/resty/v2 v2.7.0 // indirect github.com/go-sql-driver/mysql v1.6.0 // indirect github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect @@ -260,6 +258,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mschoch/smat v0.2.0 // indirect github.com/nats-io/jwt/v2 v2.3.0 // indirect + github.com/nats-io/nats.go v1.19.0 // indirect github.com/nats-io/nkeys v0.3.0 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/nxadm/tail v1.4.8 // indirect @@ -302,6 +301,7 @@ require ( github.com/yashtewari/glob-intersection v0.1.0 // indirect go.etcd.io/etcd/api/v3 v3.5.7 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect + go.etcd.io/etcd/client/v3 v3.5.7 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.4 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/goleak v1.2.1 // indirect diff --git a/go.sum b/go.sum index b84d9517e1..cd68f84c31 100644 --- a/go.sum +++ b/go.sum @@ -627,8 +627,8 @@ github.com/crewjam/httperr v0.2.0 h1:b2BfXR8U3AlIHwNeFFvZ+BV1LFvKLlzMjzaTnZMybNo github.com/crewjam/httperr v0.2.0/go.mod h1:Jlz+Sg/XqBQhyMjdDiC+GNNRzZTD7x39Gu3pglZ5oH4= github.com/crewjam/saml v0.4.13 h1:TYHggH/hwP7eArqiXSJUvtOPNzQDyQ7vwmwEqlFWhMc= github.com/crewjam/saml v0.4.13/go.mod h1:igEejV+fihTIlHXYP8zOec3V5A8y3lws5bQBFsTm4gA= -github.com/cs3org/reva/v2 v2.12.1-0.20230420073005-11edad1f09fe h1:VwL3XmxMC/lYar2xVHQYcrC6L38/DE+qIh2hYg1X3tc= -github.com/cs3org/reva/v2 v2.12.1-0.20230420073005-11edad1f09fe/go.mod h1:FNAYs5H3xs8v0OFmNgZtiMAzIMXd/6TJmO0uZuNn8pQ= +github.com/cs3org/reva/v2 v2.12.1-0.20230424091007-8d8b567179b1 h1:ds7JOiVqRoMR1EAkoxHJEEA8iX8JirO3bPxXACGE8iA= +github.com/cs3org/reva/v2 v2.12.1-0.20230424091007-8d8b567179b1/go.mod h1:2SKiycp0NwrLm/c1YeKUHwLSwPqV7hKfFEMdYgEaDxo= github.com/cubewise-code/go-mime v0.0.0-20200519001935-8c5762b177d8 h1:Z9lwXumT5ACSmJ7WGnFl+OMLLjpz5uR2fyz7dC255FI= github.com/cubewise-code/go-mime v0.0.0-20200519001935-8c5762b177d8/go.mod h1:4abs/jPXcmJzYoYGF91JF9Uq9s/KL5n1jvFDix8KcqY= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= @@ -790,8 +790,8 @@ github.com/go-micro/plugins/v4/server/grpc v1.2.0 
h1:lXfM+/0oE/u1g0hVBYsvbP4lYOY github.com/go-micro/plugins/v4/server/grpc v1.2.0/go.mod h1:+Ah9Pf/vMSXxBM3fup/hc3N+zN2as3nIpcRaR4sBjnY= github.com/go-micro/plugins/v4/server/http v1.2.1 h1:Cia924J90rgFT/4qWWvyLvN+XqEm5T9tiQyQ+GU4bOQ= github.com/go-micro/plugins/v4/server/http v1.2.1/go.mod h1:YuAjaSPxcn3LI8j2FUsqx0Rxunrj4YwDV41Ax76rLl0= -github.com/go-micro/plugins/v4/store/nats-js v1.2.0 h1:D0LAp/QJw1nC9scNZltZRurZsman0cCmIn+A36hDR5E= -github.com/go-micro/plugins/v4/store/nats-js v1.2.0/go.mod h1:wt51O2yNmgF/F7E00IYIH0awseRGqtnmjZGn6RjbZSk= +github.com/go-micro/plugins/v4/store/nats-js v1.1.0 h1:6Fe1/eLtg8kRyaGvMILp4olYtTDGwYNBXyb1sYfAWGk= +github.com/go-micro/plugins/v4/store/nats-js v1.1.0/go.mod h1:jJf7Gm39OafZlT3s3UE2/9NIYj6OlI2fmZ4czSA3gvo= github.com/go-micro/plugins/v4/store/redis v1.2.0 h1:jR7sHOD1a735cxyBNFif58tP0Ck8OUklpDN1IzzDoRg= github.com/go-micro/plugins/v4/store/redis v1.2.0/go.mod h1:MbCG0YiyPqETTtm7uHFmxQNCaW1o9hBoYtFwhbVjLUg= github.com/go-micro/plugins/v4/transport/grpc v1.1.0 h1:mXfDYfFQLnVDzjGY3o84oe4prfux9h8txsnA19dKsj8= diff --git a/ocis-pkg/roles/manager.go b/ocis-pkg/roles/manager.go index 4f0988ab95..4ac3db2d5f 100644 --- a/ocis-pkg/roles/manager.go +++ b/ocis-pkg/roles/manager.go @@ -4,8 +4,8 @@ import ( "context" "time" + "github.com/cs3org/reva/v2/pkg/store" "github.com/owncloud/ocis/v2/ocis-pkg/log" - "github.com/owncloud/ocis/v2/ocis-pkg/store" settingsmsg "github.com/owncloud/ocis/v2/protogen/gen/ocis/messages/settings/v0" settingssvc "github.com/owncloud/ocis/v2/protogen/gen/ocis/services/settings/v0" microstore "go-micro.dev/v4/store" diff --git a/ocis-pkg/shared/shared_types.go b/ocis-pkg/shared/shared_types.go index c8cc938fc9..349541c8ed 100644 --- a/ocis-pkg/shared/shared_types.go +++ b/ocis-pkg/shared/shared_types.go @@ -56,12 +56,12 @@ type HTTPServiceTLS struct { } type Cache struct { - Store string `yaml:"store" env:"OCIS_CACHE_STORE;OCIS_CACHE_STORE_TYPE" desc:"The type of the cache store. Supported values are: 'memory', 'ocmem', 'etcd', 'redis', 'redis-sentinel', 'nats-js', 'noop'. See the text description for details."` - Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES;OCIS_CACHE_STORE_ADDRESSES" desc:"A comma separated list of nodes to access the configured store. This has no effect when 'in-memory' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store."` + Store string `yaml:"store" env:"OCIS_CACHE_STORE" desc:"The type of the cache store. Supported values are: 'memory', 'ocmem', 'etcd', 'redis', 'redis-sentinel', 'nats-js', 'noop'. See the text description for details."` + Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES" desc:"A comma separated list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store."` Database string `yaml:"database" env:"OCIS_CACHE_STORE_DATABASE" desc:"The database name the configured store should use."` Table string `yaml:"table" env:"OCIS_CACHE_STORE_TABLE" desc:"The database table the store should use."` - TTL time.Duration `yaml:"ttl" env:"OCIS_CACHE_STORE_TTL" desc:"Time to live for events in the store. The duration can be set as number followed by a unit identifier like s, m or h."` - Size int `yaml:"size" env:"OCIS_CACHE_STORE_SIZE" desc:"The maximum quantity of items in the store. 
Only applies when store type 'ocmem' is configured."` + TTL time.Duration `yaml:"ttl" env:"OCIS_CACHE_TTL" desc:"Time to live for events in the store. The duration can be set as number followed by a unit identifier like s, m or h."` + Size int `yaml:"size" env:"OCIS_CACHE_SIZE" desc:"The maximum quantity of items in the store. Only applies when store type 'ocmem' is configured."` } // Commons holds configuration that are common to all extensions. Each extension can then decide whether diff --git a/ocis-pkg/store/memory/memstore_test.go b/ocis-pkg/store/memory/memstore_test.go deleted file mode 100644 index 28fc5ca47f..0000000000 --- a/ocis-pkg/store/memory/memstore_test.go +++ /dev/null @@ -1,1506 +0,0 @@ -package memory - -import ( - "context" - "encoding/hex" - "hash/fnv" - "math/rand" - "sort" - "strconv" - "strings" - "sync" - "testing" - "time" - - "sync/atomic" - - "go-micro.dev/v4/store" -) - -func TestWriteAndRead(t *testing.T) { - cache := NewMemStore() - data := map[string]string{ - "abaya": "v329487", - "abaaz": "v398342", - "abayakjdkj": "v989898", - "zzzz": "viaooouyenbdnya", - "abazzz": "v57869nbdnya", - "mbmbmb": "viuyenbdnya", - "zozzz": "vooouyenbdnya", - "zzaz": "viaooouyenbdnya", - "mbzzaamb": "viunya", - } - - for key, value := range data { - record := &store.Record{ - Key: key, - Value: []byte(value), - } - _ = cache.Write(record) - } - - t.Run("Plain", func(t *testing.T) { - readPlain(t, cache) - }) - t.Run("Prefix", func(t *testing.T) { - readPrefix(t, cache) - }) - t.Run("Suffix", func(t *testing.T) { - readSuffix(t, cache) - }) - t.Run("PrefixSuffix", func(t *testing.T) { - readPrefixSuffix(t, cache) - }) - t.Run("PrefixLimitOffset", func(t *testing.T) { - readPrefixLimitOffset(t, cache) - }) - t.Run("SuffixLimitOffset", func(t *testing.T) { - readSuffixLimitOffset(t, cache) - }) - t.Run("PrefixSuffixLimitOffset", func(t *testing.T) { - readPrefixSuffixLimitOffset(t, cache) - }) -} - -func readPlain(t *testing.T, cache store.Store) { - // expected data in the cache - data := map[string]string{ - "abaya": "v329487", - "abaaz": "v398342", - "abayakjdkj": "v989898", - "zzzz": "viaooouyenbdnya", - "abazzz": "v57869nbdnya", - "mbmbmb": "viuyenbdnya", - "zozzz": "vooouyenbdnya", - "zzaz": "viaooouyenbdnya", - "mbzzaamb": "viunya", - } - for key, value := range data { - records, _ := cache.Read(key) - if len(records) != 1 { - t.Fatalf("Plain read for key %s returned %d records", key, len(records)) - } - if key != records[0].Key { - t.Errorf("Plain read for key %s returned got wrong key %s", key, records[0].Key) - } - v := string(records[0].Value) - if value != v { - t.Errorf("Plain read for key %s returned different value, expected %s, got %s", key, value, v) - } - } -} - -func readPrefix(t *testing.T, cache store.Store) { - pref1 := []struct { - Key string - Value string - }{ - {Key: "abaya", Value: "v329487"}, - {Key: "abayakjdkj", Value: "v989898"}, - } - - pref2 := []struct { - Key string - Value string - }{ - {Key: "zozzz", Value: "vooouyenbdnya"}, - {Key: "zzaz", Value: "viaooouyenbdnya"}, - {Key: "zzzz", Value: "viaooouyenbdnya"}, - } - - records, _ := cache.Read("abaya", store.ReadPrefix()) - if len(records) != 2 { - t.Fatalf("Prefix read for \"abaya\" returned %d records, expected 2", len(records)) - } - for index, record := range records { - // it should be sorted alphabetically - if pref1[index].Key != record.Key { - t.Errorf("Unexpected key for prefix \"abaya\", index %d, expected %s, got %s", index, pref1[index].Key, record.Key) - } - if pref1[index].Value != 
string(record.Value) { - t.Errorf("Unexpected value for prefix \"abaya\", index %d, expected %s, got %s", index, pref1[index].Value, record.Value) - } - } - - records, _ = cache.Read("z", store.ReadPrefix()) - if len(records) != 3 { - t.Fatalf("Prefix read for \"z\" returned %d records, expected 3", len(records)) - } - for index, record := range records { - // it should be sorted alphabetically - if pref2[index].Key != record.Key { - t.Errorf("Unexpected key for prefix \"z\", index %d, expected %s, got %s", index, pref2[index].Key, record.Key) - } - if pref2[index].Value != string(record.Value) { - t.Errorf("Unexpected value for prefix \"z\", index %d, expected %s, got %s", index, pref2[index].Value, record.Value) - } - } -} - -func readSuffix(t *testing.T, cache store.Store) { - pref1 := []struct { - Key string - Value string - }{ - {Key: "abaaz", Value: "v398342"}, - {Key: "zzaz", Value: "viaooouyenbdnya"}, - } - pref2 := []struct { - Key string - Value string - }{ - {Key: "abaaz", Value: "v398342"}, - {Key: "zzaz", Value: "viaooouyenbdnya"}, - {Key: "abazzz", Value: "v57869nbdnya"}, - {Key: "zozzz", Value: "vooouyenbdnya"}, - {Key: "zzzz", Value: "viaooouyenbdnya"}, - } - - records, _ := cache.Read("az", store.ReadSuffix()) - if len(records) != 2 { - t.Fatalf("Suffix read for \"az\" returned %d records, expected 2", len(records)) - } - for index, record := range records { - // it should be sorted alphabetically - if pref1[index].Key != record.Key { - t.Errorf("Unexpected key for suffix \"az\", index %d, expected %s, got %s", index, pref1[index].Key, record.Key) - } - if pref1[index].Value != string(record.Value) { - t.Errorf("Unexpected value for suffix \"az\", index %d, expected %s, got %s", index, pref1[index].Value, record.Value) - } - } - - records, _ = cache.Read("z", store.ReadSuffix()) - if len(records) != 5 { - t.Fatalf("Suffix read for \"z\" returned %d records, expected 5", len(records)) - } - for index, record := range records { - if pref2[index].Key != record.Key { - t.Errorf("Unexpected key for suffix \"z\", index %d, expected %s, got %s", index, pref2[index].Key, record.Key) - } - if pref2[index].Value != string(record.Value) { - t.Errorf("Unexpected value for suffix \"z\", index %d, expected %s, got %s", index, pref2[index].Value, record.Value) - } - } -} - -func readPrefixSuffix(t *testing.T, cache store.Store) { - pref1 := []struct { - Key string - Value string - }{ - {Key: "zozzz", Value: "vooouyenbdnya"}, - {Key: "zzaz", Value: "viaooouyenbdnya"}, - {Key: "zzzz", Value: "viaooouyenbdnya"}, - } - pref2 := []struct { - Key string - Value string - }{ - {Key: "mbmbmb", Value: "viuyenbdnya"}, - {Key: "mbzzaamb", Value: "viunya"}, - } - - records, _ := cache.Read("z", store.ReadPrefix(), store.ReadSuffix()) - if len(records) != 3 { - t.Fatalf("Prefix-Suffix read for \"z\" returned %d records, expected 3", len(records)) - } - for index, record := range records { - // it should be sorted alphabetically - if pref1[index].Key != record.Key { - t.Errorf("Unexpected key for prefix-suffix \"z\", index %d, expected %s, got %s", index, pref1[index].Key, record.Key) - } - if pref1[index].Value != string(record.Value) { - t.Errorf("Unexpected value for prefix-suffix \"z\", index %d, expected %s, got %s", index, pref1[index].Value, record.Value) - } - } - - records, _ = cache.Read("mb", store.ReadPrefix(), store.ReadSuffix()) - if len(records) != 2 { - t.Fatalf("Prefix-Suffix read for \"mb\" returned %d records, expected 2", len(records)) - } - for index, record := range records { - // 
it should be sorted alphabetically - if pref2[index].Key != record.Key { - t.Errorf("Unexpected key for prefix-suffix \"mb\", index %d, expected %s, got %s", index, pref2[index].Key, record.Key) - } - if pref2[index].Value != string(record.Value) { - t.Errorf("Unexpected value for prefix-suffix \"mb\", index %d, expected %s, got %s", index, pref2[index].Value, record.Value) - } - } -} - -func readPrefixLimitOffset(t *testing.T, cache store.Store) { - pref1 := []struct { - Key string - Value string - }{ - {Key: "abaaz", Value: "v398342"}, - {Key: "abaya", Value: "v329487"}, - } - pref2 := []struct { - Key string - Value string - }{ - {Key: "abayakjdkj", Value: "v989898"}, - {Key: "abazzz", Value: "v57869nbdnya"}, - } - - records, _ := cache.Read("aba", store.ReadPrefix(), store.ReadLimit(2)) - if len(records) != 2 { - t.Fatalf("Limit prefix read for \"aba\" returned %d records, expected 2", len(records)) - } - for index, record := range records { - // it should be sorted alphabetically - if pref1[index].Key != record.Key { - t.Errorf("Unexpected key for limit prefix \"aba\", index %d, expected %s, got %s", index, pref1[index].Key, record.Key) - } - if pref1[index].Value != string(record.Value) { - t.Errorf("Unexpected value for limit prefix \"aba\", index %d, expected %s, got %s", index, pref1[index].Value, record.Value) - } - } - - records, _ = cache.Read("aba", store.ReadPrefix(), store.ReadLimit(2), store.ReadOffset(2)) - if len(records) != 2 { - t.Fatalf("Offset-limit prefix read for \"aba\" returned %d records, expected 2", len(records)) - } - for index, record := range records { - // it should be sorted alphabetically - if pref2[index].Key != record.Key { - t.Errorf("Unexpected key for offset-limit prefix \"aba\", index %d, expected %s, got %s", index, pref2[index].Key, record.Key) - } - if pref2[index].Value != string(record.Value) { - t.Errorf("Unexpected value for offset-limit prefix \"aba\", index %d, expected %s, got %s", index, pref2[index].Value, record.Value) - } - } -} - -func readSuffixLimitOffset(t *testing.T, cache store.Store) { - pref1 := []struct { - Key string - Value string - }{ - {Key: "abaaz", Value: "v398342"}, - {Key: "zzaz", Value: "viaooouyenbdnya"}, - } - pref2 := []struct { - Key string - Value string - }{ - {Key: "abazzz", Value: "v57869nbdnya"}, - {Key: "zozzz", Value: "vooouyenbdnya"}, - } - - records, _ := cache.Read("z", store.ReadSuffix(), store.ReadLimit(2)) - if len(records) != 2 { - t.Fatalf("Limit suffix read for \"z\" returned %d records, expected 2", len(records)) - } - for index, record := range records { - // it should be sorted alphabetically - if pref1[index].Key != record.Key { - t.Errorf("Unexpected key for limit suffix \"z\", index %d, expected %s, got %s", index, pref1[index].Key, record.Key) - } - if pref1[index].Value != string(record.Value) { - t.Errorf("Unexpected value for limit suffix \"z\", index %d, expected %s, got %s", index, pref1[index].Value, record.Value) - } - } - - records, _ = cache.Read("z", store.ReadSuffix(), store.ReadLimit(2), store.ReadOffset(2)) - if len(records) != 2 { - t.Fatalf("Offset-limit suffix read for \"z\" returned %d records, expected 2", len(records)) - } - for index, record := range records { - // it should be sorted alphabetically - if pref2[index].Key != record.Key { - t.Errorf("Unexpected key for offset-limit suffix \"z\", index %d, expected %s, got %s", index, pref2[index].Key, record.Key) - } - if pref2[index].Value != string(record.Value) { - t.Errorf("Unexpected value for offset-limit suffix 
\"z\", index %d, expected %s, got %s", index, pref2[index].Value, record.Value) - } - } -} - -func readPrefixSuffixLimitOffset(t *testing.T, cache store.Store) { - pref1 := []struct { - Key string - Value string - }{ - {Key: "zzaz", Value: "viaooouyenbdnya"}, - {Key: "zzzz", Value: "viaooouyenbdnya"}, - } - - records, _ := cache.Read("z", store.ReadPrefix(), store.ReadSuffix(), store.ReadOffset(1), store.ReadLimit(2)) - if len(records) != 2 { - t.Fatalf("Limit suffix read for \"z\" returned %d records, expected 2", len(records)) - } - for index, record := range records { - // it should be sorted alphabetically - if pref1[index].Key != record.Key { - t.Errorf("Unexpected key for limit suffix \"z\", index %d, expected %s, got %s", index, pref1[index].Key, record.Key) - } - if pref1[index].Value != string(record.Value) { - t.Errorf("Unexpected value for limit suffix \"z\", index %d, expected %s, got %s", index, pref1[index].Value, record.Value) - } - } -} - -func TestWriteExpiryAndRead(t *testing.T) { - cache := NewMemStore() - - data := map[string]string{ - "abaya": "v329487", - "abaaz": "v398342", - "abayakjdkj": "v989898", - "zzaz": "viaooouyenbdnya", - "abazzz": "v57869nbdnya", - "mbmbmb": "viuyenbdnya", - "mbzzaamb": "viunya", - "zozzz": "vooouyenbdnya", - } - - for key, value := range data { - record := &store.Record{ - Key: key, - Value: []byte(value), - Expiry: time.Second * 1000, - } - _ = cache.Write(record) - } - - records, _ := cache.Read("zzaz") - if len(records) != 1 { - t.Fatalf("Failed read for \"zzaz\" returned %d records, expected 1", len(records)) - } - record := records[0] - if record.Expiry < 999*time.Second || record.Expiry > 1000*time.Second { - // The expiry will be adjusted on retrieval - t.Errorf("Abnormal expiry range: expected %d-%d, got %d", 999*time.Second, 1000*time.Second, record.Expiry) - } -} - -func TestWriteExpiryWithExpiryAndRead(t *testing.T) { - cache := NewMemStore() - - data := map[string]string{ - "abaya": "v329487", - "abaaz": "v398342", - "abayakjdkj": "v989898", - "zzaz": "viaooouyenbdnya", - "abazzz": "v57869nbdnya", - "mbmbmb": "viuyenbdnya", - "mbzzaamb": "viunya", - "zozzz": "vooouyenbdnya", - } - - for key, value := range data { - record := &store.Record{ - Key: key, - Value: []byte(value), - Expiry: time.Second * 1000, - } - // write option will override the record data - _ = cache.Write(record, store.WriteExpiry(time.Now().Add(time.Hour))) - } - - records, _ := cache.Read("zzaz") - if len(records) != 1 { - t.Fatalf("Failed read for \"zzaz\" returned %d records, expected 1", len(records)) - } - record := records[0] - if record.Expiry < 3599*time.Second || record.Expiry > 3600*time.Second { - // The expiry will be adjusted on retrieval - t.Errorf("Abnormal expiry range: expected %d-%d, got %d", 3599*time.Second, 3600*time.Second, record.Expiry) - } -} - -func TestWriteExpiryWithTTLAndRead(t *testing.T) { - cache := NewMemStore() - - data := map[string]string{ - "abaya": "v329487", - "abaaz": "v398342", - "abayakjdkj": "v989898", - "zzaz": "viaooouyenbdnya", - "abazzz": "v57869nbdnya", - "mbmbmb": "viuyenbdnya", - "mbzzaamb": "viunya", - "zozzz": "vooouyenbdnya", - } - - for key, value := range data { - record := &store.Record{ - Key: key, - Value: []byte(value), - Expiry: time.Second * 1000, - } - // write option will override the record data, TTL takes precedence - _ = cache.Write(record, store.WriteTTL(20*time.Second), store.WriteExpiry(time.Now().Add(time.Hour))) - } - - records, _ := cache.Read("zzaz") - if len(records) != 1 { - 
t.Fatalf("Failed read for \"zzaz\" returned %d records, expected 1", len(records)) - } - record := records[0] - if record.Expiry < 19*time.Second || record.Expiry > 20*time.Second { - // The expiry will be adjusted on retrieval - t.Errorf("Abnormal expiry range: expected %d-%d, got %d", 19*time.Second, 20*time.Second, record.Expiry) - } -} - -func TestDelete(t *testing.T) { - cache := NewMemStore() - record := &store.Record{ - Key: "record", - Value: []byte("value for record"), - } - - records, err := cache.Read("record") - if err != store.ErrNotFound && len(records) > 0 { - t.Fatal("Found key in cache but it shouldn't be there") - } - - _ = cache.Write(record) - records, err = cache.Read("record") - if err != nil { - t.Fatal("Key not found in cache after inserting it") - } - if len(records) != 1 { - t.Fatal("Multiple keys found in cache after inserting it") - } - if records[0].Key != "record" && string(records[0].Value) != "value for record" { - t.Fatal("Wrong record retrieved") - } - - err = cache.Delete("record") - if err != nil { - t.Fatal("Error deleting the record") - } - - records, err = cache.Read("record") - if err != store.ErrNotFound && len(records) > 0 { - t.Fatal("Found key in cache but it shouldn't be there") - } -} - -func TestList(t *testing.T) { - cache := NewMemStore() - data := map[string]string{ - "abaya": "v329487", - "abaaz": "v398342", - "abayakjdkj": "v989898", - "zzzz": "viaooouyenbdnya", - "abazzz": "v57869nbdnya", - "mbmbmb": "viuyenbdnya", - "zozzz": "vooouyenbdnya", - "aboyo": "v889487", - "zzaaaz": "v999487", - } - - for key, value := range data { - record := &store.Record{ - Key: key, - Value: []byte(value), - } - _ = cache.Write(record) - } - - t.Run("Plain", func(t *testing.T) { - listPlain(t, cache) - }) - t.Run("Prefix", func(t *testing.T) { - listPrefix(t, cache) - }) - t.Run("Suffix", func(t *testing.T) { - listSuffix(t, cache) - }) - t.Run("PrefixSuffix", func(t *testing.T) { - listPrefixSuffix(t, cache) - }) - t.Run("LimitOffset", func(t *testing.T) { - listLimitOffset(t, cache) - }) - t.Run("PrefixLimitOffset", func(t *testing.T) { - listPrefixLimitOffset(t, cache) - }) - t.Run("SuffixLimitOffset", func(t *testing.T) { - listSuffixLimitOffset(t, cache) - }) - t.Run("PrefixSuffixLimitOffset", func(t *testing.T) { - listPrefixSuffixLimitOffset(t, cache) - }) -} - -func listPlain(t *testing.T, cache store.Store) { - keys, _ := cache.List() - expectedKeys := []string{"abaaz", "abaya", "abayakjdkj", "abazzz", "aboyo", "mbmbmb", "zozzz", "zzaaaz", "zzzz"} - if len(keys) != len(expectedKeys) { - t.Fatalf("Wrong number of keys, expected %d, got %d", len(expectedKeys), len(keys)) - } - - for index, key := range keys { - if key != expectedKeys[index] { - t.Errorf("Wrong key in the list in index %d, expected %s, got %s", index, expectedKeys[index], key) - } - } -} - -func listPrefix(t *testing.T, cache store.Store) { - keys, _ := cache.List(store.ListPrefix("aba")) - expectedKeys := []string{"abaaz", "abaya", "abayakjdkj", "abazzz"} - if len(keys) != len(expectedKeys) { - t.Fatalf("Wrong number of keys, expected %d, got %d", len(expectedKeys), len(keys)) - } - - for index, key := range keys { - if key != expectedKeys[index] { - t.Errorf("Wrong key in the list in index %d, expected %s, got %s", index, expectedKeys[index], key) - } - } -} - -func listSuffix(t *testing.T, cache store.Store) { - keys, _ := cache.List(store.ListSuffix("z")) - expectedKeys := []string{"zzaaaz", "abaaz", "abazzz", "zozzz", "zzzz"} - if len(keys) != len(expectedKeys) { - 
t.Fatalf("Wrong number of keys, expected %d, got %d", len(expectedKeys), len(keys)) - } - - for index, key := range keys { - if key != expectedKeys[index] { - t.Errorf("Wrong key in the list in index %d, expected %s, got %s", index, expectedKeys[index], key) - } - } -} - -func listPrefixSuffix(t *testing.T, cache store.Store) { - keys, _ := cache.List(store.ListPrefix("ab"), store.ListSuffix("z")) - expectedKeys := []string{"abaaz", "abazzz"} - if len(keys) != len(expectedKeys) { - t.Fatalf("Wrong number of keys, expected %d, got %d", len(expectedKeys), len(keys)) - } - - for index, key := range keys { - if key != expectedKeys[index] { - t.Errorf("Wrong key in the list in index %d, expected %s, got %s", index, expectedKeys[index], key) - } - } -} - -func listLimitOffset(t *testing.T, cache store.Store) { - keys, _ := cache.List(store.ListLimit(3), store.ListOffset(2)) - expectedKeys := []string{"abayakjdkj", "abazzz", "aboyo"} - if len(keys) != len(expectedKeys) { - t.Fatalf("Wrong number of keys, expected %d, got %d", len(expectedKeys), len(keys)) - } - - for index, key := range keys { - if key != expectedKeys[index] { - t.Errorf("Wrong key in the list in index %d, expected %s, got %s", index, expectedKeys[index], key) - } - } -} - -func listPrefixLimitOffset(t *testing.T, cache store.Store) { - keys, _ := cache.List(store.ListPrefix("aba"), store.ListLimit(2), store.ListOffset(1)) - expectedKeys := []string{"abaya", "abayakjdkj"} - if len(keys) != len(expectedKeys) { - t.Fatalf("Wrong number of keys, expected %d, got %d", len(expectedKeys), len(keys)) - } - - for index, key := range keys { - if key != expectedKeys[index] { - t.Errorf("Wrong key in the list in index %d, expected %s, got %s", index, expectedKeys[index], key) - } - } -} - -func listSuffixLimitOffset(t *testing.T, cache store.Store) { - keys, _ := cache.List(store.ListSuffix("z"), store.ListLimit(2), store.ListOffset(1)) - expectedKeys := []string{"abaaz", "abazzz"} - if len(keys) != len(expectedKeys) { - t.Fatalf("Wrong number of keys, expected %d, got %d", len(expectedKeys), len(keys)) - } - - for index, key := range keys { - if key != expectedKeys[index] { - t.Errorf("Wrong key in the list in index %d, expected %s, got %s", index, expectedKeys[index], key) - } - } -} - -func listPrefixSuffixLimitOffset(t *testing.T, cache store.Store) { - keys, _ := cache.List(store.ListPrefix("a"), store.ListSuffix("z"), store.ListLimit(2), store.ListOffset(1)) - expectedKeys := []string{"abazzz"} // only 2 available, and we skip the first one - if len(keys) != len(expectedKeys) { - t.Fatalf("Wrong number of keys, expected %d, got %d", len(expectedKeys), len(keys)) - } - - for index, key := range keys { - if key != expectedKeys[index] { - t.Errorf("Wrong key in the list in index %d, expected %s, got %s", index, expectedKeys[index], key) - } - } -} - -func TestEvictWriteUpdate(t *testing.T) { - cache := NewMemStore( - store.WithContext( - NewContext( - context.Background(), - map[string]interface{}{ - "maxCap": 3, - }, - )), - ) - - for i := 0; i < 3; i++ { - v := strconv.Itoa(i) - record := &store.Record{ - Key: v, - Value: []byte(v), - } - _ = cache.Write(record) - } - - // update first item - updatedRecord := &store.Record{ - Key: "0", - Value: []byte("zero"), - } - _ = cache.Write(updatedRecord) - - // new record, to force eviction - newRecord := &store.Record{ - Key: "new", - Value: []byte("newNew"), - } - _ = cache.Write(newRecord) - - records, _ := cache.Read("", store.ReadPrefix()) - if len(records) != 3 { - t.Fatalf("Wrong number 
of record returned, expected 3, got %d", len(records)) - } - - expectedKV := []struct { - Key string - Value string - }{ - {Key: "0", Value: "zero"}, - {Key: "2", Value: "2"}, - {Key: "new", Value: "newNew"}, - } - - for index, record := range records { - if record.Key != expectedKV[index].Key { - t.Errorf("Wrong key for index %d, expected %s, got %s", index, expectedKV[index].Key, record.Key) - } - if string(record.Value) != expectedKV[index].Value { - t.Errorf("Wrong value for index %d, expected %s, got %s", index, expectedKV[index].Value, string(record.Value)) - } - } -} - -func TestEvictRead(t *testing.T) { - cache := NewMemStore( - store.WithContext( - NewContext( - context.Background(), - map[string]interface{}{ - "maxCap": 3, - }, - )), - ) - - for i := 0; i < 3; i++ { - v := strconv.Itoa(i) - record := &store.Record{ - Key: v, - Value: []byte(v), - } - _ = cache.Write(record) - } - - // Read first item - _, _ = cache.Read("0") - - // new record, to force eviction - newRecord := &store.Record{ - Key: "new", - Value: []byte("newNew"), - } - _ = cache.Write(newRecord) - - records, _ := cache.Read("", store.ReadPrefix()) - if len(records) != 3 { - t.Fatalf("Wrong number of record returned, expected 3, got %d", len(records)) - } - - expectedKV := []struct { - Key string - Value string - }{ - {Key: "0", Value: "0"}, - {Key: "2", Value: "2"}, - {Key: "new", Value: "newNew"}, - } - - for index, record := range records { - if record.Key != expectedKV[index].Key { - t.Errorf("Wrong key for index %d, expected %s, got %s", index, expectedKV[index].Key, record.Key) - } - if string(record.Value) != expectedKV[index].Value { - t.Errorf("Wrong value for index %d, expected %s, got %s", index, expectedKV[index].Value, string(record.Value)) - } - } -} - -func TestEvictReadPrefix(t *testing.T) { - cache := NewMemStore( - store.WithContext( - NewContext( - context.Background(), - map[string]interface{}{ - "maxCap": 3, - }, - )), - ) - - for i := 0; i < 3; i++ { - v := strconv.Itoa(i) - record := &store.Record{ - Key: v, - Value: []byte(v), - } - _ = cache.Write(record) - } - - // Read prefix won't change evcition list - _, _ = cache.Read("0", store.ReadPrefix()) - - // new record, to force eviction - newRecord := &store.Record{ - Key: "new", - Value: []byte("newNew"), - } - _ = cache.Write(newRecord) - - records, _ := cache.Read("", store.ReadPrefix()) - if len(records) != 3 { - t.Fatalf("Wrong number of record returned, expected 3, got %d", len(records)) - } - - expectedKV := []struct { - Key string - Value string - }{ - {Key: "1", Value: "1"}, - {Key: "2", Value: "2"}, - {Key: "new", Value: "newNew"}, - } - - for index, record := range records { - if record.Key != expectedKV[index].Key { - t.Errorf("Wrong key for index %d, expected %s, got %s", index, expectedKV[index].Key, record.Key) - } - if string(record.Value) != expectedKV[index].Value { - t.Errorf("Wrong value for index %d, expected %s, got %s", index, expectedKV[index].Value, string(record.Value)) - } - } -} - -func TestEvictReadSuffix(t *testing.T) { - cache := NewMemStore( - store.WithContext( - NewContext( - context.Background(), - map[string]interface{}{ - "maxCap": 3, - }, - )), - ) - - for i := 0; i < 3; i++ { - v := strconv.Itoa(i) - record := &store.Record{ - Key: v, - Value: []byte(v), - } - _ = cache.Write(record) - } - - // Read suffix won't change evcition list - _, _ = cache.Read("0", store.ReadSuffix()) - - // new record, to force eviction - newRecord := &store.Record{ - Key: "new", - Value: []byte("newNew"), - } - _ = 
cache.Write(newRecord) - - records, _ := cache.Read("", store.ReadPrefix()) - if len(records) != 3 { - t.Fatalf("Wrong number of record returned, expected 3, got %d", len(records)) - } - - expectedKV := []struct { - Key string - Value string - }{ - {Key: "1", Value: "1"}, - {Key: "2", Value: "2"}, - {Key: "new", Value: "newNew"}, - } - - for index, record := range records { - if record.Key != expectedKV[index].Key { - t.Errorf("Wrong key for index %d, expected %s, got %s", index, expectedKV[index].Key, record.Key) - } - if string(record.Value) != expectedKV[index].Value { - t.Errorf("Wrong value for index %d, expected %s, got %s", index, expectedKV[index].Value, string(record.Value)) - } - } -} - -func TestEvictList(t *testing.T) { - cache := NewMemStore( - store.WithContext( - NewContext( - context.Background(), - map[string]interface{}{ - "maxCap": 3, - }, - )), - ) - - for i := 0; i < 3; i++ { - v := strconv.Itoa(i) - record := &store.Record{ - Key: v, - Value: []byte(v), - } - _ = cache.Write(record) - } - - // List won't change evcition list - _, _ = cache.List() - - // new record, to force eviction - newRecord := &store.Record{ - Key: "new", - Value: []byte("newNew"), - } - _ = cache.Write(newRecord) - - records, _ := cache.Read("", store.ReadPrefix()) - if len(records) != 3 { - t.Fatalf("Wrong number of record returned, expected 3, got %d", len(records)) - } - - expectedKV := []struct { - Key string - Value string - }{ - {Key: "1", Value: "1"}, - {Key: "2", Value: "2"}, - {Key: "new", Value: "newNew"}, - } - - for index, record := range records { - if record.Key != expectedKV[index].Key { - t.Errorf("Wrong key for index %d, expected %s, got %s", index, expectedKV[index].Key, record.Key) - } - if string(record.Value) != expectedKV[index].Value { - t.Errorf("Wrong value for index %d, expected %s, got %s", index, expectedKV[index].Value, string(record.Value)) - } - } -} - -func TestExpireReadPrefix(t *testing.T) { - cache := NewMemStore() - - record := &store.Record{} - for i := 0; i < 20; i++ { - v := strconv.Itoa(i) - record.Key = v - record.Value = []byte(v) - if i%2 == 0 { - record.Expiry = time.Duration(i) * time.Minute - } else { - record.Expiry = time.Duration(-i) * time.Minute - } - _ = cache.Write(record) - } - - records, _ := cache.Read("", store.ReadPrefix()) - if len(records) != 10 { - t.Fatalf("Wrong number of records, expected 10, got %d", len(records)) - } - - var expKeys []string - for i := 0; i < 20; i++ { - if i%2 == 0 { - expKeys = append(expKeys, strconv.Itoa(i)) - } - } - sort.Strings(expKeys) - - expKeyIndex := 0 - for _, record := range records { - if record.Key != expKeys[expKeyIndex] { - t.Fatalf("Wrong expected key, expected %s, got %s", expKeys[expKeyIndex], record.Key) - } - expKeyIndex++ - } -} - -func TestExpireReadSuffix(t *testing.T) { - cache := NewMemStore() - - record := &store.Record{} - for i := 0; i < 20; i++ { - v := strconv.Itoa(i) - record.Key = v - record.Value = []byte(v) - if i%2 == 0 { - record.Expiry = time.Duration(i) * time.Minute - } else { - record.Expiry = time.Duration(-i) * time.Minute - } - _ = cache.Write(record) - } - - records, _ := cache.Read("", store.ReadSuffix()) - if len(records) != 10 { - t.Fatalf("Wrong number of records, expected 10, got %d", len(records)) - } - - var expKeys []string - for i := 0; i < 20; i++ { - if i%2 == 0 { - expKeys = append(expKeys, strconv.Itoa(i)) - } - } - sort.Slice(expKeys, func(i, j int) bool { - return reverseString(expKeys[i]) < reverseString(expKeys[j]) - }) - - expKeyIndex := 0 - for _, 
record := range records { - if record.Key != expKeys[expKeyIndex] { - t.Fatalf("Wrong expected key, expected %s, got %s", expKeys[expKeyIndex], record.Key) - } - expKeyIndex++ - } -} - -func TestExpireList(t *testing.T) { - cache := NewMemStore() - - record := &store.Record{} - for i := 0; i < 20; i++ { - v := strconv.Itoa(i) - record.Key = v - record.Value = []byte(v) - if i%2 == 0 { - record.Expiry = time.Duration(i) * time.Minute - } else { - record.Expiry = time.Duration(-i) * time.Minute - } - _ = cache.Write(record) - } - - keys, _ := cache.List() - if len(keys) != 10 { - t.Fatalf("Wrong number of records, expected 10, got %d", len(keys)) - } - - var expKeys []string - for i := 0; i < 20; i++ { - if i%2 == 0 { - expKeys = append(expKeys, strconv.Itoa(i)) - } - } - sort.Strings(expKeys) - - expKeyIndex := 0 - for _, key := range keys { - if key != expKeys[expKeyIndex] { - t.Fatalf("Wrong expected key, expected %s, got %s", expKeys[expKeyIndex], key) - } - expKeyIndex++ - } -} - -func TestExpireListPrefix(t *testing.T) { - cache := NewMemStore() - - record := &store.Record{} - for i := 0; i < 20; i++ { - v := strconv.Itoa(i) - record.Key = v - record.Value = []byte(v) - if i%2 == 0 { - record.Expiry = time.Duration(i) * time.Minute - } else { - record.Expiry = time.Duration(-i) * time.Minute - } - _ = cache.Write(record) - } - - keys, _ := cache.List(store.ListPrefix("1")) - if len(keys) != 5 { - t.Fatalf("Wrong number of records, expected 5, got %d", len(keys)) - } - - var expKeys []string - for i := 0; i < 20; i++ { - v := strconv.Itoa(i) - if i%2 == 0 && strings.HasPrefix(v, "1") { - expKeys = append(expKeys, v) - } - } - sort.Strings(expKeys) - - expKeyIndex := 0 - for _, key := range keys { - if key != expKeys[expKeyIndex] { - t.Fatalf("Wrong expected key, expected %s, got %s", expKeys[expKeyIndex], key) - } - expKeyIndex++ - } -} - -func TestExpireListSuffix(t *testing.T) { - cache := NewMemStore() - - record := &store.Record{} - for i := 0; i < 20; i++ { - v := strconv.Itoa(i) - record.Key = v - record.Value = []byte(v) - if i%2 == 0 { - record.Expiry = time.Duration(i) * time.Minute - } else { - record.Expiry = time.Duration(-i) * time.Minute - } - _ = cache.Write(record) - } - - keys, _ := cache.List(store.ListSuffix("8")) - if len(keys) != 2 { - t.Fatalf("Wrong number of records, expected 2, got %d", len(keys)) - } - - var expKeys []string - for i := 0; i < 20; i++ { - v := strconv.Itoa(i) - if i%2 == 0 && strings.HasSuffix(v, "8") { - expKeys = append(expKeys, v) - } - } - sort.Slice(expKeys, func(i, j int) bool { - return reverseString(expKeys[i]) < reverseString(expKeys[j]) - }) - - expKeyIndex := 0 - for _, key := range keys { - if key != expKeys[expKeyIndex] { - t.Fatalf("Wrong expected key, expected %s, got %s", expKeys[expKeyIndex], key) - } - expKeyIndex++ - } -} - -func TestConcurrentWrite(t *testing.T) { - nThreads := []int{3, 10, 50} - - for _, threads := range nThreads { - t.Run("T"+strconv.Itoa(threads), func(t *testing.T) { - cache := NewMemStore( - store.WithContext( - NewContext( - context.Background(), - map[string]interface{}{ - "maxCap": 50000, - }, - )), - ) - - var wg sync.WaitGroup - var index int64 - - wg.Add(threads) - for i := 0; i < threads; i++ { - go func(cache store.Store, ind *int64) { - j := atomic.AddInt64(ind, 1) - 1 - for j < 100000 { - v := strconv.FormatInt(j, 10) - record := &store.Record{ - Key: v, - Value: []byte(v), - } - _ = cache.Write(record) - j = atomic.AddInt64(ind, 1) - 1 - } - wg.Done() - }(cache, &index) - } - wg.Wait() - - 
records, _ := cache.Read("", store.ReadPrefix()) - if len(records) != 50000 { - t.Fatalf("Wrong number of records, expected 50000, got %d", len(records)) - } - for _, record := range records { - if record.Key != string(record.Value) { - t.Fatalf("Wrong record found, key %s, value %s", record.Key, string(record.Value)) - } - } - }) - } -} - -func BenchmarkWrite(b *testing.B) { - cacheSizes := []int{512, 1024, 10000, 50000, 1000000} - - for _, size := range cacheSizes { - cache := NewMemStore( - store.WithContext( - NewContext( - context.Background(), - map[string]interface{}{ - "maxCap": size, - }, - )), - ) - record := &store.Record{} - - b.Run("CacheSize"+strconv.Itoa(size), func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - // records will be copied, so it's safe to overwrite the previous record - v := strconv.Itoa(i) - record.Key = v - record.Value = []byte(v) - _ = cache.Write(record) - } - }) - } -} - -func BenchmarkRead(b *testing.B) { - cacheSizes := []int{512, 1024, 10000, 50000, 1000000} - - for _, size := range cacheSizes { - cache := NewMemStore( - store.WithContext( - NewContext( - context.Background(), - map[string]interface{}{ - "maxCap": size, - }, - )), - ) - record := &store.Record{} - - for i := 0; i < size; i++ { - // records will be copied, so it's safe to overwrite the previous record - v := strconv.Itoa(i) - record.Key = v - record.Value = []byte(v) - _ = cache.Write(record) - } - b.Run("CacheSize"+strconv.Itoa(size), func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - v := strconv.Itoa(i) - _, _ = cache.Read(v) - } - }) - } -} - -func BenchmarkWriteMedKey(b *testing.B) { - cacheSizes := []int{512, 1024, 10000, 50000, 1000000} - - h := fnv.New128() - for _, size := range cacheSizes { - cache := NewMemStore( - store.WithContext( - NewContext( - context.Background(), - map[string]interface{}{ - "maxCap": size, - }, - )), - ) - record := &store.Record{} - - b.Run("CacheSize"+strconv.Itoa(size), func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - h.Reset() - v := strconv.Itoa(i) - bys := []byte(v) - h.Write(bys) - // records will be copied, so it's safe to overwrite the previous record - record.Key = hex.EncodeToString(h.Sum(nil)) - record.Value = bys - _ = cache.Write(record) - } - }) - } -} - -func BenchmarkReadMedKey(b *testing.B) { - cacheSizes := []int{512, 1024, 10000, 50000, 1000000} - - h := fnv.New128() - for _, size := range cacheSizes { - cache := NewMemStore( - store.WithContext( - NewContext( - context.Background(), - map[string]interface{}{ - "maxCap": size, - }, - )), - ) - record := &store.Record{} - - for i := 0; i < size; i++ { - h.Reset() - v := strconv.Itoa(i) - bys := []byte(v) - h.Write(bys) - // records will be copied, so it's safe to overwrite the previous record - record.Key = hex.EncodeToString(h.Sum(nil)) - record.Value = bys - _ = cache.Write(record) - } - b.Run("CacheSize"+strconv.Itoa(size), func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - h.Reset() - v := strconv.Itoa(i) - bys := []byte(v) - h.Write(bys) - _, _ = cache.Read(hex.EncodeToString(h.Sum(nil))) - } - }) - } -} - -func BenchmarkReadMedKeyPrefix(b *testing.B) { - cacheSizes := []int{512, 1024, 10000, 50000, 1000000} - - h := fnv.New128() - for _, size := range cacheSizes { - cache := NewMemStore( - store.WithContext( - NewContext( - context.Background(), - map[string]interface{}{ - "maxCap": size, - }, - )), - ) - record := &store.Record{} - - for i := 0; i < size; i++ { - h.Reset() - v := strconv.Itoa(i) - bys 
:= []byte(v) - h.Write(bys) - // records will be copied, so it's safe to overwrite the previous record - record.Key = hex.EncodeToString(h.Sum(nil)) - record.Value = bys - _ = cache.Write(record) - } - b.Run("CacheSize"+strconv.Itoa(size), func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - h.Reset() - v := strconv.Itoa(i) - bys := []byte(v) - h.Write(bys) - _, _ = cache.Read(hex.EncodeToString(h.Sum(nil))[:10], store.ReadPrefix(), store.ReadLimit(50)) - } - }) - } -} - -func BenchmarkReadMedKeySuffix(b *testing.B) { - cacheSizes := []int{512, 1024, 10000, 50000, 1000000} - - h := fnv.New128() - for _, size := range cacheSizes { - cache := NewMemStore( - store.WithContext( - NewContext( - context.Background(), - map[string]interface{}{ - "maxCap": size, - }, - )), - ) - record := &store.Record{} - - for i := 0; i < size; i++ { - h.Reset() - v := strconv.Itoa(i) - bys := []byte(v) - h.Write(bys) - // records will be copied, so it's safe to overwrite the previous record - record.Key = hex.EncodeToString(h.Sum(nil)) - record.Value = bys - _ = cache.Write(record) - } - b.Run("CacheSize"+strconv.Itoa(size), func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - h.Reset() - v := strconv.Itoa(i) - bys := []byte(v) - h.Write(bys) - _, _ = cache.Read(hex.EncodeToString(h.Sum(nil))[23:], store.ReadSuffix(), store.ReadLimit(50)) - } - }) - } -} - -func concurrentStoreBench(b *testing.B, threads int) { - benchTest := map[string]int{ - "DefCap": 512, - "LimCap": 3, - "BigCap": 1000000, - } - for testname, size := range benchTest { - b.Run(testname, func(b *testing.B) { - cache := NewMemStore( - store.WithContext( - NewContext( - context.Background(), - map[string]interface{}{ - "maxCap": size, - }, - )), - ) - - b.SetParallelism(threads) - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - h := fnv.New128() - record := &store.Record{} - for pb.Next() { - h.Reset() - v := strconv.Itoa(rand.Int()) //nolint:gosec - bys := []byte(v) - h.Write(bys) - // records will be copied, so it's safe to overwrite the previous record - record.Key = hex.EncodeToString(h.Sum(nil)) - record.Value = bys - _ = cache.Write(record) - } - }) - }) - } -} - -func concurrentRetrieveBench(b *testing.B, threads int) { - benchTest := map[string]int{ - "DefCap": 512, - "LimCap": 3, - "BigCap": 1000000, - } - for testname, size := range benchTest { - b.Run(testname, func(b *testing.B) { - cache := NewMemStore( - store.WithContext( - NewContext( - context.Background(), - map[string]interface{}{ - "maxCap": size, - }, - )), - ) - - record := &store.Record{} - for i := 0; i < size; i++ { - v := strconv.Itoa(i) - record.Key = v - record.Value = []byte(v) - _ = cache.Write(record) - } - - b.SetParallelism(threads) - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - v := strconv.Itoa(rand.Intn(size * 2)) //nolint:gosec - _, _ = cache.Read(v) - } - }) - }) - } -} - -func concurrentRemoveBench(b *testing.B, threads int) { - benchTest := map[string]int{ - "DefCap": 512, - "LimCap": 3, - "BigCap": 1000000, - } - for testname, size := range benchTest { - b.Run(testname, func(b *testing.B) { - cache := NewMemStore( - store.WithContext( - NewContext( - context.Background(), - map[string]interface{}{ - "maxCap": size, - }, - )), - ) - - record := &store.Record{} - for i := 0; i < size; i++ { - v := strconv.Itoa(i) - record.Key = v - record.Value = []byte(v) - _ = cache.Write(record) - } - - b.SetParallelism(threads) - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - record := 
&store.Record{} - for pb.Next() { - v := strconv.Itoa(rand.Intn(size * 2)) //nolint:gosec - _ = cache.Delete(v) - record.Key = v - record.Value = []byte(v) - _ = cache.Write(record) - } - }) - }) - } -} - -func BenchmarkConcurrent(b *testing.B) { - threads := []int{3, 10, 50} - for _, nThreads := range threads { - nt := strconv.Itoa(nThreads) - b.Run("StoreT"+nt, func(b *testing.B) { - concurrentStoreBench(b, nThreads) - }) - b.Run("RetrieveT"+nt, func(b *testing.B) { - concurrentRetrieveBench(b, nThreads) - }) - b.Run("RemoveT"+nt, func(b *testing.B) { - concurrentRemoveBench(b, nThreads) - }) - } -} diff --git a/ocis-pkg/store/memory/multimemstore_test.go b/ocis-pkg/store/memory/multimemstore_test.go deleted file mode 100644 index 30a9c8851f..0000000000 --- a/ocis-pkg/store/memory/multimemstore_test.go +++ /dev/null @@ -1,172 +0,0 @@ -package memory - -import ( - "context" - "strconv" - "testing" - - "go-micro.dev/v4/store" -) - -func TestWriteReadTables(t *testing.T) { - cache := NewMultiMemStore() - - record1 := &store.Record{ - Key: "sameKey", - Value: []byte("from record1"), - } - record2 := &store.Record{ - Key: "sameKey", - Value: []byte("from record2"), - } - - _ = cache.Write(record1) - _ = cache.Write(record2, store.WriteTo("DB02", "Table02")) - - records1, _ := cache.Read("sameKey") - if len(records1) != 1 { - t.Fatalf("Wrong number of records, expected 1, got %d", len(records1)) - } - if records1[0].Key != "sameKey" { - t.Errorf("Wrong key, expected \"sameKey\", got %s", records1[0].Key) - } - if string(records1[0].Value) != "from record1" { - t.Errorf("Wrong value, expected \"from record1\", got %s", string(records1[0].Value)) - } - - records2, _ := cache.Read("sameKey", store.ReadFrom("DB02", "Table02")) - if len(records2) != 1 { - t.Fatalf("Wrong number of records, expected 1, got %d", len(records2)) - } - if records2[0].Key != "sameKey" { - t.Errorf("Wrong key, expected \"sameKey\", got %s", records2[0].Key) - } - if string(records2[0].Value) != "from record2" { - t.Errorf("Wrong value, expected \"from record2\", got %s", string(records2[0].Value)) - } -} - -func TestDeleteTables(t *testing.T) { - cache := NewMultiMemStore() - - record1 := &store.Record{ - Key: "sameKey", - Value: []byte("from record1"), - } - record2 := &store.Record{ - Key: "sameKey", - Value: []byte("from record2"), - } - - _ = cache.Write(record1) - _ = cache.Write(record2, store.WriteTo("DB02", "Table02")) - - records1, _ := cache.Read("sameKey") - if len(records1) != 1 { - t.Fatalf("Wrong number of records, expected 1, got %d", len(records1)) - } - if records1[0].Key != "sameKey" { - t.Errorf("Wrong key, expected \"sameKey\", got %s", records1[0].Key) - } - if string(records1[0].Value) != "from record1" { - t.Errorf("Wrong value, expected \"from record1\", got %s", string(records1[0].Value)) - } - - records2, _ := cache.Read("sameKey", store.ReadFrom("DB02", "Table02")) - if len(records2) != 1 { - t.Fatalf("Wrong number of records, expected 1, got %d", len(records2)) - } - if records2[0].Key != "sameKey" { - t.Errorf("Wrong key, expected \"sameKey\", got %s", records2[0].Key) - } - if string(records2[0].Value) != "from record2" { - t.Errorf("Wrong value, expected \"from record2\", got %s", string(records2[0].Value)) - } - - _ = cache.Delete("sameKey") - if _, err := cache.Read("sameKey"); err != store.ErrNotFound { - t.Errorf("Key \"sameKey\" still exists after deletion") - } - - records2, _ = cache.Read("sameKey", store.ReadFrom("DB02", "Table02")) - if len(records2) != 1 { - t.Fatalf("Wrong number 
of records, expected 1, got %d", len(records2)) - } - if records2[0].Key != "sameKey" { - t.Errorf("Wrong key, expected \"sameKey\", got %s", records2[0].Key) - } - if string(records2[0].Value) != "from record2" { - t.Errorf("Wrong value, expected \"from record2\", got %s", string(records2[0].Value)) - } -} - -func TestListTables(t *testing.T) { - cache := NewMultiMemStore() - - record1 := &store.Record{ - Key: "key001", - Value: []byte("from record1"), - } - record2 := &store.Record{ - Key: "key002", - Value: []byte("from record2"), - } - - _ = cache.Write(record1) - _ = cache.Write(record2, store.WriteTo("DB02", "Table02")) - - keys, _ := cache.List(store.ListFrom("DB02", "Table02")) - expectedKeys := []string{"key002"} - if len(keys) != 1 { - t.Fatalf("Wrong number of keys, expected 1, got %d", len(keys)) - } - for index, key := range keys { - if expectedKeys[index] != key { - t.Errorf("Wrong key for index %d, expected %s, got %s", index, expectedKeys[index], key) - } - } -} - -func TestWriteSizeLimit(t *testing.T) { - cache := NewMultiMemStore( - store.WithContext( - NewContext( - context.Background(), - map[string]interface{}{ - "maxCap": 2, - }, - ), - ), - ) - - record := &store.Record{} - for i := 0; i < 4; i++ { - v := strconv.Itoa(i) - record.Key = v - record.Value = []byte(v) - _ = cache.Write(record) - _ = cache.Write(record, store.WriteTo("DB02", "Table02")) - } - - keys1, _ := cache.List() - expectedKeys1 := []string{"2", "3"} - if len(keys1) != 2 { - t.Fatalf("Wrong number of keys, expected 2, got %d", len(keys1)) - } - for index, key := range keys1 { - if expectedKeys1[index] != key { - t.Errorf("Wrong key for index %d, expected %s, got %s", index, expectedKeys1[index], key) - } - } - - keys2, _ := cache.List(store.ListFrom("DB02", "Table02")) - expectedKeys2 := []string{"2", "3"} - if len(keys2) != 2 { - t.Fatalf("Wrong number of keys, expected 2, got %d", len(keys2)) - } - for index, key := range keys2 { - if expectedKeys2[index] != key { - t.Errorf("Wrong key for index %d, expected %s, got %s", index, expectedKeys2[index], key) - } - } -} diff --git a/ocis/pkg/command/decomposedfs.go b/ocis/pkg/command/decomposedfs.go index 8c10f60ca2..06b78b939b 100644 --- a/ocis/pkg/command/decomposedfs.go +++ b/ocis/pkg/command/decomposedfs.go @@ -8,6 +8,7 @@ import ( "sort" "strings" + "github.com/cs3org/reva/v2/pkg/storage/cache" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options" @@ -157,7 +158,7 @@ func backend(root, backend string) metadata.Backend { case "xattrs": return metadata.XattrsBackend{} case "mpk": - return metadata.NewMessagePackBackend(root, options.CacheOptions{}) + return metadata.NewMessagePackBackend(root, cache.Config{}) } return metadata.NullBackend{} } diff --git a/services/eventhistory/pkg/command/server.go b/services/eventhistory/pkg/command/server.go index 942ad7c87e..eb27517724 100644 --- a/services/eventhistory/pkg/command/server.go +++ b/services/eventhistory/pkg/command/server.go @@ -5,10 +5,10 @@ import ( "fmt" "github.com/cs3org/reva/v2/pkg/events/stream" + "github.com/cs3org/reva/v2/pkg/store" "github.com/oklog/run" "github.com/owncloud/ocis/v2/ocis-pkg/config/configlog" ogrpc "github.com/owncloud/ocis/v2/ocis-pkg/service/grpc" - "github.com/owncloud/ocis/v2/ocis-pkg/store" "github.com/owncloud/ocis/v2/ocis-pkg/version" "github.com/owncloud/ocis/v2/services/eventhistory/pkg/config" 
"github.com/owncloud/ocis/v2/services/eventhistory/pkg/config/parser" @@ -57,7 +57,7 @@ func Server(cfg *config.Config) *cli.Command { st := store.Create( store.Store(cfg.Store.Store), - store.TTL(cfg.Store.RecordExpiry), + store.TTL(cfg.Store.TTL), store.Size(cfg.Store.Size), microstore.Nodes(cfg.Store.Nodes...), microstore.Database(cfg.Store.Database), diff --git a/services/eventhistory/pkg/config/config.go b/services/eventhistory/pkg/config/config.go index ca3eb7fdca..b8ba2cf98f 100644 --- a/services/eventhistory/pkg/config/config.go +++ b/services/eventhistory/pkg/config/config.go @@ -34,12 +34,12 @@ type GRPCConfig struct { // Store configures the store to use type Store struct { - Store string `yaml:"store" env:"OCIS_PERSISTENT_STORE;EVENTHISTORY_STORE;OCIS_PERSISTENT_STORE_TYPE;EVENTHISTORY_STORE_TYPE" desc:"The type of the eventhistory store. Supported values are: 'memory', 'ocmem', 'etcd', 'redis', 'redis-sentinel', 'nats-js', 'noop'. See the text description for details."` - Nodes []string `yaml:"nodes" env:"OCIS_PERSISTENT_STORE_NODES;EVENTHISTORY_STORE_NODES;EVENTHISTORY_STORE_ADDRESSES" desc:"A comma separated list of nodes to access the configured store. This has no effect when 'in-memory' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store." deprecationVersion:"3.0" removalVersion:"4.0.0" deprecationInfo:"EVENTHISTORY_STORE_ADDRESSES name needs to be harmonized" deprecationReplacement:"EVENTHISTORY_STORE_NODES"` - Database string `yaml:"database" env:"EVENTHISTORY_STORE_DATABASE" desc:"The database name the configured store should use."` - Table string `yaml:"table" env:"EVENTHISTORY_STORE_TABLE" desc:"The database table the store should use."` - RecordExpiry time.Duration `yaml:"record_expiry" env:"EVENTHISTORY_RECORD_EXPIRY" desc:"Time to live for events in the store. The duration can be set as number followed by a unit identifier like s, m or h. Defaults to '336h' (2 weeks)."` - Size int `yaml:"size" env:"EVENTHISTORY_STORE_SIZE" desc:"The maximum quantity of items in the store. Only applies when store type 'ocmem' is configured. Defaults to 512."` + Store string `yaml:"store" env:"OCIS_PERSISTENT_STORE;EVENTHISTORY_STORE" desc:"The type of the store. Supported values are: 'memory', 'ocmem', 'etcd', 'redis', 'redis-sentinel', 'nats-js', 'noop'. See the text description for details."` + Nodes []string `yaml:"nodes" env:"OCIS_PERSISTENT_STORE_NODES;EVENTHISTORY_STORE_NODES" desc:"A comma separated list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store."` + Database string `yaml:"database" env:"EVENTHISTORY_STORE_DATABASE" desc:"The database name the configured store should use."` + Table string `yaml:"table" env:"EVENTHISTORY_STORE_TABLE" desc:"The database table the store should use."` + TTL time.Duration `yaml:"ttl" env:"OCIS_PERSISTENT_STORE_TTL;EVENTHISTORY_STORE_TTL" desc:"Time to live for events in the store. The duration can be set as number followed by a unit identifier like s, m or h. Defaults to '336h' (2 weeks)."` + Size int `yaml:"size" env:"OCIS_PERSISTENT_STORE_SIZE;EVENTHISTORY_STORE_SIZE" desc:"The maximum quantity of items in the store. Only applies when store type 'ocmem' is configured. Defaults to 512."` } // Events combines the configuration options for the event bus. 
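A minimal sketch (not part of the patch) of how a service assembles its store from the harmonized Store config after this change. The helper name newEventStore is hypothetical, and the "microstore" alias for go-micro's store package is an assumption matching its usage in the server.go hunk above; the calls store.Create, store.Store, store.TTL, store.Size, microstore.Nodes and microstore.Database are the ones visible in that hunk.

package main

import (
	"time"

	"github.com/cs3org/reva/v2/pkg/store"
	microstore "go-micro.dev/v4/store"
)

// newEventStore builds a go-micro store from the harmonized Store config values
// (type, nodes, database, TTL, size). With the in-memory stores, nodes have no effect.
func newEventStore(storeType string, nodes []string, database string, ttl time.Duration, size int) microstore.Store {
	return store.Create(
		store.Store(storeType),        // OCIS_PERSISTENT_STORE / EVENTHISTORY_STORE, e.g. "memory" or "nats-js"
		store.TTL(ttl),                // OCIS_PERSISTENT_STORE_TTL / EVENTHISTORY_STORE_TTL, 336h by default here
		store.Size(size),              // only relevant for the 'ocmem' store
		microstore.Nodes(nodes...),    // ignored by 'memory' and 'ocmem'
		microstore.Database(database), // e.g. "eventhistory"
	)
}

func main() {
	st := newEventStore("memory", nil, "eventhistory", 336*time.Hour, 512)
	// Records carry an optional per-record expiry, mirroring the StoreEvents write in the service hunk below.
	_ = st.Write(&microstore.Record{Key: "event-id", Value: []byte("payload"), Expiry: 336 * time.Hour})
}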
diff --git a/services/eventhistory/pkg/config/defaults/defaultconfig.go b/services/eventhistory/pkg/config/defaults/defaultconfig.go index b9efcdc0fb..47b5fc637a 100644 --- a/services/eventhistory/pkg/config/defaults/defaultconfig.go +++ b/services/eventhistory/pkg/config/defaults/defaultconfig.go @@ -27,10 +27,10 @@ func DefaultConfig() *config.Config { EnableTLS: false, }, Store: config.Store{ - Store: "memory", - Database: "eventhistory", - Table: "events", - RecordExpiry: 336 * time.Hour, + Store: "memory", + Database: "eventhistory", + Table: "events", + TTL: 336 * time.Hour, }, GRPC: config.GRPCConfig{ Addr: "127.0.0.1:0", diff --git a/services/eventhistory/pkg/service/service.go b/services/eventhistory/pkg/service/service.go index b95a24d386..66019532c5 100644 --- a/services/eventhistory/pkg/service/service.go +++ b/services/eventhistory/pkg/service/service.go @@ -61,7 +61,7 @@ func (eh *EventHistoryService) StoreEvents() { if err := eh.store.Write(&store.Record{ Key: event.ID, Value: ev, - Expiry: eh.cfg.Store.RecordExpiry, + Expiry: eh.cfg.Store.TTL, Metadata: map[string]interface{}{ "type": event.Type, }, diff --git a/services/eventhistory/pkg/service/service_test.go b/services/eventhistory/pkg/service/service_test.go index c8b78f98c8..0d3510df36 100644 --- a/services/eventhistory/pkg/service/service_test.go +++ b/services/eventhistory/pkg/service/service_test.go @@ -9,11 +9,11 @@ import ( userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" "github.com/cs3org/reva/v2/pkg/events" + "github.com/cs3org/reva/v2/pkg/store" "github.com/google/uuid" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/owncloud/ocis/v2/ocis-pkg/log" - "github.com/owncloud/ocis/v2/ocis-pkg/store" ehsvc "github.com/owncloud/ocis/v2/protogen/gen/ocis/services/eventhistory/v0" "github.com/owncloud/ocis/v2/services/eventhistory/pkg/config" "github.com/owncloud/ocis/v2/services/eventhistory/pkg/service" diff --git a/services/frontend/pkg/config/config.go b/services/frontend/pkg/config/config.go index 2c9bc83d59..bf0e6e4458 100644 --- a/services/frontend/pkg/config/config.go +++ b/services/frontend/pkg/config/config.go @@ -123,27 +123,18 @@ type OCS struct { SharePrefix string `yaml:"share_prefix" env:"FRONTEND_OCS_SHARE_PREFIX" desc:"Path prefix for shares as part of an ocis resource. Note that the path must start with '/'."` HomeNamespace string `yaml:"home_namespace" env:"FRONTEND_OCS_PERSONAL_NAMESPACE;FRONTEND_OCS_HOME_NAMESPACE" desc:"Homespace namespace identifier." deprecationVersion:"3.0" removalVersion:"4.0.0" deprecationInfo:"FRONTEND_OCS_HOME_NAMESPACE changing name for consistency" deprecationReplacement:"FRONTEND_OCS_PERSONAL_NAMESPACE"` AdditionalInfoAttribute string `yaml:"additional_info_attribute" env:"FRONTEND_OCS_ADDITIONAL_INFO_ATTRIBUTE" desc:"Additional information attribute for the user like {{.Mail}}."` - ResourceInfoCacheTTL int `yaml:"resource_info_cache_ttl" env:"FRONTEND_OCS_RESOURCE_INFO_CACHE_TTL" desc:"Max TTL in seconds for the resource info cache. 0 disables the cache."` - ResourceInfoCacheType string `yaml:"resource_info_cache_type" env:"FRONTEND_OCS_RESOURCE_INFO_CACHE_TYPE" desc:"The type of the resource info cache. Supported values are 'memory' and 'redis'."` - ResourceInfoCaches ResourceInfoCaches `yaml:"resource_info_caches,omitempty"` // only used for redis + ResourceInfoCacheType string `yaml:"resource_info_cache_type" env:"OCIS_CACHE_STORE;FRONTEND_OCS_RESOURCE_INFO_CACHE_STORE" desc:"The type of the cache store. 
Supported values are: 'memory', 'ocmem', 'etcd', 'redis', 'redis-sentinel', 'nats-js', 'noop'. See the text description for details."` + ResourceInfoCacheNodes []string `yaml:"resource_info_cache_nodes" env:"OCIS_CACHE_STORE_NODES;FRONTEND_OCS_RESOURCE_INFO_CACHE_STORE_NODES" desc:"A comma separated list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store."` + ResourceInfoCacheDatabase string `yaml:"resource_info_cache_database" env:"OCIS_CACHE_DATABASE" desc:"The database name the configured store should use."` + ResourceInfoCacheTable string `yaml:"resource_info_cache_table" env:"FRONTEND_OCS_RESOURCE_INFO_CACHE_TABLE" desc:"The database table the store should use."` + ResourceInfoCacheTTL int `yaml:"resource_info_cache_ttl" env:"OCIS_CACHE_TTL;FRONTEND_OCS_RESOURCE_INFO_CACHE_TTL" desc:"Max TTL in seconds for the resource info cache."` + ResourceInfoCacheSize int `yaml:"resource_info_cache_size" env:"OCIS_CACHE_SIZE;FRONTEND_OCS_RESOURCE_INFO_CACHE_SIZE" desc:"Max number of entries to hold in the cache."` CacheWarmupDriver string `yaml:"cache_warmup_driver,omitempty"` // not supported by the oCIS product, therefore not part of docs CacheWarmupDrivers CacheWarmupDrivers `yaml:"cache_warmup_drivers,omitempty"` // not supported by the oCIS product, therefore not part of docs EnableDenials bool `yaml:"enable_denials" env:"FRONTEND_OCS_ENABLE_DENIALS" desc:"EXPERIMENTAL: enable the feature to deny access on folders."` WriteablePublicShareMustHavePassword bool `yaml:"public_sharing_writeableshare_must_have_password" env:"OCIS_SHARING_PUBLIC_WRITEABLE_SHARE_MUST_HAVE_PASSWORD;FRONTEND_OCS_PUBLIC_WRITEABLE_SHARE_MUST_HAVE_PASSWORD" desc:"Set this to true if you want to enforce passwords on Uploader, Editor or Contributor shares."` } -// ResourceInfoCaches holds resource info cache configurations -type ResourceInfoCaches struct { - Redis RedisDriver `yaml:"redis,omitempty"` -} - -// RedisDriver holds redis configuration -type RedisDriver struct { - Address string `yaml:"address" env:"FRONTEND_OCS_RESOURCE_INFO_CACHE_REDIS_ADDR" desc:"A comma separated list of addresses to access the configured store. This has no effect when the 'memory' store is configured. 
Note that the behaviour how addresses are used is dependent on the library of the configured store."` - Username string `yaml:"username" env:"FRONTEND_OCS_RESOURCE_INFO_CACHE_REDIS_USERNAME" desc:"The username to access the redis cache."` - Password string `yaml:"password" env:"FRONTEND_OCS_RESOURCE_INFO_CACHE_REDIS_PASSWORD" desc:"The password to access the redis cache."` -} - type CacheWarmupDrivers struct { CBOX CBOXDriver `yaml:"cbox,omitempty"` } diff --git a/services/frontend/pkg/config/defaults/defaultconfig.go b/services/frontend/pkg/config/defaults/defaultconfig.go index ed03512773..4d678662e3 100644 --- a/services/frontend/pkg/config/defaults/defaultconfig.go +++ b/services/frontend/pkg/config/defaults/defaultconfig.go @@ -102,12 +102,13 @@ func DefaultConfig() *config.Config { Prefix: "data", }, OCS: config.OCS{ - Prefix: "ocs", - SharePrefix: "/Shares", - HomeNamespace: "/users/{{.Id.OpaqueId}}", - AdditionalInfoAttribute: "{{.Mail}}", - ResourceInfoCacheType: "memory", - ResourceInfoCacheTTL: 0, + Prefix: "ocs", + SharePrefix: "/Shares", + HomeNamespace: "/users/{{.Id.OpaqueId}}", + AdditionalInfoAttribute: "{{.Mail}}", + ResourceInfoCacheType: "memory", + ResourceInfoCacheDatabase: "frontend", + ResourceInfoCacheTTL: 0, }, Middleware: config.Middleware{ Auth: config.Auth{ diff --git a/services/frontend/pkg/revaconfig/config.go b/services/frontend/pkg/revaconfig/config.go index 00456fa2ce..cda78110e7 100644 --- a/services/frontend/pkg/revaconfig/config.go +++ b/services/frontend/pkg/revaconfig/config.go @@ -146,24 +146,20 @@ func FrontendConfigFromStruct(cfg *config.Config) (map[string]interface{}, error "insecure": true, }, "ocs": map[string]interface{}{ - "storage_registry_svc": cfg.Reva.Address, - "share_prefix": cfg.OCS.SharePrefix, - "home_namespace": cfg.OCS.HomeNamespace, - "resource_info_cache_ttl": cfg.OCS.ResourceInfoCacheTTL, - "resource_info_cache_type": cfg.OCS.ResourceInfoCacheType, - "resource_info_caches": map[string]interface{}{ - // memory has no additional config - "redis": map[string]interface{}{ - "redis_address": cfg.OCS.ResourceInfoCaches.Redis.Address, - "redis_username": cfg.OCS.ResourceInfoCaches.Redis.Username, - "redis_password": cfg.OCS.ResourceInfoCaches.Redis.Password, - }, - }, - "prefix": cfg.OCS.Prefix, - "additional_info_attribute": cfg.OCS.AdditionalInfoAttribute, - "machine_auth_apikey": cfg.MachineAuthAPIKey, - "enable_denials": cfg.OCS.EnableDenials, - "cache_warmup_driver": cfg.OCS.CacheWarmupDriver, + "storage_registry_svc": cfg.Reva.Address, + "share_prefix": cfg.OCS.SharePrefix, + "home_namespace": cfg.OCS.HomeNamespace, + "resource_info_cache_ttl": cfg.OCS.ResourceInfoCacheTTL, + "resource_info_cache_size": cfg.OCS.ResourceInfoCacheSize, + "resource_info_cache_store": cfg.OCS.ResourceInfoCacheType, + "resource_info_cache_nodes": cfg.OCS.ResourceInfoCacheNodes, + "resource_info_cache_database": cfg.OCS.ResourceInfoCacheDatabase, + "resource_info_cache_table": cfg.OCS.ResourceInfoCacheTable, + "prefix": cfg.OCS.Prefix, + "additional_info_attribute": cfg.OCS.AdditionalInfoAttribute, + "machine_auth_apikey": cfg.MachineAuthAPIKey, + "enable_denials": cfg.OCS.EnableDenials, + "cache_warmup_driver": cfg.OCS.CacheWarmupDriver, "cache_warmup_drivers": map[string]interface{}{ "cbox": map[string]interface{}{ "db_username": cfg.OCS.CacheWarmupDrivers.CBOX.DBUsername, diff --git a/services/gateway/pkg/config/config.go b/services/gateway/pkg/config/config.go index d4e7f8ac9a..fc20663dd5 100644 --- a/services/gateway/pkg/config/config.go +++ 
b/services/gateway/pkg/config/config.go @@ -2,6 +2,7 @@ package config import ( "context" + "time" "github.com/owncloud/ocis/v2/ocis-pkg/shared" ) @@ -88,10 +89,13 @@ type StorageRegistry struct { // Cache holds cache config type Cache struct { - Store string `yaml:"store" env:"OCIS_CACHE_STORE;GATEWAY_CACHE_STORE;OCIS_CACHE_STORE_TYPE;GATEWAY_CACHE_STORE_TYPE" desc:"Store implementation for the cache. Supported values are 'memory' (default), 'redis', and 'etcd'."` - Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES;GATEWAY_CACHE_STORE_NODES;OCIS_CACHE_STORE_ADDRESS;GATEWAY_CACHE_STORE_ADDRESS;GATEWAY_CACHE_NODES" desc:"Nodes to use for the cache store."` - Database string `yaml:"database" env:"GATEWAY_CACHE_DATABASE" desc:"Database name of the cache."` - StatCacheTTL int `yaml:"stat_cache_ttl" env:"OCIS_CACHE_STORE_TTL;GATEWAY_STAT_CACHE_TTL" desc:"Max TTL in seconds for the gateway's stat cache."` - ProviderCacheTTL int `yaml:"provider_cache_ttl" env:"OCIS_CACHE_STORE_TTL;GATEWAY_PROVIDER_CACHE_TTL" desc:"Max TTL in seconds for the gateway's provider cache."` - CreateHomeCacheTTL int `yaml:"create_home_cache_ttl" env:"OCIS_CACHE_STORE_TTL;GATEWAY_CREATE_HOME_CACHE_TTL" desc:"Max TTL in seconds for the gateway's create home cache."` + Store string `yaml:"store" env:"OCIS_CACHE_STORE;GATEWAY_CACHE_STORE" desc:"The type of the cache store. Supported values are: 'memory', 'ocmem', 'etcd', 'redis', 'redis-sentinel', 'nats-js', 'noop'. See the text description for details."` + Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES;GATEWAY_CACHE_STORE_NODES" desc:"A comma separated list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store."` + Database string `yaml:"database" env:"OCIS_CACHE_DATABASE" desc:"The database name the configured store should use."` + StatCacheTTL time.Duration `yaml:"stat_cache_ttl" env:"OCIS_CACHE_TTL;GATEWAY_STAT_CACHE_TTL" desc:"Default time to live for user info in the cache. Only applied when access tokens has no expiration. The duration can be set as number followed by a unit identifier like s, m or h. Defaults to '10s' (10 seconds)."` + StatCacheSize int `yaml:"stat_cache_size" env:"OCIS_CACHE_SIZE;GATEWAY_STAT_CACHE_SIZE" desc:"The maximum quantity of items in the cache. Only applies when store type 'ocmem' is configured. Defaults to 512."` + ProviderCacheTTL time.Duration `yaml:"provider_cache_ttl" env:"OCIS_CACHE_TTL;GATEWAY_PROVIDER_CACHE_TTL" desc:"Default time to live for user info in the cache. Only applied when access tokens has no expiration. The duration can be set as number followed by a unit identifier like s, m or h. Defaults to '10s' (10 seconds)."` + ProviderCacheSize int `yaml:"provider_cache_size" env:"OCIS_CACHE_SIZE;GATEWAY_PROVIDER_CACHE_SIZE" desc:"The maximum quantity of items in the cache. Only applies when store type 'ocmem' is configured. Defaults to 512."` + CreateHomeCacheTTL time.Duration `yaml:"create_home_cache_ttl" env:"OCIS_CACHE_TTL;GATEWAY_CREATE_HOME_CACHE_TTL" desc:"Default time to live for user info in the cache. Only applied when access tokens has no expiration. The duration can be set as number followed by a unit identifier like s, m or h. Defaults to '10s' (10 seconds)."` + CreateHomeCacheSize int `yaml:"create_home_cache_size" env:"OCIS_CACHE_SIZE;GATEWAY_CREATE_HOME_CACHE_SIZE" desc:"The maximum quantity of items in the cache. 
Only applies when store type 'ocmem' is configured. Defaults to 512."` } diff --git a/services/gateway/pkg/revaconfig/config.go b/services/gateway/pkg/revaconfig/config.go index bf0deaa7b1..058551726d 100644 --- a/services/gateway/pkg/revaconfig/config.go +++ b/services/gateway/pkg/revaconfig/config.go @@ -58,12 +58,15 @@ func GatewayConfigFromStruct(cfg *config.Config, logger log.Logger) map[string]i "transfer_shared_secret": cfg.TransferSecret, "transfer_expires": cfg.TransferExpires, // cache and TTLs - "cache_store": cfg.Cache.Store, - "cache_nodes": cfg.Cache.Nodes, - "cache_database": cfg.Cache.Database, - "stat_cache_ttl": cfg.Cache.StatCacheTTL, - "provider_cache_ttl": cfg.Cache.ProviderCacheTTL, - "create_home_cache_ttl": cfg.Cache.CreateHomeCacheTTL, + "cache_store": cfg.Cache.Store, + "cache_nodes": cfg.Cache.Nodes, + "cache_database": cfg.Cache.Database, + "stat_cache_ttl": cfg.Cache.StatCacheTTL, + "stat_cache_size": cfg.Cache.StatCacheSize, + "provider_cache_ttl": cfg.Cache.ProviderCacheTTL, + "provider_cache_size": cfg.Cache.ProviderCacheSize, + "create_home_cache_ttl": cfg.Cache.CreateHomeCacheTTL, + "create_home_cache_size": cfg.Cache.CreateHomeCacheSize, }, "authregistry": map[string]interface{}{ "driver": "static", diff --git a/services/graph/pkg/config/cache.go b/services/graph/pkg/config/cache.go index 48f5921da3..d6c8cfbac7 100644 --- a/services/graph/pkg/config/cache.go +++ b/services/graph/pkg/config/cache.go @@ -4,10 +4,10 @@ import "time" // Cache defines the available configuration for a cache store type Cache struct { - Store string `yaml:"store" env:"OCIS_CACHE_STORE;GRAPH_CACHE_STORE;OCIS_CACHE_STORE_TYPE;GRAPH_CACHE_STORE_TYPE" desc:"The type of the cache store. Supported values are: 'memory', 'ocmem', 'etcd', 'redis', 'redis-sentinel', 'nats-js', 'noop'. See the text description for details."` - Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES;GRAPH_CACHE_STORE_NODES;OCIS_CACHE_STORE_ADDRESSES;GRAPH_CACHE_STORE_ADDRESSES" desc:"A comma-separated list of nodes to connect to. This has no effect when 'in-memory' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store."` + Store string `yaml:"store" env:"OCIS_CACHE_STORE;GRAPH_CACHE_STORE" desc:"The type of the cache store. Supported values are: 'memory', 'ocmem', 'etcd', 'redis', 'redis-sentinel', 'nats-js', 'noop'. See the text description for details."` + Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES;GRAPH_CACHE_STORE_NODES" desc:"A comma separated list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store."` Database string `yaml:"database" env:"GRAPH_CACHE_STORE_DATABASE" desc:"The database name the configured store should use."` Table string `yaml:"table" env:"GRAPH_CACHE_STORE_TABLE" desc:"The database table the store should use."` - TTL time.Duration `yaml:"ttl" env:"OCIS_CACHE_STORE_TTL;GRAPH_CACHE_STORE_TTL" desc:"Time to live for cache records in the graph. The duration can be set as number followed by a unit identifier like s, m or h. Defaults to '336h' (2 weeks)."` - Size int `yaml:"size" env:"OCIS_CACHE_STORE_SIZE;GRAPH_CACHE_STORE_SIZE" desc:"The maximum quantity of items in the store. Only applies when store type 'ocmem' is configured. 
Defaults to 512."` + TTL time.Duration `yaml:"ttl" env:"OCIS_CACHE_TTL;GRAPH_CACHE_TTL" desc:"Time to live for cache records in the graph. The duration can be set as number followed by a unit identifier like s, m or h. Defaults to '336h' (2 weeks)."` + Size int `yaml:"size" env:"OCIS_CACHE_SIZE;GRAPH_CACHE_SIZE" desc:"The maximum quantity of items in the store. Only applies when store type 'ocmem' is configured. Defaults to 512."` } diff --git a/services/graph/pkg/service/v0/service.go b/services/graph/pkg/service/v0/service.go index 8ed8f64196..8984685d37 100644 --- a/services/graph/pkg/service/v0/service.go +++ b/services/graph/pkg/service/v0/service.go @@ -10,6 +10,7 @@ import ( "strconv" "time" + "github.com/cs3org/reva/v2/pkg/store" "github.com/go-chi/chi/v5" "github.com/go-chi/chi/v5/middleware" ldapv3 "github.com/go-ldap/ldap/v3" @@ -18,7 +19,6 @@ import ( ocisldap "github.com/owncloud/ocis/v2/ocis-pkg/ldap" "github.com/owncloud/ocis/v2/ocis-pkg/roles" "github.com/owncloud/ocis/v2/ocis-pkg/service/grpc" - "github.com/owncloud/ocis/v2/ocis-pkg/store" settingssvc "github.com/owncloud/ocis/v2/protogen/gen/ocis/services/settings/v0" "github.com/owncloud/ocis/v2/services/graph/pkg/identity" "github.com/owncloud/ocis/v2/services/graph/pkg/identity/ldap" diff --git a/services/ocs/pkg/config/cachestore.go b/services/ocs/pkg/config/cachestore.go index bff5c27f5e..e8ba225048 100644 --- a/services/ocs/pkg/config/cachestore.go +++ b/services/ocs/pkg/config/cachestore.go @@ -4,10 +4,10 @@ import "time" // Cache defines the available configuration for the cache store type Cache struct { - Store string `yaml:"store" env:"OCIS_CACHE_STORE;OCS_CACHE_STORE;OCIS_CACHE_STORE_TYPE;OCS_CACHE_STORE_TYPE" desc:"The type of the cache store. Supported values are: 'memory', 'ocmem', 'etcd', 'redis', 'redis-sentinel', 'nats-js', 'noop'. See the text description for details."` - Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES;OCS_CACHE_STORE_NODES;OCIS_CACHE_STORE_ADDRESSES;OCS_CACHE_STORE_ADDRESSES" desc:"A comma separated list of nodes to access the configured store. This has no effect when 'in-memory' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store."` + Store string `yaml:"store" env:"OCIS_CACHE_STORE;OCS_CACHE_STORE" desc:"The type of the cache store. Supported values are: 'memory', 'ocmem', 'etcd', 'redis', 'redis-sentinel', 'nats-js', 'noop'. See the text description for details."` + Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES;OCS_CACHE_STORE_NODES" desc:"A comma separated list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store."` Database string `yaml:"database" env:"OCS_CACHE_STORE_DATABASE" desc:"The database name the configured store should use."` Table string `yaml:"table" env:"OCS_CACHE_STORE_TABLE" desc:"The database table the store should use."` - TTL time.Duration `yaml:"ttl" env:"OCIS_CACHE_STORE_TTL;OCS_CACHE_STORE_TTL" desc:"Time to live for events in the store. The duration can be set as number followed by a unit identifier like s, m or h. Defaults to '336h' (2 weeks)."` - Size int `yaml:"size" env:"OCIS_CACHE_STORE_SIZE;OCS_CACHE_STORE_SIZE" desc:"The maximum quantity of items in the store. Only applies when store type 'ocmem' is configured. 
Defaults to 512."` + TTL time.Duration `yaml:"ttl" env:"OCIS_CACHE_TTL;OCS_CACHE_TTL" desc:"Time to live for events in the store. The duration can be set as number followed by a unit identifier like s, m or h. Defaults to '336h' (2 weeks)."` + Size int `yaml:"size" env:"OCIS_CACHE_SIZE;OCS_CACHE_SIZE" desc:"The maximum quantity of items in the store. Only applies when store type 'ocmem' is configured. Defaults to 512."` } diff --git a/services/proxy/pkg/command/server.go b/services/proxy/pkg/command/server.go index 265f36fb5b..3ed00b1050 100644 --- a/services/proxy/pkg/command/server.go +++ b/services/proxy/pkg/command/server.go @@ -9,6 +9,7 @@ import ( "time" "github.com/cs3org/reva/v2/pkg/rgrpc/todo/pool" + "github.com/cs3org/reva/v2/pkg/store" "github.com/cs3org/reva/v2/pkg/token/manager/jwt" "github.com/go-chi/chi/v5" chimiddleware "github.com/go-chi/chi/v5/middleware" @@ -20,7 +21,6 @@ import ( pkgmiddleware "github.com/owncloud/ocis/v2/ocis-pkg/middleware" "github.com/owncloud/ocis/v2/ocis-pkg/oidc" "github.com/owncloud/ocis/v2/ocis-pkg/service/grpc" - "github.com/owncloud/ocis/v2/ocis-pkg/store" "github.com/owncloud/ocis/v2/ocis-pkg/version" settingssvc "github.com/owncloud/ocis/v2/protogen/gen/ocis/services/settings/v0" storesvc "github.com/owncloud/ocis/v2/protogen/gen/ocis/services/store/v0" diff --git a/services/proxy/pkg/config/config.go b/services/proxy/pkg/config/config.go index 22f818e608..000377610f 100644 --- a/services/proxy/pkg/config/config.go +++ b/services/proxy/pkg/config/config.go @@ -121,12 +121,12 @@ type JWKS struct { // Cache is a TTL cache configuration. type Cache struct { - Store string `yaml:"store" env:"OCIS_CACHE_STORE;PROXY_OIDC_USERINFO_CACHE_STORE;OCIS_CACHE_STORE_TYPE;PROXY_OIDC_USERINFO_CACHE_TYPE" desc:"The type of the userinfo cache store. Supported values are: 'memory', 'ocmem', 'etcd', 'redis', 'redis-sentinel', 'nats-js', 'noop'. See the text description for details."` - Nodes []string `yaml:"addresses" env:"OCIS_CACHE_STORE_NODES;PROXY_OIDC_USERINFO_CACHE_NODES;OCIS_CACHE_STORE_ADDRESSES;PROXY_OIDC_USERINFO_CACHE_ADDRESSES" desc:"A comma separated list of nodes to access the configured store. This has no effect when 'in-memory' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store."` - Database string `yaml:"database" env:"PROXY_OIDC_USERINFO_CACHE_DATABASE" desc:"The database name the configured store should use."` + Store string `yaml:"store" env:"OCIS_CACHE_STORE;PROXY_OIDC_USERINFO_CACHE_STORE" desc:"The type of the cache store. Supported values are: 'memory', 'ocmem', 'etcd', 'redis', 'redis-sentinel', 'nats-js', 'noop'. See the text description for details."` + Nodes []string `yaml:"addresses" env:"OCIS_CACHE_STORE_NODES;PROXY_OIDC_USERINFO_CACHE_NODES" desc:"A comma separated list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store."` + Database string `yaml:"database" env:"OCIS_CACHE_DATABASE" desc:"The database name the configured store should use."` Table string `yaml:"table" env:"PROXY_OIDC_USERINFO_CACHE_TABLE" desc:"The database table the store should use."` - TTL time.Duration `yaml:"ttl" env:"OCIS_CACHE_STORE_TTL;PROXY_OIDC_USERINFO_CACHE_TTL" desc:"Default time to live for user info in the user info cache. Only applied when access tokens has no expiration. 
The duration can be set as number followed by a unit identifier like s, m or h. Defaults to '10s' (10 seconds)."` - Size int `yaml:"size" env:"OCIS_CACHE_STORE_SIZE;PROXY_OIDC_USERINFO_CACHE_SIZE" desc:"The maximum quantity of items in the user info cache. Only applies when store type 'ocmem' is configured. Defaults to 512."` + TTL time.Duration `yaml:"ttl" env:"OCIS_CACHE_TTL;PROXY_OIDC_USERINFO_CACHE_TTL" desc:"Default time to live for user info in the user info cache. Only applied when access tokens has no expiration. The duration can be set as number followed by a unit identifier like s, m or h. Defaults to '10s' (10 seconds)."` + Size int `yaml:"size" env:"OCIS_CACHE_SIZE;PROXY_OIDC_USERINFO_CACHE_SIZE" desc:"The maximum quantity of items in the user info cache. Only applies when store type 'ocmem' is configured. Defaults to 512."` } // RoleAssignment contains the configuration for how to assign roles to users during login diff --git a/services/storage-system/pkg/config/config.go b/services/storage-system/pkg/config/config.go index c51752b42b..96c8cf7fcc 100644 --- a/services/storage-system/pkg/config/config.go +++ b/services/storage-system/pkg/config/config.go @@ -2,6 +2,7 @@ package config import ( "context" + "time" "github.com/owncloud/ocis/v2/ocis-pkg/shared" ) @@ -84,7 +85,9 @@ type OCISDriver struct { // Cache holds cache config type Cache struct { - Store string `yaml:"store" env:"OCIS_CACHE_STORE;STORAGE_SYSTEM_CACHE_STORE;OCIS_CACHE_STORE_TYPE" desc:"Store implementation for the cache. Supported values are 'memory' (default), 'redis', 'redis-sentinel', 'nats-js', 'etcd' and 'noop'. See the text description for details."` - Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES;STORAGE_SYSTEM_CACHE_NODES;OCIS_CACHE_STORE_ADDRESS" desc:"A comma separated list of nodes to access the configured store. This has no effect when the 'memory' store is configured. Note that the behaviour how nodes are used is dependent on the library of the configured store."` - Database string `yaml:"database" env:"STORAGE_SYSTEM_CACHE_DATABASE" desc:"The database name the configured store should use."` + Store string `yaml:"store" env:"OCIS_CACHE_STORE;STORAGE_SYSTEM_CACHE_STORE" desc:"The type of the cache store. Supported values are: 'memory', 'ocmem', 'etcd', 'redis', 'redis-sentinel', 'nats-js', 'noop'. See the text description for details."` + Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES;STORAGE_SYSTEM_CACHE_NODES" desc:"A comma separated list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store."` + Database string `yaml:"database" env:"OCIS_CACHE_DATABASE" desc:"The database name the configured store should use."` + TTL time.Duration `yaml:"ttl" env:"OCIS_CACHE_TTL;STORAGE_SYSTEM_CACHE_TTL" desc:"Default time to live for user info in the user info cache. Only applied when access tokens has no expiration. The duration can be set as number followed by a unit identifier like s, m or h. Defaults to '10s' (10 seconds)."` + Size int `yaml:"size" env:"OCIS_CACHE_SIZE;STORAGE_SYSTEM_CACHE_SIZE" desc:"The maximum quantity of items in the user info cache. Only applies when store type 'ocmem' is configured. 
Defaults to 512."` } diff --git a/services/storage-system/pkg/revaconfig/config.go b/services/storage-system/pkg/revaconfig/config.go index 7d597c4ab3..1c89fb27a6 100644 --- a/services/storage-system/pkg/revaconfig/config.go +++ b/services/storage-system/pkg/revaconfig/config.go @@ -161,10 +161,16 @@ func metadataDrivers(cfg *config.Config) map[string]interface{} { "permissionssvc": cfg.GRPC.Addr, "max_acquire_lock_cycles": cfg.Drivers.OCIS.MaxAcquireLockCycles, "lock_cycle_duration_factor": cfg.Drivers.OCIS.LockCycleDurationFactor, + "statcache": map[string]interface{}{ + "cache_store": "noop", + "cache_database": "system", + }, "filemetadatacache": map[string]interface{}{ "cache_store": cfg.Cache.Store, "cache_nodes": cfg.Cache.Nodes, "cache_database": cfg.Cache.Database, + "cache_ttl": cfg.Cache.TTL, + "cache_size": cfg.Cache.Size, }, }, } diff --git a/services/storage-users/pkg/config/config.go b/services/storage-users/pkg/config/config.go index 71cd8e4fd6..fa529c15ec 100644 --- a/services/storage-users/pkg/config/config.go +++ b/services/storage-users/pkg/config/config.go @@ -23,18 +23,19 @@ type Config struct { SkipUserGroupsInToken bool `yaml:"skip_user_groups_in_token" env:"STORAGE_USERS_SKIP_USER_GROUPS_IN_TOKEN" desc:"Disables the loading of user's group memberships from the reva access token."` - Driver string `yaml:"driver" env:"STORAGE_USERS_DRIVER" desc:"The storage driver which should be used by the service. Defaults to 'ocis', Supported values are: 'ocis', 's3ng' and 'owncloudsql'. The 'ocis' driver stores all data (blob and meta data) in an POSIX compliant volume. The 's3ng' driver stores metadata in a POSIX compliant volume and uploads blobs to the s3 bucket."` - Drivers Drivers `yaml:"drivers"` - DataServerURL string `yaml:"data_server_url" env:"STORAGE_USERS_DATA_SERVER_URL" desc:"URL of the data server, needs to be reachable by the data gateway provided by the frontend service or the user if directly exposed."` - DataGatewayURL string `yaml:"data_gateway_url" env:"STORAGE_USERS_DATA_GATEWAY_URL" desc:"URL of the data gateway server"` - TransferExpires int64 `yaml:"transfer_expires" env:"STORAGE_USERS_TRANSFER_EXPIRES" desc:"the time after which the token for upload postprocessing expires"` - Events Events `yaml:"events"` - Cache Cache `yaml:"cache"` - MountID string `yaml:"mount_id" env:"STORAGE_USERS_MOUNT_ID" desc:"Mount ID of this storage."` - ExposeDataServer bool `yaml:"expose_data_server" env:"STORAGE_USERS_EXPOSE_DATA_SERVER" desc:"Exposes the data server directly to users and bypasses the data gateway. Ensure that the data server address is reachable by users."` - ReadOnly bool `yaml:"readonly" env:"STORAGE_USERS_READ_ONLY" desc:"Set this storage to be read-only."` - UploadExpiration int64 `yaml:"upload_expiration" env:"STORAGE_USERS_UPLOAD_EXPIRATION" desc:"Duration in seconds after which uploads will expire."` - Tasks Tasks `yaml:"tasks"` + Driver string `yaml:"driver" env:"STORAGE_USERS_DRIVER" desc:"The storage driver which should be used by the service. Defaults to 'ocis', Supported values are: 'ocis', 's3ng' and 'owncloudsql'. The 'ocis' driver stores all data (blob and meta data) in an POSIX compliant volume. 
The 's3ng' driver stores metadata in a POSIX compliant volume and uploads blobs to the s3 bucket."` + Drivers Drivers `yaml:"drivers"` + DataServerURL string `yaml:"data_server_url" env:"STORAGE_USERS_DATA_SERVER_URL" desc:"URL of the data server, needs to be reachable by the data gateway provided by the frontend service or the user if directly exposed."` + DataGatewayURL string `yaml:"data_gateway_url" env:"STORAGE_USERS_DATA_GATEWAY_URL" desc:"URL of the data gateway server"` + TransferExpires int64 `yaml:"transfer_expires" env:"STORAGE_USERS_TRANSFER_EXPIRES" desc:"the time after which the token for upload postprocessing expires"` + Events Events `yaml:"events"` + StatCache StatCache `yaml:"stat_cache"` + FilemetadataCache FilemetadataCache `yaml:"filemetadata_cache"` + MountID string `yaml:"mount_id" env:"STORAGE_USERS_MOUNT_ID" desc:"Mount ID of this storage."` + ExposeDataServer bool `yaml:"expose_data_server" env:"STORAGE_USERS_EXPOSE_DATA_SERVER" desc:"Exposes the data server directly to users and bypasses the data gateway. Ensure that the data server address is reachable by users."` + ReadOnly bool `yaml:"readonly" env:"STORAGE_USERS_READ_ONLY" desc:"Set this storage to be read-only."` + UploadExpiration int64 `yaml:"upload_expiration" env:"STORAGE_USERS_UPLOAD_EXPIRATION" desc:"Duration in seconds after which uploads will expire."` + Tasks Tasks `yaml:"tasks"` Supervised bool `yaml:"-"` Context context.Context `yaml:"-"` @@ -169,11 +170,22 @@ type Events struct { NumConsumers int `yaml:"num_consumers" env:"STORAGE_USERS_EVENTS_NUM_CONSUMERS" desc:"The amount of concurrent event consumers to start. Event consumers are used for post-processing files. Multiple consumers increase parallelisation, but will also increase CPU and memory demands. The setting has no effect when the STORAGE_USERS_OCIS_ASYNC_UPLOADS is set to false. The default and minimum value is 1."` } -// Cache holds cache config -type Cache struct { - Store string `yaml:"store" env:"OCIS_CACHE_STORE;STORAGE_USERS_CACHE_STORE;STORAGE_USERS_CACHE_STORE_TYPE" desc:"Store implementation for the cache. Supported values are 'memory' (default), 'redis', 'redis-sentinel', 'nats-js', and 'etcd'. See the text description for details."` - Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES;STORAGE_USERS_CACHE_STORE_NODES;OCIS_CACHE_STORE_ADDRESS;STORAGE_USERS_CACHE_STORE_ADDRESS;STORAGE_USERS_CACHE_NODES" desc:"A comma separated list of nodes to access the configured store. This has no effect when the 'memory' store is configured. Note that the behaviour how nodes are used is dependent on the library of the configured store."` - Database string `yaml:"database" env:"STORAGE_USERS_CACHE_DATABASE" desc:"The database name the configured store should use."` +// StatCache holds cache config +type StatCache struct { + Store string `yaml:"store" env:"OCIS_CACHE_STORE;STORAGE_USERS_STAT_CACHE_STORE" desc:"The type of the cache store. Supported values are: 'memory', 'ocmem', 'etcd', 'redis', 'redis-sentinel', 'nats-js', 'noop'. See the text description for details."` + Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES;STORAGE_USERS_STAT_CACHE_STORE_NODES" desc:"A comma separated list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. 
Note that the behaviour how nodes are used is dependent on the library of the configured store."` + Database string `yaml:"database" env:"OCIS_CACHE_DATABASE" desc:"The database name the configured store should use."` + TTL time.Duration `yaml:"ttl" env:"OCIS_CACHE_TTL;STORAGE_USERS_STAT_CACHE_TTL" desc:"Default time to live for user info in the user info cache. Only applied when access tokens has no expiration. The duration can be set as number followed by a unit identifier like s, m or h. Defaults to '10s' (10 seconds)."` + Size int `yaml:"size" env:"OCIS_CACHE_SIZE;STORAGE_USERS_STAT_CACHE_SIZE" desc:"The maximum quantity of items in the user info cache. Only applies when store type 'ocmem' is configured. Defaults to 512."` +} + +// FilemetadataCache holds cache config +type FilemetadataCache struct { + Store string `yaml:"store" env:"OCIS_CACHE_STORE;STORAGE_USERS_FILEMETADATA_CACHE_STORE" desc:"The type of the cache store. Supported values are: 'memory', 'ocmem', 'etcd', 'redis', 'redis-sentinel', 'nats-js', 'noop'. See the text description for details."` + Nodes []string `yaml:"nodes" env:"OCIS_CACHE_STORE_NODES;STORAGE_USERS_FILEMETADATA_CACHE_STORE_NODES" desc:"A comma separated list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store."` + Database string `yaml:"database" env:"OCIS_CACHE_DATABASE" desc:"The database name the configured store should use."` + TTL time.Duration `yaml:"ttl" env:"OCIS_CACHE_TTL;STORAGE_USERS_FILEMETADATA_CACHE_TTL" desc:"Default time to live for user info in the user info cache. Only applied when access tokens has no expiration. The duration can be set as number followed by a unit identifier like s, m or h. Defaults to '10s' (10 seconds)."` + Size int `yaml:"size" env:"OCIS_CACHE_SIZE;STORAGE_USERS_FILEMETADATA_CACHE_SIZE" desc:"The maximum quantity of items in the user info cache. Only applies when store type 'ocmem' is configured. 
Defaults to 512."` } // S3Driver is the storage driver configuration when using 's3' storage driver diff --git a/services/storage-users/pkg/config/defaults/defaultconfig.go b/services/storage-users/pkg/config/defaults/defaultconfig.go index 78f982fd20..3a75c2eedc 100644 --- a/services/storage-users/pkg/config/defaults/defaultconfig.go +++ b/services/storage-users/pkg/config/defaults/defaultconfig.go @@ -89,9 +89,13 @@ func DefaultConfig() *config.Config { ClusterID: "ocis-cluster", EnableTLS: false, }, - Cache: config.Cache{ + StatCache: config.StatCache{ Store: "memory", - Database: "users", + Database: "ocis", + }, + FilemetadataCache: config.FilemetadataCache{ + Store: "memory", + Database: "ocis", }, Tasks: config.Tasks{ PurgeTrashBin: config.PurgeTrashBin{ diff --git a/services/storage-users/pkg/revaconfig/config.go b/services/storage-users/pkg/revaconfig/config.go index cb072d8172..5fe55991e8 100644 --- a/services/storage-users/pkg/revaconfig/config.go +++ b/services/storage-users/pkg/revaconfig/config.go @@ -73,21 +73,27 @@ func StorageUsersConfigFromStruct(cfg *config.Config) map[string]interface{} { "nats_enable_tls": cfg.Events.EnableTLS, "data_txs": map[string]interface{}{ "simple": map[string]interface{}{ - "cache_store": cfg.Cache.Store, - "cache_nodes": cfg.Cache.Nodes, - "cache_database": cfg.Cache.Database, + "cache_store": cfg.StatCache.Store, + "cache_nodes": cfg.StatCache.Nodes, + "cache_database": cfg.StatCache.Database, + "cache_ttl": cfg.StatCache.TTL, + "cache_size": cfg.StatCache.Size, "cache_table": "stat", }, "spaces": map[string]interface{}{ - "cache_store": cfg.Cache.Store, - "cache_nodes": cfg.Cache.Nodes, - "cache_database": cfg.Cache.Database, + "cache_store": cfg.StatCache.Store, + "cache_nodes": cfg.StatCache.Nodes, + "cache_database": cfg.StatCache.Database, + "cache_ttl": cfg.StatCache.TTL, + "cache_size": cfg.StatCache.Size, "cache_table": "stat", }, "tus": map[string]interface{}{ - "cache_store": cfg.Cache.Store, - "cache_nodes": cfg.Cache.Nodes, - "cache_database": cfg.Cache.Database, + "cache_store": cfg.StatCache.Store, + "cache_nodes": cfg.StatCache.Nodes, + "cache_database": cfg.StatCache.Database, + "cache_ttl": cfg.StatCache.TTL, + "cache_size": cfg.StatCache.Size, "cache_table": "stat", }, }, diff --git a/services/storage-users/pkg/revaconfig/drivers.go b/services/storage-users/pkg/revaconfig/drivers.go index eb69199b19..234e79cb8c 100644 --- a/services/storage-users/pkg/revaconfig/drivers.go +++ b/services/storage-users/pkg/revaconfig/drivers.go @@ -127,14 +127,18 @@ func Ocis(cfg *config.Config) map[string]interface{} { "asyncfileuploads": cfg.Drivers.OCIS.AsyncUploads, "max_quota": cfg.Drivers.OCIS.MaxQuota, "statcache": map[string]interface{}{ - "cache_store": cfg.Cache.Store, - "cache_nodes": cfg.Cache.Nodes, - "cache_database": cfg.Cache.Database, + "cache_store": cfg.StatCache.Store, + "cache_nodes": cfg.StatCache.Nodes, + "cache_database": cfg.StatCache.Database, + "cache_ttl": cfg.StatCache.TTL, + "cache_size": cfg.StatCache.Size, }, "filemetadatacache": map[string]interface{}{ - "cache_store": cfg.Cache.Store, - "cache_nodes": cfg.Cache.Nodes, - "cache_database": cfg.Cache.Database, + "cache_store": cfg.FilemetadataCache.Store, + "cache_nodes": cfg.FilemetadataCache.Nodes, + "cache_database": cfg.FilemetadataCache.Database, + "cache_ttl": cfg.FilemetadataCache.TTL, + "cache_size": cfg.FilemetadataCache.Size, }, "events": map[string]interface{}{ "natsaddress": cfg.Events.Addr, @@ -170,14 +174,18 @@ func OcisNoEvents(cfg *config.Config) 
map[string]interface{} { "max_concurrency": cfg.Drivers.OCIS.MaxConcurrency, "max_quota": cfg.Drivers.OCIS.MaxQuota, "statcache": map[string]interface{}{ - "cache_store": cfg.Cache.Store, - "cache_nodes": cfg.Cache.Nodes, - "cache_database": cfg.Cache.Database, + "cache_store": cfg.StatCache.Store, + "cache_nodes": cfg.StatCache.Nodes, + "cache_database": cfg.StatCache.Database, + "cache_ttl": cfg.StatCache.TTL, + "cache_size": cfg.StatCache.Size, }, "filemetadatacache": map[string]interface{}{ - "cache_store": cfg.Cache.Store, - "cache_nodes": cfg.Cache.Nodes, - "cache_database": cfg.Cache.Database, + "cache_store": cfg.FilemetadataCache.Store, + "cache_nodes": cfg.FilemetadataCache.Nodes, + "cache_database": cfg.FilemetadataCache.Database, + "cache_ttl": cfg.FilemetadataCache.TTL, + "cache_size": cfg.FilemetadataCache.Size, }, } } @@ -218,14 +226,18 @@ func S3NG(cfg *config.Config) map[string]interface{} { "max_concurrency": cfg.Drivers.S3NG.MaxConcurrency, "asyncfileuploads": cfg.Drivers.OCIS.AsyncUploads, "statcache": map[string]interface{}{ - "cache_store": cfg.Cache.Store, - "cache_nodes": cfg.Cache.Nodes, - "cache_database": cfg.Cache.Database, + "cache_store": cfg.StatCache.Store, + "cache_nodes": cfg.StatCache.Nodes, + "cache_database": cfg.StatCache.Database, + "cache_ttl": cfg.StatCache.TTL, + "cache_size": cfg.StatCache.Size, }, "filemetadatacache": map[string]interface{}{ - "cache_store": cfg.Cache.Store, - "cache_nodes": cfg.Cache.Nodes, - "cache_database": cfg.Cache.Database, + "cache_store": cfg.FilemetadataCache.Store, + "cache_nodes": cfg.FilemetadataCache.Nodes, + "cache_database": cfg.FilemetadataCache.Database, + "cache_ttl": cfg.FilemetadataCache.TTL, + "cache_size": cfg.FilemetadataCache.Size, }, "events": map[string]interface{}{ "natsaddress": cfg.Events.Addr, @@ -265,14 +277,18 @@ func S3NGNoEvents(cfg *config.Config) map[string]interface{} { "max_concurrency": cfg.Drivers.S3NG.MaxConcurrency, "lock_cycle_duration_factor": cfg.Drivers.S3NG.LockCycleDurationFactor, "statcache": map[string]interface{}{ - "cache_store": cfg.Cache.Store, - "cache_nodes": cfg.Cache.Nodes, - "cache_database": cfg.Cache.Database, + "cache_store": cfg.StatCache.Store, + "cache_nodes": cfg.StatCache.Nodes, + "cache_database": cfg.StatCache.Database, + "cache_ttl": cfg.StatCache.TTL, + "cache_size": cfg.StatCache.Size, }, "filemetadatacache": map[string]interface{}{ - "cache_store": cfg.Cache.Store, - "cache_nodes": cfg.Cache.Nodes, - "cache_database": cfg.Cache.Database, + "cache_store": cfg.FilemetadataCache.Store, + "cache_nodes": cfg.FilemetadataCache.Nodes, + "cache_database": cfg.FilemetadataCache.Database, + "cache_ttl": cfg.FilemetadataCache.TTL, + "cache_size": cfg.FilemetadataCache.Size, }, } } diff --git a/services/userlog/pkg/command/server.go b/services/userlog/pkg/command/server.go index e7c0c579ee..af3b4de34c 100644 --- a/services/userlog/pkg/command/server.go +++ b/services/userlog/pkg/command/server.go @@ -7,10 +7,10 @@ import ( "github.com/cs3org/reva/v2/pkg/events" "github.com/cs3org/reva/v2/pkg/events/stream" "github.com/cs3org/reva/v2/pkg/rgrpc/todo/pool" + "github.com/cs3org/reva/v2/pkg/store" "github.com/oklog/run" "github.com/owncloud/ocis/v2/ocis-pkg/config/configlog" ogrpc "github.com/owncloud/ocis/v2/ocis-pkg/service/grpc" - "github.com/owncloud/ocis/v2/ocis-pkg/store" "github.com/owncloud/ocis/v2/ocis-pkg/version" ehsvc "github.com/owncloud/ocis/v2/protogen/gen/ocis/services/eventhistory/v0" "github.com/owncloud/ocis/v2/services/userlog/pkg/config" diff --git 
a/services/userlog/pkg/config/config.go b/services/userlog/pkg/config/config.go index c7243cf066..30a7350289 100644 --- a/services/userlog/pkg/config/config.go +++ b/services/userlog/pkg/config/config.go @@ -32,8 +32,8 @@ type Config struct { // Persistence configures the store to use type Persistence struct { - Store string `yaml:"store" env:"OCIS_PERSISTENT_STORE;USERLOG_STORE;USERLOG_STORE_TYPE" desc:"The type of the userlog store. Supported values are: 'memory', 'ocmem', 'etcd', 'redis', 'redis-sentinel', 'nats-js', 'noop'. See the text description for details."` - Nodes []string `yaml:"nodes" env:"OCIS_PERSISTENT_STORE_NODES;USERLOG_STORE_NODES;USERLOG_STORE_ADDRESSES" desc:"A comma separated list of nodes to access the configured store. This has no effect when 'in-memory' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store." deprecationVersion:"3.0" removalVersion:"4.0.0" deprecationInfo:"USERLOG_STORE_ADDRESSES name needs to be harmonized" deprecationReplacement:"USERLOG_STORE_NODES"` + Store string `yaml:"store" env:"OCIS_PERSISTENT_STORE;USERLOG_STORE" desc:"The type of the store. Supported values are: 'memory', 'ocmem', 'etcd', 'redis', 'redis-sentinel', 'nats-js', 'noop'. See the text description for details."` + Nodes []string `yaml:"nodes" env:"OCIS_PERSISTENT_STORE_NODES;USERLOG_STORE_NODES" desc:"A comma separated list of nodes to access the configured store. This has no effect when 'memory' or 'ocmem' stores are configured. Note that the behaviour how nodes are used is dependent on the library of the configured store."` Database string `yaml:"database" env:"USERLOG_STORE_DATABASE" desc:"The database name the configured store should use."` Table string `yaml:"table" env:"USERLOG_STORE_TABLE" desc:"The database table the store should use."` TTL time.Duration `yaml:"ttl" env:"OCIS_PERSISTENT_STORE_TTL;USERLOG_STORE_TTL" desc:"Time to live for events in the store. The duration can be set as number followed by a unit identifier like s, m or h. Defaults to '336h' (2 weeks)."` diff --git a/services/userlog/pkg/service/service_test.go b/services/userlog/pkg/service/service_test.go index 22a6e7831c..acecb66a8f 100644 --- a/services/userlog/pkg/service/service_test.go +++ b/services/userlog/pkg/service/service_test.go @@ -11,6 +11,7 @@ import ( rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/v2/pkg/events" + "github.com/cs3org/reva/v2/pkg/store" "github.com/cs3org/reva/v2/pkg/utils" cs3mocks "github.com/cs3org/reva/v2/tests/cs3mocks/mocks" "github.com/go-chi/chi/v5" @@ -18,7 +19,6 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/owncloud/ocis/v2/ocis-pkg/log" - "github.com/owncloud/ocis/v2/ocis-pkg/store" ehmsg "github.com/owncloud/ocis/v2/protogen/gen/ocis/messages/eventhistory/v0" ehsvc "github.com/owncloud/ocis/v2/protogen/gen/ocis/services/eventhistory/v0" "github.com/owncloud/ocis/v2/services/userlog/mocks" diff --git a/vendor/github.com/cs3org/reva/v2/cmd/revad/runtime/loader.go b/vendor/github.com/cs3org/reva/v2/cmd/revad/runtime/loader.go index af3c538277..486a00518c 100644 --- a/vendor/github.com/cs3org/reva/v2/cmd/revad/runtime/loader.go +++ b/vendor/github.com/cs3org/reva/v2/cmd/revad/runtime/loader.go @@ -43,7 +43,6 @@ import ( _ "github.com/cs3org/reva/v2/pkg/preferences/loader" _ "github.com/cs3org/reva/v2/pkg/publicshare/manager/loader" _ "github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/loader" - _ "github.com/cs3org/reva/v2/pkg/share/cache/loader" _ "github.com/cs3org/reva/v2/pkg/share/cache/warmup/loader" _ "github.com/cs3org/reva/v2/pkg/share/manager/loader" _ "github.com/cs3org/reva/v2/pkg/storage/favorite/loader" diff --git a/vendor/github.com/cs3org/reva/v2/internal/grpc/services/gateway/gateway.go b/vendor/github.com/cs3org/reva/v2/internal/grpc/services/gateway/gateway.go index eef5b21576..44d6d9524d 100644 --- a/vendor/github.com/cs3org/reva/v2/internal/grpc/services/gateway/gateway.go +++ b/vendor/github.com/cs3org/reva/v2/internal/grpc/services/gateway/gateway.go @@ -72,8 +72,11 @@ type config struct { CacheNodes []string `mapstructure:"cache_nodes"` CacheDatabase string `mapstructure:"cache_database"` CreateHomeCacheTTL int `mapstructure:"create_home_cache_ttl"` + CreateHomeCacheSize int `mapstructure:"create_home_cache_size"` ProviderCacheTTL int `mapstructure:"provider_cache_ttl"` + ProviderCacheSize int `mapstructure:"provider_cache_size"` StatCacheTTL int `mapstructure:"stat_cache_ttl"` + StatCacheSize int `mapstructure:"stat_cache_size"` UseCommonSpaceRootShareLogic bool `mapstructure:"use_common_space_root_share_logic"` } @@ -166,10 +169,10 @@ func New(m map[string]interface{}, ss *grpc.Server) (rgrpc.Service, error) { c: c, dataGatewayURL: *u, tokenmgr: tokenManager, - statCache: cache.GetStatCache(c.CacheStore, c.CacheNodes, c.CacheDatabase, "stat", time.Duration(c.StatCacheTTL)*time.Second), - providerCache: cache.GetProviderCache(c.CacheStore, c.CacheNodes, c.CacheDatabase, "provider", time.Duration(c.ProviderCacheTTL)*time.Second), - createHomeCache: cache.GetCreateHomeCache(c.CacheStore, c.CacheNodes, c.CacheDatabase, "createHome", time.Duration(c.CreateHomeCacheTTL)*time.Second), - createPersonalSpaceCache: cache.GetCreatePersonalSpaceCache(c.CacheStore, c.CacheNodes, c.CacheDatabase, "createPersonalSpace", time.Duration(c.CreateHomeCacheTTL)*time.Second), + statCache: cache.GetStatCache(c.CacheStore, c.CacheNodes, c.CacheDatabase, "stat", time.Duration(c.StatCacheTTL)*time.Second, c.StatCacheSize), + providerCache: cache.GetProviderCache(c.CacheStore, c.CacheNodes, c.CacheDatabase, "provider", time.Duration(c.ProviderCacheTTL)*time.Second, c.ProviderCacheSize), + createHomeCache: cache.GetCreateHomeCache(c.CacheStore, c.CacheNodes, c.CacheDatabase, "createHome", time.Duration(c.CreateHomeCacheTTL)*time.Second, c.CreateHomeCacheSize), + createPersonalSpaceCache: cache.GetCreatePersonalSpaceCache(c.CacheStore, c.CacheNodes, c.CacheDatabase, "createPersonalSpace", time.Duration(c.CreateHomeCacheTTL)*time.Second, c.CreateHomeCacheSize), } return s, nil diff --git 
a/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/copy.go b/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/copy.go index cc95983211..8fd20484e4 100644 --- a/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/copy.go +++ b/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/copy.go @@ -87,19 +87,18 @@ func (s *svc) handlePathCopy(w http.ResponseWriter, r *http.Request, ns string) return } - for _, r := range nameRules { - if !r.Test(src) { - w.WriteHeader(http.StatusBadRequest) - b, err := errors.Marshal(http.StatusBadRequest, "source failed naming rules", "") - errors.HandleWebdavError(appctx.GetLogger(ctx), w, b, err) - return - } - if !r.Test(dst) { - w.WriteHeader(http.StatusBadRequest) - b, err := errors.Marshal(http.StatusBadRequest, "destination failed naming rules", "") - errors.HandleWebdavError(appctx.GetLogger(ctx), w, b, err) - return - } + if err := ValidateName(src, s.nameValidators); err != nil { + w.WriteHeader(http.StatusBadRequest) + b, err := errors.Marshal(http.StatusBadRequest, "source failed naming rules", "") + errors.HandleWebdavError(appctx.GetLogger(ctx), w, b, err) + return + } + + if err := ValidateName(dst, s.nameValidators); err != nil { + w.WriteHeader(http.StatusBadRequest) + b, err := errors.Marshal(http.StatusBadRequest, "destination failed naming rules", "") + errors.HandleWebdavError(appctx.GetLogger(ctx), w, b, err) + return } dst = path.Join(ns, dst) diff --git a/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/mkcol.go b/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/mkcol.go index e99bc61733..9861fc672e 100644 --- a/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/mkcol.go +++ b/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/mkcol.go @@ -40,10 +40,8 @@ func (s *svc) handlePathMkcol(w http.ResponseWriter, r *http.Request, ns string) defer span.End() fn := path.Join(ns, r.URL.Path) - for _, r := range nameRules { - if !r.Test(fn) { - return http.StatusBadRequest, fmt.Errorf("invalid name rule") - } + if err := ValidateName(fn, s.nameValidators); err != nil { + return http.StatusBadRequest, err } sublog := appctx.GetLogger(ctx).With().Str("path", fn).Logger() diff --git a/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/move.go b/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/move.go index f85b7db617..34d20b8547 100644 --- a/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/move.go +++ b/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/move.go @@ -60,19 +60,18 @@ func (s *svc) handlePathMove(w http.ResponseWriter, r *http.Request, ns string) return } - for _, r := range nameRules { - if !r.Test(srcPath) { - w.WriteHeader(http.StatusBadRequest) - b, err := errors.Marshal(http.StatusBadRequest, "source failed naming rules", "") - errors.HandleWebdavError(appctx.GetLogger(ctx), w, b, err) - return - } - if !r.Test(dstPath) { - w.WriteHeader(http.StatusBadRequest) - b, err := errors.Marshal(http.StatusBadRequest, "destination naming rules", "") - errors.HandleWebdavError(appctx.GetLogger(ctx), w, b, err) - return - } + if err := ValidateName(srcPath, s.nameValidators); err != nil { + w.WriteHeader(http.StatusBadRequest) + b, err := errors.Marshal(http.StatusBadRequest, "source failed naming rules", "") + errors.HandleWebdavError(appctx.GetLogger(ctx), w, b, err) + return + } + + if err := 
ValidateName(dstPath, s.nameValidators); err != nil { + w.WriteHeader(http.StatusBadRequest) + b, err := errors.Marshal(http.StatusBadRequest, "destination naming rules", "") + errors.HandleWebdavError(appctx.GetLogger(ctx), w, b, err) + return } dstPath = path.Join(ns, dstPath) diff --git a/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/ocdav.go b/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/ocdav.go index bebc18070a..998cd56187 100644 --- a/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/ocdav.go +++ b/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/ocdav.go @@ -55,31 +55,6 @@ import ( // name is the Tracer name used to identify this instrumentation library. const tracerName = "ocdav" -var ( - nameRules = [...]nameRule{ - nameNotEmpty{}, - nameDoesNotContain{chars: "\f\r\n\\"}, - } -) - -type nameRule interface { - Test(name string) bool -} - -type nameNotEmpty struct{} - -func (r nameNotEmpty) Test(name string) bool { - return len(strings.TrimSpace(name)) > 0 -} - -type nameDoesNotContain struct { - chars string -} - -func (r nameDoesNotContain) Test(name string) bool { - return !strings.ContainsAny(name, r.chars) -} - func init() { global.Register("ocdav", New) } @@ -113,9 +88,17 @@ type Config struct { ProductName string `mapstructure:"product_name"` ProductVersion string `mapstructure:"product_version"` + NameValidation NameValidation `mapstructure:"validation"` + MachineAuthAPIKey string `mapstructure:"machine_auth_apikey"` } +// NameValidation is the validation configuration for file and folder names +type NameValidation struct { + InvalidChars []string `mapstructure:"invalid_chars"` + MaxLength int `mapstructure:"max_length"` +} + func (c *Config) init() { // note: default c.Prefix is an empty string c.GatewaySvc = sharedconf.GetGatewaySVC(c.GatewaySvc) @@ -147,6 +130,14 @@ func (c *Config) init() { if c.Edition == "" { c.Edition = "community" } + + if c.NameValidation.InvalidChars == nil { + c.NameValidation.InvalidChars = []string{"\f", "\r", "\n", "\\"} + } + + if c.NameValidation.MaxLength == 0 { + c.NameValidation.MaxLength = 255 + } } type svc struct { @@ -160,6 +151,7 @@ type svc struct { LockSystem LockSystem userIdentifierCache *ttlcache.Cache tracerProvider trace.TracerProvider + nameValidators []Validator } func (s *svc) Config() *Config { @@ -204,6 +196,9 @@ func New(m map[string]interface{}, log *zerolog.Logger) (global.Service, error) // NewWith returns a new ocdav service func NewWith(conf *Config, fm favorite.Manager, ls LockSystem, _ *zerolog.Logger, tp trace.TracerProvider, gwc gateway.GatewayAPIClient) (global.Service, error) { + // be safe - init the conf again + conf.init() + s := &svc{ c: conf, webDavHandler: new(WebDavHandler), @@ -217,6 +212,7 @@ func NewWith(conf *Config, fm favorite.Manager, ls LockSystem, _ *zerolog.Logger LockSystem: ls, userIdentifierCache: ttlcache.NewCache(), tracerProvider: tp, + nameValidators: ValidatorsFromConfig(conf), } _ = s.userIdentifierCache.SetTTL(60 * time.Second) diff --git a/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/put.go b/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/put.go index c4f4005547..36b151de25 100644 --- a/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/put.go +++ b/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/put.go @@ -23,6 +23,7 @@ import ( "io" "net/http" "path" + "path/filepath" "strconv" "strings" @@ -139,6 
+140,14 @@ func (s *svc) handlePut(ctx context.Context, w http.ResponseWriter, r *http.Requ return } + fn := filepath.Base(ref.Path) + if err := ValidateName(fn, s.nameValidators); err != nil { + w.WriteHeader(http.StatusBadRequest) + b, err := errors.Marshal(http.StatusBadRequest, err.Error(), "") + errors.HandleWebdavError(&log, w, b, err) + return + } + if length == 0 { tfRes, err := s.gwClient.TouchFile(ctx, &provider.TouchFileRequest{ Ref: ref, diff --git a/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/tus.go b/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/tus.go index ff4d58b04d..af5ea00e63 100644 --- a/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/tus.go +++ b/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/tus.go @@ -50,11 +50,9 @@ func (s *svc) handlePathTusPost(w http.ResponseWriter, r *http.Request, ns strin // read filename from metadata meta := tusd.ParseMetadataHeader(r.Header.Get(net.HeaderUploadMetadata)) - for _, r := range nameRules { - if !r.Test(meta["filename"]) { - w.WriteHeader(http.StatusPreconditionFailed) - return - } + if err := ValidateName(meta["filename"], s.nameValidators); err != nil { + w.WriteHeader(http.StatusPreconditionFailed) + return } // append filename to current dir @@ -76,11 +74,9 @@ func (s *svc) handleSpacesTusPost(w http.ResponseWriter, r *http.Request, spaceI // read filename from metadata meta := tusd.ParseMetadataHeader(r.Header.Get(net.HeaderUploadMetadata)) - for _, r := range nameRules { - if !r.Test(meta["filename"]) { - w.WriteHeader(http.StatusPreconditionFailed) - return - } + if err := ValidateName(meta["filename"], s.nameValidators); err != nil { + w.WriteHeader(http.StatusPreconditionFailed) + return } sublog := appctx.GetLogger(ctx).With().Str("spaceid", spaceID).Str("path", r.URL.Path).Logger() diff --git a/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/validation.go b/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/validation.go new file mode 100644 index 0000000000..859a9d65ea --- /dev/null +++ b/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/validation.go @@ -0,0 +1,63 @@ +package ocdav + +import ( + "errors" + "fmt" + "strings" +) + +// Validator validates strings +type Validator func(string) error + +// ValidatorsFromConfig returns the configured Validators +func ValidatorsFromConfig(c *Config) []Validator { + // we always want to exclude empty names + vals := []Validator{notEmpty()} + + // forbidden characters + vals = append(vals, doesNotContain(c.NameValidation.InvalidChars)) + + // max length + vals = append(vals, isShorterThan(c.NameValidation.MaxLength)) + + return vals +} + +// ValidateName will validate a file or folder name, returning an error when it is not accepted +func ValidateName(name string, validators []Validator) error { + for _, v := range validators { + if err := v(name); err != nil { + return fmt.Errorf("name validation failed: %w", err) + } + } + return nil +} + +func notEmpty() Validator { + return func(s string) error { + if strings.TrimSpace(s) == "" { + return errors.New("must not be empty") + } + return nil + } +} + +func doesNotContain(bad []string) Validator { + return func(s string) error { + for _, b := range bad { + if strings.Contains(s, b) { + return fmt.Errorf("must not contain %s", b) + } + } + return nil + } +} + +func isShorterThan(maxLength int) Validator { + return func(s string) error { + if len(s) > maxLength { + 
return fmt.Errorf("must be shorter than %d", maxLength) + } + return nil + } +} diff --git a/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocs/config/config.go b/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocs/config/config.go index 65f9770b3d..91da5fc04f 100644 --- a/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocs/config/config.go +++ b/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocs/config/config.go @@ -37,9 +37,12 @@ type Config struct { AdditionalInfoAttribute string `mapstructure:"additional_info_attribute"` CacheWarmupDriver string `mapstructure:"cache_warmup_driver"` CacheWarmupDrivers map[string]map[string]interface{} `mapstructure:"cache_warmup_drivers"` - ResourceInfoCacheDriver string `mapstructure:"resource_info_cache_type"` + ResourceInfoCacheStore string `mapstructure:"resource_info_cache_store"` + ResourceInfoCacheNodes []string `mapstructure:"resource_info_cache_nodes"` + ResourceInfoCacheDatabase string `mapstructure:"resource_info_cache_database"` + ResourceInfoCacheTable string `mapstructure:"resource_info_cache_table"` ResourceInfoCacheTTL int `mapstructure:"resource_info_cache_ttl"` - ResourceInfoCacheDrivers map[string]map[string]interface{} `mapstructure:"resource_info_caches"` + ResourceInfoCacheSize int `mapstructure:"resource_info_cache_size"` UserIdentifierCacheTTL int `mapstructure:"user_identifier_cache_ttl"` MachineAuthAPIKey string `mapstructure:"machine_auth_apikey"` SkipUpdatingExistingSharesMountpoints bool `mapstructure:"skip_updating_existing_shares_mountpoint"` diff --git a/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/shares.go b/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/shares.go index c2910690a2..adeeb03dee 100644 --- a/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/shares.go +++ b/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/shares.go @@ -52,9 +52,9 @@ import ( "github.com/cs3org/reva/v2/pkg/publicshare" "github.com/cs3org/reva/v2/pkg/rgrpc/todo/pool" "github.com/cs3org/reva/v2/pkg/share" - "github.com/cs3org/reva/v2/pkg/share/cache" - cachereg "github.com/cs3org/reva/v2/pkg/share/cache/registry" + sharecache "github.com/cs3org/reva/v2/pkg/share/cache" warmupreg "github.com/cs3org/reva/v2/pkg/share/cache/warmup/registry" + "github.com/cs3org/reva/v2/pkg/storage/cache" "github.com/cs3org/reva/v2/pkg/storage/utils/templates" "github.com/cs3org/reva/v2/pkg/storagespace" "github.com/cs3org/reva/v2/pkg/utils" @@ -83,8 +83,7 @@ type Handler struct { skipUpdatingExistingSharesMountpoints bool additionalInfoTemplate *template.Template userIdentifierCache *ttlcache.Cache - resourceInfoCache cache.ResourceInfoCache - resourceInfoCacheTTL time.Duration + resourceInfoCache cache.StatCache deniable bool resharing bool @@ -104,7 +103,7 @@ type ocsError struct { Message string } -func getCacheWarmupManager(c *config.Config) (cache.Warmup, error) { +func getCacheWarmupManager(c *config.Config) (sharecache.Warmup, error) { if f, ok := warmupreg.NewFuncs[c.CacheWarmupDriver]; ok { return f(c.CacheWarmupDrivers[c.CacheWarmupDriver]) } @@ -114,13 +113,6 @@ func getCacheWarmupManager(c *config.Config) (cache.Warmup, error) { // GatewayClientGetter is the function being used to retrieve a gateway client instance type GatewayClientGetter func() (gateway.GatewayAPIClient, error) -func 
getCacheManager(c *config.Config) (cache.ResourceInfoCache, error) { - if f, ok := cachereg.NewFuncs[c.ResourceInfoCacheDriver]; ok { - return f(c.ResourceInfoCacheDrivers[c.ResourceInfoCacheDriver]) - } - return nil, fmt.Errorf("driver not found: %s", c.ResourceInfoCacheDriver) -} - // Init initializes this and any contained handlers func (h *Handler) Init(c *config.Config) { h.gatewayAddr = c.GatewaySvc @@ -132,19 +124,14 @@ func (h *Handler) Init(c *config.Config) { h.skipUpdatingExistingSharesMountpoints = c.SkipUpdatingExistingSharesMountpoints h.additionalInfoTemplate, _ = template.New("additionalInfo").Parse(c.AdditionalInfoAttribute) - h.resourceInfoCacheTTL = time.Second * time.Duration(c.ResourceInfoCacheTTL) h.userIdentifierCache = ttlcache.NewCache() _ = h.userIdentifierCache.SetTTL(time.Second * time.Duration(c.UserIdentifierCacheTTL)) h.deniable = c.EnableDenials h.resharing = resharing(c) - cache, err := getCacheManager(c) - if err == nil { - h.resourceInfoCache = cache - } - - if h.resourceInfoCacheTTL > 0 { + h.resourceInfoCache = cache.GetStatCache(c.ResourceInfoCacheStore, c.ResourceInfoCacheNodes, c.ResourceInfoCacheDatabase, "stat", time.Duration(c.ResourceInfoCacheTTL)*time.Second, c.ResourceInfoCacheSize) + if c.CacheWarmupDriver != "" { cwm, err := getCacheWarmupManager(c) if err == nil { go h.startCacheWarmup(cwm) @@ -159,15 +146,15 @@ func (h *Handler) InitWithGetter(c *config.Config, clientGetter GatewayClientGet h.getClient = clientGetter } -func (h *Handler) startCacheWarmup(c cache.Warmup) { +func (h *Handler) startCacheWarmup(c sharecache.Warmup) { time.Sleep(2 * time.Second) infos, err := c.GetResourceInfos() if err != nil { return } for _, r := range infos { - key := storagespace.FormatResourceID(*r.Id) - _ = h.resourceInfoCache.SetWithExpire(key, r, h.resourceInfoCacheTTL) + key := h.resourceInfoCache.GetKey(r.Owner, &provider.Reference{ResourceId: r.Id}, []string{}, []string{}) + _ = h.resourceInfoCache.PushToCache(key, r) } } @@ -781,6 +768,10 @@ func (h *Handler) updateShare(w http.ResponseWriter, r *http.Request, shareID st return } + if currentUser, ok := ctxpkg.ContextGetUser(ctx); ok { + h.resourceInfoCache.RemoveStat(currentUser.Id, shareR.Share.ResourceId) + } + share, err := conversions.CS3Share2ShareData(ctx, uRes.Share) if err != nil { response.WriteOCSError(w, r, response.MetaServerError.StatusCode, "error mapping share data", err) @@ -1350,63 +1341,45 @@ func (h *Handler) getAdditionalInfoAttribute(ctx context.Context, u *userIdentif } func (h *Handler) getResourceInfoByReference(ctx context.Context, client gateway.GatewayAPIClient, ref *provider.Reference) (*provider.ResourceInfo, *rpc.Status, error) { - var key string - if ref.ResourceId == nil { - // This is a path based reference - key = ref.Path - } else { - var err error - key, err = storagespace.FormatReference(ref) - if err != nil { - return nil, nil, err - } - } - return h.getResourceInfo(ctx, client, key, ref) + return h.getResourceInfo(ctx, client, ref) } func (h *Handler) getResourceInfoByID(ctx context.Context, client gateway.GatewayAPIClient, id *provider.ResourceId) (*provider.ResourceInfo, *rpc.Status, error) { - return h.getResourceInfo(ctx, client, storagespace.FormatResourceID(*id), &provider.Reference{ResourceId: id}) + return h.getResourceInfo(ctx, client, &provider.Reference{ResourceId: id}) } // getResourceInfo retrieves the resource info to a target. // This method utilizes caching if it is enabled. 
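For orientation, the cache-first lookup that the refactored function below implements boils down to roughly the following sketch; the helper name statWithCache and the error handling are illustrative only and not part of this patch:

```
package sketch

import (
	"context"
	"errors"

	gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1"
	userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
	rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1"
	provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"

	"github.com/cs3org/reva/v2/pkg/storage/cache"
)

// statWithCache tries the stat cache first and only falls back to a gateway Stat call on a miss.
func statWithCache(ctx context.Context, gwc gateway.GatewayAPIClient, c cache.StatCache, u *userpb.UserId, ref *provider.Reference) (*provider.ResourceInfo, error) {
	key := c.GetKey(u, ref, []string{}, []string{})

	info := &provider.ResourceInfo{}
	if err := c.PullFromCache(key, info); err == nil {
		return info, nil // cache hit, no gateway round trip needed
	}

	res, err := gwc.Stat(ctx, &provider.StatRequest{Ref: ref})
	if err != nil {
		return nil, err
	}
	if res.GetStatus().GetCode() != rpc.Code_CODE_OK {
		return nil, errors.New(res.GetStatus().GetMessage())
	}

	// Best effort: a failed cache write only costs an extra stat later.
	_ = c.PushToCache(key, *res.GetInfo())
	return res.GetInfo(), nil
}
```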
-func (h *Handler) getResourceInfo(ctx context.Context, client gateway.GatewayAPIClient, key string, ref *provider.Reference) (*provider.ResourceInfo, *rpc.Status, error) { +func (h *Handler) getResourceInfo(ctx context.Context, client gateway.GatewayAPIClient, ref *provider.Reference) (*provider.ResourceInfo, *rpc.Status, error) { logger := appctx.GetLogger(ctx) - - var pinfo *provider.ResourceInfo - var status *rpc.Status - var err error - var foundInCache bool - if h.resourceInfoCacheTTL > 0 && h.resourceInfoCache != nil { - if pinfo, err = h.resourceInfoCache.Get(key); err == nil { - logger.Debug().Msgf("cache hit for resource %+v", key) - status = &rpc.Status{Code: rpc.Code_CODE_OK} - foundInCache = true - } - } - if !foundInCache { - logger.Debug().Msgf("cache miss for resource %+v, statting", key) - statReq := &provider.StatRequest{ - Ref: ref, - } - - statRes, err := client.Stat(ctx, statReq) - if err != nil { - return nil, nil, err - } - - if statRes.Status.Code != rpc.Code_CODE_OK { - return nil, statRes.Status, nil - } - - pinfo = statRes.GetInfo() - status = statRes.Status - if h.resourceInfoCacheTTL > 0 { - _ = h.resourceInfoCache.SetWithExpire(key, pinfo, h.resourceInfoCacheTTL) + key := "" + if currentUser, ok := ctxpkg.ContextGetUser(ctx); ok { + key = h.resourceInfoCache.GetKey(currentUser.Id, ref, []string{}, []string{}) + pinfo := &provider.ResourceInfo{} + if err := h.resourceInfoCache.PullFromCache(key, pinfo); err == nil { + return pinfo, &rpc.Status{Code: rpc.Code_CODE_OK}, nil } } - return pinfo, status, nil + logger.Debug().Msgf("cache miss for resource %+v, statting", ref) + statReq := &provider.StatRequest{ + Ref: ref, + } + + statRes, err := client.Stat(ctx, statReq) + if err != nil { + return nil, nil, err + } + + if statRes.Status.Code != rpc.Code_CODE_OK { + return nil, statRes.Status, nil + } + + if key != "" { + _ = h.resourceInfoCache.PushToCache(key, *statRes.Info) + } + + return statRes.Info, statRes.Status, nil } func (h *Handler) createCs3Share(ctx context.Context, w http.ResponseWriter, r *http.Request, client gateway.GatewayAPIClient, req *collaboration.CreateShareRequest) (*collaboration.Share, *ocsError) { diff --git a/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/user.go b/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/user.go index 5dab9dfddf..a435252881 100644 --- a/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/user.go +++ b/vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/user.go @@ -206,6 +206,9 @@ func (h *Handler) removeUserShare(w http.ResponseWriter, r *http.Request, shareI response.WriteOCSError(w, r, response.MetaServerError.StatusCode, "grpc delete share request failed", err) return } + if currentUser, ok := ctxpkg.ContextGetUser(ctx); ok { + h.resourceInfoCache.RemoveStat(currentUser.Id, getShareResp.Share.ResourceId) + } response.WriteOCSSuccess(w, r, data) } diff --git a/vendor/github.com/cs3org/reva/v2/pkg/micro/ocdav/option.go b/vendor/github.com/cs3org/reva/v2/pkg/micro/ocdav/option.go index 939b75c0ca..85f847a004 100644 --- a/vendor/github.com/cs3org/reva/v2/pkg/micro/ocdav/option.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/micro/ocdav/option.go @@ -307,3 +307,17 @@ func AllowedHeaders(val []string) Option { o.AllowedHeaders = val } } + +// ItemNameInvalidChars provides a function to set forbidden characters in file or folder 
names +func ItemNameInvalidChars(chars []string) Option { + return func(o *Options) { + o.config.NameValidation.InvalidChars = chars + } +} + +// ItemNameMaxLength provides a function to set the maximum length of a file or folder name +func ItemNameMaxLength(i int) Option { + return func(o *Options) { + o.config.NameValidation.MaxLength = i + } +} diff --git a/vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/simple/simple.go b/vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/simple/simple.go index 13f2bfbe31..5e47c9a15a 100644 --- a/vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/simple/simple.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/simple/simple.go @@ -33,6 +33,7 @@ import ( "github.com/cs3org/reva/v2/pkg/events" "github.com/cs3org/reva/v2/pkg/rhttp/datatx" "github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/registry" + "github.com/cs3org/reva/v2/pkg/rhttp/datatx/metrics" "github.com/cs3org/reva/v2/pkg/rhttp/datatx/utils/download" "github.com/cs3org/reva/v2/pkg/storage" "github.com/cs3org/reva/v2/pkg/storage/cache" @@ -44,21 +45,14 @@ func init() { registry.Register("simple", New) } -type config struct { - CacheStore string `mapstructure:"cache_store"` - CacheNodes []string `mapstructure:"cache_nodes"` - CacheDatabase string `mapstructure:"cache_database"` - CacheTable string `mapstructure:"cache_table"` -} - type manager struct { - conf *config + conf *cache.Config publisher events.Publisher statCache cache.StatCache } -func parseConfig(m map[string]interface{}) (*config, error) { - c := &config{} +func parseConfig(m map[string]interface{}) (*cache.Config, error) { + c := &cache.Config{} if err := mapstructure.Decode(m, c); err != nil { err = errors.Wrap(err, "error decoding conf") return nil, err @@ -76,7 +70,7 @@ func New(m map[string]interface{}, publisher events.Publisher) (datatx.DataTX, e return &manager{ conf: c, publisher: publisher, - statCache: cache.GetStatCache(c.CacheStore, c.CacheNodes, c.CacheDatabase, c.CacheTable, 0), + statCache: cache.GetStatCache(c.Store, c.Nodes, c.Database, c.Table, time.Duration(c.TTL)*time.Second, c.Size), }, nil } @@ -87,8 +81,18 @@ func (m *manager) Handler(fs storage.FS) (http.Handler, error) { switch r.Method { case "GET", "HEAD": + if r.Method == "GET" { + metrics.DownloadsActive.Add(1) + defer func() { + metrics.DownloadsActive.Sub(1) + }() + } download.GetOrHeadFile(w, r, fs, "") case "PUT": + metrics.UploadsActive.Add(1) + defer func() { + metrics.UploadsActive.Sub(1) + }() fn := r.URL.Path defer r.Body.Close() diff --git a/vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/spaces/spaces.go b/vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/spaces/spaces.go index b4ec4bddef..f16f656ec9 100644 --- a/vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/spaces/spaces.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/spaces/spaces.go @@ -32,6 +32,7 @@ import ( "github.com/cs3org/reva/v2/pkg/events" "github.com/cs3org/reva/v2/pkg/rhttp/datatx" "github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/registry" + "github.com/cs3org/reva/v2/pkg/rhttp/datatx/metrics" "github.com/cs3org/reva/v2/pkg/rhttp/datatx/utils/download" "github.com/cs3org/reva/v2/pkg/rhttp/router" "github.com/cs3org/reva/v2/pkg/storage" @@ -46,21 +47,14 @@ func init() { registry.Register("spaces", New) } -type config struct { - CacheStore string `mapstructure:"cache_store"` - CacheNodes []string `mapstructure:"cache_nodes"` - CacheDatabase string `mapstructure:"cache_database"` - 
CacheTable string `mapstructure:"cache_table"` -} - type manager struct { - conf *config + conf *cache.Config publisher events.Publisher statCache cache.StatCache } -func parseConfig(m map[string]interface{}) (*config, error) { - c := &config{} +func parseConfig(m map[string]interface{}) (*cache.Config, error) { + c := &cache.Config{} if err := mapstructure.Decode(m, c); err != nil { err = errors.Wrap(err, "error decoding conf") return nil, err @@ -78,7 +72,7 @@ func New(m map[string]interface{}, publisher events.Publisher) (datatx.DataTX, e return &manager{ conf: c, publisher: publisher, - statCache: cache.GetStatCache(c.CacheStore, c.CacheNodes, c.CacheDatabase, c.CacheTable, 0), + statCache: cache.GetStatCache(c.Store, c.Nodes, c.Database, c.Table, time.Duration(c.TTL)*time.Second, c.Size), }, nil } @@ -92,8 +86,19 @@ func (m *manager) Handler(fs storage.FS) (http.Handler, error) { switch r.Method { case "GET", "HEAD": + if r.Method == "GET" { + metrics.DownloadsActive.Add(1) + defer func() { + metrics.DownloadsActive.Sub(1) + }() + } download.GetOrHeadFile(w, r, fs, spaceID) case "PUT": + metrics.UploadsActive.Add(1) + defer func() { + metrics.UploadsActive.Sub(1) + }() + // make a clean relative path fn := path.Clean(strings.TrimLeft(r.URL.Path, "/")) defer r.Body.Close() diff --git a/vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/tus/tus.go b/vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/tus/tus.go index c7326ba231..0a5a626d66 100644 --- a/vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/tus/tus.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/tus/tus.go @@ -24,6 +24,7 @@ import ( "net/http" "path" "path/filepath" + "time" "github.com/pkg/errors" tusd "github.com/tus/tusd/pkg/handler" @@ -36,6 +37,7 @@ import ( "github.com/cs3org/reva/v2/pkg/events" "github.com/cs3org/reva/v2/pkg/rhttp/datatx" "github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/registry" + "github.com/cs3org/reva/v2/pkg/rhttp/datatx/metrics" "github.com/cs3org/reva/v2/pkg/storage" "github.com/cs3org/reva/v2/pkg/storage/cache" "github.com/cs3org/reva/v2/pkg/utils" @@ -46,21 +48,14 @@ func init() { registry.Register("tus", New) } -type config struct { - CacheStore string `mapstructure:"cache_store"` - CacheNodes []string `mapstructure:"cache_nodes"` - CacheDatabase string `mapstructure:"cache_database"` - CacheTable string `mapstructure:"cache_table"` -} - type manager struct { - conf *config + conf *cache.Config publisher events.Publisher statCache cache.StatCache } -func parseConfig(m map[string]interface{}) (*config, error) { - c := &config{} +func parseConfig(m map[string]interface{}) (*cache.Config, error) { + c := &cache.Config{} if err := mapstructure.Decode(m, c); err != nil { err = errors.Wrap(err, "error decoding conf") return nil, err @@ -77,7 +72,7 @@ func New(m map[string]interface{}, publisher events.Publisher) (datatx.DataTX, e return &manager{ conf: c, publisher: publisher, - statCache: cache.GetStatCache(c.CacheStore, c.CacheNodes, c.CacheDatabase, c.CacheTable, 0), + statCache: cache.GetStatCache(c.Store, c.Nodes, c.Database, c.Table, time.Duration(c.TTL)*time.Second, c.Size), }, nil } @@ -144,17 +139,29 @@ func (m *manager) Handler(fs storage.FS) (http.Handler, error) { switch method { case "POST": + metrics.UploadsActive.Add(1) + defer func() { + metrics.UploadsActive.Sub(1) + }() // set etag, mtime and file id handler.PostFile(w, r) case "HEAD": handler.HeadFile(w, r) case "PATCH": + metrics.UploadsActive.Add(1) + defer func() { + 
metrics.UploadsActive.Sub(1) + }() // set etag, mtime and file id setExpiresHeader(fs, w, r) handler.PatchFile(w, r) case "DELETE": handler.DelFile(w, r) case "GET": + metrics.DownloadsActive.Add(1) + defer func() { + metrics.DownloadsActive.Sub(1) + }() // NOTE: this is breaking change - allthought it does not seem to be used // We can make a switch here depending on some header value if that is needed // download.GetOrHeadFile(w, r, fs, "") diff --git a/vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/metrics/metrics.go b/vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/metrics/metrics.go new file mode 100644 index 0000000000..f090a04c33 --- /dev/null +++ b/vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/metrics/metrics.go @@ -0,0 +1,20 @@ +// Package metrics provides prometheus metrics for the data managers.. +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +var ( + // DownloadsActive is the number of active downloads + DownloadsActive = promauto.NewGauge(prometheus.GaugeOpts{ + Name: "reva_download_active", + Help: "Number of active downloads", + }) + // UploadsActive is the number of active uploads + UploadsActive = promauto.NewGauge(prometheus.GaugeOpts{ + Name: "reva_upload_active", + Help: "Number of active uploads", + }) +) diff --git a/vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/utils/download/download.go b/vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/utils/download/download.go index 3e9e1e8342..c40b55f9ab 100644 --- a/vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/utils/download/download.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/utils/download/download.go @@ -213,7 +213,6 @@ func GetOrHeadFile(w http.ResponseWriter, r *http.Request, fs storage.FS, spaceI sublog.Error().Int64("copied", c).Int64("size", sendSize).Msg("copied vs size mismatch") } } - } func handleError(w http.ResponseWriter, log *zerolog.Logger, err error, action string) { diff --git a/vendor/github.com/cs3org/reva/v2/pkg/share/cache/cache.go b/vendor/github.com/cs3org/reva/v2/pkg/share/cache/cache.go index a17791630c..8e2023873b 100644 --- a/vendor/github.com/cs3org/reva/v2/pkg/share/cache/cache.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/share/cache/cache.go @@ -19,8 +19,6 @@ package cache import ( - "time" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" ) @@ -28,11 +26,3 @@ import ( type Warmup interface { GetResourceInfos() ([]*provider.ResourceInfo, error) } - -// ResourceInfoCache is the interface to implement caches for resource infos -type ResourceInfoCache interface { - Get(key string) (*provider.ResourceInfo, error) - GetKeys(keys []string) ([]*provider.ResourceInfo, error) - Set(key string, info *provider.ResourceInfo) error - SetWithExpire(key string, info *provider.ResourceInfo, expiration time.Duration) error -} diff --git a/vendor/github.com/cs3org/reva/v2/pkg/share/cache/loader/loader.go b/vendor/github.com/cs3org/reva/v2/pkg/share/cache/loader/loader.go deleted file mode 100644 index 62708bb89b..0000000000 --- a/vendor/github.com/cs3org/reva/v2/pkg/share/cache/loader/loader.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2018-2021 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package loader - -import ( - // Load share cache drivers. - _ "github.com/cs3org/reva/v2/pkg/share/cache/memory" - _ "github.com/cs3org/reva/v2/pkg/share/cache/redis" - // Add your own here -) diff --git a/vendor/github.com/cs3org/reva/v2/pkg/share/cache/memory/memory.go b/vendor/github.com/cs3org/reva/v2/pkg/share/cache/memory/memory.go deleted file mode 100644 index 2467cc522c..0000000000 --- a/vendor/github.com/cs3org/reva/v2/pkg/share/cache/memory/memory.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2018-2021 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package memory - -import ( - "time" - - "github.com/bluele/gcache" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/v2/pkg/share/cache" - "github.com/cs3org/reva/v2/pkg/share/cache/registry" - "github.com/mitchellh/mapstructure" - "github.com/pkg/errors" -) - -func init() { - registry.Register("memory", New) -} - -type config struct { - CacheSize int `mapstructure:"cache_size"` -} - -type manager struct { - cache gcache.Cache -} - -// New returns an implementation of a resource info cache that stores the objects in memory -func New(m map[string]interface{}) (cache.ResourceInfoCache, error) { - c := &config{} - if err := mapstructure.Decode(m, c); err != nil { - return nil, errors.Wrap(err, "error decoding conf") - } - if c.CacheSize == 0 { - c.CacheSize = 10000 - } - - return &manager{ - cache: gcache.New(c.CacheSize).LFU().Build(), - }, nil -} - -func (m *manager) Get(key string) (*provider.ResourceInfo, error) { - infoIf, err := m.cache.Get(key) - if err != nil { - return nil, err - } - return infoIf.(*provider.ResourceInfo), nil -} - -func (m *manager) GetKeys(keys []string) ([]*provider.ResourceInfo, error) { - infos := make([]*provider.ResourceInfo, len(keys)) - for i, key := range keys { - if infoIf, err := m.cache.Get(key); err == nil { - infos[i] = infoIf.(*provider.ResourceInfo) - } - } - return infos, nil -} - -func (m *manager) Set(key string, info *provider.ResourceInfo) error { - return m.cache.Set(key, info) -} - -func (m *manager) SetWithExpire(key string, info *provider.ResourceInfo, expiration time.Duration) error { - return m.cache.SetWithExpire(key, info, expiration) -} diff --git a/vendor/github.com/cs3org/reva/v2/pkg/share/cache/redis/redis.go b/vendor/github.com/cs3org/reva/v2/pkg/share/cache/redis/redis.go deleted file mode 100644 index d361f65528..0000000000 --- a/vendor/github.com/cs3org/reva/v2/pkg/share/cache/redis/redis.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2018-2021 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package redis - -import ( - "encoding/json" - "time" - - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/v2/pkg/share/cache" - "github.com/cs3org/reva/v2/pkg/share/cache/registry" - "github.com/gomodule/redigo/redis" - "github.com/mitchellh/mapstructure" - "github.com/pkg/errors" -) - -func init() { - registry.Register("redis", New) -} - -type config struct { - RedisAddress string `mapstructure:"redis_address"` - RedisUsername string `mapstructure:"redis_username"` - RedisPassword string `mapstructure:"redis_password"` -} - -type manager struct { - redisPool *redis.Pool -} - -// New returns an implementation of a resource info cache that stores the objects in a redis cluster -func New(m map[string]interface{}) (cache.ResourceInfoCache, error) { - c := &config{} - if err := mapstructure.Decode(m, c); err != nil { - return nil, errors.Wrap(err, "error decoding conf") - } - - if c.RedisAddress == "" { - c.RedisAddress = "localhost:6379" - } - - pool := &redis.Pool{ - MaxIdle: 50, - MaxActive: 1000, - IdleTimeout: 240 * time.Second, - - Dial: func() (redis.Conn, error) { - var opts []redis.DialOption - if c.RedisUsername != "" { - opts = append(opts, redis.DialUsername(c.RedisUsername)) - } - if c.RedisPassword != "" { - opts = append(opts, redis.DialPassword(c.RedisPassword)) - } - - c, err := redis.Dial("tcp", c.RedisAddress, opts...) - if err != nil { - return nil, err - } - return c, err - }, - - TestOnBorrow: func(c redis.Conn, t time.Time) error { - _, err := c.Do("PING") - return err - }, - } - - return &manager{ - redisPool: pool, - }, nil -} - -func (m *manager) Get(key string) (*provider.ResourceInfo, error) { - infos, err := m.getVals([]string{key}) - if err != nil { - return nil, err - } - return infos[0], nil -} - -func (m *manager) GetKeys(keys []string) ([]*provider.ResourceInfo, error) { - return m.getVals(keys) -} - -func (m *manager) Set(key string, info *provider.ResourceInfo) error { - return m.setVal(key, info, -1) -} - -func (m *manager) SetWithExpire(key string, info *provider.ResourceInfo, expiration time.Duration) error { - return m.setVal(key, info, int(expiration.Seconds())) -} - -func (m *manager) setVal(key string, info *provider.ResourceInfo, expiration int) error { - conn := m.redisPool.Get() - defer conn.Close() - if conn != nil { - encodedInfo, err := json.Marshal(&info) - if err != nil { - return err - } - - args := []interface{}{key, encodedInfo} - if expiration != -1 { - args = append(args, "EX", expiration) - } - - if _, err := conn.Do("SET", args); err != nil { - return err - } - return nil - } - return errors.New("cache: unable to get connection from redis pool") -} - -func (m *manager) getVals(keys []string) ([]*provider.ResourceInfo, error) { - conn := m.redisPool.Get() - defer conn.Close() - - if conn != nil { - vals, err := redis.Strings(conn.Do("MGET", keys)) - if err != nil { - return nil, err - } - - infos := make([]*provider.ResourceInfo, len(keys)) - for i, v := range vals { - if v != "" { - if err = json.Unmarshal([]byte(v), &infos[i]); err != nil { - infos[i] = nil - } - } - } - return infos, nil - } - return nil, errors.New("cache: unable to get connection from redis pool") -} diff --git a/vendor/github.com/cs3org/reva/v2/pkg/share/cache/registry/registry.go b/vendor/github.com/cs3org/reva/v2/pkg/share/cache/registry/registry.go deleted file mode 100644 index fe97a152a3..0000000000 --- a/vendor/github.com/cs3org/reva/v2/pkg/share/cache/registry/registry.go +++ /dev/null @@ -1,34 +0,0 @@ -// 
Copyright 2018-2021 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -package registry - -import "github.com/cs3org/reva/v2/pkg/share/cache" - -// NewFunc is the function that cache implementations -// should register at init time. -type NewFunc func(map[string]interface{}) (cache.ResourceInfoCache, error) - -// NewFuncs is a map containing all the registered cache implementations. -var NewFuncs = map[string]NewFunc{} - -// Register registers a new cache function. -// Not safe for concurrent use. Safe for use from package init. -func Register(name string, f NewFunc) { - NewFuncs[name] = f -} diff --git a/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/cache.go b/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/cache.go index 98896ad7f0..cf6a7a12fe 100644 --- a/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/cache.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/cache.go @@ -26,11 +26,7 @@ import ( userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - natsjs "github.com/go-micro/plugins/v4/store/nats-js" - "github.com/go-micro/plugins/v4/store/redis" - redisopts "github.com/go-redis/redis/v8" - "github.com/nats-io/nats.go" - microetcd "github.com/owncloud/ocis/v2/ocis-pkg/store/etcd" + "github.com/cs3org/reva/v2/pkg/store" "github.com/shamaton/msgpack/v2" microstore "go-micro.dev/v4/store" ) @@ -45,7 +41,20 @@ var ( mutex sync.Mutex ) +// Config contains the configuration for a cache +type Config struct { + Store string `mapstructure:"cache_store"` + Nodes []string `mapstructure:"cache_nodes"` + Database string `mapstructure:"cache_database"` + Table string `mapstructure:"cache_table"` + TTL int `mapstructure:"cache_ttl"` + Size int `mapstructure:"cache_size"` +} + // Cache handles key value operations on caches +// It, and the interfaces derived from it, are currently being used +// for building caches around go-micro stores, encoding the data +// in the messagepack format.
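The new Config struct above replaces the per-package cache option structs that carried CacheStore/CacheNodes/CacheDatabase/CacheTable fields. A minimal sketch, not part of this patch, of turning such a config into a StatCache; the store type, node address and database names are placeholders:

```
package sketch

import (
	"time"

	"github.com/cs3org/reva/v2/pkg/storage/cache"
)

func newStatCacheFromConfig() cache.StatCache {
	cfg := cache.Config{
		Store:    "memory",                   // placeholder, use the store type configured for the deployment
		Nodes:    []string{"127.0.0.1:9233"}, // only relevant for networked stores such as nats-js
		Database: "reva",
		Table:    "stat",
		TTL:      300, // seconds, converted to a time.Duration by the callers
		Size:     0,   // 0 leaves the sizing to the store implementation
	}
	return cache.GetStatCache(cfg.Store, cfg.Nodes, cfg.Database, cfg.Table, time.Duration(cfg.TTL)*time.Second, cfg.Size)
}
```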
type Cache interface { PullFromCache(key string, dest interface{}) error PushToCache(key string, src interface{}) error @@ -89,65 +98,65 @@ type FileMetadataCache interface { // GetStatCache will return an existing StatCache for the given store, nodes, database and table // If it does not exist yet it will be created, different TTLs are ignored -func GetStatCache(cacheStore string, cacheNodes []string, database, table string, ttl time.Duration) StatCache { +func GetStatCache(cacheStore string, cacheNodes []string, database, table string, ttl time.Duration, size int) StatCache { mutex.Lock() defer mutex.Unlock() key := strings.Join(append(append([]string{cacheStore}, cacheNodes...), database, table), ":") if statCaches[key] == nil { - statCaches[key] = NewStatCache(cacheStore, cacheNodes, database, table, ttl) + statCaches[key] = NewStatCache(cacheStore, cacheNodes, database, table, ttl, size) } return statCaches[key] } // GetProviderCache will return an existing ProviderCache for the given store, nodes, database and table // If it does not exist yet it will be created, different TTLs are ignored -func GetProviderCache(cacheStore string, cacheNodes []string, database, table string, ttl time.Duration) ProviderCache { +func GetProviderCache(cacheStore string, cacheNodes []string, database, table string, ttl time.Duration, size int) ProviderCache { mutex.Lock() defer mutex.Unlock() key := strings.Join(append(append([]string{cacheStore}, cacheNodes...), database, table), ":") if providerCaches[key] == nil { - providerCaches[key] = NewProviderCache(cacheStore, cacheNodes, database, table, ttl) + providerCaches[key] = NewProviderCache(cacheStore, cacheNodes, database, table, ttl, size) } return providerCaches[key] } // GetCreateHomeCache will return an existing CreateHomeCache for the given store, nodes, database and table // If it does not exist yet it will be created, different TTLs are ignored -func GetCreateHomeCache(cacheStore string, cacheNodes []string, database, table string, ttl time.Duration) CreateHomeCache { +func GetCreateHomeCache(cacheStore string, cacheNodes []string, database, table string, ttl time.Duration, size int) CreateHomeCache { mutex.Lock() defer mutex.Unlock() key := strings.Join(append(append([]string{cacheStore}, cacheNodes...), database, table), ":") if createHomeCaches[key] == nil { - createHomeCaches[key] = NewCreateHomeCache(cacheStore, cacheNodes, database, table, ttl) + createHomeCaches[key] = NewCreateHomeCache(cacheStore, cacheNodes, database, table, ttl, size) } return createHomeCaches[key] } // GetCreatePersonalSpaceCache will return an existing CreatePersonalSpaceCache for the given store, nodes, database and table // If it does not exist yet it will be created, different TTLs are ignored -func GetCreatePersonalSpaceCache(cacheStore string, cacheNodes []string, database, table string, ttl time.Duration) CreatePersonalSpaceCache { +func GetCreatePersonalSpaceCache(cacheStore string, cacheNodes []string, database, table string, ttl time.Duration, size int) CreatePersonalSpaceCache { mutex.Lock() defer mutex.Unlock() key := strings.Join(append(append([]string{cacheStore}, cacheNodes...), database, table), ":") if createPersonalSpaceCaches[key] == nil { - createPersonalSpaceCaches[key] = NewCreatePersonalSpaceCache(cacheStore, cacheNodes, database, table, ttl) + createPersonalSpaceCaches[key] = NewCreatePersonalSpaceCache(cacheStore, cacheNodes, database, table, ttl, size) } return createPersonalSpaceCaches[key] } // GetFileMetadataCache will return an existing 
GetFileMetadataCache for the given store, nodes, database and table // If it does not exist yet it will be created, different TTLs are ignored -func GetFileMetadataCache(cacheStore string, cacheNodes []string, database, table string, ttl time.Duration) FileMetadataCache { +func GetFileMetadataCache(cacheStore string, cacheNodes []string, database, table string, ttl time.Duration, size int) FileMetadataCache { mutex.Lock() defer mutex.Unlock() key := strings.Join(append(append([]string{cacheStore}, cacheNodes...), database, table), ":") if fileMetadataCaches[key] == nil { - fileMetadataCaches[key] = NewFileMetadataCache(cacheStore, cacheNodes, database, table, ttl) + fileMetadataCaches[key] = NewFileMetadataCache(cacheStore, cacheNodes, database, table, ttl, size) } return fileMetadataCaches[key] } @@ -159,77 +168,6 @@ type cacheStore struct { ttl time.Duration } -// NewCache initializes a new CacheStore -func NewCache(store string, nodes []string, database, table string, ttl time.Duration) Cache { - return cacheStore{ - s: getStore(store, nodes, database, table, ttl), // some stores use a default ttl so we pass it when initializing - database: database, - table: table, - ttl: ttl, // some stores use the ttl on every write, so we remember it here - } -} - -func getStore(store string, nodes []string, database, table string, ttl time.Duration) microstore.Store { - switch store { - case "etcd": - return microetcd.NewEtcdStore( - microstore.Nodes(nodes...), - microstore.Database(database), - microstore.Table(table), - ) - case "nats-js": - // TODO nats needs a DefaultTTL option as it does not support per Write TTL ... - // FIXME nats has restrictions on the key, we cannot use slashes AFAICT - // host, port, clusterid - return natsjs.NewStore( - microstore.Nodes(nodes...), - microstore.Database(database), - microstore.Table(table), - natsjs.NatsOptions(nats.Options{Name: "TODO"}), - natsjs.DefaultTTL(ttl), - ) // TODO test with ocis nats - case "redis": - return redis.NewStore( - microstore.Database(database), - microstore.Table(table), - microstore.Nodes(nodes...), - ) // only the first node is taken into account - case "redis-sentinel": - redisMaster := "" - redisNodes := []string{} - for _, node := range nodes { - parts := strings.SplitN(node, "/", 2) - if len(parts) != 2 { - return nil - } - // the first node is used to retrieve the redis master - redisNodes = append(redisNodes, parts[0]) - if redisMaster == "" { - redisMaster = parts[1] - } - } - - return redis.NewStore( - microstore.Database(database), - microstore.Table(table), - microstore.Nodes(redisNodes...), - redis.WithRedisOptions(redisopts.UniversalOptions{ - MasterName: redisMaster, - }), - ) - case "memory": - return microstore.NewStore( - microstore.Database(database), - microstore.Table(table), - ) - default: - return microstore.NewNoopStore( - microstore.Database(database), - microstore.Table(table), - ) - } -} - // PullFromCache pulls a value from the configured database and table of the underlying store using the given key func (cache cacheStore) PullFromCache(key string, dest interface{}) error { r, err := cache.s.Read(key, microstore.ReadFrom(cache.database, cache.table), microstore.ReadLimit(1)) @@ -249,8 +187,15 @@ func (cache cacheStore) PushToCache(key string, src interface{}) error { if err != nil { return err } + + record := &microstore.Record{ + Key: key, + Value: b, + Expiry: cache.ttl, + } + return cache.s.Write( - &microstore.Record{Key: key, Value: b}, + record, microstore.WriteTo(cache.database, cache.table),
microstore.WriteTTL(cache.ttl), ) @@ -285,3 +230,14 @@ func (cache cacheStore) Delete(key string, opts ...microstore.DeleteOption) erro func (cache cacheStore) Close() error { return cache.s.Close() } + +func getStore(storeType string, nodes []string, database, table string, ttl time.Duration, size int) microstore.Store { + return store.Create( + store.Store(storeType), + microstore.Nodes(nodes...), + microstore.Database(database), + microstore.Table(table), + store.TTL(ttl), + store.Size(size), + ) +} diff --git a/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/createhome.go b/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/createhome.go index 44e0edefc0..8a39c68a58 100644 --- a/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/createhome.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/createhome.go @@ -32,9 +32,9 @@ type createHomeCache struct { } // NewCreateHomeCache creates a new CreateHomeCache -func NewCreateHomeCache(store string, nodes []string, database, table string, ttl time.Duration) CreateHomeCache { +func NewCreateHomeCache(store string, nodes []string, database, table string, ttl time.Duration, size int) CreateHomeCache { c := &createHomeCache{} - c.s = getStore(store, nodes, database, table, ttl) + c.s = getStore(store, nodes, database, table, ttl, size) c.database = database c.table = table c.ttl = ttl diff --git a/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/createpersonalspace.go b/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/createpersonalspace.go index e1d4d7963f..fa6f9d11e9 100644 --- a/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/createpersonalspace.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/createpersonalspace.go @@ -30,9 +30,9 @@ type createPersonalSpaceCache struct { } // NewCreatePersonalSpaceCache creates a new CreatePersonalSpaceCache -func NewCreatePersonalSpaceCache(store string, nodes []string, database, table string, ttl time.Duration) CreatePersonalSpaceCache { +func NewCreatePersonalSpaceCache(store string, nodes []string, database, table string, ttl time.Duration, size int) CreatePersonalSpaceCache { c := &createPersonalSpaceCache{} - c.s = getStore(store, nodes, database, table, ttl) + c.s = getStore(store, nodes, database, table, ttl, size) c.database = database c.table = table c.ttl = ttl diff --git a/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/filemetadata.go b/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/filemetadata.go index 1dc6866a7a..58be251881 100644 --- a/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/filemetadata.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/filemetadata.go @@ -28,9 +28,9 @@ type fileMetadataCache struct { } // NewFileMetadataCache creates a new FileMetadataCache -func NewFileMetadataCache(store string, nodes []string, database, table string, ttl time.Duration) FileMetadataCache { +func NewFileMetadataCache(store string, nodes []string, database, table string, ttl time.Duration, size int) FileMetadataCache { c := &fileMetadataCache{} - c.s = getStore(store, nodes, database, table, ttl) + c.s = getStore(store, nodes, database, table, ttl, size) c.database = database c.table = table c.ttl = ttl diff --git a/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/provider.go b/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/provider.go index 123ef7958d..f7ad3e1d06 100644 --- a/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/provider.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/provider.go @@ -32,9 +32,9 @@ type providerCache 
struct { } // NewProviderCache creates a new ProviderCache -func NewProviderCache(store string, nodes []string, database, table string, ttl time.Duration) ProviderCache { +func NewProviderCache(store string, nodes []string, database, table string, ttl time.Duration, size int) ProviderCache { c := &providerCache{} - c.s = getStore(store, nodes, database, table, ttl) + c.s = getStore(store, nodes, database, table, ttl, size) c.database = database c.table = table c.ttl = ttl diff --git a/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/stat.go b/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/stat.go index c84ff4e864..414fe77f39 100644 --- a/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/stat.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/storage/cache/stat.go @@ -27,9 +27,9 @@ import ( ) // NewStatCache creates a new StatCache -func NewStatCache(store string, nodes []string, database, table string, ttl time.Duration) StatCache { +func NewStatCache(store string, nodes []string, database, table string, ttl time.Duration, size int) StatCache { c := statCache{} - c.s = getStore(store, nodes, database, table, ttl) + c.s = getStore(store, nodes, database, table, ttl, size) c.database = database c.table = table c.ttl = ttl diff --git a/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/decomposedfs.go b/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/decomposedfs.go index 40b2f15a57..012b7af852 100644 --- a/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/decomposedfs.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/decomposedfs.go @@ -160,7 +160,7 @@ func New(o *options.Options, lu *lookup.Lookup, p Permissions, tp Tree, es event p: p, chunkHandler: chunking.NewChunkHandler(filepath.Join(o.Root, "uploads")), stream: es, - cache: cache.GetStatCache(o.StatCache.CacheStore, o.StatCache.CacheNodes, o.StatCache.CacheDatabase, "stat", 0), + cache: cache.GetStatCache(o.StatCache.Store, o.StatCache.Nodes, o.StatCache.Database, "stat", time.Duration(o.StatCache.TTL)*time.Second, o.StatCache.Size), } if o.AsyncFileUploads { diff --git a/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata/messagepack_backend.go b/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata/messagepack_backend.go index e180408211..b414ede94e 100644 --- a/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata/messagepack_backend.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata/messagepack_backend.go @@ -27,7 +27,6 @@ import ( "time" "github.com/cs3org/reva/v2/pkg/storage/cache" - "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options" "github.com/pkg/xattr" "github.com/rogpeppe/go-internal/lockedfile" "github.com/shamaton/msgpack/v2" @@ -46,10 +45,10 @@ type readWriteCloseSeekTruncater interface { } // NewMessagePackBackend returns a new MessagePackBackend instance -func NewMessagePackBackend(rootPath string, o options.CacheOptions) MessagePackBackend { +func NewMessagePackBackend(rootPath string, o cache.Config) MessagePackBackend { return MessagePackBackend{ rootPath: filepath.Clean(rootPath), - metaCache: cache.GetFileMetadataCache(o.CacheStore, o.CacheNodes, o.CacheDatabase, "filemetadata", 24*time.Hour), + metaCache: cache.GetFileMetadataCache(o.Store, o.Nodes, o.Database, "filemetadata", time.Duration(o.TTL)*time.Second, o.Size), } } diff --git 
a/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/migrator/0003_switch_to_messagepack_metadata.go b/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/migrator/0003_switch_to_messagepack_metadata.go index 9c68a28d5e..06f3ae9e6b 100644 --- a/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/migrator/0003_switch_to_messagepack_metadata.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/migrator/0003_switch_to_messagepack_metadata.go @@ -25,9 +25,9 @@ import ( "path/filepath" "strings" + "github.com/cs3org/reva/v2/pkg/storage/cache" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata" - "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options" ) // Migration0003 migrates the file metadata to the current backend. @@ -44,7 +44,7 @@ func (m *Migrator) Migration0003() (Result, error) { m.log.Info().Str("root", m.lu.InternalRoot()).Msg("Migrating to messagepack metadata backend...") xattrs := metadata.XattrsBackend{} - mpk := metadata.NewMessagePackBackend(m.lu.InternalRoot(), options.CacheOptions{}) + mpk := metadata.NewMessagePackBackend(m.lu.InternalRoot(), cache.Config{}) spaces, _ := filepath.Glob(filepath.Join(m.lu.InternalRoot(), "spaces", "*", "*")) for _, space := range spaces { diff --git a/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options/options.go b/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options/options.go index bb4c8342e3..971f682f53 100644 --- a/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options/options.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options/options.go @@ -24,6 +24,7 @@ import ( "github.com/cs3org/reva/v2/pkg/rgrpc/todo/pool" "github.com/cs3org/reva/v2/pkg/sharedconf" + "github.com/cs3org/reva/v2/pkg/storage/cache" "github.com/mitchellh/mapstructure" "github.com/pkg/errors" ) @@ -63,8 +64,8 @@ type Options struct { Tokens TokenOptions `mapstructure:"tokens"` - StatCache CacheOptions `mapstructure:"statcache"` - FileMetadataCache CacheOptions `mapstructure:"filemetadatacache"` + StatCache cache.Config `mapstructure:"statcache"` + FileMetadataCache cache.Config `mapstructure:"filemetadatacache"` MaxAcquireLockCycles int `mapstructure:"max_acquire_lock_cycles"` LockCycleDurationFactor int `mapstructure:"lock_cycle_duration_factor"` @@ -90,13 +91,6 @@ type TokenOptions struct { TransferExpires int64 `mapstructure:"transfer_expires"` } -// CacheOptions contains options of configuring a cache -type CacheOptions struct { - CacheStore string `mapstructure:"cache_store"` - CacheNodes []string `mapstructure:"cache_nodes"` - CacheDatabase string `mapstructure:"cache_database"` -} - // New returns a new Options instance for the given configuration func New(m map[string]interface{}) (*Options, error) { o := &Options{} diff --git a/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/spaces.go b/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/spaces.go index e3cd4d59f0..96a8841324 100644 --- a/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/spaces.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/spaces.go @@ -246,6 +246,9 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide case provider.ListStorageSpacesRequest_Filter_TYPE_USER: // TODO: refactor this to GetUserId() in cs3 requestedUserID = filter[i].GetUser().GetOpaqueId() + case 
provider.ListStorageSpacesRequest_Filter_TYPE_OWNER: + // TODO: improve further by not evaluating shares + requestedUserID = filter[i].GetOwner().GetOpaqueId() } } if len(spaceTypes) == 0 { diff --git a/ocis-pkg/store/etcd/etcd.go b/vendor/github.com/cs3org/reva/v2/pkg/store/etcd/etcd.go similarity index 91% rename from ocis-pkg/store/etcd/etcd.go rename to vendor/github.com/cs3org/reva/v2/pkg/store/etcd/etcd.go index d5b7c70809..fb75a7e7ae 100644 --- a/ocis-pkg/store/etcd/etcd.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/store/etcd/etcd.go @@ -16,19 +16,20 @@ const ( suffixNS = ".suffix" ) -type EtcdStore struct { +// Store is a store implementation which uses etcd to store the data +type Store struct { options store.Options client *clientv3.Client } -// Create a new go-micro store backed by etcd -func NewEtcdStore(opts ...store.Option) store.Store { - es := &EtcdStore{} +// NewStore creates a new go-micro store backed by etcd +func NewStore(opts ...store.Option) store.Store { + es := &Store{} _ = es.Init(opts...) return es } -func (es *EtcdStore) getCtx() (context.Context, context.CancelFunc) { +func (es *Store) getCtx() (context.Context, context.CancelFunc) { currentCtx := es.options.Context if currentCtx == nil { currentCtx = context.TODO() @@ -42,7 +43,7 @@ func (es *EtcdStore) getCtx() (context.Context, context.CancelFunc) { // Currently, only the etcd nodes are configurable. If no node is provided, // it will use the "127.0.0.1:2379" node. // Context timeout is setup to 10 seconds, and dial timeout to 2 seconds -func (es *EtcdStore) setupClient() { +func (es *Store) setupClient() { if es.client != nil { es.client.Close() } @@ -60,10 +61,10 @@ func (es *EtcdStore) setupClient() { es.client = cli } -// Initialize the go-micro store implementation. +// Init initializes the go-micro store implementation. // Currently, only the nodes are configurable, the rest of the options // will be ignored. -func (es *EtcdStore) Init(opts ...store.Option) error { +func (es *Store) Init(opts ...store.Option) error { optList := store.Options{} for _, opt := range opts { opt(&optList) @@ -74,8 +75,8 @@ func (es *EtcdStore) Init(opts ...store.Option) error { return nil } -// Get the store options -func (es *EtcdStore) Options() store.Options { +// Options returns the store options +func (es *Store) Options() store.Options { return es.options } @@ -124,7 +125,7 @@ func getEffectiveTTL(r *store.Record, opts store.WriteOptions) int64 { // // It's recommended to use a minimum TTL of 10 secs or higher (or not to use // TTL) in order to prevent problematic scenarios. -func (es *EtcdStore) Write(r *store.Record, opts ...store.WriteOption) error { +func (es *Store) Write(r *store.Record, opts ...store.WriteOption) error { wopts := store.WriteOptions{} for _, opt := range opts { opt(&wopts) @@ -208,7 +209,7 @@ func processListResponse(resp *clientv3.GetResponse, offset int64, reverse bool) } // Perform an exact key read and return the result -func (es *EtcdStore) directRead(kv clientv3.KV, key string) ([]*store.Record, error) { +func (es *Store) directRead(kv clientv3.KV, key string) ([]*store.Record, error) { ctx, cancel := es.getCtx() resp, err := kv.Get(ctx, key) cancel() @@ -226,7 +227,7 @@ func (es *EtcdStore) directRead(kv clientv3.KV, key string) ([]*store.Record, er // Perform a prefix read with limit and offset. A limit of 0 will return all // results. Usage of offset isn't recommended because those results must still // be fethed from the server in order to be discarded. 
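Given the TTL caveat documented on Write above, a hedged usage sketch of the renamed etcd-backed store; the node address, key and value are placeholders:

```
package sketch

import (
	"time"

	"github.com/cs3org/reva/v2/pkg/store/etcd"
	microstore "go-micro.dev/v4/store"
)

func writeWithTTL() error {
	// Without nodes the store falls back to 127.0.0.1:2379, as documented in setupClient.
	s := etcd.NewStore(microstore.Nodes("127.0.0.1:2379"))
	defer s.Close()

	return s.Write(
		&microstore.Record{Key: "cache/example", Value: []byte("payload")},
		microstore.WriteTTL(30*time.Second), // keep TTLs at 10s or above, as recommended above
	)
}
```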
-func (es *EtcdStore) prefixRead(kv clientv3.KV, key string, limit, offset int64) ([]*store.Record, error) { +func (es *Store) prefixRead(kv clientv3.KV, key string, limit, offset int64) ([]*store.Record, error) { getOptions := []clientv3.OpOption{ clientv3.WithPrefix(), } @@ -250,7 +251,7 @@ func (es *EtcdStore) prefixRead(kv clientv3.KV, key string, limit, offset int64) // we'll find all the results we need within that range, and we'll likely // need to request more data from the server. The number of requests we need // to perform is unknown and might cause load. -func (es *EtcdStore) prefixSuffixRead(kv clientv3.KV, prefix, suffix string, limit, offset int64) ([]*store.Record, error) { +func (es *Store) prefixSuffixRead(kv clientv3.KV, prefix, suffix string, limit, offset int64) ([]*store.Record, error) { firstKeyOut := firstKeyOutOfPrefixString(prefix) getOptions := []clientv3.OpOption{ clientv3.WithRange(firstKeyOut), @@ -327,7 +328,7 @@ func (es *EtcdStore) prefixSuffixRead(kv clientv3.KV, prefix, suffix string, lim // Don't rely on any particular order of the keys. The records are expected to // be sorted by key except if the suffix option (suffix without prefix) is // used. In this case, the keys will be sorted based on the reversed key -func (es *EtcdStore) Read(key string, opts ...store.ReadOption) ([]*store.Record, error) { +func (es *Store) Read(key string, opts ...store.ReadOption) ([]*store.Record, error) { ropts := store.ReadOptions{} for _, opt := range opts { opt(&ropts) @@ -362,7 +363,7 @@ func (es *EtcdStore) Read(key string, opts ...store.ReadOption) ([]*store.Record // Since the Write method inserts 2 entries for a given key, those both // entries will also be removed using the same key. This is handled // transparently. -func (es *EtcdStore) Delete(key string, opts ...store.DeleteOption) error { +func (es *Store) Delete(key string, opts ...store.DeleteOption) error { dopts := store.DeleteOptions{} for _, opt := range opts { opt(&dopts) @@ -391,7 +392,7 @@ func (es *EtcdStore) Delete(key string, opts ...store.DeleteOption) error { // // Note that values for the keys won't be requested to the etcd server, that's // why the reverse option is important -func (es *EtcdStore) listKeys(kv clientv3.KV, prefixKey string, limit, offset int64, reverse bool) ([]string, error) { +func (es *Store) listKeys(kv clientv3.KV, prefixKey string, limit, offset int64, reverse bool) ([]string, error) { getOptions := []clientv3.OpOption{ clientv3.WithKeysOnly(), clientv3.WithPrefix(), @@ -415,7 +416,7 @@ func (es *EtcdStore) listKeys(kv clientv3.KV, prefixKey string, limit, offset in // the suffix manually on our side, which means we'll likely need to perform // additional requests to the etcd server to get more results matching all the // requirements. -func (es *EtcdStore) prefixSuffixList(kv clientv3.KV, prefix, suffix string, limit, offset int64) ([]string, error) { +func (es *Store) prefixSuffixList(kv clientv3.KV, prefix, suffix string, limit, offset int64) ([]string, error) { firstKeyOut := firstKeyOutOfPrefixString(prefix) getOptions := []clientv3.OpOption{ clientv3.WithKeysOnly(), @@ -492,7 +493,7 @@ func (es *EtcdStore) prefixSuffixList(kv clientv3.KV, prefix, suffix string, lim // just the suffix option is fine. // In addition, using the offset option is also discouraged because we'll // need to request additional keys that will be skipped on our side. 
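To illustrate the listing behaviour described above, a small sketch; prefix-only listings map directly onto an etcd range request, while suffix and offset options can require additional round trips. The key prefix and limit are placeholders:

```
package sketch

import (
	"github.com/cs3org/reva/v2/pkg/store/etcd"
	microstore "go-micro.dev/v4/store"
)

func listByPrefix() ([]string, error) {
	s := etcd.NewStore()
	defer s.Close()

	return s.List(
		microstore.ListPrefix("cache/"),
		microstore.ListLimit(10),
	)
}
```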
-func (es *EtcdStore) List(opts ...store.ListOption) ([]string, error) { +func (es *Store) List(opts ...store.ListOption) ([]string, error) { lopts := store.ListOptions{} for _, opt := range opts { opt(&lopts) @@ -521,11 +522,11 @@ func (es *EtcdStore) List(opts ...store.ListOption) ([]string, error) { } // Close the client -func (es *EtcdStore) Close() error { +func (es *Store) Close() error { return es.client.Close() } // Return the service name -func (es *EtcdStore) String() string { +func (es *Store) String() string { return "Etcd" } diff --git a/ocis-pkg/store/etcd/utils.go b/vendor/github.com/cs3org/reva/v2/pkg/store/etcd/utils.go similarity index 100% rename from ocis-pkg/store/etcd/utils.go rename to vendor/github.com/cs3org/reva/v2/pkg/store/etcd/utils.go diff --git a/ocis-pkg/store/memory/memstore.go b/vendor/github.com/cs3org/reva/v2/pkg/store/memory/memstore.go similarity index 94% rename from ocis-pkg/store/memory/memstore.go rename to vendor/github.com/cs3org/reva/v2/pkg/store/memory/memstore.go index bf8e1d6b95..c556cf9f65 100644 --- a/ocis-pkg/store/memory/memstore.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/store/memory/memstore.go @@ -11,8 +11,8 @@ import ( "go-micro.dev/v4/store" ) -// In-memory store implementation using radix tree for fast prefix and suffix -// searches. +// MemStore is a in-memory store implementation using radix tree for fast +// prefix and suffix searches. // Insertions are expected to be a bit slow due to the data structures, but // searches are expected to be fast, including exact key search, as well as // prefix and suffix searches (based on the number of elements to be returned). @@ -53,20 +53,22 @@ type contextKey string var targetContextKey contextKey -// Prepare a context to be used with the memory implementation. The context -// is used to set up custom parameters to the specific implementation. +// NewContext prepares a context to be used with the memory implementation. +// The context is used to set up custom parameters to the specific implementation. // In this case, you can configure the maximum capacity for the MemStore // implementation as shown below. // ``` // cache := NewMemStore( -// store.WithContext( -// NewContext( -// ctx, -// map[string]interface{}{ -// "maxCap": 50, -// }, -// ), -// ), +// +// store.WithContext( +// NewContext( +// ctx, +// map[string]interface{}{ +// "maxCap": 50, +// }, +// ), +// ), +// // ) // ``` // @@ -80,7 +82,7 @@ func NewContext(ctx context.Context, storeParams map[string]interface{}) context return context.WithValue(ctx, targetContextKey, storeParams) } -// Create a new MemStore instance +// NewMemStore creates a new MemStore instance func NewMemStore(opts ...store.Option) store.Store { m := &MemStore{} _ = m.Init(opts...) @@ -110,7 +112,7 @@ func (m *MemStore) getMaxCap() int { return maxCap } -// Initialize the MemStore. If the MemStore was used, this will reset +// Init initializes the MemStore. If the MemStore was used, this will reset // all the internal structures and the new options (passed as parameters) // will be used. func (m *MemStore) Init(opts ...store.Option) error { @@ -130,7 +132,7 @@ func (m *MemStore) Init(opts ...store.Option) error { return nil } -// Get the options being used +// Options returns the options being used func (m *MemStore) Options() store.Options { m.lockGlob.RLock() defer m.lockGlob.RUnlock() @@ -288,7 +290,7 @@ func (m *MemStore) Read(key string, opts ...store.ReadOption) ([]*store.Record, return records, nil } -// Remove the record based on the key. 
It won't return any error if it's missing +// Delete removes the record based on the key. It won't return any error if it's missing // // Database and Table options aren't supported func (m *MemStore) Delete(key string, opts ...store.DeleteOption) error { @@ -307,7 +309,7 @@ func (m *MemStore) Delete(key string, opts ...store.DeleteOption) error { // List the keys currently used in the MemStore // -// All options are supported except Database and Table +// # All options are supported except Database and Table // // For prefix and prefix+suffix options, the keys will be returned in // alphabetical order. @@ -373,19 +375,22 @@ func (m *MemStore) List(opts ...store.ListOption) ([]string, error) { return records, nil } +// Close closes the store func (m *MemStore) Close() error { return nil } +// String returns the name of the store implementation func (m *MemStore) String() string { return "RadixMemStore" } +// Len returns the number of items in the store func (m *MemStore) Len() (int, bool) { eLen := m.evictionList.Len() pLen := m.preRadix.Len() sLen := m.sufRadix.Len() - if eLen == pLen && eLen == sLen { + if eLen == pLen && pLen == sLen { return eLen, true } return 0, false diff --git a/ocis-pkg/store/memory/multimemstore.go b/vendor/github.com/cs3org/reva/v2/pkg/store/memory/multimemstore.go similarity index 90% rename from ocis-pkg/store/memory/multimemstore.go rename to vendor/github.com/cs3org/reva/v2/pkg/store/memory/multimemstore.go index 89756c994c..af65c58934 100644 --- a/ocis-pkg/store/memory/multimemstore.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/store/memory/multimemstore.go @@ -6,8 +6,8 @@ import ( "go-micro.dev/v4/store" ) -// In-memory store implementation using multiple MemStore to provide support -// for multiple databases and tables. +// MultiMemStore is a in-memory store implementation using multiple MemStore +// to provide support for multiple databases and tables. // Each table will be mapped to its own MemStore, which will be completely // isolated from the rest. In particular, each MemStore will have its own // capacity, so it's possible to have 10 MemStores with full capacity (512 @@ -24,7 +24,7 @@ type MultiMemStore struct { genOpts []store.Option } -// Create a new MultiMemStore. A new MemStore will be mapped based on the options. +// NewMultiMemStore creates a new MultiMemStore. A new MemStore will be mapped based on the options. // A default MemStore will be mapped if no Database and Table aren't used. func NewMultiMemStore(opts ...store.Option) store.Store { m := &MultiMemStore{ @@ -55,7 +55,7 @@ func (m *MultiMemStore) getMemStore(prefix string) *MemStore { return newStore } -// Initialize the mapped MemStore based on the Database and Table values +// Init initializes the mapped MemStore based on the Database and Table values // from the options with the same options. The target MemStore will be // reinitialized if needed. func (m *MultiMemStore) Init(opts ...store.Option) error { @@ -70,7 +70,7 @@ func (m *MultiMemStore) Init(opts ...store.Option) error { return mStore.Init(opts...) } -// Get the options used to create the MultiMemStore. +// Options returns the options used to create the MultiMemStore. // Specific options for each MemStore aren't available func (m *MultiMemStore) Options() store.Options { optList := store.Options{} @@ -149,10 +149,12 @@ func (m *MultiMemStore) List(opts ...store.ListOption) ([]string, error) { return mStore.List(opts...) 
} +// Close closes the store func (m *MultiMemStore) Close() error { return nil } +// String returns the name of the store implementation func (m *MultiMemStore) String() string { return "MultiRadixMemStore" } diff --git a/ocis-pkg/store/memory/utils.go b/vendor/github.com/cs3org/reva/v2/pkg/store/memory/utils.go similarity index 100% rename from ocis-pkg/store/memory/utils.go rename to vendor/github.com/cs3org/reva/v2/pkg/store/memory/utils.go diff --git a/ocis-pkg/store/options.go b/vendor/github.com/cs3org/reva/v2/pkg/store/options.go similarity index 67% rename from ocis-pkg/store/options.go rename to vendor/github.com/cs3org/reva/v2/pkg/store/options.go index 9ad63e8cba..5866d6f09a 100644 --- a/ocis-pkg/store/options.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/store/options.go @@ -1,3 +1,21 @@ +// Copyright 2018-2023 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + package store import ( diff --git a/ocis-pkg/store/cache.go b/vendor/github.com/cs3org/reva/v2/pkg/store/store.go similarity index 55% rename from ocis-pkg/store/cache.go rename to vendor/github.com/cs3org/reva/v2/pkg/store/store.go index dab56376df..79a84a065b 100644 --- a/ocis-pkg/store/cache.go +++ b/vendor/github.com/cs3org/reva/v2/pkg/store/store.go @@ -1,3 +1,21 @@ +// Copyright 2018-2023 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
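To make the capacity and isolation semantics of the memory stores concrete, here is a hedged sketch (not part of this patch). The `maxCap` parameter mirrors the `NewContext` example in the MemStore documentation, and `WriteTo`/`ReadFrom` are standard go-micro options that the MultiMemStore uses to route each database/table combination to its own isolated MemStore; the database, table and key names are illustrative.

```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/cs3org/reva/v2/pkg/store/memory"
	"go-micro.dev/v4/store"
)

func main() {
	// Each mapped MemStore gets its own capacity (512 entries by default;
	// a maxCap of 50 is requested here through the options context).
	s := memory.NewMultiMemStore(
		store.WithContext(
			memory.NewContext(
				context.Background(),
				map[string]interface{}{"maxCap": 50},
			),
		),
	)

	// The same key written to two different tables lands in two isolated
	// MemStores and does not collide.
	if err := s.Write(&store.Record{Key: "k", Value: []byte("a")}, store.WriteTo("svc", "stat")); err != nil {
		log.Fatal(err)
	}
	if err := s.Write(&store.Record{Key: "k", Value: []byte("b")}, store.WriteTo("svc", "filemetadata")); err != nil {
		log.Fatal(err)
	}

	if recs, err := s.Read("k", store.ReadFrom("svc", "stat")); err == nil && len(recs) > 0 {
		fmt.Println(string(recs[0].Value)) // prints "a"
	}
}
```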
+ package store import ( @@ -5,37 +23,38 @@ import ( "strings" "time" + "github.com/cs3org/reva/v2/pkg/store/etcd" + "github.com/cs3org/reva/v2/pkg/store/memory" natsjs "github.com/go-micro/plugins/v4/store/nats-js" "github.com/go-micro/plugins/v4/store/redis" redisopts "github.com/go-redis/redis/v8" "github.com/nats-io/nats.go" - "github.com/owncloud/ocis/v2/ocis-pkg/store/etcd" - "github.com/owncloud/ocis/v2/ocis-pkg/store/memory" "go-micro.dev/v4/logger" - "go-micro.dev/v4/store" + microstore "go-micro.dev/v4/store" ) -var ocMemStore *store.Store +var ocMemStore *microstore.Store const ( - TypeMemory = "memory" - TypeNoop = "noop" - TypeEtcd = "etcd" - TypeRedis = "redis" + // TypeMemory represents memory stores + TypeMemory = "memory" + // TypeNoop represents noop stores + TypeNoop = "noop" + // TypeEtcd represents etcd stores + TypeEtcd = "etcd" + // TypeRedis represents redis stores + TypeRedis = "redis" + // TypeRedisSentinel represents redis-sentinel stores TypeRedisSentinel = "redis-sentinel" - TypeOCMem = "ocmem" - TypeNatsJS = "nats-js" + // TypeOCMem represents ocmem stores + TypeOCMem = "ocmem" + // TypeNatsJS represents nats-js stores + TypeNatsJS = "nats-js" ) -// Create returns a configured key-value micro store -// -// Each microservice (or whatever piece is using the store) should use the -// options available in the interface's operations to choose the right database -// and table to prevent collisions with other microservices. -// Recommended approach is to use "services" or "ocis-pkg" for the database, -// and "services//" or "ocis-pkg//" for the package name. -func Create(opts ...store.Option) store.Store { - options := &store.Options{ +// Create initializes a new store +func Create(opts ...microstore.Option) microstore.Store { + options := µstore.Options{ Context: context.Background(), } for _, o := range opts { @@ -46,9 +65,9 @@ func Create(opts ...store.Option) store.Store { switch storeType { case TypeNoop: - return store.NewNoopStore(opts...) + return microstore.NewNoopStore(opts...) case TypeEtcd: - return etcd.NewEtcdStore(opts...) + return etcd.NewStore(opts...) case TypeRedis: // FIXME redis plugin does not support redis cluster or ring -> needs upstream patch or our implementation return redis.NewStore(opts...) @@ -67,23 +86,23 @@ func Create(opts ...store.Option) store.Store { } } return redis.NewStore( - store.Database(options.Database), - store.Table(options.Table), - store.Nodes(redisNodes...), + microstore.Database(options.Database), + microstore.Table(options.Table), + microstore.Nodes(redisNodes...), redis.WithRedisOptions(redisopts.UniversalOptions{ MasterName: redisMaster, }), ) case TypeOCMem: if ocMemStore == nil { - var memStore store.Store + var memStore microstore.Store sizeNum, _ := options.Context.Value(sizeContextKey{}).(int) if sizeNum <= 0 { memStore = memory.NewMultiMemStore() } else { memStore = memory.NewMultiMemStore( - store.WithContext( + microstore.WithContext( memory.NewContext( context.Background(), map[string]interface{}{ @@ -106,13 +125,13 @@ func Create(opts ...store.Option) store.Store { natsjs.DefaultTTL(ttl))..., ) // TODO test with ocis nats case TypeMemory, "mem", "": // allow existing short form and use as default - return store.NewMemoryStore(opts...) + return microstore.NewMemoryStore(opts...) 
default: // try to log an error if options.Logger == nil { options.Logger = logger.DefaultLogger } options.Logger.Logf(logger.ErrorLevel, "unknown store type: '%s', falling back to memory", storeType) - return store.NewMemoryStore(opts...) + return microstore.NewMemoryStore(opts...) } } diff --git a/vendor/github.com/go-micro/plugins/v4/store/nats-js/context.go b/vendor/github.com/go-micro/plugins/v4/store/nats-js/context.go index 07965c5118..e2eec2a6f8 100644 --- a/vendor/github.com/go-micro/plugins/v4/store/nats-js/context.go +++ b/vendor/github.com/go-micro/plugins/v4/store/nats-js/context.go @@ -6,7 +6,7 @@ import ( "go-micro.dev/v4/store" ) -// setStoreOption returns a function to setup a context with given value. +// setStoreOption returns a function to setup a context with given value func setStoreOption(k, v interface{}) store.Option { return func(o *store.Options) { if o.Context == nil { diff --git a/vendor/github.com/go-micro/plugins/v4/store/nats-js/nats.go b/vendor/github.com/go-micro/plugins/v4/store/nats-js/nats.go index 4cd056821d..907c6c0e62 100644 --- a/vendor/github.com/go-micro/plugins/v4/store/nats-js/nats.go +++ b/vendor/github.com/go-micro/plugins/v4/store/nats-js/nats.go @@ -41,7 +41,7 @@ func init() { cmd.DefaultStores["natsjs"] = NewStore } -// NewStore will create a new NATS JetStream Object Store. +// NewStore will create a new NATS JetStream Object Store func NewStore(opts ...store.Option) store.Store { options := store.Options{ Nodes: []string{}, @@ -64,9 +64,7 @@ func NewStore(opts ...store.Option) store.Store { return n } -// Init initializes the store. It must perform any required setup on the -// backing storage implementation and check that it is ready for use, -// returning any errors. +// Init initialises the store. It must perform any required setup on the backing storage implementation and check that it is ready for use, returning any errors. func (n *natsStore) Init(opts ...store.Option) error { n.setOption(opts...) @@ -401,7 +399,7 @@ func (n *natsStore) List(opts ...store.ListOption) ([]string, error) { return keys, nil } -// Close the store. +// Close the store func (n *natsStore) Close() error { n.conn.Close() return nil diff --git a/vendor/github.com/go-micro/plugins/v4/store/nats-js/options.go b/vendor/github.com/go-micro/plugins/v4/store/nats-js/options.go index 21719e2ab6..53a31b110a 100644 --- a/vendor/github.com/go-micro/plugins/v4/store/nats-js/options.go +++ b/vendor/github.com/go-micro/plugins/v4/store/nats-js/options.go @@ -7,7 +7,7 @@ import ( "go-micro.dev/v4/store" ) -// store.Option. +// store.Option type natsOptionsKey struct{} type jsOptionsKey struct{} type objOptionsKey struct{} @@ -15,15 +15,15 @@ type ttlOptionsKey struct{} type memoryOptionsKey struct{} type descriptionOptionsKey struct{} -// store.DeleteOption. +// store.DeleteOption type delBucketOptionsKey struct{} -// NatsOptions accepts nats.Options. +// NatsOptions accepts nats.Options func NatsOptions(opts nats.Options) store.Option { return setStoreOption(natsOptionsKey{}, opts) } -// JetStreamOptions accepts multiple nats.JSOpt. +// JetStreamOptions accepts multiple nats.JSOpt func JetStreamOptions(opts ...nats.JSOpt) store.Option { return setStoreOption(jsOptionsKey{}, opts) } @@ -35,42 +35,34 @@ func ObjectStoreOptions(cfg ...*nats.ObjectStoreConfig) store.Option { } // DefaultTTL sets the default TTL to use for new buckets -// -// By default no TTL is set. +// By default no TTL is set. // // TTL ON INDIVIDUAL WRITE CALLS IS NOT SUPPORTED, only bucket wide TTL. 
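Tying the factory together, a hedged sketch of selecting a backend through the relocated `Create` function (not part of this patch). The `store.Store(...)` and `store.TTL(...)` helpers are assumed to come from the moved options.go, whose body is not visible in this hunk beyond the license header; the node address, database and table names are illustrative.

```
package main

import (
	"log"
	"time"

	"github.com/cs3org/reva/v2/pkg/store"
	microstore "go-micro.dev/v4/store"
)

func main() {
	// Pick the backend by type constant; an unknown type falls back to the
	// plain go-micro memory store, as the default branch above logs.
	s := store.Create(
		store.Store(store.TypeNatsJS), // assumed option helper setting the store type
		store.TTL(30*time.Minute),     // assumed option helper; the nats-js case forwards a TTL via natsjs.DefaultTTL
		microstore.Nodes("127.0.0.1:9233"),
		microstore.Database("eventhistory"),
		microstore.Table("events"),
	)
	defer s.Close()

	if err := s.Write(&microstore.Record{Key: "event-1", Value: []byte(`{"type":"upload"}`)}); err != nil {
		log.Fatal(err)
	}
}
```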
// Either set a default TTL with this option or provide bucket specific options -// -// with ObjectStoreOptions +// with ObjectStoreOptions func DefaultTTL(ttl time.Duration) store.Option { return setStoreOption(ttlOptionsKey{}, ttl) } // DefaultMemory sets the default storage type to memory only. // -// The default is file storage, persisting storage between service restarts. -// +// The default is file storage, persisting storage between service restarts. // Be aware that the default storage location of NATS the /tmp dir is, and thus -// -// won't persist reboots. +// won't persist reboots. func DefaultMemory() store.Option { return setStoreOption(memoryOptionsKey{}, nats.MemoryStorage) } // DefaultDescription sets the default description to use when creating new -// -// buckets. The default is "Store managed by go-micro" +// buckets. The default is "Store managed by go-micro" func DefaultDescription(text string) store.Option { return setStoreOption(descriptionOptionsKey{}, text) } // DeleteBucket will use the key passed to Delete as a bucket (database) name, -// -// and delete the bucket. -// +// and delete the bucket. // This option should not be combined with the store.DeleteFrom option, as -// -// that will overwrite the delete action. +// that will overwrite the delete action. func DeleteBucket() store.DeleteOption { return func(d *store.DeleteOptions) { d.Table = "DELETE_BUCKET" diff --git a/vendor/modules.txt b/vendor/modules.txt index 15c4ddedf4..5697b21b8b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -349,7 +349,7 @@ github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1 github.com/cs3org/go-cs3apis/cs3/storage/registry/v1beta1 github.com/cs3org/go-cs3apis/cs3/tx/v1beta1 github.com/cs3org/go-cs3apis/cs3/types/v1beta1 -# github.com/cs3org/reva/v2 v2.12.1-0.20230420073005-11edad1f09fe +# github.com/cs3org/reva/v2 v2.12.1-0.20230424091007-8d8b567179b1 ## explicit; go 1.19 github.com/cs3org/reva/v2/cmd/revad/internal/grace github.com/cs3org/reva/v2/cmd/revad/runtime @@ -576,16 +576,13 @@ github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/registry github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/simple github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/spaces github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/tus +github.com/cs3org/reva/v2/pkg/rhttp/datatx/metrics github.com/cs3org/reva/v2/pkg/rhttp/datatx/utils/download github.com/cs3org/reva/v2/pkg/rhttp/global github.com/cs3org/reva/v2/pkg/rhttp/router github.com/cs3org/reva/v2/pkg/sdk/common github.com/cs3org/reva/v2/pkg/share github.com/cs3org/reva/v2/pkg/share/cache -github.com/cs3org/reva/v2/pkg/share/cache/loader -github.com/cs3org/reva/v2/pkg/share/cache/memory -github.com/cs3org/reva/v2/pkg/share/cache/redis -github.com/cs3org/reva/v2/pkg/share/cache/registry github.com/cs3org/reva/v2/pkg/share/cache/warmup/cbox github.com/cs3org/reva/v2/pkg/share/cache/warmup/loader github.com/cs3org/reva/v2/pkg/share/cache/warmup/registry @@ -674,6 +671,9 @@ github.com/cs3org/reva/v2/pkg/storage/utils/sync github.com/cs3org/reva/v2/pkg/storage/utils/templates github.com/cs3org/reva/v2/pkg/storage/utils/walker github.com/cs3org/reva/v2/pkg/storagespace +github.com/cs3org/reva/v2/pkg/store +github.com/cs3org/reva/v2/pkg/store/etcd +github.com/cs3org/reva/v2/pkg/store/memory github.com/cs3org/reva/v2/pkg/sysinfo github.com/cs3org/reva/v2/pkg/tags github.com/cs3org/reva/v2/pkg/token @@ -906,7 +906,7 @@ github.com/go-micro/plugins/v4/server/grpc # github.com/go-micro/plugins/v4/server/http v1.2.1 ## explicit; go 1.17 
github.com/go-micro/plugins/v4/server/http -# github.com/go-micro/plugins/v4/store/nats-js v1.2.0 +# github.com/go-micro/plugins/v4/store/nats-js v1.1.0 ## explicit; go 1.17 github.com/go-micro/plugins/v4/store/nats-js # github.com/go-micro/plugins/v4/store/redis v1.2.0