Update reva and vendored libs.

This commit is contained in:
Daniël Franke
2023-07-03 10:39:31 +02:00
parent 826d8f8795
commit c279773ac9
46 changed files with 6726 additions and 333 deletions

4
go.mod
View File

@@ -13,7 +13,7 @@ require (
github.com/coreos/go-oidc v2.2.1+incompatible
github.com/coreos/go-oidc/v3 v3.6.0
github.com/cs3org/go-cs3apis v0.0.0-20230516150832-730ac860c71d
github.com/cs3org/reva/v2 v2.14.1-0.20230629081848-5e7f1bf5c21d
github.com/cs3org/reva/v2 v2.14.1-0.20230630110658-4d867d522806
github.com/disintegration/imaging v1.6.2
github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e
github.com/egirna/icap-client v0.1.1
@@ -335,5 +335,3 @@ require (
)
replace github.com/cs3org/go-cs3apis => github.com/2403905/go-cs3apis v0.0.0-20230517122726-727045414fd1
replace github.com/cs3org/reva/v2 => github.com/micbar/reva/v2 v2.0.0-20230626125956-c381fe19a108

2
go.sum
View File

@@ -626,6 +626,8 @@ github.com/crewjam/saml v0.4.13 h1:TYHggH/hwP7eArqiXSJUvtOPNzQDyQ7vwmwEqlFWhMc=
github.com/crewjam/saml v0.4.13/go.mod h1:igEejV+fihTIlHXYP8zOec3V5A8y3lws5bQBFsTm4gA=
github.com/cs3org/reva/v2 v2.14.1-0.20230629081848-5e7f1bf5c21d h1:D/B1j72MC/IP0DIMOJdmt7rCBKVps2Xob1MSLGHZQ1A=
github.com/cs3org/reva/v2 v2.14.1-0.20230629081848-5e7f1bf5c21d/go.mod h1:E32krZG159YflDSjDWfx/QGIC2529PS5LiPnGNHu3d0=
github.com/cs3org/reva/v2 v2.14.1-0.20230630110658-4d867d522806 h1:3fPvPnnZib/cMA4f0GXJvX7lhQs7O31ZmDuSDHxQnVk=
github.com/cs3org/reva/v2 v2.14.1-0.20230630110658-4d867d522806/go.mod h1:E32krZG159YflDSjDWfx/QGIC2529PS5LiPnGNHu3d0=
github.com/cubewise-code/go-mime v0.0.0-20200519001935-8c5762b177d8 h1:Z9lwXumT5ACSmJ7WGnFl+OMLLjpz5uR2fyz7dC255FI=
github.com/cubewise-code/go-mime v0.0.0-20200519001935-8c5762b177d8/go.mod h1:4abs/jPXcmJzYoYGF91JF9Uq9s/KL5n1jvFDix8KcqY=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=

View File

@@ -24,7 +24,7 @@ import (
"github.com/cs3org/reva/v2/pkg/appctx"
"github.com/rs/zerolog"
semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
"go.opentelemetry.io/otel/trace"
"google.golang.org/grpc"
)

View File

@@ -38,7 +38,7 @@ import (
"github.com/cs3org/reva/v2/pkg/utils"
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
"go.opentelemetry.io/otel/trace"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"

View File

@@ -45,7 +45,7 @@ func (s *svc) CreatePublicShare(ctx context.Context, req *link.CreatePublicShare
}
if res.GetShare() != nil {
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), res.Share.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), res.Share.ResourceId)
}
return res, nil
}
@@ -63,7 +63,7 @@ func (s *svc) RemovePublicShare(ctx context.Context, req *link.RemovePublicShare
return nil, err
}
// TODO: How to find out the resourceId? -> get public share first, then delete
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), nil)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), nil)
return res, nil
}
@@ -142,7 +142,7 @@ func (s *svc) UpdatePublicShare(ctx context.Context, req *link.UpdatePublicShare
return nil, errors.Wrap(err, "error updating share")
}
if res.GetShare() != nil {
s.statCache.RemoveStat(
s.statCache.RemoveStatContext(ctx,
&userprovider.UserId{
OpaqueId: res.Share.Owner.GetOpaqueId(),
},

View File

@@ -326,7 +326,7 @@ func (s *svc) UpdateStorageSpace(ctx context.Context, req *provider.UpdateStorag
if res.Status.Code == rpc.Code_CODE_OK {
id := res.StorageSpace.Root
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), id)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), id)
s.providerCache.RemoveListStorageProviders(id)
}
return res, nil
@@ -363,7 +363,7 @@ func (s *svc) DeleteStorageSpace(ctx context.Context, req *provider.DeleteStorag
}
id := &provider.ResourceId{OpaqueId: req.Id.OpaqueId}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), id)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), id)
s.providerCache.RemoveListStorageProviders(id)
if dsRes.Status.Code != rpc.Code_CODE_OK {
@@ -608,7 +608,7 @@ func (s *svc) InitiateFileUpload(ctx context.Context, req *provider.InitiateFile
}
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return &gateway.InitiateFileUploadResponse{
Opaque: storageRes.Opaque,
Status: storageRes.Status,
@@ -645,7 +645,7 @@ func (s *svc) CreateContainer(ctx context.Context, req *provider.CreateContainer
}, nil
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return res, nil
}
@@ -688,7 +688,7 @@ func (s *svc) Delete(ctx context.Context, req *provider.DeleteRequest) (*provide
}, nil
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return res, nil
}
@@ -715,8 +715,8 @@ func (s *svc) Move(ctx context.Context, req *provider.MoveRequest) (*provider.Mo
req.Source = sref
req.Destination = dref
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Source.ResourceId)
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Destination.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Source.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Destination.ResourceId)
return c.Move(ctx, req)
}
@@ -739,7 +739,7 @@ func (s *svc) SetArbitraryMetadata(ctx context.Context, req *provider.SetArbitra
return nil, errors.Wrap(err, "gateway: error calling SetArbitraryMetadata")
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return res, nil
}
@@ -761,7 +761,7 @@ func (s *svc) UnsetArbitraryMetadata(ctx context.Context, req *provider.UnsetArb
}
return nil, errors.Wrap(err, "gateway: error calling UnsetArbitraryMetadata")
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return res, nil
}
@@ -785,7 +785,7 @@ func (s *svc) SetLock(ctx context.Context, req *provider.SetLockRequest) (*provi
return nil, errors.Wrap(err, "gateway: error calling SetLock")
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return res, nil
}
@@ -826,7 +826,7 @@ func (s *svc) RefreshLock(ctx context.Context, req *provider.RefreshLockRequest)
return nil, errors.Wrap(err, "gateway: error calling RefreshLock")
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return res, nil
}
@@ -847,7 +847,7 @@ func (s *svc) Unlock(ctx context.Context, req *provider.UnlockRequest) (*provide
return nil, errors.Wrap(err, "gateway: error calling Unlock")
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return res, nil
}
@@ -927,7 +927,7 @@ func (s *svc) RestoreFileVersion(ctx context.Context, req *provider.RestoreFileV
}, nil
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return res, nil
}
@@ -983,7 +983,7 @@ func (s *svc) RestoreRecycleItem(ctx context.Context, req *provider.RestoreRecyc
}, nil
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return res, nil
}
@@ -1006,7 +1006,7 @@ func (s *svc) PurgeRecycle(ctx context.Context, req *provider.PurgeRecycleReques
}, nil
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Ref.ResourceId)
return res, nil
}

View File

@@ -130,7 +130,7 @@ func (s *svc) UpdateShare(ctx context.Context, req *collaboration.UpdateShareReq
}
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), res.Share.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), res.Share.ResourceId)
return res, nil
}
@@ -213,7 +213,7 @@ func (s *svc) UpdateReceivedShare(ctx context.Context, req *collaboration.Update
}, nil
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.Share.Share.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.Share.Share.ResourceId)
return c.UpdateReceivedShare(ctx, req)
/*
TODO: Leftover from master merge. Do we need this?
@@ -504,7 +504,7 @@ func (s *svc) addShare(ctx context.Context, req *collaboration.CreateShareReques
switch status.Code {
case rpc.Code_CODE_OK:
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.ResourceInfo.Id)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.ResourceInfo.Id)
case rpc.Code_CODE_UNIMPLEMENTED:
appctx.GetLogger(ctx).Debug().Interface("status", status).Interface("req", req).Msg("storing grants not supported, ignoring")
rollBackFn(status)
@@ -548,7 +548,7 @@ func (s *svc) addSpaceShare(ctx context.Context, req *collaboration.CreateShareR
switch st.Code {
case rpc.Code_CODE_OK:
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), req.ResourceInfo.Id)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.ResourceInfo.Id)
s.providerCache.RemoveListStorageProviders(req.ResourceInfo.Id)
case rpc.Code_CODE_UNIMPLEMENTED:
appctx.GetLogger(ctx).Debug().Interface("status", st).Interface("req", req).Msg("storing grants not supported, ignoring")
@@ -618,7 +618,7 @@ func (s *svc) removeShare(ctx context.Context, req *collaboration.RemoveShareReq
}
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), share.ResourceId)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), share.ResourceId)
return res, nil
}
@@ -651,7 +651,7 @@ func (s *svc) removeSpaceShare(ctx context.Context, ref *provider.ResourceId, gr
Status: removeGrantStatus,
}, err
}
s.statCache.RemoveStat(ctxpkg.ContextMustGetUser(ctx).GetId(), ref)
s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), ref)
s.providerCache.RemoveListStorageProviders(ref)
return &collaboration.RemoveShareResponse{Status: status.NewOK(ctx)}, nil
}

View File

@@ -48,7 +48,7 @@ import (
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
"github.com/rs/zerolog"
semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
"go.opentelemetry.io/otel/trace"
"google.golang.org/grpc/metadata"
)

View File

@@ -36,8 +36,17 @@ import (
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
"github.com/rs/zerolog"
"go.opentelemetry.io/otel"
semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
"go.opentelemetry.io/otel/trace"
)
var tracer trace.Tracer
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree")
}
const (
// TokenTransportHeader holds the header key for the reva transfer token
TokenTransportHeader = "X-Reva-Transfer"
@@ -116,6 +125,14 @@ func (s *svc) Unprotected() []string {
func (s *svc) setHandler() {
s.handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
ctx, span := tracer.Start(ctx, "HandlerFunc")
defer span.End()
span.SetAttributes(
semconv.HTTPMethodKey.String(r.Method),
semconv.HTTPURLKey.String(r.URL.String()),
)
r = r.WithContext(ctx)
switch r.Method {
case "HEAD":
addCorsHeader(w)

View File

@@ -54,7 +54,7 @@ import (
"github.com/rs/zerolog"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
"golang.org/x/sync/errgroup"
"google.golang.org/protobuf/types/known/fieldmaskpb"
)

View File

@@ -40,6 +40,7 @@ import (
"github.com/cs3org/reva/v2/pkg/storagespace"
"github.com/cs3org/reva/v2/pkg/utils"
"github.com/rs/zerolog"
"go.opentelemetry.io/otel/propagation"
)
func sufferMacOSFinder(r *http.Request) bool {
@@ -302,6 +303,7 @@ func (s *svc) handlePut(ctx context.Context, w http.ResponseWriter, r *http.Requ
w.WriteHeader(http.StatusInternalServerError)
return
}
Propagator.Inject(ctx, propagation.HeaderCarrier(httpReq.Header))
httpReq.Header.Set(datagateway.TokenTransportHeader, token)
httpRes, err := s.client.Do(httpReq)

View File

@@ -42,6 +42,13 @@ import (
"github.com/cs3org/reva/v2/pkg/utils"
"github.com/rs/zerolog"
tusd "github.com/tus/tusd/pkg/handler"
"go.opentelemetry.io/otel/propagation"
)
// Propagator ensures the importer module uses the same trace propagation strategy.
var Propagator = propagation.NewCompositeTextMapPropagator(
propagation.Baggage{},
propagation.TraceContext{},
)
func (s *svc) handlePathTusPost(w http.ResponseWriter, r *http.Request, ns string) {
@@ -253,6 +260,7 @@ func (s *svc) handleTusPost(ctx context.Context, w http.ResponseWriter, r *http.
w.WriteHeader(http.StatusInternalServerError)
return
}
Propagator.Inject(ctx, propagation.HeaderCarrier(httpReq.Header))
httpReq.Header.Set(net.HeaderContentType, r.Header.Get(net.HeaderContentType))
httpReq.Header.Set(net.HeaderContentLength, r.Header.Get(net.HeaderContentLength))

View File

@@ -21,6 +21,7 @@
package datatx
import (
"context"
"net/http"
userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
@@ -55,5 +56,5 @@ func EmitFileUploadedEvent(spaceOwnerOrManager, executant *userv1beta1.UserId, r
// InvalidateCache is a helper function which invalidates the stat cache
func InvalidateCache(owner *userv1beta1.UserId, ref *provider.Reference, statCache cache.StatCache) {
statCache.RemoveStat(owner, ref.GetResourceId())
statCache.RemoveStatContext(context.TODO(), owner, ref.GetResourceId())
}

View File

@@ -19,6 +19,7 @@
package cache
import (
"context"
"fmt"
"strings"
"sync"
@@ -67,6 +68,7 @@ type Cache interface {
type StatCache interface {
Cache
RemoveStat(userID *userpb.UserId, res *provider.ResourceId)
RemoveStatContext(ctx context.Context, userID *userpb.UserId, res *provider.ResourceId)
GetKey(userID *userpb.UserId, ref *provider.Reference, metaDataKeys, fieldMaskPaths []string) string
}

View File

@@ -19,6 +19,7 @@
package cache
import (
"context"
"strings"
"sync"
"time"
@@ -26,8 +27,18 @@ import (
userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
"go-micro.dev/v4/store"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
"go.opentelemetry.io/otel/trace"
)
var tracer trace.Tracer
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup")
}
// NewStatCache creates a new StatCache
func NewStatCache(store string, nodes []string, database, table string, ttl time.Duration, size int) StatCache {
c := statCache{}
@@ -42,12 +53,21 @@ type statCache struct {
cacheStore
}
// RemoveStat removes a reference from the stat cache
func (c statCache) RemoveStat(userID *userpb.UserId, res *provider.ResourceId) {
func (c statCache) RemoveStatContext(ctx context.Context, userID *userpb.UserId, res *provider.ResourceId) {
_, span := tracer.Start(ctx, "RemoveStatContext")
defer span.End()
span.SetAttributes(semconv.EnduserIDKey.String(userID.GetOpaqueId()))
uid := "uid:" + userID.GetOpaqueId()
sid := ""
oid := ""
if res != nil {
span.SetAttributes(
attribute.String("space.id", res.SpaceId),
attribute.String("node.id", res.OpaqueId),
)
sid = "sid:" + res.SpaceId
oid = "oid:" + res.OpaqueId
}
@@ -75,6 +95,11 @@ func (c statCache) RemoveStat(userID *userpb.UserId, res *provider.ResourceId) {
wg.Wait()
}
// RemoveStat removes a reference from the stat cache
func (c statCache) RemoveStat(userID *userpb.UserId, res *provider.ResourceId) {
c.RemoveStatContext(context.Background(), userID, res)
}
// generates a user specific key pointing to ref - used for statcache
// a key looks like: uid:1234-1233!sid:5678-5677!oid:9923-9934!path:/path/to/source
// as you see it adds "uid:"/"sid:"/"oid:" prefixes to the uuids so they can be differentiated

View File

@@ -37,7 +37,6 @@ import (
rpcv1beta1 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
"github.com/cs3org/reva/v2/pkg/appctx"
ctxpkg "github.com/cs3org/reva/v2/pkg/ctx"
"github.com/cs3org/reva/v2/pkg/errtypes"
"github.com/cs3org/reva/v2/pkg/events"
@@ -62,11 +61,16 @@ import (
"github.com/jellydator/ttlcache/v2"
"github.com/pkg/errors"
microstore "go-micro.dev/v4/store"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
"golang.org/x/sync/errgroup"
)
// name is the Tracer name used to identify this instrumentation library.
const tracerName = "decomposedfs"
var tracer trace.Tracer
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs")
}
// Tree is used to manage a tree hierarchy
type Tree interface {
@@ -202,7 +206,9 @@ func New(o *options.Options, lu *lookup.Lookup, p Permissions, tp Tree, es event
// Postprocessing starts the postprocessing result collector
func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
ctx := context.TODO()
ctx := context.TODO() // we should pass the trace id in the event and initialize the trace provider here
ctx, span := tracer.Start(ctx, "Postprocessing")
defer span.End()
log := logger.New()
for event := range ch {
switch ev := event.Event.(type) {
@@ -247,7 +253,7 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("could not read parent")
} else {
// update parent tmtime to propagate etag change
_ = p.SetTMTime(&now)
_ = p.SetTMTime(ctx, &now)
if err := fs.tp.Propagate(ctx, p, 0); err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("could not propagate etag change")
}
@@ -256,7 +262,7 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
upload.Cleanup(up, failed, keepUpload)
// remove cache entry in gateway
fs.cache.RemoveStat(ev.ExecutingUser.GetId(), &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID})
fs.cache.RemoveStatContext(ctx, ev.ExecutingUser.GetId(), &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID})
if err := events.Publish(
fs.stream,
@@ -331,7 +337,7 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
}
// remove cache entry in gateway
fs.cache.RemoveStat(ev.ExecutingUser.GetId(), &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID})
fs.cache.RemoveStatContext(ctx, ev.ExecutingUser.GetId(), &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID})
continue
}
@@ -369,7 +375,7 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
}
// remove cache entry in gateway
fs.cache.RemoveStat(ev.ExecutingUser.GetId(), &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID})
fs.cache.RemoveStatContext(ctx, ev.ExecutingUser.GetId(), &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID})
continue
}
*/
@@ -390,13 +396,13 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
n = no
}
if err := n.SetScanData(res.Description, res.Scandate); err != nil {
if err := n.SetScanData(ctx, res.Description, res.Scandate); err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Interface("resourceID", res.ResourceID).Msg("Failed to set scan results")
continue
}
// remove cache entry in gateway
fs.cache.RemoveStat(ev.ExecutingUser.GetId(), &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID})
fs.cache.RemoveStatContext(ctx, ev.ExecutingUser.GetId(), &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID})
default:
log.Error().Interface("event", ev).Msg("Unknown event")
}
@@ -412,6 +418,8 @@ func (fs *Decomposedfs) Shutdown(ctx context.Context) error {
// GetQuota returns the quota available
// TODO Document in the cs3 should we return quota or free space?
func (fs *Decomposedfs) GetQuota(ctx context.Context, ref *provider.Reference) (total uint64, inUse uint64, remaining uint64, err error) {
ctx, span := tracer.Start(ctx, "GetQuota")
defer span.End()
var n *node.Node
if ref == nil {
err = errtypes.BadRequest("no space given")
@@ -487,6 +495,8 @@ func (fs *Decomposedfs) calculateTotalUsedRemaining(quotaStr string, inUse, rema
// CreateHome creates a new home node for the given user
func (fs *Decomposedfs) CreateHome(ctx context.Context) (err error) {
ctx, span := tracer.Start(ctx, "CreateHome")
defer span.End()
if fs.o.UserLayout == "" {
return errtypes.NotSupported("Decomposedfs: CreateHome() home supported disabled")
}
@@ -519,6 +529,8 @@ func isAlreadyExists(err error) bool {
// GetHome is called to look up the home path for a user
// It is NOT supposed to return the internal path but the external path
func (fs *Decomposedfs) GetHome(ctx context.Context) (string, error) {
ctx, span := tracer.Start(ctx, "GetHome")
defer span.End()
if fs.o.UserLayout == "" {
return "", errtypes.NotSupported("Decomposedfs: GetHome() home supported disabled")
}
@@ -529,6 +541,8 @@ func (fs *Decomposedfs) GetHome(ctx context.Context) (string, error) {
// GetPathByID returns the fn pointed by the file id, without the internal namespace
func (fs *Decomposedfs) GetPathByID(ctx context.Context, id *provider.ResourceId) (string, error) {
ctx, span := tracer.Start(ctx, "GetPathByID")
defer span.End()
n, err := fs.lu.NodeFromID(ctx, id)
if err != nil {
return "", err
@@ -557,6 +571,9 @@ func (fs *Decomposedfs) GetPathByID(ctx context.Context, id *provider.ResourceId
// CreateDir creates the specified directory
func (fs *Decomposedfs) CreateDir(ctx context.Context, ref *provider.Reference) (err error) {
ctx, span := tracer.Start(ctx, "CreateDir")
defer span.End()
name := path.Base(ref.Path)
if name == "" || name == "." || name == "/" {
return errtypes.BadRequest("Invalid path: " + ref.Path)
@@ -617,6 +634,8 @@ func (fs *Decomposedfs) CreateDir(ctx context.Context, ref *provider.Reference)
// TouchFile as defined in the storage.FS interface
func (fs *Decomposedfs) TouchFile(ctx context.Context, ref *provider.Reference, markprocessing bool, mtime string) error {
ctx, span := tracer.Start(ctx, "TouchFile")
defer span.End()
parentRef := &provider.Reference{
ResourceId: ref.ResourceId,
Path: path.Dir(ref.Path),
@@ -669,6 +688,8 @@ func (fs *Decomposedfs) CreateReference(ctx context.Context, p string, targetURI
// Move moves a resource from one reference to another
func (fs *Decomposedfs) Move(ctx context.Context, oldRef, newRef *provider.Reference) (err error) {
ctx, span := tracer.Start(ctx, "Move")
defer span.End()
var oldNode, newNode *node.Node
if oldNode, err = fs.lu.NodeFromResource(ctx, oldRef); err != nil {
return
@@ -703,13 +724,13 @@ func (fs *Decomposedfs) Move(ctx context.Context, oldRef, newRef *provider.Refer
switch {
case err != nil:
return err
case oldNode.IsDir() && !rp.CreateContainer:
case oldNode.IsDir(ctx) && !rp.CreateContainer:
f, _ := storagespace.FormatReference(newRef)
if rp.Stat {
return errtypes.PermissionDenied(f)
}
return errtypes.NotFound(f)
case !oldNode.IsDir() && !rp.InitiateFileUpload:
case !oldNode.IsDir(ctx) && !rp.InitiateFileUpload:
f, _ := storagespace.FormatReference(newRef)
if rp.Stat {
return errtypes.PermissionDenied(f)
@@ -735,6 +756,8 @@ func (fs *Decomposedfs) Move(ctx context.Context, oldRef, newRef *provider.Refer
// GetMD returns the metadata for the specified resource
func (fs *Decomposedfs) GetMD(ctx context.Context, ref *provider.Reference, mdKeys []string, fieldMask []string) (ri *provider.ResourceInfo, err error) {
ctx, span := tracer.Start(ctx, "GetMD")
defer span.End()
var node *node.Node
if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil {
return
@@ -777,14 +800,13 @@ func (fs *Decomposedfs) GetMD(ctx context.Context, ref *provider.Reference, mdKe
// ListFolder returns a list of resources in the specified folder
func (fs *Decomposedfs) ListFolder(ctx context.Context, ref *provider.Reference, mdKeys []string, fieldMask []string) ([]*provider.ResourceInfo, error) {
ctx, span := tracer.Start(ctx, "ListFolder")
defer span.End()
n, err := fs.lu.NodeFromResource(ctx, ref)
if err != nil {
return nil, err
}
ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "ListFolder")
defer span.End()
if !n.Exists {
return nil, errtypes.NotFound(filepath.Join(n.ParentID, n.Name))
}
@@ -872,6 +894,8 @@ func (fs *Decomposedfs) ListFolder(ctx context.Context, ref *provider.Reference,
// Delete deletes the specified resource
func (fs *Decomposedfs) Delete(ctx context.Context, ref *provider.Reference) (err error) {
ctx, span := tracer.Start(ctx, "Delete")
defer span.End()
var node *node.Node
if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil {
return
@@ -904,6 +928,8 @@ func (fs *Decomposedfs) Delete(ctx context.Context, ref *provider.Reference) (er
// Download returns a reader to the specified resource
func (fs *Decomposedfs) Download(ctx context.Context, ref *provider.Reference) (io.ReadCloser, error) {
ctx, span := tracer.Start(ctx, "Download")
defer span.End()
// check if we are trying to download a revision
// TODO the CS3 api should allow initiating a revision download
if ref.ResourceId != nil && strings.Contains(ref.ResourceId.OpaqueId, node.RevisionIDDelimiter) {
@@ -941,6 +967,8 @@ func (fs *Decomposedfs) Download(ctx context.Context, ref *provider.Reference) (
// GetLock returns an existing lock on the given reference
func (fs *Decomposedfs) GetLock(ctx context.Context, ref *provider.Reference) (*provider.Lock, error) {
ctx, span := tracer.Start(ctx, "GetLock")
defer span.End()
node, err := fs.lu.NodeFromResource(ctx, ref)
if err != nil {
return nil, errors.Wrap(err, "Decomposedfs: error resolving ref")
@@ -968,6 +996,8 @@ func (fs *Decomposedfs) GetLock(ctx context.Context, ref *provider.Reference) (*
// SetLock puts a lock on the given reference
func (fs *Decomposedfs) SetLock(ctx context.Context, ref *provider.Reference, lock *provider.Lock) error {
ctx, span := tracer.Start(ctx, "SetLock")
defer span.End()
node, err := fs.lu.NodeFromResource(ctx, ref)
if err != nil {
return errors.Wrap(err, "Decomposedfs: error resolving ref")
@@ -994,6 +1024,8 @@ func (fs *Decomposedfs) SetLock(ctx context.Context, ref *provider.Reference, lo
// RefreshLock refreshes an existing lock on the given reference
func (fs *Decomposedfs) RefreshLock(ctx context.Context, ref *provider.Reference, lock *provider.Lock, existingLockID string) error {
ctx, span := tracer.Start(ctx, "RefreshLock")
defer span.End()
if lock.LockId == "" {
return errtypes.BadRequest("missing lockid")
}
@@ -1024,6 +1056,8 @@ func (fs *Decomposedfs) RefreshLock(ctx context.Context, ref *provider.Reference
// Unlock removes an existing lock from the given reference
func (fs *Decomposedfs) Unlock(ctx context.Context, ref *provider.Reference, lock *provider.Lock) error {
ctx, span := tracer.Start(ctx, "Unlock")
defer span.End()
if lock.LockId == "" {
return errtypes.BadRequest("missing lockid")
}

View File

@@ -135,7 +135,7 @@ func (fs *Decomposedfs) ListGrants(ctx context.Context, ref *provider.Reference)
}
log := appctx.GetLogger(ctx)
var attrs node.Attributes
if attrs, err = grantNode.Xattrs(); err != nil {
if attrs, err = grantNode.Xattrs(ctx); err != nil {
log.Error().Err(err).Msg("error listing attributes")
return nil, err
}
@@ -208,7 +208,7 @@ func (fs *Decomposedfs) RemoveGrant(ctx context.Context, ref *provider.Reference
attr = prefixes.GrantUserAcePrefix + g.Grantee.GetUserId().OpaqueId
}
if err = grantNode.RemoveXattr(attr); err != nil {
if err = grantNode.RemoveXattr(ctx, attr); err != nil {
return err
}
@@ -326,7 +326,7 @@ func (fs *Decomposedfs) storeGrant(ctx context.Context, n *node.Node, g *provide
// set the grant
e := ace.FromGrant(g)
principal, value := e.Marshal()
if err := n.SetXattr(prefixes.GrantPrefix+principal, value); err != nil {
if err := n.SetXattr(ctx, prefixes.GrantPrefix+principal, value); err != nil {
appctx.GetLogger(ctx).Error().Err(err).
Str("principal", principal).Msg("Could not set grant for principal")
return err

View File

@@ -34,8 +34,16 @@ import (
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options"
"github.com/pkg/errors"
"github.com/rogpeppe/go-internal/lockedfile"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
)
var tracer trace.Tracer
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup")
}
// Lookup implements transformations from filepath to node and back
type Lookup struct {
Options *options.Options
@@ -57,8 +65,8 @@ func (lu *Lookup) MetadataBackend() metadata.Backend {
}
// ReadBlobSizeAttr reads the blobsize from the xattrs
func (lu *Lookup) ReadBlobSizeAttr(path string) (int64, error) {
blobSize, err := lu.metadataBackend.GetInt64(path, prefixes.BlobsizeAttr)
func (lu *Lookup) ReadBlobSizeAttr(ctx context.Context, path string) (int64, error) {
blobSize, err := lu.metadataBackend.GetInt64(ctx, path, prefixes.BlobsizeAttr)
if err != nil {
return 0, errors.Wrapf(err, "error reading blobsize xattr")
}
@@ -66,8 +74,8 @@ func (lu *Lookup) ReadBlobSizeAttr(path string) (int64, error) {
}
// ReadBlobIDAttr reads the blobid from the xattrs
func (lu *Lookup) ReadBlobIDAttr(path string) (string, error) {
attr, err := lu.metadataBackend.Get(path, prefixes.BlobIDAttr)
func (lu *Lookup) ReadBlobIDAttr(ctx context.Context, path string) (string, error) {
attr, err := lu.metadataBackend.Get(ctx, path, prefixes.BlobIDAttr)
if err != nil {
return "", errors.Wrapf(err, "error reading blobid xattr")
}
@@ -75,9 +83,9 @@ func (lu *Lookup) ReadBlobIDAttr(path string) (string, error) {
}
// TypeFromPath returns the type of the node at the given path
func (lu *Lookup) TypeFromPath(path string) provider.ResourceType {
func (lu *Lookup) TypeFromPath(ctx context.Context, path string) provider.ResourceType {
// Try to read from xattrs
typeAttr, err := lu.metadataBackend.GetInt64(path, prefixes.TypeAttr)
typeAttr, err := lu.metadataBackend.GetInt64(ctx, path, prefixes.TypeAttr)
if err == nil {
return provider.ResourceType(int32(typeAttr))
}
@@ -91,7 +99,7 @@ func (lu *Lookup) TypeFromPath(path string) provider.ResourceType {
switch {
case fi.IsDir():
if _, err = lu.metadataBackend.Get(path, prefixes.ReferenceAttr); err == nil {
if _, err = lu.metadataBackend.Get(ctx, path, prefixes.ReferenceAttr); err == nil {
t = provider.ResourceType_RESOURCE_TYPE_REFERENCE
} else {
t = provider.ResourceType_RESOURCE_TYPE_CONTAINER
@@ -108,6 +116,9 @@ func (lu *Lookup) TypeFromPath(path string) provider.ResourceType {
// NodeFromResource takes in a request path or request id and converts it to a Node
func (lu *Lookup) NodeFromResource(ctx context.Context, ref *provider.Reference) (*node.Node, error) {
ctx, span := tracer.Start(ctx, "NodeFromResource")
defer span.End()
if ref.ResourceId != nil {
// check if a storage space reference is used
// currently, the decomposed fs uses the root node id as the space id
@@ -136,6 +147,8 @@ func (lu *Lookup) NodeFromResource(ctx context.Context, ref *provider.Reference)
// NodeFromID returns the internal path for the id
func (lu *Lookup) NodeFromID(ctx context.Context, id *provider.ResourceId) (n *node.Node, err error) {
ctx, span := tracer.Start(ctx, "NodeFromID")
defer span.End()
if id == nil {
return nil, fmt.Errorf("invalid resource id %+v", id)
}
@@ -178,7 +191,7 @@ func (lu *Lookup) Path(ctx context.Context, n *node.Node, hasPermission node.Per
root := n.SpaceRoot
for n.ID != root.ID {
p = filepath.Join(n.Name, p)
if n, err = n.Parent(); err != nil {
if n, err = n.Parent(ctx); err != nil {
appctx.GetLogger(ctx).
Error().Err(err).
Str("path", p).
@@ -207,7 +220,7 @@ func (lu *Lookup) WalkPath(ctx context.Context, r *node.Node, p string, followRe
}
if followReferences {
if attrBytes, err := r.Xattr(prefixes.ReferenceAttr); err == nil {
if attrBytes, err := r.Xattr(ctx, prefixes.ReferenceAttr); err == nil {
realNodeID := attrBytes
ref, err := refFromCS3(realNodeID)
if err != nil {
@@ -220,7 +233,7 @@ func (lu *Lookup) WalkPath(ctx context.Context, r *node.Node, p string, followRe
}
}
}
if r.IsSpaceRoot() {
if r.IsSpaceRoot(ctx) {
r.SpaceRoot = r
}
@@ -268,7 +281,7 @@ func refFromCS3(b []byte) (*provider.Reference, error) {
// The optional filter function can be used to filter by attribute name, e.g. by checking a prefix
// For the source file, a shared lock is acquired.
// NOTE: target resource will be write locked!
func (lu *Lookup) CopyMetadata(src, target string, filter func(attributeName string) bool) (err error) {
func (lu *Lookup) CopyMetadata(ctx context.Context, src, target string, filter func(attributeName string) bool) (err error) {
// Acquire a read log on the source node
// write lock existing node before reading treesize or tree time
f, err := lockedfile.Open(lu.MetadataBackend().MetadataPath(src))
@@ -288,14 +301,14 @@ func (lu *Lookup) CopyMetadata(src, target string, filter func(attributeName str
}
}()
return lu.CopyMetadataWithSourceLock(src, target, filter, f)
return lu.CopyMetadataWithSourceLock(ctx, src, target, filter, f)
}
// CopyMetadataWithSourceLock copies all extended attributes from source to target.
// The optional filter function can be used to filter by attribute name, e.g. by checking a prefix
// For the source file, a matching lockedfile is required.
// NOTE: target resource will be write locked!
func (lu *Lookup) CopyMetadataWithSourceLock(sourcePath, targetPath string, filter func(attributeName string) bool, lockedSource *lockedfile.File) (err error) {
func (lu *Lookup) CopyMetadataWithSourceLock(ctx context.Context, sourcePath, targetPath string, filter func(attributeName string) bool, lockedSource *lockedfile.File) (err error) {
switch {
case lockedSource == nil:
return errors.New("no lock provided")
@@ -303,7 +316,7 @@ func (lu *Lookup) CopyMetadataWithSourceLock(sourcePath, targetPath string, filt
return errors.New("lockpath does not match filepath")
}
attrs, err := lu.metadataBackend.AllWithLockedSource(sourcePath, lockedSource)
attrs, err := lu.metadataBackend.AllWithLockedSource(ctx, sourcePath, lockedSource)
if err != nil {
return err
}
@@ -315,7 +328,7 @@ func (lu *Lookup) CopyMetadataWithSourceLock(sourcePath, targetPath string, filt
}
}
return lu.MetadataBackend().SetMultiple(targetPath, newAttrs, true)
return lu.MetadataBackend().SetMultiple(ctx, targetPath, newAttrs, true)
}
// DetectBackendOnDisk returns the name of the metadata backend being used on disk

View File

@@ -93,7 +93,7 @@ func (fs *Decomposedfs) SetArbitraryMetadata(ctx context.Context, ref *provider.
delete(md.Metadata, node.FavoriteKey)
if u, ok := ctxpkg.ContextGetUser(ctx); ok {
if uid := u.GetId(); uid != nil {
if err := n.SetFavorite(uid, val); err != nil {
if err := n.SetFavorite(ctx, uid, val); err != nil {
sublog.Error().Err(err).
Interface("user", u).
Msg("could not set favorite flag")
@@ -111,7 +111,7 @@ func (fs *Decomposedfs) SetArbitraryMetadata(ctx context.Context, ref *provider.
}
for k, v := range md.Metadata {
attrName := prefixes.MetadataPrefix + k
if err = n.SetXattrString(attrName, v); err != nil {
if err = n.SetXattrString(ctx, attrName, v); err != nil {
errs = append(errs, errors.Wrap(err, "Decomposedfs: could not set metadata attribute "+attrName+" to "+k))
}
}
@@ -184,7 +184,7 @@ func (fs *Decomposedfs) UnsetArbitraryMetadata(ctx context.Context, ref *provide
continue
}
fa := fmt.Sprintf("%s:%s:%s@%s", prefixes.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp())
if err := n.RemoveXattr(fa); err != nil {
if err := n.RemoveXattr(ctx, fa); err != nil {
if metadata.IsAttrUnset(err) {
continue // already gone, ignore
}
@@ -195,7 +195,7 @@ func (fs *Decomposedfs) UnsetArbitraryMetadata(ctx context.Context, ref *provide
errs = append(errs, errors.Wrap(err, "could not unset favorite flag"))
}
default:
if err = n.RemoveXattr(prefixes.MetadataPrefix + k); err != nil {
if err = n.RemoveXattr(ctx, prefixes.MetadataPrefix+k); err != nil {
if metadata.IsAttrUnset(err) {
continue // already gone, ignore
}

View File

@@ -19,6 +19,7 @@
package metadata
import (
"context"
"io"
"os"
"path/filepath"
@@ -30,6 +31,7 @@ import (
"github.com/pkg/xattr"
"github.com/rogpeppe/go-internal/lockedfile"
"github.com/shamaton/msgpack/v2"
"go.opentelemetry.io/otel/codes"
)
// MessagePackBackend persists the attributes in messagepack format inside the file
@@ -48,7 +50,7 @@ type readWriteCloseSeekTruncater interface {
func NewMessagePackBackend(rootPath string, o cache.Config) MessagePackBackend {
return MessagePackBackend{
rootPath: filepath.Clean(rootPath),
metaCache: cache.GetFileMetadataCache(o.Store, o.Nodes, o.Database, "filemetadata", time.Duration(o.TTL)*time.Second, o.Size),
metaCache: cache.GetFileMetadataCache(o.Store, o.Nodes, o.Database, "filemetadata:", time.Duration(o.TTL)*time.Second, o.Size),
}
}
@@ -56,13 +58,13 @@ func NewMessagePackBackend(rootPath string, o cache.Config) MessagePackBackend {
func (MessagePackBackend) Name() string { return "messagepack" }
// All reads all extended attributes for a node
func (b MessagePackBackend) All(path string) (map[string][]byte, error) {
return b.loadAttributes(path, nil)
func (b MessagePackBackend) All(ctx context.Context, path string) (map[string][]byte, error) {
return b.loadAttributes(ctx, path, nil)
}
// Get an extended attribute value for the given key
func (b MessagePackBackend) Get(path, key string) ([]byte, error) {
attribs, err := b.loadAttributes(path, nil)
func (b MessagePackBackend) Get(ctx context.Context, path, key string) ([]byte, error) {
attribs, err := b.loadAttributes(ctx, path, nil)
if err != nil {
return []byte{}, err
}
@@ -74,8 +76,8 @@ func (b MessagePackBackend) Get(path, key string) ([]byte, error) {
}
// GetInt64 reads a string as int64 from the xattrs
func (b MessagePackBackend) GetInt64(path, key string) (int64, error) {
attribs, err := b.loadAttributes(path, nil)
func (b MessagePackBackend) GetInt64(ctx context.Context, path, key string) (int64, error) {
attribs, err := b.loadAttributes(ctx, path, nil)
if err != nil {
return 0, err
}
@@ -92,8 +94,8 @@ func (b MessagePackBackend) GetInt64(path, key string) (int64, error) {
// List retrieves a list of names of extended attributes associated with the
// given path in the file system.
func (b MessagePackBackend) List(path string) ([]string, error) {
attribs, err := b.loadAttributes(path, nil)
func (b MessagePackBackend) List(ctx context.Context, path string) ([]string, error) {
attribs, err := b.loadAttributes(ctx, path, nil)
if err != nil {
return nil, err
}
@@ -105,36 +107,50 @@ func (b MessagePackBackend) List(path string) ([]string, error) {
}
// Set sets one attribute for the given path
func (b MessagePackBackend) Set(path, key string, val []byte) error {
return b.SetMultiple(path, map[string][]byte{key: val}, true)
func (b MessagePackBackend) Set(ctx context.Context, path, key string, val []byte) error {
return b.SetMultiple(ctx, path, map[string][]byte{key: val}, true)
}
// SetMultiple sets a set of attribute for the given path
func (b MessagePackBackend) SetMultiple(path string, attribs map[string][]byte, acquireLock bool) error {
return b.saveAttributes(path, attribs, nil, acquireLock)
func (b MessagePackBackend) SetMultiple(ctx context.Context, path string, attribs map[string][]byte, acquireLock bool) error {
return b.saveAttributes(ctx, path, attribs, nil, acquireLock)
}
// Remove an extended attribute key
func (b MessagePackBackend) Remove(path, key string) error {
return b.saveAttributes(path, nil, []string{key}, true)
func (b MessagePackBackend) Remove(ctx context.Context, path, key string) error {
return b.saveAttributes(ctx, path, nil, []string{key}, true)
}
// AllWithLockedSource reads all extended attributes from the given reader (if possible).
// The path argument is used for storing the data in the cache
func (b MessagePackBackend) AllWithLockedSource(path string, source io.Reader) (map[string][]byte, error) {
return b.loadAttributes(path, source)
func (b MessagePackBackend) AllWithLockedSource(ctx context.Context, path string, source io.Reader) (map[string][]byte, error) {
return b.loadAttributes(ctx, path, source)
}
func (b MessagePackBackend) saveAttributes(path string, setAttribs map[string][]byte, deleteAttribs []string, acquireLock bool) error {
func (b MessagePackBackend) saveAttributes(ctx context.Context, path string, setAttribs map[string][]byte, deleteAttribs []string, acquireLock bool) error {
var (
f readWriteCloseSeekTruncater
err error
f readWriteCloseSeekTruncater
)
ctx, span := tracer.Start(ctx, "saveAttributes")
defer func() {
if err != nil {
span.SetStatus(codes.Error, err.Error())
} else {
span.SetStatus(codes.Ok, "")
}
span.End()
}()
metaPath := b.MetadataPath(path)
if acquireLock {
_, subspan := tracer.Start(ctx, "lockedfile.OpenFile")
f, err = lockedfile.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0600)
subspan.End()
} else {
_, subspan := tracer.Start(ctx, "os.OpenFile")
f, err = os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0600)
subspan.End()
}
if err != nil {
return err
@@ -142,10 +158,15 @@ func (b MessagePackBackend) saveAttributes(path string, setAttribs map[string][]
defer f.Close()
// Invalidate cache early
_, subspan := tracer.Start(ctx, "metaCache.RemoveMetadata")
_ = b.metaCache.RemoveMetadata(b.cacheKey(path))
subspan.End()
// Read current state
msgBytes, err := io.ReadAll(f)
_, subspan = tracer.Start(ctx, "io.ReadAll")
var msgBytes []byte
msgBytes, err = io.ReadAll(f)
subspan.End()
if err != nil {
return err
}
@@ -170,25 +191,35 @@ func (b MessagePackBackend) saveAttributes(path string, setAttribs map[string][]
if err != nil {
return err
}
_, subspan = tracer.Start(ctx, "f.Truncate")
err = f.Truncate(0)
subspan.End()
if err != nil {
return err
}
// Write new metadata to file
d, err := msgpack.Marshal(attribs)
var d []byte
d, err = msgpack.Marshal(attribs)
if err != nil {
return err
}
_, subspan = tracer.Start(ctx, "f.Write")
_, err = f.Write(d)
subspan.End()
if err != nil {
return err
}
return b.metaCache.PushToCache(b.cacheKey(path), attribs)
_, subspan = tracer.Start(ctx, "metaCache.PushToCache")
err = b.metaCache.PushToCache(b.cacheKey(path), attribs)
subspan.End()
return err
}
func (b MessagePackBackend) loadAttributes(path string, source io.Reader) (map[string][]byte, error) {
func (b MessagePackBackend) loadAttributes(ctx context.Context, path string, source io.Reader) (map[string][]byte, error) {
ctx, span := tracer.Start(ctx, "loadAttributes")
defer span.End()
attribs := map[string][]byte{}
err := b.metaCache.PullFromCache(b.cacheKey(path), &attribs)
if err == nil {
@@ -197,14 +228,18 @@ func (b MessagePackBackend) loadAttributes(path string, source io.Reader) (map[s
metaPath := b.MetadataPath(path)
if source == nil {
_, subspan := tracer.Start(ctx, "lockedfile.Open")
source, err = lockedfile.Open(metaPath)
subspan.End()
// // No cached entry found. Read from storage and store in cache
if err != nil {
if os.IsNotExist(err) {
// some of the caller rely on ENOTEXISTS to be returned when the
// actual file (not the metafile) does not exist in order to
// determine whether a node exists or not -> stat the actual node
_, subspan := tracer.Start(ctx, "os.Stat")
_, err := os.Stat(path)
subspan.End()
if err != nil {
return nil, err
}
@@ -214,7 +249,9 @@ func (b MessagePackBackend) loadAttributes(path string, source io.Reader) (map[s
defer source.(*lockedfile.File).Close()
}
_, subspan := tracer.Start(ctx, "io.ReadAll")
msgBytes, err := io.ReadAll(source)
subspan.End()
if err != nil {
return nil, err
}
@@ -225,7 +262,9 @@ func (b MessagePackBackend) loadAttributes(path string, source io.Reader) (map[s
}
}
_, subspan = tracer.Start(ctx, "metaCache.PushToCache")
err = b.metaCache.PushToCache(b.cacheKey(path), attribs)
subspan.End()
if err != nil {
return nil, err
}

View File

@@ -19,31 +19,41 @@
package metadata
import (
"context"
"errors"
"io"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
)
var tracer trace.Tracer
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/metadata")
}
var errUnconfiguredError = errors.New("no metadata backend configured. Bailing out")
// Backend defines the interface for file attribute backends
type Backend interface {
Name() string
All(path string) (map[string][]byte, error)
Get(path, key string) ([]byte, error)
All(ctx context.Context, path string) (map[string][]byte, error)
Get(ctx context.Context, path, key string) ([]byte, error)
GetInt64(path, key string) (int64, error)
List(path string) (attribs []string, err error)
Set(path, key string, val []byte) error
SetMultiple(path string, attribs map[string][]byte, acquireLock bool) error
Remove(path, key string) error
GetInt64(ctx context.Context, path, key string) (int64, error)
List(ctx context.Context, path string) (attribs []string, err error)
Set(ctx context.Context, path, key string, val []byte) error
SetMultiple(ctx context.Context, path string, attribs map[string][]byte, acquireLock bool) error
Remove(ctx context.Context, path, key string) error
Purge(path string) error
Rename(oldPath, newPath string) error
IsMetaFile(path string) bool
MetadataPath(path string) string
AllWithLockedSource(path string, source io.Reader) (map[string][]byte, error)
AllWithLockedSource(ctx context.Context, path string, source io.Reader) (map[string][]byte, error)
}
// NullBackend is the default stub backend, used to enforce the configuration of a proper backend
@@ -53,28 +63,40 @@ type NullBackend struct{}
func (NullBackend) Name() string { return "null" }
// All reads all extended attributes for a node
func (NullBackend) All(path string) (map[string][]byte, error) { return nil, errUnconfiguredError }
func (NullBackend) All(ctx context.Context, path string) (map[string][]byte, error) {
return nil, errUnconfiguredError
}
// Get an extended attribute value for the given key
func (NullBackend) Get(path, key string) ([]byte, error) { return []byte{}, errUnconfiguredError }
func (NullBackend) Get(ctx context.Context, path, key string) ([]byte, error) {
return []byte{}, errUnconfiguredError
}
// GetInt64 reads a string as int64 from the xattrs
func (NullBackend) GetInt64(path, key string) (int64, error) { return 0, errUnconfiguredError }
func (NullBackend) GetInt64(ctx context.Context, path, key string) (int64, error) {
return 0, errUnconfiguredError
}
// List retrieves a list of names of extended attributes associated with the
// given path in the file system.
func (NullBackend) List(path string) ([]string, error) { return nil, errUnconfiguredError }
func (NullBackend) List(ctx context.Context, path string) ([]string, error) {
return nil, errUnconfiguredError
}
// Set sets one attribute for the given path
func (NullBackend) Set(path string, key string, val []byte) error { return errUnconfiguredError }
func (NullBackend) Set(ctx context.Context, path string, key string, val []byte) error {
return errUnconfiguredError
}
// SetMultiple sets a set of attribute for the given path
func (NullBackend) SetMultiple(path string, attribs map[string][]byte, acquireLock bool) error {
func (NullBackend) SetMultiple(ctx context.Context, path string, attribs map[string][]byte, acquireLock bool) error {
return errUnconfiguredError
}
// Remove removes an extended attribute key
func (NullBackend) Remove(path string, key string) error { return errUnconfiguredError }
func (NullBackend) Remove(ctx context.Context, path string, key string) error {
return errUnconfiguredError
}
// IsMetaFile returns whether the given path represents a meta file
func (NullBackend) IsMetaFile(path string) bool { return false }
@@ -90,6 +112,6 @@ func (NullBackend) MetadataPath(path string) string { return "" }
// AllWithLockedSource reads all extended attributes from the given reader
// The path argument is used for storing the data in the cache
func (NullBackend) AllWithLockedSource(path string, source io.Reader) (map[string][]byte, error) {
func (NullBackend) AllWithLockedSource(ctx context.Context, path string, source io.Reader) (map[string][]byte, error) {
return nil, errUnconfiguredError
}

View File

@@ -19,6 +19,7 @@
package metadata
import (
"context"
"io"
"os"
"path/filepath"
@@ -39,13 +40,13 @@ func (XattrsBackend) Name() string { return "xattrs" }
// Get an extended attribute value for the given key
// No file locking is involved here as reading a single xattr is
// considered to be atomic.
func (b XattrsBackend) Get(filePath, key string) ([]byte, error) {
func (b XattrsBackend) Get(ctx context.Context, filePath, key string) ([]byte, error) {
return xattr.Get(filePath, key)
}
// GetInt64 reads a string as int64 from the xattrs
func (b XattrsBackend) GetInt64(filePath, key string) (int64, error) {
attr, err := b.Get(filePath, key)
func (b XattrsBackend) GetInt64(ctx context.Context, filePath, key string) (int64, error) {
attr, err := b.Get(ctx, filePath, key)
if err != nil {
return 0, err
}
@@ -58,7 +59,7 @@ func (b XattrsBackend) GetInt64(filePath, key string) (int64, error) {
// List retrieves a list of names of extended attributes associated with the
// given path in the file system.
func (XattrsBackend) List(filePath string) (attribs []string, err error) {
func (XattrsBackend) List(ctx context.Context, filePath string) (attribs []string, err error) {
attrs, err := xattr.List(filePath)
if err == nil {
return attrs, nil
@@ -75,8 +76,8 @@ func (XattrsBackend) List(filePath string) (attribs []string, err error) {
// All reads all extended attributes for a node, protected by a
// shared file lock
func (b XattrsBackend) All(filePath string) (attribs map[string][]byte, err error) {
attrNames, err := b.List(filePath)
func (b XattrsBackend) All(ctx context.Context, filePath string) (attribs map[string][]byte, err error) {
attrNames, err := b.List(ctx, filePath)
if err != nil {
return nil, err
@@ -106,12 +107,12 @@ func (b XattrsBackend) All(filePath string) (attribs map[string][]byte, err erro
}
// Set sets one attribute for the given path
func (b XattrsBackend) Set(path string, key string, val []byte) (err error) {
return b.SetMultiple(path, map[string][]byte{key: val}, true)
func (b XattrsBackend) Set(ctx context.Context, path string, key string, val []byte) (err error) {
return b.SetMultiple(ctx, path, map[string][]byte{key: val}, true)
}
// SetMultiple sets a set of attribute for the given path
func (XattrsBackend) SetMultiple(path string, attribs map[string][]byte, acquireLock bool) (err error) {
func (XattrsBackend) SetMultiple(ctx context.Context, path string, attribs map[string][]byte, acquireLock bool) (err error) {
if acquireLock {
err := os.MkdirAll(filepath.Dir(path), 0600)
if err != nil {
@@ -144,7 +145,7 @@ func (XattrsBackend) SetMultiple(path string, attribs map[string][]byte, acquire
}
// Remove an extended attribute key
func (XattrsBackend) Remove(filePath string, key string) (err error) {
func (XattrsBackend) Remove(ctx context.Context, filePath string, key string) (err error) {
lockedFile, err := lockedfile.OpenFile(filePath+filelocks.LockFileSuffix, os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
return err
@@ -173,6 +174,6 @@ func cleanupLockfile(f *lockedfile.File) {
// AllWithLockedSource reads all extended attributes from the given reader.
// The path argument is used for storing the data in the cache
func (b XattrsBackend) AllWithLockedSource(path string, _ io.Reader) (map[string][]byte, error) {
return b.All(path)
func (b XattrsBackend) AllWithLockedSource(ctx context.Context, path string, _ io.Reader) (map[string][]byte, error) {
return b.All(ctx, path)
}

View File

@@ -19,6 +19,7 @@
package migrator
import (
"context"
"errors"
"os"
"path/filepath"
@@ -48,7 +49,7 @@ func (m *Migrator) Migration0001() (Result, error) {
for _, n := range nodes {
nodePath := filepath.Join(nodesPath, n.Name())
attr, err := m.lu.MetadataBackend().Get(nodePath, prefixes.ParentidAttr)
attr, err := m.lu.MetadataBackend().Get(context.Background(), nodePath, prefixes.ParentidAttr)
if err == nil && string(attr) == node.RootID {
if err := m.moveNode(n.Name(), n.Name()); err != nil {
m.log.Error().Err(err).

View File

@@ -19,6 +19,7 @@
package migrator
import (
"context"
"errors"
"io/fs"
"os"
@@ -74,7 +75,7 @@ func (m *Migrator) Migration0003() (Result, error) {
return nil
}
attribs, err := xattrs.All(path)
attribs, err := xattrs.All(context.Background(), path)
if err != nil {
m.log.Error().Err(err).Str("path", path).Msg("error converting file")
return err
@@ -83,14 +84,14 @@ func (m *Migrator) Migration0003() (Result, error) {
return nil
}
err = mpk.SetMultiple(path, attribs, false)
err = mpk.SetMultiple(context.Background(), path, attribs, false)
if err != nil {
m.log.Error().Err(err).Str("path", path).Msg("error setting attributes")
return err
}
for k := range attribs {
err = xattrs.Remove(path, k)
err = xattrs.Remove(context.Background(), path, k)
if err != nil {
m.log.Debug().Err(err).Str("path", path).Msg("error removing xattr")
}

View File

@@ -255,6 +255,8 @@ func (n *Node) Unlock(ctx context.Context, lock *provider.Lock) error {
// CheckLock compares the context lock with the node lock
func (n *Node) CheckLock(ctx context.Context) error {
ctx, span := tracer.Start(ctx, "CheckLock")
defer span.End()
contextLock, _ := ctxpkg.ContextGetLockID(ctx)
diskLock, _ := n.ReadLock(ctx, false)
if diskLock != nil {

View File

@@ -48,8 +48,16 @@ import (
"github.com/cs3org/reva/v2/pkg/utils"
"github.com/google/uuid"
"github.com/pkg/errors"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
)
var tracer trace.Tracer
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node")
}
// Define keys and values used in the node metadata
const (
LockdiscoveryKey = "DAV:lockdiscovery"
@@ -97,8 +105,8 @@ type PathLookup interface {
InternalPath(spaceID, nodeID string) string
Path(ctx context.Context, n *Node, hasPermission PermissionFunc) (path string, err error)
MetadataBackend() metadata.Backend
ReadBlobSizeAttr(path string) (int64, error)
ReadBlobIDAttr(path string) (string, error)
ReadBlobSizeAttr(ctx context.Context, path string) (int64, error)
ReadBlobIDAttr(ctx context.Context, path string) (string, error)
}
// New returns a new instance of Node
@@ -120,7 +128,7 @@ func New(spaceID, id, parentID, name string, blobsize int64, blobID string, t pr
}
// Type returns the node's resource type
func (n *Node) Type() provider.ResourceType {
func (n *Node) Type(ctx context.Context) provider.ResourceType {
if n.nodeType != nil {
return *n.nodeType
}
@@ -128,7 +136,7 @@ func (n *Node) Type() provider.ResourceType {
t := provider.ResourceType_RESOURCE_TYPE_INVALID
// Try to read from xattrs
typeAttr, err := n.XattrInt32(prefixes.TypeAttr)
typeAttr, err := n.XattrInt32(ctx, prefixes.TypeAttr)
if err == nil {
t = provider.ResourceType(typeAttr)
n.nodeType = &t
@@ -143,7 +151,7 @@ func (n *Node) Type() provider.ResourceType {
switch {
case fi.IsDir():
if _, err = n.Xattr(prefixes.ReferenceAttr); err == nil {
if _, err = n.Xattr(ctx, prefixes.ReferenceAttr); err == nil {
t = provider.ResourceType_RESOURCE_TYPE_REFERENCE
} else {
t = provider.ResourceType_RESOURCE_TYPE_CONTAINER
@@ -165,12 +173,12 @@ func (n *Node) SetType(t provider.ResourceType) {
}
// NodeMetadata writes the Node metadata to disk and allows passing additional attributes
func (n *Node) NodeMetadata() Attributes {
func (n *Node) NodeMetadata(ctx context.Context) Attributes {
attribs := Attributes{}
attribs.SetInt64(prefixes.TypeAttr, int64(n.Type()))
attribs.SetInt64(prefixes.TypeAttr, int64(n.Type(ctx)))
attribs.SetString(prefixes.ParentidAttr, n.ParentID)
attribs.SetString(prefixes.NameAttr, n.Name)
if n.Type() == provider.ResourceType_RESOURCE_TYPE_FILE {
if n.Type(ctx) == provider.ResourceType_RESOURCE_TYPE_FILE {
attribs.SetString(prefixes.BlobIDAttr, n.BlobID)
attribs.SetInt64(prefixes.BlobsizeAttr, n.Blobsize)
}
@@ -206,6 +214,8 @@ func (n *Node) SpaceOwnerOrManager(ctx context.Context) *userpb.UserId {
// ReadNode creates a new instance from an id and checks if it exists
func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canListDisabledSpace bool, spaceRoot *Node, skipParentCheck bool) (*Node, error) {
ctx, span := tracer.Start(ctx, "ReadNode")
defer span.End()
var err error
if spaceRoot == nil {
@@ -216,7 +226,7 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis
ID: spaceID,
}
spaceRoot.SpaceRoot = spaceRoot
spaceRoot.owner, err = spaceRoot.readOwner()
spaceRoot.owner, err = spaceRoot.readOwner(ctx)
switch {
case metadata.IsNotExist(err):
return spaceRoot, nil // swallow not found, the node defaults to exists = false
@@ -226,14 +236,14 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis
spaceRoot.Exists = true
// lookup name in extended attributes
spaceRoot.Name, err = spaceRoot.XattrString(prefixes.NameAttr)
spaceRoot.Name, err = spaceRoot.XattrString(ctx, prefixes.NameAttr)
if err != nil {
return nil, err
}
}
// TODO ReadNode should not check permissions
if !canListDisabledSpace && spaceRoot.IsDisabled() {
if !canListDisabledSpace && spaceRoot.IsDisabled(ctx) {
// no permission = not found
return nil, errtypes.NotFound(spaceID)
}
@@ -276,7 +286,7 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis
}
}()
attrs, err := n.Xattrs()
attrs, err := n.Xattrs(ctx)
switch {
case metadata.IsNotExist(err):
return n, nil // swallow not found, the node defaults to exists = false
@@ -305,13 +315,13 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis
n.Blobsize = blobSize
}
} else {
n.BlobID, err = lu.ReadBlobIDAttr(nodePath + revisionSuffix)
n.BlobID, err = lu.ReadBlobIDAttr(ctx, nodePath+revisionSuffix)
if err != nil {
return nil, err
}
// Lookup blobsize
n.Blobsize, err = lu.ReadBlobSizeAttr(nodePath + revisionSuffix)
n.Blobsize, err = lu.ReadBlobSizeAttr(ctx, nodePath+revisionSuffix)
if err != nil {
return nil, err
}
@@ -342,6 +352,9 @@ func readChildNodeFromLink(path string) (string, error) {
// Child returns the child node with the given name
func (n *Node) Child(ctx context.Context, name string) (*Node, error) {
ctx, span := tracer.Start(ctx, "Child")
defer span.End()
spaceID := n.SpaceID
if spaceID == "" && n.ParentID == "root" {
spaceID = n.ID
@@ -375,7 +388,7 @@ func (n *Node) Child(ctx context.Context, name string) (*Node, error) {
}
// ParentWithReader returns the parent node
func (n *Node) ParentWithReader(r io.Reader) (*Node, error) {
func (n *Node) ParentWithReader(ctx context.Context, r io.Reader) (*Node, error) {
if n.ParentID == "" {
return nil, fmt.Errorf("decomposedfs: root has no parent")
}
@@ -387,7 +400,7 @@ func (n *Node) ParentWithReader(r io.Reader) (*Node, error) {
}
// fill metadata cache using the reader
attrs, err := p.XattrsWithReader(r)
attrs, err := p.XattrsWithReader(ctx, r)
switch {
case metadata.IsNotExist(err):
return p, nil // swallow not found, the node defaults to exists = false
@@ -403,8 +416,8 @@ func (n *Node) ParentWithReader(r io.Reader) (*Node, error) {
}
// Parent returns the parent node
func (n *Node) Parent() (p *Node, err error) {
return n.ParentWithReader(nil)
func (n *Node) Parent(ctx context.Context) (p *Node, err error) {
return n.ParentWithReader(ctx, nil)
}
// Owner returns the space owner
@@ -414,14 +427,14 @@ func (n *Node) Owner() *userpb.UserId {
// readOwner reads the owner from the extended attributes of the space root
// in case either owner id or owner idp are unset we return an error and an empty owner object
func (n *Node) readOwner() (*userpb.UserId, error) {
func (n *Node) readOwner(ctx context.Context) (*userpb.UserId, error) {
owner := &userpb.UserId{}
// lookup parent id in extended attributes
var attr string
var err error
// lookup ID in extended attributes
attr, err = n.SpaceRoot.XattrString(prefixes.OwnerIDAttr)
attr, err = n.SpaceRoot.XattrString(ctx, prefixes.OwnerIDAttr)
switch {
case err == nil:
owner.OpaqueId = attr
@@ -432,7 +445,7 @@ func (n *Node) readOwner() (*userpb.UserId, error) {
}
// lookup IDP in extended attributes
attr, err = n.SpaceRoot.XattrString(prefixes.OwnerIDPAttr)
attr, err = n.SpaceRoot.XattrString(ctx, prefixes.OwnerIDPAttr)
switch {
case err == nil:
owner.Idp = attr
@@ -443,7 +456,7 @@ func (n *Node) readOwner() (*userpb.UserId, error) {
}
// lookup type in extended attributes
attr, err = n.SpaceRoot.XattrString(prefixes.OwnerTypeAttr)
attr, err = n.SpaceRoot.XattrString(ctx, prefixes.OwnerTypeAttr)
switch {
case err == nil:
owner.Type = utils.UserTypeMap(attr)
@@ -538,7 +551,7 @@ func (n *Node) SetMtime(mtime time.Time) error {
func (n *Node) SetEtag(ctx context.Context, val string) (err error) {
sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger()
var tmTime time.Time
if tmTime, err = n.GetTMTime(); err != nil {
if tmTime, err = n.GetTMTime(ctx); err != nil {
return
}
var etag string
@@ -555,7 +568,7 @@ func (n *Node) SetEtag(ctx context.Context, val string) (err error) {
return nil
}
// etag is only valid until the calculated etag changes, is part of propagation
return n.SetXattrString(prefixes.TmpEtagAttr, val)
return n.SetXattrString(ctx, prefixes.TmpEtagAttr, val)
}
// SetFavorite sets the favorite for the current user
@@ -575,15 +588,15 @@ func (n *Node) SetEtag(ctx context.Context, val string) (err error) {
// 5. app? = a:<aid>: for apps?
// obviously this only is secure when the u/s/g/a namespaces are not accessible by users in the filesystem
// public tags can be mapped to extended attributes
func (n *Node) SetFavorite(uid *userpb.UserId, val string) error {
func (n *Node) SetFavorite(ctx context.Context, uid *userpb.UserId, val string) error {
// the favorite flag is specific to the user, so we need to incorporate the userid
fa := fmt.Sprintf("%s:%s:%s@%s", prefixes.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp())
return n.SetXattrString(fa, val)
return n.SetXattrString(ctx, fa, val)
}
// IsDir returns true if the node is a directory
func (n *Node) IsDir() bool {
attr, _ := n.XattrInt32(prefixes.TypeAttr)
func (n *Node) IsDir(ctx context.Context) bool {
attr, _ := n.XattrInt32(ctx, prefixes.TypeAttr)
return attr == int32(provider.ResourceType_RESOURCE_TYPE_CONTAINER)
}
@@ -592,17 +605,17 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi
sublog := appctx.GetLogger(ctx).With().Interface("node", n.ID).Logger()
var fn string
nodeType := n.Type()
nodeType := n.Type(ctx)
var target string
if nodeType == provider.ResourceType_RESOURCE_TYPE_REFERENCE {
target, _ = n.XattrString(prefixes.ReferenceAttr)
target, _ = n.XattrString(ctx, prefixes.ReferenceAttr)
}
id := &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID}
switch {
case n.IsSpaceRoot():
case n.IsSpaceRoot(ctx):
fn = "." // space roots do not have a path as they are referencing themselves
case returnBasename:
fn = n.Name
@@ -629,12 +642,12 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi
Name: n.Name,
}
if n.IsProcessing() {
if n.IsProcessing(ctx) {
ri.Opaque = utils.AppendPlainToOpaque(ri.Opaque, "status", "processing")
}
if nodeType == provider.ResourceType_RESOURCE_TYPE_CONTAINER {
ts, err := n.GetTreeSize()
ts, err := n.GetTreeSize(ctx)
if err == nil {
ri.Size = ts
} else {
@@ -646,12 +659,12 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi
// TODO make etag of files use fileid and checksum
var tmTime time.Time
if tmTime, err = n.GetTMTime(); err != nil {
if tmTime, err = n.GetTMTime(ctx); err != nil {
sublog.Debug().Err(err).Msg("could not get tmtime")
}
// use temporary etag if it is set
if b, err := n.XattrString(prefixes.TmpEtagAttr); err == nil && b != "" {
if b, err := n.XattrString(ctx, prefixes.TmpEtagAttr); err == nil && b != "" {
ri.Etag = fmt.Sprintf(`"%x"`, b)
} else if ri.Etag, err = calculateEtag(n.ID, tmTime); err != nil {
sublog.Debug().Err(err).Msg("could not calculate etag")
@@ -694,7 +707,7 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi
// the favorite flag is specific to the user, so we need to incorporate the userid
if uid := u.GetId(); uid != nil {
fa := fmt.Sprintf("%s:%s:%s@%s", prefixes.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp())
if val, err := n.XattrString(fa); err == nil {
if val, err := n.XattrString(ctx, fa); err == nil {
sublog.Debug().
Str("favorite", fa).
Msg("found favorite flag")
@@ -756,7 +769,7 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi
}
// only read the requested metadata attributes
attrs, err := n.Xattrs()
attrs, err := n.Xattrs(ctx)
if err != nil {
sublog.Error().Err(err).Msg("error getting list of extended attributes")
} else {
@@ -778,7 +791,7 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi
}
// add virusscan information
if scanned, _, date := n.ScanData(); scanned {
if scanned, _, date := n.ScanData(ctx); scanned {
ri.Opaque = utils.AppendPlainToOpaque(ri.Opaque, "scantime", date.Format(time.RFC3339Nano))
}
@@ -790,7 +803,7 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi
}
func (n *Node) readChecksumIntoResourceChecksum(ctx context.Context, algo string, ri *provider.ResourceInfo) {
v, err := n.Xattr(prefixes.ChecksumPrefix + algo)
v, err := n.Xattr(ctx, prefixes.ChecksumPrefix+algo)
switch {
case err == nil:
ri.Checksum = &provider.ResourceChecksum{
@@ -805,7 +818,7 @@ func (n *Node) readChecksumIntoResourceChecksum(ctx context.Context, algo string
}
func (n *Node) readChecksumIntoOpaque(ctx context.Context, algo string, ri *provider.ResourceInfo) {
v, err := n.Xattr(prefixes.ChecksumPrefix + algo)
v, err := n.Xattr(ctx, prefixes.ChecksumPrefix+algo)
switch {
case err == nil:
if ri.Opaque == nil {
@@ -826,7 +839,7 @@ func (n *Node) readChecksumIntoOpaque(ctx context.Context, algo string, ri *prov
// quota is always stored on the root node
func (n *Node) readQuotaIntoOpaque(ctx context.Context, ri *provider.ResourceInfo) {
v, err := n.XattrString(prefixes.QuotaAttr)
v, err := n.XattrString(ctx, prefixes.QuotaAttr)
switch {
case err == nil:
// make sure we have a proper signed int
@@ -855,16 +868,16 @@ func (n *Node) readQuotaIntoOpaque(ctx context.Context, ri *provider.ResourceInf
}
// HasPropagation checks if the propagation attribute exists and is set to "1"
func (n *Node) HasPropagation() (propagation bool) {
if b, err := n.XattrString(prefixes.PropagationAttr); err == nil {
func (n *Node) HasPropagation(ctx context.Context) (propagation bool) {
if b, err := n.XattrString(ctx, prefixes.PropagationAttr); err == nil {
return b == "1"
}
return false
}
// GetTMTime reads the tmtime from the extended attributes, falling back to GetMTime()
func (n *Node) GetTMTime() (time.Time, error) {
b, err := n.XattrString(prefixes.TreeMTimeAttr)
func (n *Node) GetTMTime(ctx context.Context) (time.Time, error) {
b, err := n.XattrString(ctx, prefixes.TreeMTimeAttr)
if err == nil {
return time.Parse(time.RFC3339Nano, b)
}
@@ -883,16 +896,16 @@ func (n *Node) GetMTime() (time.Time, error) {
}
// SetTMTime writes the UTC tmtime to the extended attributes or removes the attribute if nil is passed
func (n *Node) SetTMTime(t *time.Time) (err error) {
func (n *Node) SetTMTime(ctx context.Context, t *time.Time) (err error) {
if t == nil {
return n.RemoveXattr(prefixes.TreeMTimeAttr)
return n.RemoveXattr(ctx, prefixes.TreeMTimeAttr)
}
return n.SetXattrString(prefixes.TreeMTimeAttr, t.UTC().Format(time.RFC3339Nano))
return n.SetXattrString(ctx, prefixes.TreeMTimeAttr, t.UTC().Format(time.RFC3339Nano))
}
// GetDTime reads the dtime from the extended attributes
func (n *Node) GetDTime() (tmTime time.Time, err error) {
b, err := n.XattrString(prefixes.DTimeAttr)
func (n *Node) GetDTime(ctx context.Context) (tmTime time.Time, err error) {
b, err := n.XattrString(ctx, prefixes.DTimeAttr)
if err != nil {
return time.Time{}, err
}
@@ -900,26 +913,28 @@ func (n *Node) GetDTime() (tmTime time.Time, err error) {
}
// SetDTime writes the UTC dtime to the extended attributes or removes the attribute if nil is passed
func (n *Node) SetDTime(t *time.Time) (err error) {
func (n *Node) SetDTime(ctx context.Context, t *time.Time) (err error) {
if t == nil {
return n.RemoveXattr(prefixes.DTimeAttr)
return n.RemoveXattr(ctx, prefixes.DTimeAttr)
}
return n.SetXattrString(prefixes.DTimeAttr, t.UTC().Format(time.RFC3339Nano))
return n.SetXattrString(ctx, prefixes.DTimeAttr, t.UTC().Format(time.RFC3339Nano))
}
// IsDisabled returns true when the node has a dmtime attribute set
// only used to check if a space is disabled
// FIXME confusing with the trash logic
func (n *Node) IsDisabled() bool {
if _, err := n.GetDTime(); err == nil {
func (n *Node) IsDisabled(ctx context.Context) bool {
if _, err := n.GetDTime(ctx); err == nil {
return true
}
return false
}
// GetTreeSize reads the treesize from the extended attributes
func (n *Node) GetTreeSize() (treesize uint64, err error) {
s, err := n.XattrUint64(prefixes.TreesizeAttr)
func (n *Node) GetTreeSize(ctx context.Context) (treesize uint64, err error) {
ctx, span := tracer.Start(ctx, "GetTreeSize")
defer span.End()
s, err := n.XattrUint64(ctx, prefixes.TreesizeAttr)
if err != nil {
return 0, err
}
@@ -927,13 +942,13 @@ func (n *Node) GetTreeSize() (treesize uint64, err error) {
}
// SetTreeSize writes the treesize to the extended attributes
func (n *Node) SetTreeSize(ts uint64) (err error) {
return n.SetXattrString(prefixes.TreesizeAttr, strconv.FormatUint(ts, 10))
func (n *Node) SetTreeSize(ctx context.Context, ts uint64) (err error) {
return n.SetXattrString(ctx, prefixes.TreesizeAttr, strconv.FormatUint(ts, 10))
}
// GetBlobSize reads the blobsize from the extended attributes
func (n *Node) GetBlobSize() (treesize uint64, err error) {
s, err := n.XattrInt64(prefixes.BlobsizeAttr)
func (n *Node) GetBlobSize(ctx context.Context) (treesize uint64, err error) {
s, err := n.XattrInt64(ctx, prefixes.BlobsizeAttr)
if err != nil {
return 0, err
}
@@ -941,13 +956,13 @@ func (n *Node) GetBlobSize() (treesize uint64, err error) {
}
// SetChecksum writes the checksum with the given checksum type to the extended attributes
func (n *Node) SetChecksum(csType string, h hash.Hash) (err error) {
return n.SetXattr(prefixes.ChecksumPrefix+csType, h.Sum(nil))
func (n *Node) SetChecksum(ctx context.Context, csType string, h hash.Hash) (err error) {
return n.SetXattr(ctx, prefixes.ChecksumPrefix+csType, h.Sum(nil))
}
// UnsetTempEtag removes the temporary etag attribute
func (n *Node) UnsetTempEtag() (err error) {
return n.RemoveXattr(prefixes.TmpEtagAttr)
func (n *Node) UnsetTempEtag(ctx context.Context) (err error) {
return n.RemoveXattr(ctx, prefixes.TmpEtagAttr)
}
// ReadUserPermissions will assemble the permissions for the current user on the given node without parent nodes
@@ -1070,7 +1085,7 @@ func (n *Node) IsDenied(ctx context.Context) bool {
// We don't want to wast time and memory by creating grantee objects.
// The function will return a list of opaque strings that can be used to make a ReadGrant call
func (n *Node) ListGrantees(ctx context.Context) (grantees []string, err error) {
attrs, err := n.Xattrs()
attrs, err := n.Xattrs(ctx)
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("node", n.ID).Msg("error listing attributes")
return nil, err
@@ -1085,7 +1100,7 @@ func (n *Node) ListGrantees(ctx context.Context) (grantees []string, err error)
// ReadGrant reads a CS3 grant
func (n *Node) ReadGrant(ctx context.Context, grantee string) (g *provider.Grant, err error) {
xattr, err := n.Xattr(grantee)
xattr, err := n.Xattr(ctx, grantee)
if err != nil {
return nil, err
}
@@ -1157,7 +1172,7 @@ func parseMTime(v string) (t time.Time, err error) {
// FindStorageSpaceRoot calls n.Parent() and climbs the tree
// until it finds the space root node and adds it to the node
func (n *Node) FindStorageSpaceRoot() error {
func (n *Node) FindStorageSpaceRoot(ctx context.Context) error {
if n.SpaceRoot != nil {
return nil
}
@@ -1165,11 +1180,11 @@ func (n *Node) FindStorageSpaceRoot() error {
// remember the node we ask for and use parent to climb the tree
parent := n
for {
if parent.IsSpaceRoot() {
if parent.IsSpaceRoot(ctx) {
n.SpaceRoot = parent
break
}
if parent, err = parent.Parent(); err != nil {
if parent, err = parent.Parent(ctx); err != nil {
return err
}
}
@@ -1177,38 +1192,38 @@ func (n *Node) FindStorageSpaceRoot() error {
}
// UnmarkProcessing removes the processing flag from the node
func (n *Node) UnmarkProcessing(uploadID string) error {
v, _ := n.XattrString(prefixes.StatusPrefix)
func (n *Node) UnmarkProcessing(ctx context.Context, uploadID string) error {
v, _ := n.XattrString(ctx, prefixes.StatusPrefix)
if v != ProcessingStatus+uploadID {
// file started another postprocessing later - do not remove
return nil
}
return n.RemoveXattr(prefixes.StatusPrefix)
return n.RemoveXattr(ctx, prefixes.StatusPrefix)
}
// IsProcessing returns true if the node is currently being processed
func (n *Node) IsProcessing() bool {
v, err := n.XattrString(prefixes.StatusPrefix)
func (n *Node) IsProcessing(ctx context.Context) bool {
v, err := n.XattrString(ctx, prefixes.StatusPrefix)
return err == nil && strings.HasPrefix(v, ProcessingStatus)
}
// IsSpaceRoot checks if the node is a space root
func (n *Node) IsSpaceRoot() bool {
_, err := n.Xattr(prefixes.SpaceNameAttr)
func (n *Node) IsSpaceRoot(ctx context.Context) bool {
_, err := n.Xattr(ctx, prefixes.SpaceNameAttr)
return err == nil
}
// SetScanData sets the virus scan info to the node
func (n *Node) SetScanData(info string, date time.Time) error {
func (n *Node) SetScanData(ctx context.Context, info string, date time.Time) error {
attribs := Attributes{}
attribs.SetString(prefixes.ScanStatusPrefix, info)
attribs.SetString(prefixes.ScanDatePrefix, date.Format(time.RFC3339Nano))
return n.SetXattrs(attribs, true)
return n.SetXattrsWithContext(ctx, attribs, true)
}
// ScanData returns scanning information of the node
func (n *Node) ScanData() (scanned bool, virus string, scantime time.Time) {
ti, _ := n.XattrString(prefixes.ScanDatePrefix)
func (n *Node) ScanData(ctx context.Context) (scanned bool, virus string, scantime time.Time) {
ti, _ := n.XattrString(ctx, prefixes.ScanDatePrefix)
if ti == "" {
return // not scanned yet
}
@@ -1218,7 +1233,7 @@ func (n *Node) ScanData() (scanned bool, virus string, scantime time.Time) {
return
}
i, err := n.XattrString(prefixes.ScanStatusPrefix)
i, err := n.XattrString(ctx, prefixes.ScanStatusPrefix)
if err != nil {
return
}
@@ -1231,12 +1246,12 @@ func (n *Node) ScanData() (scanned bool, virus string, scantime time.Time) {
// when creating a new file version. In such a case the function will
// reduce the used bytes by the old file size and then add the new size.
// If overwrite is false oldSize will be ignored.
var CheckQuota = func(spaceRoot *Node, overwrite bool, oldSize, newSize uint64) (quotaSufficient bool, err error) {
used, _ := spaceRoot.GetTreeSize()
var CheckQuota = func(ctx context.Context, spaceRoot *Node, overwrite bool, oldSize, newSize uint64) (quotaSufficient bool, err error) {
used, _ := spaceRoot.GetTreeSize(ctx)
if !enoughDiskSpace(spaceRoot.InternalPath(), newSize) {
return false, errtypes.InsufficientStorage("disk full")
}
quotaByteStr, _ := spaceRoot.XattrString(prefixes.QuotaAttr)
quotaByteStr, _ := spaceRoot.XattrString(ctx, prefixes.QuotaAttr)
switch quotaByteStr {
case "":
// if quota is not set, it means unlimited

View File

@@ -149,7 +149,7 @@ func (p *Permissions) assemblePermissions(ctx context.Context, n *Node, failOnTr
// continue with next segment
}
if cn, err = cn.Parent(); err != nil {
if cn, err = cn.Parent(ctx); err != nil {
// We get an error but get a parent, but can not read it from disk (eg. it has been deleted already)
if cn != nil {
return ap, errors.Wrap(err, "Decomposedfs: error getting parent for node "+cn.ID)

View File

@@ -19,6 +19,7 @@
package node
import (
"context"
"io"
"strconv"
@@ -49,45 +50,50 @@ func (md Attributes) SetInt64(key string, val int64) {
}
// SetXattrs sets multiple extended attributes on the write-through cache/node
func (n *Node) SetXattrs(attribs map[string][]byte, acquireLock bool) (err error) {
func (n *Node) SetXattrsWithContext(ctx context.Context, attribs map[string][]byte, acquireLock bool) (err error) {
if n.xattrsCache != nil {
for k, v := range attribs {
n.xattrsCache[k] = v
}
}
return n.lu.MetadataBackend().SetMultiple(n.InternalPath(), attribs, acquireLock)
return n.lu.MetadataBackend().SetMultiple(ctx, n.InternalPath(), attribs, acquireLock)
}
// SetXattrs sets multiple extended attributes on the write-through cache/node
func (n *Node) SetXattrs(attribs map[string][]byte, acquireLock bool) (err error) {
return n.SetXattrsWithContext(context.Background(), attribs, acquireLock)
}
// SetXattr sets an extended attribute on the write-through cache/node
func (n *Node) SetXattr(key string, val []byte) (err error) {
func (n *Node) SetXattr(ctx context.Context, key string, val []byte) (err error) {
if n.xattrsCache != nil {
n.xattrsCache[key] = val
}
return n.lu.MetadataBackend().Set(n.InternalPath(), key, val)
return n.lu.MetadataBackend().Set(ctx, n.InternalPath(), key, val)
}
// SetXattrString sets a string extended attribute on the write-through cache/node
func (n *Node) SetXattrString(key, val string) (err error) {
func (n *Node) SetXattrString(ctx context.Context, key, val string) (err error) {
if n.xattrsCache != nil {
n.xattrsCache[key] = []byte(val)
}
return n.lu.MetadataBackend().Set(n.InternalPath(), key, []byte(val))
return n.lu.MetadataBackend().Set(ctx, n.InternalPath(), key, []byte(val))
}
// RemoveXattr removes an extended attribute from the write-through cache/node
func (n *Node) RemoveXattr(key string) error {
func (n *Node) RemoveXattr(ctx context.Context, key string) error {
if n.xattrsCache != nil {
delete(n.xattrsCache, key)
}
return n.lu.MetadataBackend().Remove(n.InternalPath(), key)
return n.lu.MetadataBackend().Remove(ctx, n.InternalPath(), key)
}
// XattrsWithReader returns the extended attributes of the node. If the attributes have already
// been cached they are not read from disk again.
func (n *Node) XattrsWithReader(r io.Reader) (Attributes, error) {
func (n *Node) XattrsWithReader(ctx context.Context, r io.Reader) (Attributes, error) {
if n.ID == "" {
// Do not try to read the attribute of an empty node. The InternalPath points to the
// base nodes directory in this case.
@@ -101,9 +107,9 @@ func (n *Node) XattrsWithReader(r io.Reader) (Attributes, error) {
var attrs Attributes
var err error
if r != nil {
attrs, err = n.lu.MetadataBackend().AllWithLockedSource(n.InternalPath(), r)
attrs, err = n.lu.MetadataBackend().AllWithLockedSource(ctx, n.InternalPath(), r)
} else {
attrs, err = n.lu.MetadataBackend().All(n.InternalPath())
attrs, err = n.lu.MetadataBackend().All(ctx, n.InternalPath())
}
if err != nil {
return nil, err
@@ -115,13 +121,13 @@ func (n *Node) XattrsWithReader(r io.Reader) (Attributes, error) {
// Xattrs returns the extended attributes of the node. If the attributes have already
// been cached they are not read from disk again.
func (n *Node) Xattrs() (Attributes, error) {
return n.XattrsWithReader(nil)
func (n *Node) Xattrs(ctx context.Context) (Attributes, error) {
return n.XattrsWithReader(ctx, nil)
}
// Xattr returns an extended attribute of the node. If the attributes have already
// been cached it is not read from disk again.
func (n *Node) Xattr(key string) ([]byte, error) {
func (n *Node) Xattr(ctx context.Context, key string) ([]byte, error) {
if n.ID == "" {
// Do not try to read the attribute of an empty node. The InternalPath points to the
// base nodes directory in this case.
@@ -129,7 +135,7 @@ func (n *Node) Xattr(key string) ([]byte, error) {
}
if n.xattrsCache == nil {
attrs, err := n.lu.MetadataBackend().All(n.InternalPath())
attrs, err := n.lu.MetadataBackend().All(ctx, n.InternalPath())
if err != nil {
return []byte{}, err
}
@@ -144,8 +150,8 @@ func (n *Node) Xattr(key string) ([]byte, error) {
}
// XattrString returns the string representation of an attribute
func (n *Node) XattrString(key string) (string, error) {
b, err := n.Xattr(key)
func (n *Node) XattrString(ctx context.Context, key string) (string, error) {
b, err := n.Xattr(ctx, key)
if err != nil {
return "", err
}
@@ -153,8 +159,8 @@ func (n *Node) XattrString(key string) (string, error) {
}
// XattrInt32 returns the int32 representation of an attribute
func (n *Node) XattrInt32(key string) (int32, error) {
b, err := n.XattrString(key)
func (n *Node) XattrInt32(ctx context.Context, key string) (int32, error) {
b, err := n.XattrString(ctx, key)
if err != nil {
return 0, err
}
@@ -167,8 +173,8 @@ func (n *Node) XattrInt32(key string) (int32, error) {
}
// XattrInt64 returns the int64 representation of an attribute
func (n *Node) XattrInt64(key string) (int64, error) {
b, err := n.XattrString(key)
func (n *Node) XattrInt64(ctx context.Context, key string) (int64, error) {
b, err := n.XattrString(ctx, key)
if err != nil {
return 0, err
}
@@ -176,8 +182,8 @@ func (n *Node) XattrInt64(key string) (int64, error) {
}
// XattrUint64 returns the uint64 representation of an attribute
func (n *Node) XattrUint64(key string) (uint64, error) {
b, err := n.XattrString(key)
func (n *Node) XattrUint64(ctx context.Context, key string) (uint64, error) {
b, err := n.XattrString(ctx, key)
if err != nil {
return 0, err
}

View File

@@ -88,7 +88,7 @@ func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference
}
origin := ""
attrs, err := fs.lu.MetadataBackend().All(originalPath)
attrs, err := fs.lu.MetadataBackend().All(ctx, originalPath)
if err != nil {
return items, err
}
@@ -111,7 +111,7 @@ func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference
sublog.Error().Err(err).Msg("could not parse time format, ignoring")
}
nodeType := fs.lu.TypeFromPath(originalPath)
nodeType := fs.lu.TypeFromPath(ctx, originalPath)
if nodeType != provider.ResourceType_RESOURCE_TYPE_CONTAINER {
// this is the case when we want to directly list a file in the trashbin
blobsize, err := strconv.ParseInt(string(attrs[prefixes.BlobsizeAttr]), 10, 64)
@@ -154,16 +154,16 @@ func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference
size := int64(0)
nodeType = fs.lu.TypeFromPath(resolvedChildPath)
nodeType = fs.lu.TypeFromPath(ctx, resolvedChildPath)
switch nodeType {
case provider.ResourceType_RESOURCE_TYPE_FILE:
size, err = fs.lu.ReadBlobSizeAttr(resolvedChildPath)
size, err = fs.lu.ReadBlobSizeAttr(ctx, resolvedChildPath)
if err != nil {
sublog.Error().Err(err).Str("name", name).Msg("invalid blob size, skipping")
continue
}
case provider.ResourceType_RESOURCE_TYPE_CONTAINER:
attr, err := fs.lu.MetadataBackend().Get(resolvedChildPath, prefixes.TreesizeAttr)
attr, err := fs.lu.MetadataBackend().Get(ctx, resolvedChildPath, prefixes.TreesizeAttr)
if err != nil {
sublog.Error().Err(err).Str("name", name).Msg("invalid tree size, skipping")
continue
@@ -235,13 +235,13 @@ func (fs *Decomposedfs) listTrashRoot(ctx context.Context, spaceID string) ([]*p
continue
}
attrs, err := fs.lu.MetadataBackend().All(nodePath)
attrs, err := fs.lu.MetadataBackend().All(ctx, nodePath)
if err != nil {
log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("node_path", nodePath).Msg("could not get extended attributes, skipping")
continue
}
nodeType := fs.lu.TypeFromPath(nodePath)
nodeType := fs.lu.TypeFromPath(ctx, nodePath)
if nodeType == provider.ResourceType_RESOURCE_TYPE_INVALID {
log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("node_path", nodePath).Msg("invalid node type, skipping")
continue

View File

@@ -85,7 +85,7 @@ func (fs *Decomposedfs) ListRevisions(ctx context.Context, ref *provider.Referen
Key: n.ID + node.RevisionIDDelimiter + parts[1],
Mtime: uint64(mtime.Unix()),
}
blobSize, err := fs.lu.ReadBlobSizeAttr(items[i])
blobSize, err := fs.lu.ReadBlobSizeAttr(ctx, items[i])
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("name", fi.Name()).Msg("error reading blobsize xattr, using 0")
}
@@ -147,11 +147,11 @@ func (fs *Decomposedfs) DownloadRevision(ctx context.Context, ref *provider.Refe
contentPath := fs.lu.InternalPath(spaceID, revisionKey)
blobid, err := fs.lu.ReadBlobIDAttr(contentPath)
blobid, err := fs.lu.ReadBlobIDAttr(ctx, contentPath)
if err != nil {
return nil, errors.Wrapf(err, "Decomposedfs: could not read blob id of revision '%s' for node '%s'", n.ID, revisionKey)
}
blobsize, err := fs.lu.ReadBlobSizeAttr(contentPath)
blobsize, err := fs.lu.ReadBlobSizeAttr(ctx, contentPath)
if err != nil {
return nil, errors.Wrapf(err, "Decomposedfs: could not read blob size of revision '%s' for node '%s'", n.ID, revisionKey)
}
@@ -230,7 +230,7 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer
}()
// copy blob metadata from node to new revision node
err = fs.lu.CopyMetadata(nodePath, newRevisionPath, func(attributeName string) bool {
err = fs.lu.CopyMetadata(ctx, nodePath, newRevisionPath, func(attributeName string) bool {
return strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || // for checksums
attributeName == prefixes.TypeAttr ||
attributeName == prefixes.BlobIDAttr ||
@@ -249,7 +249,7 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer
// copy blob metadata from restored revision to node
restoredRevisionPath := fs.lu.InternalPath(spaceID, revisionKey)
err = fs.lu.CopyMetadata(restoredRevisionPath, nodePath, func(attributeName string) bool {
err = fs.lu.CopyMetadata(ctx, restoredRevisionPath, nodePath, func(attributeName string) bool {
return strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) ||
attributeName == prefixes.TypeAttr ||
attributeName == prefixes.BlobIDAttr ||
@@ -259,7 +259,7 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer
return errtypes.InternalError("failed to copy blob xattrs to old revision to node")
}
revisionSize, err := fs.lu.MetadataBackend().GetInt64(restoredRevisionPath, prefixes.BlobsizeAttr)
revisionSize, err := fs.lu.MetadataBackend().GetInt64(ctx, restoredRevisionPath, prefixes.BlobsizeAttr)
if err != nil {
return errtypes.InternalError("failed to read blob size xattr from old revision")
}

View File

@@ -38,6 +38,8 @@ func NewPermissions(item PermissionsChecker, permissionsSelector pool.Selectable
// AssemblePermissions is used to assemble file permissions
func (p Permissions) AssemblePermissions(ctx context.Context, n *node.Node) (provider.ResourcePermissions, error) {
ctx, span := tracer.Start(ctx, "AssemblePermissions")
defer span.End()
return p.item.AssemblePermissions(ctx, n)
}

View File

@@ -146,8 +146,7 @@ func (fs *Decomposedfs) CreateStorageSpace(ctx context.Context, req *provider.Cr
metadata.SetString(prefixes.SpaceAliasAttr, alias)
}
// Write node
if err := root.SetXattrs(metadata, true); err != nil {
if err := root.SetXattrsWithContext(ctx, metadata, true); err != nil {
return nil, err
}
@@ -693,7 +692,7 @@ func (fs *Decomposedfs) UpdateStorageSpace(ctx context.Context, req *provider.Up
}
if mapHasKey(metadata, prefixes.QuotaAttr) {
typ, err := spaceNode.SpaceRoot.Xattr(prefixes.SpaceTypeAttr)
typ, err := spaceNode.SpaceRoot.Xattr(ctx, prefixes.SpaceTypeAttr)
if err != nil {
return &provider.UpdateStorageSpaceResponse{
Status: &v1beta11.Status{
@@ -711,13 +710,13 @@ func (fs *Decomposedfs) UpdateStorageSpace(ctx context.Context, req *provider.Up
}
metadata[prefixes.TreeMTimeAttr] = []byte(time.Now().UTC().Format(time.RFC3339Nano))
err = spaceNode.SetXattrs(metadata, true)
err = spaceNode.SetXattrsWithContext(ctx, metadata, true)
if err != nil {
return nil, err
}
if restore {
if err := spaceNode.SetDTime(nil); err != nil {
if err := spaceNode.SetDTime(ctx, nil); err != nil {
return nil, err
}
}
@@ -752,7 +751,7 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De
return err
}
st, err := n.SpaceRoot.XattrString(prefixes.SpaceTypeAttr)
st, err := n.SpaceRoot.XattrString(ctx, prefixes.SpaceTypeAttr)
if err != nil {
return errtypes.InternalError(fmt.Sprintf("space %s does not have a spacetype, possible corrupt decompsedfs", n.ID))
}
@@ -761,11 +760,11 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De
return err
}
if purge {
if !n.IsDisabled() {
if !n.IsDisabled(ctx) {
return errtypes.NewErrtypeFromStatus(status.NewInvalid(ctx, "can't purge enabled space"))
}
spaceType, err := n.XattrString(prefixes.SpaceTypeAttr)
spaceType, err := n.XattrString(ctx, prefixes.SpaceTypeAttr)
if err != nil {
return err
}
@@ -792,7 +791,7 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De
// mark as disabled by writing a dtime attribute
dtime := time.Now()
return n.SetDTime(&dtime)
return n.SetDTime(ctx, &dtime)
}
func (fs *Decomposedfs) updateIndexes(ctx context.Context, grantee *provider.Grantee, spaceType, spaceID string) error {
@@ -905,7 +904,7 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node,
return nil, errtypes.NotFound(fmt.Sprintf("space %s not found", n.ID))
}
if n.SpaceRoot.IsDisabled() {
if n.SpaceRoot.IsDisabled(ctx) {
rp, err := fs.p.AssemblePermissions(ctx, n)
if err != nil || !IsManager(rp) {
return nil, errtypes.PermissionDenied(fmt.Sprintf("user %s is not allowed to list deleted spaces %s", user.Username, n.ID))
@@ -916,7 +915,7 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node,
var err error
// TODO apply more filters
var sname string
if sname, err = n.SpaceRoot.XattrString(prefixes.SpaceNameAttr); err != nil {
if sname, err = n.SpaceRoot.XattrString(ctx, prefixes.SpaceNameAttr); err != nil {
// FIXME: Is that a severe problem?
appctx.GetLogger(ctx).Debug().Err(err).Msg("space does not have a name attribute")
}
@@ -1021,12 +1020,12 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node,
// Mtime is set either as node.tmtime or as fi.mtime below
}
space.SpaceType, err = n.SpaceRoot.XattrString(prefixes.SpaceTypeAttr)
space.SpaceType, err = n.SpaceRoot.XattrString(ctx, prefixes.SpaceTypeAttr)
if err != nil {
appctx.GetLogger(ctx).Debug().Err(err).Msg("space does not have a type attribute")
}
if n.SpaceRoot.IsDisabled() {
if n.SpaceRoot.IsDisabled(ctx) {
space.Opaque = utils.AppendPlainToOpaque(space.Opaque, "trashed", "trashed")
}
@@ -1039,7 +1038,7 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node,
// we set the space mtime to the root item mtime
// override the stat mtime with a tmtime if it is present
var tmtime time.Time
if tmt, err := n.GetTMTime(); err == nil {
if tmt, err := n.GetTMTime(ctx); err == nil {
tmtime = tmt
un := tmt.UnixNano()
space.Mtime = &types.Timestamp{
@@ -1065,7 +1064,7 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node,
Value: []byte(etag),
}
spaceAttributes, err := n.SpaceRoot.Xattrs()
spaceAttributes, err := n.SpaceRoot.Xattrs(ctx)
if err != nil {
return nil, err
}

View File

@@ -47,9 +47,17 @@ import (
"github.com/rogpeppe/go-internal/lockedfile"
"github.com/rs/zerolog/log"
"go-micro.dev/v4/store"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
"golang.org/x/sync/errgroup"
)
var tracer trace.Tracer
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree")
}
//go:generate make --no-print-directory -C ../../../../.. mockery NAME=Blobstore
// Blobstore defines an interface for storing blobs in a blobstore
@@ -68,9 +76,9 @@ type PathLookup interface {
InternalPath(spaceID, nodeID string) string
Path(ctx context.Context, n *node.Node, hasPermission node.PermissionFunc) (path string, err error)
MetadataBackend() metadata.Backend
ReadBlobSizeAttr(path string) (int64, error)
ReadBlobIDAttr(path string) (string, error)
TypeFromPath(path string) provider.ResourceType
ReadBlobSizeAttr(ctx context.Context, path string) (int64, error)
ReadBlobIDAttr(ctx context.Context, path string) (string, error)
TypeFromPath(ctx context.Context, path string) provider.ResourceType
}
// Tree manages a hierarchical tree
@@ -131,7 +139,7 @@ func (t *Tree) GetMD(ctx context.Context, n *node.Node) (os.FileInfo, error) {
func (t *Tree) TouchFile(ctx context.Context, n *node.Node, markprocessing bool, mtime string) error {
if n.Exists {
if markprocessing {
return n.SetXattr(prefixes.StatusPrefix, []byte(node.ProcessingStatus))
return n.SetXattr(ctx, prefixes.StatusPrefix, []byte(node.ProcessingStatus))
}
return errtypes.AlreadyExists(n.ID)
@@ -151,7 +159,7 @@ func (t *Tree) TouchFile(ctx context.Context, n *node.Node, markprocessing bool,
return errors.Wrap(err, "Decomposedfs: error creating node")
}
attributes := n.NodeMetadata()
attributes := n.NodeMetadata(ctx)
if markprocessing {
attributes[prefixes.StatusPrefix] = []byte(node.ProcessingStatus)
}
@@ -160,7 +168,7 @@ func (t *Tree) TouchFile(ctx context.Context, n *node.Node, markprocessing bool,
return errors.Wrap(err, "Decomposedfs: could not set mtime")
}
}
err = n.SetXattrs(attributes, true)
err = n.SetXattrsWithContext(ctx, attributes, true)
if err != nil {
return err
}
@@ -186,6 +194,8 @@ func (t *Tree) TouchFile(ctx context.Context, n *node.Node, markprocessing bool,
// CreateDir creates a new directory entry in the tree
func (t *Tree) CreateDir(ctx context.Context, n *node.Node) (err error) {
ctx, span := tracer.Start(ctx, "CreateDir")
defer span.End()
if n.Exists {
return errtypes.AlreadyExists(n.ID) // path?
}
@@ -203,7 +213,9 @@ func (t *Tree) CreateDir(ctx context.Context, n *node.Node) (err error) {
// make child appear in listings
relativeNodePath := filepath.Join("../../../../../", lookup.Pathify(n.ID, 4, 2))
ctx, subspan := tracer.Start(ctx, "os.Symlink")
err = os.Symlink(relativeNodePath, filepath.Join(n.ParentPath(), n.Name))
subspan.End()
if err != nil {
// no better way to check unfortunately
if !strings.Contains(err.Error(), "file exists") {
@@ -211,7 +223,9 @@ func (t *Tree) CreateDir(ctx context.Context, n *node.Node) (err error) {
}
// try to remove the node
ctx, subspan = tracer.Start(ctx, "os.RemoveAll")
e := os.RemoveAll(n.InternalPath())
subspan.End()
if e != nil {
appctx.GetLogger(ctx).Debug().Err(e).Msg("cannot delete node")
}
@@ -261,7 +275,7 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node)
}
// update name attribute
if err := oldNode.SetXattrString(prefixes.NameAttr, newNode.Name); err != nil {
if err := oldNode.SetXattrString(ctx, prefixes.NameAttr, newNode.Name); err != nil {
return errors.Wrap(err, "Decomposedfs: could not set name attribute")
}
@@ -284,14 +298,14 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node)
attribs := node.Attributes{}
attribs.SetString(prefixes.ParentidAttr, newNode.ParentID)
attribs.SetString(prefixes.NameAttr, newNode.Name)
if err := oldNode.SetXattrs(attribs, true); err != nil {
if err := oldNode.SetXattrsWithContext(ctx, attribs, true); err != nil {
return errors.Wrap(err, "Decomposedfs: could not update old node attributes")
}
// the size diff is the current treesize or blobsize of the old/source node
var sizeDiff int64
if oldNode.IsDir() {
treeSize, err := oldNode.GetTreeSize()
if oldNode.IsDir(ctx) {
treeSize, err := oldNode.GetTreeSize(ctx)
if err != nil {
return err
}
@@ -315,7 +329,9 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node)
return nil
}
func readChildNodeFromLink(path string) (string, error) {
func readChildNodeFromLink(ctx context.Context, path string) (string, error) {
_, span := tracer.Start(ctx, "readChildNodeFromLink")
defer span.End()
link, err := os.Readlink(path)
if err != nil {
return "", err
@@ -327,8 +343,13 @@ func readChildNodeFromLink(path string) (string, error) {
// ListFolder lists the content of a folder node
func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, error) {
ctx, span := tracer.Start(ctx, "ListFolder")
defer span.End()
dir := n.InternalPath()
_, subspan := tracer.Start(ctx, "os.Open")
f, err := os.Open(dir)
subspan.End()
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
return nil, errtypes.NotFound(dir)
@@ -337,7 +358,9 @@ func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, erro
}
defer f.Close()
_, subspan = tracer.Start(ctx, "f.Readdirnames")
names, err := f.Readdirnames(0)
subspan.End()
if err != nil {
return nil, err
}
@@ -369,13 +392,13 @@ func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, erro
g.Go(func() error {
for name := range work {
path := filepath.Join(dir, name)
nodeID := getNodeIDFromCache(path, t.idCache)
nodeID := getNodeIDFromCache(ctx, path, t.idCache)
if nodeID == "" {
nodeID, err = readChildNodeFromLink(path)
nodeID, err = readChildNodeFromLink(ctx, path)
if err != nil {
return err
}
err = storeNodeIDInCache(path, nodeID, t.idCache)
err = storeNodeIDInCache(ctx, path, nodeID, t.idCache)
if err != nil {
return err
}
@@ -440,13 +463,13 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) {
// set origin location in metadata
nodePath := n.InternalPath()
if err := n.SetXattrString(prefixes.TrashOriginAttr, origin); err != nil {
if err := n.SetXattrString(ctx, prefixes.TrashOriginAttr, origin); err != nil {
return err
}
var sizeDiff int64
if n.IsDir() {
treesize, err := n.GetTreeSize()
if n.IsDir(ctx) {
treesize, err := n.GetTreeSize(ctx)
if err != nil {
return err // TODO calculate treesize if it is not set
}
@@ -461,7 +484,7 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) {
trashLink := filepath.Join(t.options.Root, "spaces", lookup.Pathify(n.SpaceRoot.ID, 1, 2), "trash", lookup.Pathify(n.ID, 4, 2))
if err := os.MkdirAll(filepath.Dir(trashLink), 0700); err != nil {
// Roll back changes
_ = n.RemoveXattr(prefixes.TrashOriginAttr)
_ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr)
return err
}
@@ -474,7 +497,7 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) {
err = os.Symlink("../../../../../nodes/"+lookup.Pathify(n.ID, 4, 2)+node.TrashIDDelimiter+deletionTime, trashLink)
if err != nil {
// Roll back changes
_ = n.RemoveXattr(prefixes.TrashOriginAttr)
_ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr)
return
}
@@ -487,12 +510,12 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) {
// To roll back changes
// TODO remove symlink
// Roll back changes
_ = n.RemoveXattr(prefixes.TrashOriginAttr)
_ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr)
return
}
err = t.lookup.MetadataBackend().Rename(nodePath, trashPath)
if err != nil {
_ = n.RemoveXattr(prefixes.TrashOriginAttr)
_ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr)
_ = os.Rename(trashPath, nodePath)
return
}
@@ -506,7 +529,7 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) {
// TODO revert the rename
// TODO remove symlink
// Roll back changes
_ = n.RemoveXattr(prefixes.TrashOriginAttr)
_ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr)
return
}
@@ -536,7 +559,7 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa
return nil, nil, nil, err
}
parent, err := targetNode.Parent()
parent, err := targetNode.Parent(ctx)
if err != nil {
return nil, nil, nil, err
}
@@ -576,7 +599,7 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa
attrs.SetString(prefixes.ParentidAttr, targetNode.ParentID)
}
if err = recycleNode.SetXattrs(attrs, true); err != nil {
if err = recycleNode.SetXattrsWithContext(ctx, attrs, true); err != nil {
return errors.Wrap(err, "Decomposedfs: could not update recycle node")
}
@@ -594,8 +617,8 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa
}
var sizeDiff int64
if recycleNode.IsDir() {
treeSize, err := recycleNode.GetTreeSize()
if recycleNode.IsDir(ctx) {
treeSize, err := recycleNode.GetTreeSize(ctx)
if err != nil {
return err
}
@@ -616,7 +639,7 @@ func (t *Tree) PurgeRecycleItemFunc(ctx context.Context, spaceid, key string, pa
}
fn := func() error {
if err := t.removeNode(deletedNodePath, rn); err != nil {
if err := t.removeNode(ctx, deletedNodePath, rn); err != nil {
return err
}
@@ -640,7 +663,7 @@ func (t *Tree) PurgeRecycleItemFunc(ctx context.Context, spaceid, key string, pa
return rn, fn, nil
}
func (t *Tree) removeNode(path string, n *node.Node) error {
func (t *Tree) removeNode(ctx context.Context, path string, n *node.Node) error {
// delete the actual node
if err := utils.RemoveItem(path); err != nil {
log.Error().Err(err).Str("path", path).Msg("error purging node")
@@ -671,7 +694,7 @@ func (t *Tree) removeNode(path string, n *node.Node) error {
continue
}
bID, err := t.lookup.ReadBlobIDAttr(rev)
bID, err := t.lookup.ReadBlobIDAttr(ctx, rev)
if err != nil {
log.Error().Err(err).Str("revision", rev).Msg("error reading blobid attribute")
return err
@@ -696,12 +719,15 @@ func (t *Tree) removeNode(path string, n *node.Node) error {
// Propagate propagates changes to the root of the tree
func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err error) {
ctx, span := tracer.Start(ctx, "Propagate")
defer span.End()
sublog := appctx.GetLogger(ctx).With().
Str("method", "tree.Propagate").
Str("spaceid", n.SpaceID).
Str("nodeid", n.ID).
Int64("sizeDiff", sizeDiff).
Logger()
if !t.options.TreeTimeAccounting && (!t.options.TreeSizeAccounting || sizeDiff == 0) {
// no propagation enabled
sublog.Debug().Msg("propagation disabled or nothing to propagate")
@@ -723,6 +749,7 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err
var f *lockedfile.File
// lock parent before reading treesize or tree time
_, subspan := tracer.Start(ctx, "lockedfile.OpenFile")
var parentFilename string
switch t.lookup.MetadataBackend().(type) {
case metadata.MessagePackBackend:
@@ -734,6 +761,7 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err
parentFilename = n.ParentPath() + filelocks.LockFileSuffix
f, err = lockedfile.OpenFile(parentFilename, os.O_RDWR|os.O_CREATE, 0600)
}
subspan.End()
if err != nil {
sublog.Error().Err(err).
Str("parent filename", parentFilename).
@@ -749,14 +777,14 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err
}
}()
if n, err = n.ParentWithReader(f); err != nil {
if n, err = n.ParentWithReader(ctx, f); err != nil {
sublog.Error().Err(err).
Msg("Propagation failed. Could not read parent node.")
return err
}
// TODO none, sync and async?
if !n.HasPropagation() {
if !n.HasPropagation(ctx) {
sublog.Debug().Str("attr", prefixes.PropagationAttr).Msg("propagation attribute not set or unreadable, not propagating")
// if the attribute is not set treat it as false / none / no propagation
return nil
@@ -769,7 +797,7 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err
updateSyncTime := false
var tmTime time.Time
tmTime, err = n.GetTMTime()
tmTime, err = n.GetTMTime(ctx)
switch {
case err != nil:
// missing attribute, or invalid format, overwrite
@@ -803,7 +831,7 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err
var newSize uint64
// read treesize
treeSize, err := n.GetTreeSize()
treeSize, err := n.GetTreeSize(ctx)
switch {
case metadata.IsAttrUnset(err):
// fallback to calculating the treesize
@@ -834,13 +862,15 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err
sublog.Debug().Uint64("newSize", newSize).Msg("updated treesize of parent node")
}
if err = n.SetXattrs(attrs, false); err != nil {
if err = n.SetXattrsWithContext(ctx, attrs, false); err != nil {
sublog.Error().Err(err).Msg("Failed to update extend attributes of parent node")
return err
}
// Release node lock early, ignore already closed error
_, subspan = tracer.Start(ctx, "f.Close")
cerr := f.Close()
subspan.End()
if cerr != nil && !errors.Is(cerr, os.ErrClosed) {
sublog.Error().Err(cerr).Msg("Failed to close parent node and release lock")
return cerr
@@ -854,6 +884,8 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err
}
func (t *Tree) calculateTreeSize(ctx context.Context, childrenPath string) (uint64, error) {
ctx, span := tracer.Start(ctx, "calculateTreeSize")
defer span.End()
var size uint64
f, err := os.Open(childrenPath)
@@ -877,7 +909,7 @@ func (t *Tree) calculateTreeSize(ctx context.Context, childrenPath string) (uint
}
// raw read of the attributes for performance reasons
attribs, err := t.lookup.MetadataBackend().All(resolvedPath)
attribs, err := t.lookup.MetadataBackend().All(ctx, resolvedPath)
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("childpath", cPath).Msg("could not read attributes of child entry")
continue // continue after an error
@@ -925,18 +957,20 @@ func (t *Tree) DeleteBlob(node *node.Node) error {
// TODO check if node exists?
func (t *Tree) createDirNode(ctx context.Context, n *node.Node) (err error) {
ctx, span := tracer.Start(ctx, "createDirNode")
defer span.End()
// create a directory node
nodePath := n.InternalPath()
if err := os.MkdirAll(nodePath, 0700); err != nil {
return errors.Wrap(err, "Decomposedfs: error creating node")
}
attributes := n.NodeMetadata()
attributes := n.NodeMetadata(ctx)
attributes[prefixes.TreesizeAttr] = []byte("0") // initialize as empty, TODO why bother? if it is not set we could treat it as 0?
if t.options.TreeTimeAccounting || t.options.TreeSizeAccounting {
attributes[prefixes.PropagationAttr] = []byte("1") // mark the node for propagation
}
return n.SetXattrs(attributes, true)
return n.SetXattrsWithContext(ctx, attributes, true)
}
var nodeIDRegep = regexp.MustCompile(`.*/nodes/([^.]*).*`)
@@ -967,32 +1001,32 @@ func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) (
if err != nil {
return
}
recycleNode.SetType(t.lookup.TypeFromPath(deletedNodePath))
recycleNode.SetType(t.lookup.TypeFromPath(ctx, deletedNodePath))
var attrBytes []byte
if recycleNode.Type() == provider.ResourceType_RESOURCE_TYPE_FILE {
if recycleNode.Type(ctx) == provider.ResourceType_RESOURCE_TYPE_FILE {
// lookup blobID in extended attributes
if attrBytes, err = backend.Get(deletedNodePath, prefixes.BlobIDAttr); err == nil {
if attrBytes, err = backend.Get(ctx, deletedNodePath, prefixes.BlobIDAttr); err == nil {
recycleNode.BlobID = string(attrBytes)
} else {
return
}
// lookup blobSize in extended attributes
if recycleNode.Blobsize, err = backend.GetInt64(deletedNodePath, prefixes.BlobsizeAttr); err != nil {
if recycleNode.Blobsize, err = backend.GetInt64(ctx, deletedNodePath, prefixes.BlobsizeAttr); err != nil {
return
}
}
// lookup parent id in extended attributes
if attrBytes, err = backend.Get(deletedNodePath, prefixes.ParentidAttr); err == nil {
if attrBytes, err = backend.Get(ctx, deletedNodePath, prefixes.ParentidAttr); err == nil {
recycleNode.ParentID = string(attrBytes)
} else {
return
}
// lookup name in extended attributes
if attrBytes, err = backend.Get(deletedNodePath, prefixes.NameAttr); err == nil {
if attrBytes, err = backend.Get(ctx, deletedNodePath, prefixes.NameAttr); err == nil {
recycleNode.Name = string(attrBytes)
} else {
return
@@ -1002,7 +1036,7 @@ func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) (
origin = "/"
// lookup origin path in extended attributes
if attrBytes, err = backend.Get(resolvedTrashItem, prefixes.TrashOriginAttr); err == nil {
if attrBytes, err = backend.Get(ctx, resolvedTrashItem, prefixes.TrashOriginAttr); err == nil {
origin = filepath.Join(string(attrBytes), path)
} else {
log.Error().Err(err).Str("trashItem", trashItem).Str("deletedNodePath", deletedNodePath).Msg("could not read origin path, restoring to /")
@@ -1011,7 +1045,9 @@ func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) (
return
}
func getNodeIDFromCache(path string, cache store.Store) string {
func getNodeIDFromCache(ctx context.Context, path string, cache store.Store) string {
_, span := tracer.Start(ctx, "getNodeIDFromCache")
defer span.End()
recs, err := cache.Read(path)
if err == nil && len(recs) > 0 {
return string(recs[0].Value)
@@ -1019,7 +1055,9 @@ func getNodeIDFromCache(path string, cache store.Store) string {
return ""
}
func storeNodeIDInCache(path string, nodeID string, cache store.Store) error {
func storeNodeIDInCache(ctx context.Context, path string, nodeID string, cache store.Store) error {
_, span := tracer.Start(ctx, "storeNodeIDInCache")
defer span.End()
return cache.Write(&store.Record{
Key: path,
Value: []byte(nodeID),

View File

@@ -196,7 +196,7 @@ func (fs *Decomposedfs) InitiateUpload(ctx context.Context, ref *provider.Refere
log.Debug().Interface("info", info).Interface("node", n).Interface("metadata", metadata).Msg("Decomposedfs: resolved filename")
_, err = node.CheckQuota(n.SpaceRoot, n.Exists, uint64(n.Blobsize), uint64(info.Size))
_, err = node.CheckQuota(ctx, n.SpaceRoot, n.Exists, uint64(n.Blobsize), uint64(info.Size))
if err != nil {
return nil, err
}

View File

@@ -83,7 +83,7 @@ func New(ctx context.Context, info tusd.FileInfo, lu *lookup.Lookup, tp Tree, p
log.Debug().Interface("info", info).Interface("node", n).Msg("Decomposedfs: resolved filename")
// the parent owner will become the new owner
parent, perr := n.Parent()
parent, perr := n.Parent(ctx)
if perr != nil {
return nil, errors.Wrap(perr, "Decomposedfs: error getting parent "+n.ParentID)
}
@@ -117,7 +117,7 @@ func New(ctx context.Context, info tusd.FileInfo, lu *lookup.Lookup, tp Tree, p
}
// are we trying to overwriting a folder with a file?
if n.Exists && n.IsDir() {
if n.Exists && n.IsDir(ctx) {
return nil, errtypes.PreconditionFailed("resource is not a file")
}
@@ -294,7 +294,7 @@ func CreateNodeForUpload(upload *Upload, initAttrs node.Attributes) (*node.Node,
initAttrs.SetString(prefixes.StatusPrefix, node.ProcessingStatus+upload.Info.ID)
// update node metadata with new blobid etc
err = n.SetXattrs(initAttrs, false)
err = n.SetXattrsWithContext(context.TODO(), initAttrs, false)
if err != nil {
return nil, errors.Wrap(err, "Decomposedfs: could not write metadata")
}
@@ -347,7 +347,7 @@ func initNewNode(upload *Upload, n *node.Node, fsize uint64) (*lockedfile.File,
// nothing to do
}
if _, err := node.CheckQuota(n.SpaceRoot, false, 0, fsize); err != nil {
if _, err := node.CheckQuota(upload.Ctx, n.SpaceRoot, false, 0, fsize); err != nil {
return f, err
}
@@ -374,11 +374,11 @@ func initNewNode(upload *Upload, n *node.Node, fsize uint64) (*lockedfile.File,
func updateExistingNode(upload *Upload, n *node.Node, spaceID string, fsize uint64) (*lockedfile.File, error) {
old, _ := node.ReadNode(upload.Ctx, upload.lu, spaceID, n.ID, false, nil, false)
if _, err := node.CheckQuota(n.SpaceRoot, true, uint64(old.Blobsize), fsize); err != nil {
if _, err := node.CheckQuota(upload.Ctx, n.SpaceRoot, true, uint64(old.Blobsize), fsize); err != nil {
return nil, err
}
tmtime, err := old.GetTMTime()
tmtime, err := old.GetTMTime(upload.Ctx)
if err != nil {
return nil, err
}
@@ -414,7 +414,7 @@ func updateExistingNode(upload *Upload, n *node.Node, spaceID string, fsize uint
}
// copy blob metadata to version node
if err := upload.lu.CopyMetadataWithSourceLock(targetPath, upload.versionsPath, func(attributeName string) bool {
if err := upload.lu.CopyMetadataWithSourceLock(upload.Ctx, targetPath, upload.versionsPath, func(attributeName string) bool {
return strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) ||
attributeName == prefixes.TypeAttr ||
attributeName == prefixes.BlobIDAttr ||

View File

@@ -129,7 +129,7 @@ func Cleanup(upload *Upload, failure bool, keepUpload bool) {
// unset processing status
if upload.Node != nil { // node can be nil when there was an error before it was created (eg. checksum-mismatch)
if err := upload.Node.UnmarkProcessing(upload.Info.ID); err != nil {
if err := upload.Node.UnmarkProcessing(upload.Ctx, upload.Info.ID); err != nil {
upload.log.Info().Str("path", upload.Node.InternalPath()).Err(err).Msg("unmarking processing failed")
}
}
@@ -370,7 +370,7 @@ func (upload *Upload) cleanup(cleanNode, cleanBin, cleanInfo bool) {
upload.Node = nil
default:
if err := upload.lu.CopyMetadata(p, upload.Node.InternalPath(), func(attributeName string) bool {
if err := upload.lu.CopyMetadata(upload.Ctx, p, upload.Node.InternalPath(), func(attributeName string) bool {
return strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) ||
attributeName == prefixes.TypeAttr ||
attributeName == prefixes.BlobIDAttr ||

View File

@@ -33,26 +33,24 @@ import (
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
)
var (
// Propagator is the default Reva propagator.
Propagator = propagation.NewCompositeTextMapPropagator(propagation.Baggage{}, propagation.TraceContext{})
defaultProvider = revaDefaultTracerProvider{
provider: trace.NewNoopTracerProvider(),
}
defaultProvider = revaDefaultTracerProvider{}
)
type revaDefaultTracerProvider struct {
mutex sync.RWMutex
initialized bool
provider trace.TracerProvider
}
// NewTracerProvider returns a new TracerProvider, configure for the specified service
@@ -86,9 +84,7 @@ func NewTracerProvider(opts ...Option) trace.TracerProvider {
// SetDefaultTracerProvider sets the default trace provider
func SetDefaultTracerProvider(tp trace.TracerProvider) {
defaultProvider.mutex.Lock()
defer defaultProvider.mutex.Unlock()
defaultProvider.provider = tp
otel.SetTracerProvider(tp)
defaultProvider.initialized = true
}
@@ -99,14 +95,13 @@ func InitDefaultTracerProvider(collector, endpoint string) {
defaultProvider.mutex.Lock()
defer defaultProvider.mutex.Unlock()
if !defaultProvider.initialized {
defaultProvider.provider = getJaegerTracerProvider(Options{
SetDefaultTracerProvider(getJaegerTracerProvider(Options{
Enabled: true,
Collector: collector,
Endpoint: endpoint,
ServiceName: "reva default jaeger provider",
})
}))
}
defaultProvider.initialized = true
}
// DefaultProvider returns the "global" default TracerProvider
@@ -114,7 +109,7 @@ func InitDefaultTracerProvider(collector, endpoint string) {
func DefaultProvider() trace.TracerProvider {
defaultProvider.mutex.RLock()
defer defaultProvider.mutex.RUnlock()
return defaultProvider.provider
return otel.GetTracerProvider()
}
// getJaegerTracerProvider returns a new TracerProvider, configure for the specified service

File diff suppressed because it is too large Load Diff

20
vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go generated vendored Normal file
View File

@@ -0,0 +1,20 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package semconv implements OpenTelemetry semantic conventions.
//
// OpenTelemetry semantic conventions are agreed standardized naming
// patterns for OpenTelemetry things. This package represents the conventions
// as of the v1.20.0 version of the OpenTelemetry specification.
package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"

View File

@@ -0,0 +1,199 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated from semantic convention specification. DO NOT EDIT.
package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
import "go.opentelemetry.io/otel/attribute"
// This semantic convention defines the attributes used to represent a feature
// flag evaluation as an event.
const (
	// FeatureFlagKeyKey is the attribute Key conforming to the
	// "feature_flag.key" semantic conventions. It represents the unique
	// identifier of the feature flag.
	//
	// Type: string
	// RequirementLevel: Required
	// Stability: stable
	// Examples: 'logo-color'
	FeatureFlagKeyKey = attribute.Key("feature_flag.key")

	// FeatureFlagProviderNameKey is the attribute Key conforming to the
	// "feature_flag.provider_name" semantic conventions. It represents the
	// name of the service provider that performs the flag evaluation.
	//
	// Type: string
	// RequirementLevel: Recommended
	// Stability: stable
	// Examples: 'Flag Manager'
	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")

	// FeatureFlagVariantKey is the attribute Key conforming to the
	// "feature_flag.variant" semantic conventions. The variant SHOULD be a
	// semantic identifier for a value. If one is unavailable, a stringified
	// version of the value can be used.
	//
	// Type: string
	// RequirementLevel: Recommended
	// Stability: stable
	// Examples: 'red', 'true', 'on'
	// Note: A semantic identifier, commonly referred to as a variant, provides
	// a means
	// for referring to a value without including the value itself. This can
	// provide additional context for understanding the meaning behind a value.
	// For example, the variant `red` may be used for the value `#c05543`.
	//
	// A stringified version of the value can be used in situations where a
	// semantic identifier is unavailable. String representation of the value
	// should be determined by the implementer.
	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
)
// FeatureFlagKey builds the attribute KeyValue for the "feature_flag.key"
// semantic convention, carrying the unique identifier of the feature flag.
func FeatureFlagKey(val string) attribute.KeyValue {
	kv := FeatureFlagKeyKey.String(val)
	return kv
}
// FeatureFlagProviderName builds the attribute KeyValue for the
// "feature_flag.provider_name" semantic convention, carrying the name of the
// service provider that performs the flag evaluation.
func FeatureFlagProviderName(val string) attribute.KeyValue {
	name := FeatureFlagProviderNameKey.String(val)
	return name
}
// FeatureFlagVariant returns an attribute KeyValue conforming to the
// "feature_flag.variant" semantic conventions. The variant SHOULD be a
// semantic identifier for a value. If one is unavailable, a stringified
// version of the value can be used.
func FeatureFlagVariant(val string) attribute.KeyValue {
	return FeatureFlagVariantKey.String(val)
}
// RPC received/sent message.
const (
	// MessageTypeKey is the attribute Key conforming to the "message.type"
	// semantic conventions. It indicates whether this is a received or
	// sent message.
	//
	// Type: Enum
	// RequirementLevel: Optional
	// Stability: stable
	MessageTypeKey = attribute.Key("message.type")

	// MessageIDKey is the attribute Key conforming to the "message.id"
	// semantic conventions. The id MUST be calculated as two
	// different counters starting from `1`, one for sent messages and one for
	// received messages.
	//
	// Type: int
	// RequirementLevel: Optional
	// Stability: stable
	// Note: This way we guarantee that the values will be consistent between
	// different implementations.
	MessageIDKey = attribute.Key("message.id")

	// MessageCompressedSizeKey is the attribute Key conforming to the
	// "message.compressed_size" semantic conventions. It represents the
	// compressed size of the message in bytes.
	//
	// Type: int
	// RequirementLevel: Optional
	// Stability: stable
	MessageCompressedSizeKey = attribute.Key("message.compressed_size")

	// MessageUncompressedSizeKey is the attribute Key conforming to the
	// "message.uncompressed_size" semantic conventions. It represents the
	// uncompressed size of the message in bytes.
	//
	// Type: int
	// RequirementLevel: Optional
	// Stability: stable
	MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
)
var (
	// MessageTypeSent is the "message.type" value for a sent message.
	MessageTypeSent = MessageTypeKey.String("SENT")
	// MessageTypeReceived is the "message.type" value for a received message.
	MessageTypeReceived = MessageTypeKey.String("RECEIVED")
)
// MessageID returns an attribute KeyValue conforming to the "message.id"
// semantic conventions. The id MUST be calculated as two different
// counters starting from `1`, one for sent messages and one for received
// messages.
func MessageID(val int) attribute.KeyValue {
	return MessageIDKey.Int(val)
}
// MessageCompressedSize builds the attribute KeyValue for the
// "message.compressed_size" semantic convention, carrying the compressed
// size of the message in bytes.
func MessageCompressedSize(val int) attribute.KeyValue {
	size := MessageCompressedSizeKey.Int(val)
	return size
}
// MessageUncompressedSize builds the attribute KeyValue for the
// "message.uncompressed_size" semantic convention, carrying the uncompressed
// size of the message in bytes.
func MessageUncompressedSize(val int) attribute.KeyValue {
	size := MessageUncompressedSizeKey.Int(val)
	return size
}
// The attributes used to report a single exception associated with a span.
const (
	// ExceptionEscapedKey is the attribute Key conforming to the
	// "exception.escaped" semantic conventions. It SHOULD be
	// set to true if the exception event is recorded at a point where it is
	// known that the exception is escaping the scope of the span.
	//
	// Type: boolean
	// RequirementLevel: Optional
	// Stability: stable
	// Note: An exception is considered to have escaped (or left) the scope of
	// a span,
	// if that span is ended while the exception is still logically "in
	// flight".
	// This may be actually "in flight" in some languages (e.g. if the
	// exception
	// is passed to a Context manager's `__exit__` method in Python) but will
	// usually be caught at the point of recording the exception in most
	// languages.
	//
	// It is usually not possible to determine at the point where an exception
	// is thrown
	// whether it will escape the scope of a span.
	// However, it is trivial to know that an exception
	// will escape, if one checks for an active exception just before ending
	// the span,
	// as done in the [example above](#recording-an-exception).
	//
	// It follows that an exception may still escape the scope of the span
	// even if the `exception.escaped` attribute was not set or set to false,
	// since the event might have been recorded at a time where it was not
	// clear whether the exception will escape.
	ExceptionEscapedKey = attribute.Key("exception.escaped")
)
// ExceptionEscaped returns an attribute KeyValue conforming to the
// "exception.escaped" semantic conventions. It SHOULD be set to
// true if the exception event is recorded at a point where it is known that
// the exception is escaping the scope of the span.
func ExceptionEscaped(val bool) attribute.KeyValue {
	return ExceptionEscapedKey.Bool(val)
}

View File

@@ -0,0 +1,20 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
const (
// ExceptionEventName is the name of the Span event representing an exception.
ExceptionEventName = "exception"
)

View File

@@ -0,0 +1,21 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
// HTTP scheme attributes.
var (
	// HTTPSchemeHTTP is the HTTPSchemeKey value for plain HTTP ("http").
	HTTPSchemeHTTP = HTTPSchemeKey.String("http")
	// HTTPSchemeHTTPS is the HTTPSchemeKey value for HTTP over TLS ("https").
	HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,20 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
// SchemaURL is the schema URL that matches the version of the semantic conventions
// that this package defines. Semconv packages starting from v1.4.0 must declare
// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
const SchemaURL = "https://opentelemetry.io/schemas/1.20.0"

2610
vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

4
vendor/modules.txt vendored
View File

@@ -352,7 +352,7 @@ github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1
github.com/cs3org/go-cs3apis/cs3/storage/registry/v1beta1
github.com/cs3org/go-cs3apis/cs3/tx/v1beta1
github.com/cs3org/go-cs3apis/cs3/types/v1beta1
# github.com/cs3org/reva/v2 v2.14.1-0.20230629081848-5e7f1bf5c21d
# github.com/cs3org/reva/v2 v2.14.1-0.20230630110658-4d867d522806
## explicit; go 1.20
github.com/cs3org/reva/v2/cmd/revad/internal/grace
github.com/cs3org/reva/v2/cmd/revad/runtime
@@ -1841,6 +1841,7 @@ go.opentelemetry.io/otel/semconv/internal
go.opentelemetry.io/otel/semconv/v1.10.0
go.opentelemetry.io/otel/semconv/v1.12.0
go.opentelemetry.io/otel/semconv/v1.17.0
go.opentelemetry.io/otel/semconv/v1.20.0
go.opentelemetry.io/otel/semconv/v1.4.0
# go.opentelemetry.io/otel/exporters/jaeger v1.15.1
## explicit; go 1.19
@@ -2195,4 +2196,3 @@ stash.kopano.io/kgol/oidc-go
## explicit; go 1.13
stash.kopano.io/kgol/rndm
# github.com/cs3org/go-cs3apis => github.com/2403905/go-cs3apis v0.0.0-20230517122726-727045414fd1
# github.com/cs3org/reva/v2 => github.com/micbar/reva/v2 v2.0.0-20230626125956-c381fe19a108