Bump reva to get latest changes for LDAP client

Author: root
Date: 2023-07-10 10:04:33 +00:00
Committed by: Ralf Haferkamp
Parent: 99f27e569d
Commit: 6989b17a13
34 changed files with 1293 additions and 283 deletions

go.mod

@@ -13,7 +13,7 @@ require (
github.com/coreos/go-oidc v2.2.1+incompatible
github.com/coreos/go-oidc/v3 v3.6.0
github.com/cs3org/go-cs3apis v0.0.0-20230516150832-730ac860c71d
github.com/cs3org/reva/v2 v2.14.1-0.20230630110658-4d867d522806
github.com/cs3org/reva/v2 v2.14.1-0.20230711102918-b095db01ac36
github.com/disintegration/imaging v1.6.2
github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e
github.com/egirna/icap-client v0.1.1
@@ -25,7 +25,7 @@ require (
github.com/go-ldap/ldap/v3 v3.4.5
github.com/go-ldap/ldif v0.0.0-20200320164324-fd88d9b715b3
github.com/go-micro/plugins/v4/client/grpc v1.2.0
github.com/go-micro/plugins/v4/events/natsjs v1.2.0
github.com/go-micro/plugins/v4/events/natsjs v1.2.1
github.com/go-micro/plugins/v4/logger/zerolog v1.2.0
github.com/go-micro/plugins/v4/registry/consul v1.2.0
github.com/go-micro/plugins/v4/registry/etcd v1.2.0
@@ -212,6 +212,7 @@ require (
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
github.com/google/renameio/v2 v2.0.0 // indirect
github.com/gookit/color v1.5.3 // indirect
github.com/gookit/goutil v0.6.9 // indirect
github.com/gorilla/handlers v1.5.1 // indirect

go.sum

@@ -625,8 +625,8 @@ github.com/crewjam/httperr v0.2.0 h1:b2BfXR8U3AlIHwNeFFvZ+BV1LFvKLlzMjzaTnZMybNo
github.com/crewjam/httperr v0.2.0/go.mod h1:Jlz+Sg/XqBQhyMjdDiC+GNNRzZTD7x39Gu3pglZ5oH4=
github.com/crewjam/saml v0.4.13 h1:TYHggH/hwP7eArqiXSJUvtOPNzQDyQ7vwmwEqlFWhMc=
github.com/crewjam/saml v0.4.13/go.mod h1:igEejV+fihTIlHXYP8zOec3V5A8y3lws5bQBFsTm4gA=
github.com/cs3org/reva/v2 v2.14.1-0.20230630110658-4d867d522806 h1:3fPvPnnZib/cMA4f0GXJvX7lhQs7O31ZmDuSDHxQnVk=
github.com/cs3org/reva/v2 v2.14.1-0.20230630110658-4d867d522806/go.mod h1:E32krZG159YflDSjDWfx/QGIC2529PS5LiPnGNHu3d0=
github.com/cs3org/reva/v2 v2.14.1-0.20230711102918-b095db01ac36 h1:Jww+ia7QQPE6zctpRb60iflrAaasRxNl9AfmOyM8Fw0=
github.com/cs3org/reva/v2 v2.14.1-0.20230711102918-b095db01ac36/go.mod h1:4z5EQghS2LhSWZWocH51Dw9VAs16No1zSFvFgQtgS7w=
github.com/cubewise-code/go-mime v0.0.0-20200519001935-8c5762b177d8 h1:Z9lwXumT5ACSmJ7WGnFl+OMLLjpz5uR2fyz7dC255FI=
github.com/cubewise-code/go-mime v0.0.0-20200519001935-8c5762b177d8/go.mod h1:4abs/jPXcmJzYoYGF91JF9Uq9s/KL5n1jvFDix8KcqY=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
@@ -776,8 +776,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-micro/plugins/v4/client/grpc v1.2.0 h1:Z8BB6jqslXM2aMMhjZ+QfNuzR+msCMtGd83DGlsQQG0=
github.com/go-micro/plugins/v4/client/grpc v1.2.0/go.mod h1:3fDuzyfYLwEImn8+lkhKl3W4Ay1jFevkTeC32PBlgQs=
github.com/go-micro/plugins/v4/events/natsjs v1.2.0 h1:c+iG3FholfRJI27QkIVPWOERazwDZro4GLl6Odx2g1c=
github.com/go-micro/plugins/v4/events/natsjs v1.2.0/go.mod h1:lYuiEYKQTpbE2LA8HEcC8D6kQ29M7ILfEak3dzeucEg=
github.com/go-micro/plugins/v4/events/natsjs v1.2.1 h1:wCq5pyUkHSJ31QAtTLiIRowNMe2OFQysg7fjw3SPJZ4=
github.com/go-micro/plugins/v4/events/natsjs v1.2.1/go.mod h1:lYuiEYKQTpbE2LA8HEcC8D6kQ29M7ILfEak3dzeucEg=
github.com/go-micro/plugins/v4/logger/zerolog v1.2.0 h1:JZ516VQ9zekRoi868XG7x0EWxZ2AMq/euHIBChITsTI=
github.com/go-micro/plugins/v4/logger/zerolog v1.2.0/go.mod h1:AieYOIeOxobYa5B8WGEqxXM3Ndi26tDIu9fZ4RYkCvQ=
github.com/go-micro/plugins/v4/registry/consul v1.2.0 h1:nqrTzfWUTWKAy+M+i2FazbHnQn4m77EOtoch57kbCH4=
@@ -1000,6 +1000,8 @@ github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg=
github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=


@@ -51,10 +51,13 @@ func NewUnary() grpc.UnaryServerInterceptor {
log := appctx.GetLogger(ctx)
var event *zerolog.Event
var msg string
if code != codes.OK {
event = log.Error()
msg = err.Error()
} else {
event = log.Debug()
msg = "unary"
}
event.Str("user-agent", userAgent).
@@ -63,7 +66,7 @@ func NewUnary() grpc.UnaryServerInterceptor {
Str("start", start.Format("02/Jan/2006:15:04:05 -0700")).
Str("end", end.Format("02/Jan/2006:15:04:05 -0700")).Int("time_ns", int(diff)).
Str("code", code.String()).
Msg("unary")
Msg(msg)
return res, err
}
@@ -91,10 +94,13 @@ func NewStream() grpc.StreamServerInterceptor {
log := appctx.GetLogger(ss.Context())
var event *zerolog.Event
var msg string
if code != codes.OK {
event = log.Error()
msg = err.Error()
} else {
event = log.Info()
event = log.Debug()
msg = "stream"
}
event.Str("user-agent", userAgent).
@@ -103,7 +109,7 @@ func NewStream() grpc.StreamServerInterceptor {
Str("start", start.Format("02/Jan/2006:15:04:05 -0700")).
Str("end", end.Format("02/Jan/2006:15:04:05 -0700")).Int("time_ns", int(diff)).
Str("code", code.String()).
Msg("stream")
Msg(msg)
return err
}
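
Stripped of the surrounding plumbing, the interceptor change above is one decision: derive the log level and message from the gRPC status code, so failed calls log at error level with the cause while successful calls drop to debug. A minimal standalone sketch of that helper (illustrative, not part of the diff):

package example

import (
	"github.com/rs/zerolog"
	"google.golang.org/grpc/codes"
)

// logEvent picks the zerolog event and message from a gRPC status code,
// mirroring the logic added above (hedged sketch; names are illustrative).
func logEvent(log *zerolog.Logger, code codes.Code, err error, fallback string) (*zerolog.Event, string) {
	if code != codes.OK {
		return log.Error(), err.Error() // failures surface at error level with the cause
	}
	return log.Debug(), fallback // successes stay at debug level ("unary" or "stream")
}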


@@ -47,10 +47,11 @@
package eos_grpc
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (


@@ -184,3 +184,16 @@ func (ResumePostprocessing) Unmarshal(v []byte) (interface{}, error) {
err := json.Unmarshal(v, &e)
return e, err
}
// RestartPostprocessing will be emitted by the postprocessing service if it doesn't know about an upload
type RestartPostprocessing struct {
UploadID string
Timestamp *types.Timestamp
}
// Unmarshal to fulfill the unmarshaller interface
func (RestartPostprocessing) Unmarshal(v []byte) (interface{}, error) {
e := RestartPostprocessing{}
err := json.Unmarshal(v, &e)
return e, err
}
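
For context, this is how the new event could be emitted; a hedged sketch assuming the events.Publish helper used elsewhere in this commit and reva's utils.TSNow timestamp helper:

package example

import (
	"github.com/cs3org/reva/v2/pkg/events"
	"github.com/cs3org/reva/v2/pkg/utils"
)

// requestRestart asks the storage provider to re-emit BytesReceived for an
// upload the postprocessing service does not know about (hedged sketch).
func requestRestart(s events.Stream, uploadID string) error {
	return events.Publish(s, events.RestartPostprocessing{
		UploadID:  uploadID,
		Timestamp: utils.TSNow(), // assumption: reva's current-time timestamp helper
	})
}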


@@ -52,6 +52,7 @@ func NatsFromConfig(cfg NatsConfig) (events.Stream, error) {
natsjs.TLSConfig(tlsConf),
natsjs.Address(cfg.Endpoint),
natsjs.ClusterID(cfg.Cluster),
natsjs.SynchronousPublish(true),
)
}


@@ -28,6 +28,7 @@ import (
"github.com/cs3org/reva/v2/pkg/storage/favorite"
"github.com/rs/zerolog"
"go-micro.dev/v4/broker"
"go.opentelemetry.io/otel/trace"
)
// Option defines a single option function.
@@ -54,6 +55,8 @@ type Options struct {
TracingCollector string
TracingEndpoint string
TraceProvider trace.TracerProvider
MetricsEnabled bool
MetricsNamespace string
MetricsSubsystem string
@@ -234,6 +237,13 @@ func WithTracingExporter(exporter string) Option {
}
}
// WithTraceProvider option
func WithTraceProvider(provider trace.TracerProvider) Option {
return func(o *Options) {
o.TraceProvider = provider
}
}
// Version provides a function to set the Version config option.
func Version(val string) Option {
return func(o *Options) {


@@ -51,7 +51,6 @@ const (
// Service initializes the ocdav service and underlying http server.
func Service(opts ...Option) (micro.Service, error) {
sopts := newOptions(opts...)
// set defaults
@@ -86,19 +85,23 @@ func Service(opts ...Option) (micro.Service, error) {
// chi.RegisterMethod(ocdav.MethodMkcol)
// chi.RegisterMethod(ocdav.MethodReport)
r := chi.NewRouter()
topts := []rtrace.Option{
rtrace.WithExporter(sopts.TracingExporter),
rtrace.WithEndpoint(sopts.TracingEndpoint),
rtrace.WithCollector(sopts.TracingCollector),
rtrace.WithServiceName(sopts.Name),
tp := sopts.TraceProvider
if tp == nil {
topts := []rtrace.Option{
rtrace.WithExporter(sopts.TracingExporter),
rtrace.WithEndpoint(sopts.TracingEndpoint),
rtrace.WithCollector(sopts.TracingCollector),
rtrace.WithServiceName(sopts.Name),
}
if sopts.TracingEnabled {
topts = append(topts, rtrace.WithEnabled())
}
if sopts.TracingInsecure {
topts = append(topts, rtrace.WithInsecure())
}
tp = rtrace.NewTracerProvider(topts...)
}
if sopts.TracingEnabled {
topts = append(topts, rtrace.WithEnabled())
}
if sopts.TracingInsecure {
topts = append(topts, rtrace.WithInsecure())
}
tp := rtrace.NewTracerProvider(topts...)
if err := useMiddlewares(r, &sopts, revaService, tp); err != nil {
return nil, err
}
@@ -132,7 +135,6 @@ func Service(opts ...Option) (micro.Service, error) {
}
func setDefaults(sopts *Options) error {
// set defaults
if sopts.Name == "" {
sopts.Name = ServerName
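
The net effect of the two hunks above: callers may hand the ocdav micro service an existing TracerProvider, and only when none is set does the service build one from its Tracing* options. A hedged wiring sketch (the option and constructor names come from the diff; the package path and sdktrace setup are assumptions):

package example

import (
	"github.com/cs3org/reva/v2/pkg/micro/ocdav" // assumed package path
	micro "go-micro.dev/v4"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func newOcdav() (micro.Service, error) {
	tp := sdktrace.NewTracerProvider() // assumed: configure exporters as needed
	// WithTraceProvider short-circuits the rtrace fallback shown above
	return ocdav.Service(ocdav.WithTraceProvider(tp))
}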


@@ -36,7 +36,7 @@ import (
var tracer trace.Tracer
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup")
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/cache")
}
// NewStatCache creates a new StatCache


@@ -32,9 +32,9 @@ import (
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
user "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
rpcv1beta1 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
ctxpkg "github.com/cs3org/reva/v2/pkg/ctx"
@@ -48,9 +48,9 @@ import (
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/lookup"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/migrator"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/mtimesyncedcache"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/spaceidindex"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/tree"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/upload"
"github.com/cs3org/reva/v2/pkg/storage/utils/filelocks"
@@ -66,7 +66,15 @@ import (
"golang.org/x/sync/errgroup"
)
var tracer trace.Tracer
var (
tracer trace.Tracer
_registeredEvents = []events.Unmarshaller{
events.PostprocessingFinished{},
events.PostprocessingStepFinished{},
events.RestartPostprocessing{},
}
)
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs")
@@ -104,8 +112,10 @@ type Decomposedfs struct {
stream events.Stream
cache cache.StatCache
UserCache *ttlcache.Cache
spaceIDCache mtimesyncedcache.Cache[string, map[string]string]
UserCache *ttlcache.Cache
userSpaceIndex *spaceidindex.Index
groupSpaceIndex *spaceidindex.Index
spaceTypeIndex *spaceidindex.Index
}
// NewDefault returns an instance with default components
@@ -169,16 +179,34 @@ func New(o *options.Options, lu *lookup.Lookup, p Permissions, tp Tree, es event
if o.LockCycleDurationFactor != 0 {
filelocks.SetLockCycleDurationFactor(o.LockCycleDurationFactor)
}
userSpaceIndex := spaceidindex.New(filepath.Join(o.Root, "indexes"), "by-user-id")
err = userSpaceIndex.Init()
if err != nil {
return nil, err
}
groupSpaceIndex := spaceidindex.New(filepath.Join(o.Root, "indexes"), "by-group-id")
err = groupSpaceIndex.Init()
if err != nil {
return nil, err
}
spaceTypeIndex := spaceidindex.New(filepath.Join(o.Root, "indexes"), "by-type")
err = spaceTypeIndex.Init()
if err != nil {
return nil, err
}
fs := &Decomposedfs{
tp: tp,
lu: lu,
o: o,
p: p,
chunkHandler: chunking.NewChunkHandler(filepath.Join(o.Root, "uploads")),
stream: es,
cache: cache.GetStatCache(o.StatCache.Store, o.StatCache.Nodes, o.StatCache.Database, "stat", time.Duration(o.StatCache.TTL)*time.Second, o.StatCache.Size),
UserCache: ttlcache.NewCache(),
tp: tp,
lu: lu,
o: o,
p: p,
chunkHandler: chunking.NewChunkHandler(filepath.Join(o.Root, "uploads")),
stream: es,
cache: cache.GetStatCache(o.StatCache.Store, o.StatCache.Nodes, o.StatCache.Database, "stat", time.Duration(o.StatCache.TTL)*time.Second, o.StatCache.Size),
UserCache: ttlcache.NewCache(),
userSpaceIndex: userSpaceIndex,
groupSpaceIndex: groupSpaceIndex,
spaceTypeIndex: spaceTypeIndex,
}
if o.AsyncFileUploads {
@@ -187,7 +215,7 @@ func New(o *options.Options, lu *lookup.Lookup, p Permissions, tp Tree, es event
return nil, errors.New("need nats for async file processing")
}
ch, err := events.Consume(fs.stream, "dcfs", events.PostprocessingFinished{}, events.PostprocessingStepFinished{})
ch, err := events.Consume(fs.stream, "dcfs", _registeredEvents...)
if err != nil {
return nil, err
}
@@ -285,7 +313,34 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
); err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("Failed to publish UploadReady event")
}
case events.RestartPostprocessing:
up, err := upload.Get(ctx, ev.UploadID, fs.lu, fs.tp, fs.o.Root, fs.stream, fs.o.AsyncFileUploads, fs.o.Tokens)
if err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("Failed to get upload")
continue
}
n, err := node.ReadNode(ctx, fs.lu, up.Info.Storage["SpaceRoot"], up.Info.Storage["NodeId"], false, nil, true)
if err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("could not read node")
continue
}
s, err := up.URL(up.Ctx)
if err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("could not create url")
continue
}
// restart postprocessing
if err := events.Publish(fs.stream, events.BytesReceived{
UploadID: up.Info.ID,
URL: s,
SpaceOwner: n.SpaceOwnerOrManager(up.Ctx),
ExecutingUser: &user.User{Id: &user.UserId{OpaqueId: "postprocessing-restart"}}, // send nil instead?
ResourceID: &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID},
Filename: up.Info.Storage["NodeName"],
Filesize: uint64(up.Info.Size),
}); err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("Failed to publish BytesReceived event")
}
case events.PostprocessingStepFinished:
if ev.FinishedStep != events.PPStepAntivirus {
// atm we are only interested in antivirus results
@@ -515,17 +570,6 @@ func (fs *Decomposedfs) CreateHome(ctx context.Context) (err error) {
return nil
}
// The os not exists error is buried inside the xattr error,
// so we cannot just use os.IsNotExists().
func isAlreadyExists(err error) bool {
if xerr, ok := err.(*os.LinkError); ok {
if serr, ok2 := xerr.Err.(syscall.Errno); ok2 {
return serr == syscall.EEXIST
}
}
return false
}
// GetHome is called to look up the home path for a user
// It is NOT supposed to return the internal path but the external path
func (fs *Decomposedfs) GetHome(ctx context.Context) (string, error) {


@@ -20,7 +20,6 @@ package decomposedfs
import (
"context"
"os"
"path/filepath"
"strings"
@@ -220,14 +219,12 @@ func (fs *Decomposedfs) RemoveGrant(ctx context.Context, ref *provider.Reference
switch {
case g.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_USER:
// remove from user index
userIDPath := filepath.Join(fs.o.Root, "indexes", "by-user-id", g.Grantee.GetUserId().GetOpaqueId(), grantNode.SpaceID)
if err := os.Remove(userIDPath); err != nil {
if err := fs.userSpaceIndex.Remove(g.Grantee.GetUserId().GetOpaqueId(), grantNode.SpaceID); err != nil {
return err
}
case g.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_GROUP:
// remove from group index
userIDPath := filepath.Join(fs.o.Root, "indexes", "by-group-id", g.Grantee.GetGroupId().GetOpaqueId(), grantNode.SpaceID)
if err := os.Remove(userIDPath); err != nil {
if err := fs.groupSpaceIndex.Remove(g.Grantee.GetGroupId().GetOpaqueId(), grantNode.SpaceID); err != nil {
return err
}
}


@@ -284,7 +284,7 @@ func refFromCS3(b []byte) (*provider.Reference, error) {
func (lu *Lookup) CopyMetadata(ctx context.Context, src, target string, filter func(attributeName string) bool) (err error) {
// Acquire a read lock on the source node
// write lock existing node before reading treesize or tree time
f, err := lockedfile.Open(lu.MetadataBackend().MetadataPath(src))
lock, err := lockedfile.OpenFile(lu.MetadataBackend().LockfilePath(src), os.O_RDONLY|os.O_CREATE, 0600)
if err != nil {
return err
}
@@ -293,7 +293,7 @@ func (lu *Lookup) CopyMetadata(ctx context.Context, src, target string, filter f
return errors.Wrap(err, "xattrs: Unable to lock source to read")
}
defer func() {
rerr := f.Close()
rerr := lock.Close()
// if err is non nil we do not overwrite that
if err == nil {
@@ -301,7 +301,7 @@ func (lu *Lookup) CopyMetadata(ctx context.Context, src, target string, filter f
}
}()
return lu.CopyMetadataWithSourceLock(ctx, src, target, filter, f)
return lu.CopyMetadataWithSourceLock(ctx, src, target, filter, lock)
}
// CopyMetadataWithSourceLock copies all extended attributes from source to target.
@@ -312,11 +312,11 @@ func (lu *Lookup) CopyMetadataWithSourceLock(ctx context.Context, sourcePath, ta
switch {
case lockedSource == nil:
return errors.New("no lock provided")
case lockedSource.File.Name() != lu.MetadataBackend().MetadataPath(sourcePath):
case lockedSource.File.Name() != lu.MetadataBackend().LockfilePath(sourcePath):
return errors.New("lockpath does not match filepath")
}
attrs, err := lu.metadataBackend.AllWithLockedSource(ctx, sourcePath, lockedSource)
attrs, err := lu.metadataBackend.All(ctx, sourcePath)
if err != nil {
return err
}
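
The pattern above recurs throughout this commit: take the lock on a dedicated lock file from LockfilePath instead of on the metadata file itself, so the metadata file can be replaced atomically while the lock is held. A minimal standalone sketch of the idea (illustrative names, not reva code):

package example

import (
	"os"

	"github.com/rogpeppe/go-internal/lockedfile"
)

// readWithLock takes a shared lock on the sidecar lock file, then reads the
// metadata file with plain os calls (hedged sketch of the pattern above).
func readWithLock(metaPath, lockPath string) ([]byte, error) {
	lock, err := lockedfile.OpenFile(lockPath, os.O_RDONLY|os.O_CREATE, 0600)
	if err != nil {
		return nil, err
	}
	defer lock.Close()
	// writers hold the same lock and swap metaPath atomically,
	// so this read never observes a torn file
	return os.ReadFile(metaPath)
}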


@@ -20,7 +20,9 @@ package metadata
import (
"context"
"errors"
"io"
"io/fs"
"os"
"path/filepath"
"strconv"
@@ -28,6 +30,7 @@ import (
"time"
"github.com/cs3org/reva/v2/pkg/storage/cache"
"github.com/google/renameio/v2"
"github.com/pkg/xattr"
"github.com/rogpeppe/go-internal/lockedfile"
"github.com/shamaton/msgpack/v2"
@@ -142,74 +145,60 @@ func (b MessagePackBackend) saveAttributes(ctx context.Context, path string, set
span.End()
}()
lockPath := b.LockfilePath(path)
metaPath := b.MetadataPath(path)
if acquireLock {
_, subspan := tracer.Start(ctx, "lockedfile.OpenFile")
f, err = lockedfile.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0600)
subspan.End()
} else {
_, subspan := tracer.Start(ctx, "os.OpenFile")
f, err = os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0600)
f, err = lockedfile.OpenFile(lockPath, os.O_RDWR|os.O_CREATE, 0600)
subspan.End()
defer f.Close()
}
if err != nil {
return err
}
defer f.Close()
// Invalidate cache early
_, subspan := tracer.Start(ctx, "metaCache.RemoveMetadata")
_ = b.metaCache.RemoveMetadata(b.cacheKey(path))
subspan.End()
// Read current state
_, subspan = tracer.Start(ctx, "io.ReadAll")
_, subspan := tracer.Start(ctx, "os.ReadFile")
var msgBytes []byte
msgBytes, err = io.ReadAll(f)
msgBytes, err = os.ReadFile(metaPath)
subspan.End()
if err != nil {
return err
}
attribs := map[string][]byte{}
if len(msgBytes) > 0 {
switch {
case err != nil:
if !errors.Is(err, fs.ErrNotExist) {
return err
}
case len(msgBytes) == 0:
// ugh. an empty file? bail out
return errors.New("encountered empty metadata file")
default:
// only unmarshal if we read data
err = msgpack.Unmarshal(msgBytes, &attribs)
if err != nil {
return err
}
}
// set new metadata
// prepare metadata
for key, val := range setAttribs {
attribs[key] = val
}
for _, key := range deleteAttribs {
delete(attribs, key)
}
// Truncate file
_, err = f.Seek(0, io.SeekStart)
if err != nil {
return err
}
_, subspan = tracer.Start(ctx, "f.Truncate")
err = f.Truncate(0)
subspan.End()
if err != nil {
return err
}
// Write new metadata to file
var d []byte
d, err = msgpack.Marshal(attribs)
if err != nil {
return err
}
_, subspan = tracer.Start(ctx, "f.Write")
_, err = f.Write(d)
subspan.End()
// overwrite file atomically
_, subspan = tracer.Start(ctx, "renameio.Writefile")
err = renameio.WriteFile(metaPath, d, 0600)
if err != nil {
return err
}
subspan.End()
_, subspan = tracer.Start(ctx, "metaCache.PushToCache")
err = b.metaCache.PushToCache(b.cacheKey(path), attribs)
@@ -227,9 +216,13 @@ func (b MessagePackBackend) loadAttributes(ctx context.Context, path string, sou
}
metaPath := b.MetadataPath(path)
var msgBytes []byte
if source == nil {
_, subspan := tracer.Start(ctx, "lockedfile.Open")
source, err = lockedfile.Open(metaPath)
// // No cached entry found. Read from storage and store in cache
_, subspan := tracer.Start(ctx, "os.OpenFile")
// source, err = lockedfile.Open(metaPath)
source, err = os.Open(metaPath)
subspan.End()
// // No cached entry found. Read from storage and store in cache
if err != nil {
@@ -246,12 +239,16 @@ func (b MessagePackBackend) loadAttributes(ctx context.Context, path string, sou
return attribs, nil // no attributes set yet
}
}
defer source.(*lockedfile.File).Close()
_, subspan = tracer.Start(ctx, "io.ReadAll")
msgBytes, err = io.ReadAll(source)
source.(*os.File).Close()
subspan.End()
} else {
_, subspan := tracer.Start(ctx, "io.ReadAll")
msgBytes, err = io.ReadAll(source)
subspan.End()
}
_, subspan := tracer.Start(ctx, "io.ReadAll")
msgBytes, err := io.ReadAll(source)
subspan.End()
if err != nil {
return nil, err
}
@@ -262,7 +259,7 @@ func (b MessagePackBackend) loadAttributes(ctx context.Context, path string, sou
}
}
_, subspan = tracer.Start(ctx, "metaCache.PushToCache")
_, subspan := tracer.Start(ctx, "metaCache.PushToCache")
err = b.metaCache.PushToCache(b.cacheKey(path), attribs)
subspan.End()
if err != nil {
@@ -273,7 +270,9 @@ func (b MessagePackBackend) loadAttributes(ctx context.Context, path string, sou
}
// IsMetaFile returns whether the given path represents a meta file
func (MessagePackBackend) IsMetaFile(path string) bool { return strings.HasSuffix(path, ".mpk") }
func (MessagePackBackend) IsMetaFile(path string) bool {
return strings.HasSuffix(path, ".mpk") || strings.HasSuffix(path, ".mlock")
}
// Purge purges the data of a given path
func (b MessagePackBackend) Purge(path string) error {
@@ -304,6 +303,9 @@ func (b MessagePackBackend) Rename(oldPath, newPath string) error {
// MetadataPath returns the path of the file holding the metadata for the given path
func (MessagePackBackend) MetadataPath(path string) string { return path + ".mpk" }
// LockfilePath returns the path of the lock file
func (MessagePackBackend) LockfilePath(path string) string { return path + ".mlock" }
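
Together with the lock file, saveAttributes now ends in an atomic replace: marshal the full attribute map and let renameio write a temp file and rename it over the .mpk, so concurrent readers never observe a half-written file. A hedged sketch of that write path in isolation:

package example

import (
	"github.com/google/renameio/v2"
	"github.com/shamaton/msgpack/v2"
)

// saveAtomically mirrors the new write path: marshal, then atomically
// replace the metadata file (sketch; error handling trimmed to essentials).
func saveAtomically(metaPath string, attribs map[string][]byte) error {
	d, err := msgpack.Marshal(attribs)
	if err != nil {
		return err
	}
	// renameio writes a temp file on the same filesystem and renames it
	// over metaPath, which is atomic on POSIX systems
	return renameio.WriteFile(metaPath, d, 0600)
}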
func (b MessagePackBackend) cacheKey(path string) string {
// rootPath is guaranteed to have no trailing slash
// the cache key shouldn't begin with a slash as some stores drop it which can cause


@@ -52,6 +52,7 @@ type Backend interface {
Rename(oldPath, newPath string) error
IsMetaFile(path string) bool
MetadataPath(path string) string
LockfilePath(path string) string
AllWithLockedSource(ctx context.Context, path string, source io.Reader) (map[string][]byte, error)
}
@@ -110,6 +111,9 @@ func (NullBackend) Rename(oldPath, newPath string) error { return errUnconfigure
// MetadataPath returns the path of the file holding the metadata for the given path
func (NullBackend) MetadataPath(path string) string { return "" }
// LockfilePath returns the path of the lock file
func (NullBackend) LockfilePath(path string) string { return "" }
// AllWithLockedSource reads all extended attributes from the given reader
// The path argument is used for storing the data in the cache
func (NullBackend) AllWithLockedSource(ctx context.Context, path string, source io.Reader) (map[string][]byte, error) {


@@ -24,6 +24,7 @@ import (
"os"
"path/filepath"
"strconv"
"strings"
"github.com/cs3org/reva/v2/pkg/storage/utils/filelocks"
"github.com/pkg/errors"
@@ -156,7 +157,7 @@ func (XattrsBackend) Remove(ctx context.Context, filePath string, key string) (e
}
// IsMetaFile returns whether the given path represents a meta file
func (XattrsBackend) IsMetaFile(path string) bool { return false }
func (XattrsBackend) IsMetaFile(path string) bool { return strings.HasSuffix(path, ".meta.lock") }
// Purge purges the data of a given path
func (XattrsBackend) Purge(path string) error { return nil }
@@ -167,6 +168,9 @@ func (XattrsBackend) Rename(oldPath, newPath string) error { return nil }
// MetadataPath returns the path of the file holding the metadata for the given path
func (XattrsBackend) MetadataPath(path string) string { return path }
// LockfilePath returns the path of the lock file
func (XattrsBackend) LockfilePath(path string) string { return path + ".mlock" }
func cleanupLockfile(f *lockedfile.File) {
_ = f.Close()
_ = os.Remove(f.Name())


@@ -0,0 +1,120 @@
// Copyright 2018-2023 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package migrator
import (
"os"
"path/filepath"
"github.com/shamaton/msgpack/v2"
)
// Migration0004 migrates the directory-tree-based space indexes to messagepack
func (m *Migrator) Migration0004() (Result, error) {
root := m.lu.InternalRoot()
// migrate user indexes
users, err := os.ReadDir(filepath.Join(root, "indexes", "by-user-id"))
if err != nil {
m.log.Warn().Err(err).Msg("error listing user indexes")
}
for _, user := range users {
if !user.IsDir() {
continue
}
id := user.Name()
indexPath := filepath.Join(root, "indexes", "by-user-id", id+".mpk")
dirIndexPath := filepath.Join(root, "indexes", "by-user-id", id)
cacheKey := "by-user-id:" + id
m.log.Info().Str("root", m.lu.InternalRoot()).Msg("Migrating " + indexPath + " to messagepack index format...")
err := migrateSpaceIndex(indexPath, dirIndexPath, cacheKey)
if err != nil {
m.log.Error().Err(err).Str("path", dirIndexPath).Msg("error migrating index")
}
}
// migrate group indexes
groups, err := os.ReadDir(filepath.Join(root, "indexes", "by-group-id"))
if err != nil {
m.log.Warn().Err(err).Msg("error listing group indexes")
}
for _, group := range groups {
if !group.IsDir() {
continue
}
id := group.Name()
indexPath := filepath.Join(root, "indexes", "by-group-id", id+".mpk")
dirIndexPath := filepath.Join(root, "indexes", "by-group-id", id)
cacheKey := "by-group-id:" + id
m.log.Info().Str("root", m.lu.InternalRoot()).Msg("Migrating " + indexPath + " to messagepack index format...")
err := migrateSpaceIndex(indexPath, dirIndexPath, cacheKey)
if err != nil {
m.log.Error().Err(err).Str("path", dirIndexPath).Msg("error migrating index")
}
}
// migrate space type indexes
for _, spaceType := range []string{"personal", "project", "share"} {
indexPath := filepath.Join(root, "indexes", "by-type", spaceType+".mpk")
dirIndexPath := filepath.Join(root, "indexes", "by-type", spaceType)
cacheKey := "by-type:" + spaceType
_, err := os.Stat(dirIndexPath)
if err != nil {
continue
}
m.log.Info().Str("root", m.lu.InternalRoot()).Msg("Migrating " + indexPath + " to messagepack index format...")
err = migrateSpaceIndex(indexPath, dirIndexPath, cacheKey)
if err != nil {
m.log.Error().Err(err).Str("path", dirIndexPath).Msg("error migrating index")
}
}
m.log.Info().Msg("done.")
return resultSucceeded, nil
}
func migrateSpaceIndex(indexPath, dirIndexPath, cacheKey string) error {
links := map[string][]byte{}
m, err := filepath.Glob(dirIndexPath + "/*")
if err != nil {
return err
}
for _, match := range m {
link, err := os.Readlink(match)
if err != nil {
continue
}
links[filepath.Base(match)] = []byte(link)
}
// rewrite index as file
d, err := msgpack.Marshal(links)
if err != nil {
return err
}
err = os.WriteFile(indexPath, d, 0600)
if err != nil {
return err
}
return os.RemoveAll(dirIndexPath)
}
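
The migrated index is just a msgpack-encoded map from space ID to the former symlink target. A small sketch of decoding one (not part of the diff):

package example

import (
	"os"

	"github.com/shamaton/msgpack/v2"
)

// readIndex decodes a migrated .mpk space index back into its link map.
func readIndex(indexPath string) (map[string][]byte, error) {
	b, err := os.ReadFile(indexPath)
	if err != nil {
		return nil, err
	}
	links := map[string][]byte{}
	return links, msgpack.Unmarshal(b, &links)
}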


@@ -29,7 +29,7 @@ import (
"github.com/rs/zerolog"
)
var allMigrations = []string{"0001", "0002", "0003"}
var allMigrations = []string{"0001", "0002", "0003", "0004"}
const (
resultFailed = "failed"


@@ -70,7 +70,7 @@ func (fs *Decomposedfs) ListRevisions(ctx context.Context, ref *provider.Referen
np := n.InternalPath()
if items, err := filepath.Glob(np + node.RevisionIDDelimiter + "*"); err == nil {
for i := range items {
if fs.lu.MetadataBackend().IsMetaFile(items[i]) {
if fs.lu.MetadataBackend().IsMetaFile(items[i]) || strings.HasSuffix(items[i], ".mlock") {
continue
}
@@ -237,7 +237,7 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer
attributeName == prefixes.BlobsizeAttr
})
if err != nil {
return errtypes.InternalError("failed to copy blob xattrs to version node")
return errtypes.InternalError("failed to copy blob xattrs to version node: " + err.Error())
}
// remember mtime from node as new revision mtime
@@ -256,7 +256,7 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer
attributeName == prefixes.BlobsizeAttr
})
if err != nil {
return errtypes.InternalError("failed to copy blob xattrs to old revision to node")
return errtypes.InternalError("failed to copy blob xattrs to old revision to node: " + err.Error())
}
revisionSize, err := fs.lu.MetadataBackend().GetInt64(ctx, restoredRevisionPath, prefixes.BlobsizeAttr)


@@ -0,0 +1,153 @@
package spaceidindex
import (
"io"
"os"
"path/filepath"
"time"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/mtimesyncedcache"
"github.com/pkg/errors"
"github.com/rogpeppe/go-internal/lockedfile"
"github.com/shamaton/msgpack/v2"
)
// Index holds space id indexes
type Index struct {
root string
name string
cache mtimesyncedcache.Cache[string, map[string]string]
}
type readWriteCloseSeekTruncater interface {
io.ReadWriteCloser
io.Seeker
Truncate(int64) error
}
// New returns a new index instance
func New(root, name string) *Index {
return &Index{
root: root,
name: name,
}
}
// Init initializes the index and makes sure it can be used
func (i *Index) Init() error {
// Make sure to work on an existing tree
return os.MkdirAll(filepath.Join(i.root, i.name), 0700)
}
// Load returns the content of an index
func (i *Index) Load(index string) (map[string]string, error) {
indexPath := filepath.Join(i.root, i.name, index+".mpk")
fi, err := os.Stat(indexPath)
if err != nil {
return nil, err
}
return i.readSpaceIndex(indexPath, i.name+":"+index, fi.ModTime())
}
// Add adds an entry to an index
func (i *Index) Add(index, key string, value string) error {
return i.updateIndex(index, map[string]string{key: value}, []string{})
}
// Remove removes an entry from the index
func (i *Index) Remove(index, key string) error {
return i.updateIndex(index, map[string]string{}, []string{key})
}
func (i *Index) updateIndex(index string, addLinks map[string]string, removeLinks []string) error {
indexPath := filepath.Join(i.root, i.name, index+".mpk")
var err error
// acquire writelock
var f readWriteCloseSeekTruncater
f, err = lockedfile.OpenFile(indexPath, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return errors.Wrap(err, "unable to lock index to write")
}
defer func() {
rerr := f.Close()
// if err is non nil we do not overwrite that
if err == nil {
err = rerr
}
}()
// Read current state
msgBytes, err := io.ReadAll(f)
if err != nil {
return err
}
links := map[string]string{}
if len(msgBytes) > 0 {
err = msgpack.Unmarshal(msgBytes, &links)
if err != nil {
return err
}
}
// set new metadata
for key, val := range addLinks {
links[key] = val
}
for _, key := range removeLinks {
delete(links, key)
}
// Truncate file
_, err = f.Seek(0, io.SeekStart)
if err != nil {
return err
}
err = f.Truncate(0)
if err != nil {
return err
}
// Write new metadata to file
d, err := msgpack.Marshal(links)
if err != nil {
return errors.Wrap(err, "unable to marshal index")
}
_, err = f.Write(d)
if err != nil {
return errors.Wrap(err, "unable to write index")
}
return nil
}
func (i *Index) readSpaceIndex(indexPath, cacheKey string, mtime time.Time) (map[string]string, error) {
return i.cache.LoadOrStore(cacheKey, mtime, func() (map[string]string, error) {
// Acquire a read lock on the index file
f, err := lockedfile.Open(indexPath)
if err != nil {
return nil, errors.Wrap(err, "unable to lock index to read")
}
defer func() {
rerr := f.Close()
// if err is non nil we do not overwrite that
if err == nil {
err = rerr
}
}()
// Read current state
msgBytes, err := io.ReadAll(f)
if err != nil {
return nil, errors.Wrap(err, "unable to read index")
}
links := map[string]string{}
if len(msgBytes) > 0 {
err = msgpack.Unmarshal(msgBytes, &links)
if err != nil {
return nil, errors.Wrap(err, "unable to parse index")
}
}
return links, nil
})
}
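
Putting the new spaceidindex package together, a hedged usage sketch that mirrors how Decomposedfs wires it above (the root path is illustrative):

package example

import (
	"path/filepath"

	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/spaceidindex"
)

// listUserSpaces mirrors the Decomposedfs wiring: one Index per category
// under <root>/indexes, keyed by user, group, or space type (sketch).
func listUserSpaces(root, userID string) (map[string]string, error) {
	idx := spaceidindex.New(filepath.Join(root, "indexes"), "by-user-id")
	if err := idx.Init(); err != nil {
		return nil, err
	}
	return idx.Load(userID) // spaceID -> relative symlink-style target
}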


@@ -298,31 +298,13 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide
}
matches := map[string]struct{}{}
var allMatches map[string]string
var err error
if requestedUserID != nil {
allMatches := map[string]string{}
indexPath := filepath.Join(fs.o.Root, "indexes", "by-user-id", requestedUserID.GetOpaqueId())
fi, err := os.Stat(indexPath)
if err == nil {
allMatches, err = fs.spaceIDCache.LoadOrStore("by-user-id:"+requestedUserID.GetOpaqueId(), fi.ModTime(), func() (map[string]string, error) {
path := filepath.Join(fs.o.Root, "indexes", "by-user-id", requestedUserID.GetOpaqueId(), "*")
m, err := filepath.Glob(path)
if err != nil {
return nil, err
}
matches := map[string]string{}
for _, match := range m {
link, err := os.Readlink(match)
if err != nil {
continue
}
matches[match] = link
}
return matches, nil
})
}
allMatches, err = fs.userSpaceIndex.Load(requestedUserID.GetOpaqueId())
if err != nil {
return nil, err
return nil, errors.Wrap(err, "error reading user index")
}
if nodeID == spaceIDAny {
@@ -344,29 +326,12 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide
}
for _, group := range user.Groups {
indexPath := filepath.Join(fs.o.Root, "indexes", "by-group-id", group)
fi, err := os.Stat(indexPath)
allMatches, err = fs.groupSpaceIndex.Load(group)
if err != nil {
continue
}
allMatches, err := fs.spaceIDCache.LoadOrStore("by-group-id:"+group, fi.ModTime(), func() (map[string]string, error) {
path := filepath.Join(fs.o.Root, "indexes", "by-group-id", group, "*")
m, err := filepath.Glob(path)
if err != nil {
return nil, err
if os.IsNotExist(err) {
continue // no spaces for this group
}
matches := map[string]string{}
for _, match := range m {
link, err := os.Readlink(match)
if err != nil {
continue
}
matches[match] = link
}
return matches, nil
})
if err != nil {
return nil, err
return nil, errors.Wrap(err, "error reading group index")
}
if nodeID == spaceIDAny {
@@ -381,33 +346,22 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide
}
if requestedUserID == nil {
if _, ok := spaceTypes[spaceTypeAny]; ok {
// TODO do not hardcode dirs
spaceTypes = map[string]struct{}{
"personal": {},
"project": {},
"share": {},
}
}
for spaceType := range spaceTypes {
indexPath := filepath.Join(fs.o.Root, "indexes", "by-type")
if spaceType != spaceTypeAny {
indexPath = filepath.Join(indexPath, spaceType)
}
fi, err := os.Stat(indexPath)
allMatches, err = fs.spaceTypeIndex.Load(spaceType)
if err != nil {
continue
}
allMatches, err := fs.spaceIDCache.LoadOrStore("by-type:"+spaceType, fi.ModTime(), func() (map[string]string, error) {
path := filepath.Join(fs.o.Root, "indexes", "by-type", spaceType, "*")
m, err := filepath.Glob(path)
if err != nil {
return nil, err
if os.IsNotExist(err) {
continue // no spaces for this space type
}
matches := map[string]string{}
for _, match := range m {
link, err := os.Readlink(match)
if err != nil {
continue
}
matches[match] = link
}
return matches, nil
})
if err != nil {
return nil, err
return nil, errors.Wrap(err, "error reading type index")
}
if nodeID == spaceIDAny {
@@ -764,13 +718,12 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De
return errtypes.NewErrtypeFromStatus(status.NewInvalid(ctx, "can't purge enabled space"))
}
// TODO invalidate ALL indexes in msgpack, not only by type
spaceType, err := n.XattrString(ctx, prefixes.SpaceTypeAttr)
if err != nil {
return err
}
// remove type index
spaceTypePath := filepath.Join(fs.o.Root, "indexes", "by-type", spaceType, spaceID)
if err := os.Remove(spaceTypePath); err != nil {
if err := fs.spaceTypeIndex.Remove(spaceType, spaceID); err != nil {
return err
}
@@ -817,80 +770,18 @@ func (fs *Decomposedfs) updateIndexes(ctx context.Context, grantee *provider.Gra
}
func (fs *Decomposedfs) linkSpaceByUser(ctx context.Context, userID, spaceID string) error {
if userID == "" {
return nil
}
// create user index dir
// TODO: pathify userID
if err := os.MkdirAll(filepath.Join(fs.o.Root, "indexes", "by-user-id", userID), 0700); err != nil {
return err
}
err := os.Symlink("../../../spaces/"+lookup.Pathify(spaceID, 1, 2)+"/nodes/"+lookup.Pathify(spaceID, 4, 2), filepath.Join(fs.o.Root, "indexes/by-user-id", userID, spaceID))
if err != nil {
if isAlreadyExists(err) {
appctx.GetLogger(ctx).Debug().Err(err).Str("space", spaceID).Str("user-id", userID).Msg("symlink already exists")
// FIXME: is it ok to wipe this err if the symlink already exists?
err = nil //nolint
} else {
// TODO how should we handle error cases here?
appctx.GetLogger(ctx).Error().Err(err).Str("space", spaceID).Str("user-id", userID).Msg("could not create symlink")
}
}
return nil
target := "../../../spaces/" + lookup.Pathify(spaceID, 1, 2) + "/nodes/" + lookup.Pathify(spaceID, 4, 2)
return fs.userSpaceIndex.Add(userID, spaceID, target)
}
func (fs *Decomposedfs) linkSpaceByGroup(ctx context.Context, groupID, spaceID string) error {
if groupID == "" {
return nil
}
// create group index dir
// TODO: pathify groupid
if err := os.MkdirAll(filepath.Join(fs.o.Root, "indexes", "by-group-id", groupID), 0700); err != nil {
return err
}
err := os.Symlink("../../../spaces/"+lookup.Pathify(spaceID, 1, 2)+"/nodes/"+lookup.Pathify(spaceID, 4, 2), filepath.Join(fs.o.Root, "indexes/by-group-id", groupID, spaceID))
if err != nil {
if isAlreadyExists(err) {
appctx.GetLogger(ctx).Debug().Err(err).Str("space", spaceID).Str("group-id", groupID).Msg("symlink already exists")
// FIXME: is it ok to wipe this err if the symlink already exists?
err = nil //nolint
} else {
// TODO how should we handle error cases here?
appctx.GetLogger(ctx).Error().Err(err).Str("space", spaceID).Str("group-id", groupID).Msg("could not create symlink")
}
}
return nil
target := "../../../spaces/" + lookup.Pathify(spaceID, 1, 2) + "/nodes/" + lookup.Pathify(spaceID, 4, 2)
return fs.groupSpaceIndex.Add(groupID, spaceID, target)
}
// TODO: implement linkSpaceByGroup
func (fs *Decomposedfs) linkStorageSpaceType(ctx context.Context, spaceType string, spaceID string) error {
if spaceType == "" {
return nil
}
// create space type dir
if err := os.MkdirAll(filepath.Join(fs.o.Root, "indexes", "by-type", spaceType), 0700); err != nil {
return err
}
// link space in spacetypes
err := os.Symlink("../../../spaces/"+lookup.Pathify(spaceID, 1, 2)+"/nodes/"+lookup.Pathify(spaceID, 4, 2), filepath.Join(fs.o.Root, "indexes", "by-type", spaceType, spaceID))
if err != nil {
if isAlreadyExists(err) {
appctx.GetLogger(ctx).Debug().Err(err).Str("space", spaceID).Str("spacetype", spaceType).Msg("symlink already exists")
// FIXME: is it ok to wipe this err if the symlink already exists?
} else {
// TODO how should we handle error cases here?
appctx.GetLogger(ctx).Error().Err(err).Str("space", spaceID).Str("spacetype", spaceType).Msg("could not create symlink")
return err
}
}
// touch index root to invalidate caches
now := time.Now()
return os.Chtimes(filepath.Join(fs.o.Root, "indexes", "by-type"), now, now)
target := "../../../spaces/" + lookup.Pathify(spaceID, 1, 2) + "/nodes/" + lookup.Pathify(spaceID, 4, 2)
return fs.spaceTypeIndex.Add(spaceType, spaceID, target)
}
func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, checkPermissions bool) (*provider.StorageSpace, error) {


@@ -40,7 +40,6 @@ import (
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata/prefixes"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options"
"github.com/cs3org/reva/v2/pkg/storage/utils/filelocks"
"github.com/cs3org/reva/v2/pkg/utils"
"github.com/google/uuid"
"github.com/pkg/errors"
@@ -750,17 +749,8 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err
// lock parent before reading treesize or tree time
_, subspan := tracer.Start(ctx, "lockedfile.OpenFile")
var parentFilename string
switch t.lookup.MetadataBackend().(type) {
case metadata.MessagePackBackend:
parentFilename = t.lookup.MetadataBackend().MetadataPath(n.ParentPath())
f, err = lockedfile.OpenFile(parentFilename, os.O_RDWR|os.O_CREATE, 0600)
case metadata.XattrsBackend:
// we have to use dedicated lockfiles to lock directories
// this only works because the xattr backend also locks folders with separate lock files
parentFilename = n.ParentPath() + filelocks.LockFileSuffix
f, err = lockedfile.OpenFile(parentFilename, os.O_RDWR|os.O_CREATE, 0600)
}
parentFilename := t.lookup.MetadataBackend().LockfilePath(n.ParentPath())
f, err = lockedfile.OpenFile(parentFilename, os.O_RDWR|os.O_CREATE, 0600)
subspan.End()
if err != nil {
sublog.Error().Err(err).
@@ -777,7 +767,7 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err
}
}()
if n, err = n.ParentWithReader(ctx, f); err != nil {
if n, err = n.Parent(ctx); err != nil {
sublog.Error().Err(err).
Msg("Propagation failed. Could not read parent node.")
return err


@@ -38,7 +38,6 @@ import (
"github.com/cs3org/reva/v2/pkg/logger"
"github.com/cs3org/reva/v2/pkg/storage/utils/chunking"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/lookup"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata/prefixes"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options"
@@ -329,23 +328,17 @@ func initNewNode(upload *Upload, n *node.Node, fsize uint64) (*lockedfile.File,
}
// create and write lock new node metadata
f, err := lockedfile.OpenFile(upload.lu.MetadataBackend().MetadataPath(n.InternalPath()), os.O_RDWR|os.O_CREATE, 0600)
f, err := lockedfile.OpenFile(upload.lu.MetadataBackend().LockfilePath(n.InternalPath()), os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return nil, err
}
switch upload.lu.MetadataBackend().(type) {
case metadata.MessagePackBackend:
// for the ini and metadata backend we also need to touch the actual node file here.
// it stores the mtime of the resource, which must not change when we update the ini file
h, err := os.OpenFile(n.InternalPath(), os.O_CREATE, 0600)
if err != nil {
return f, err
}
h.Close()
case metadata.XattrsBackend:
// nothing to do
// we also need to touch the actual node file here, as it stores the mtime of the resource
h, err := os.OpenFile(n.InternalPath(), os.O_CREATE, 0600)
if err != nil {
return f, err
}
h.Close()
if _, err := node.CheckQuota(upload.Ctx, n.SpaceRoot, false, 0, fsize); err != nil {
return f, err
@@ -403,7 +396,7 @@ func updateExistingNode(upload *Upload, n *node.Node, spaceID string, fsize uint
targetPath := n.InternalPath()
// write lock existing node before reading treesize or tree time
f, err := lockedfile.OpenFile(upload.lu.MetadataBackend().MetadataPath(targetPath), os.O_RDWR, 0600)
f, err := lockedfile.OpenFile(upload.lu.MetadataBackend().LockfilePath(targetPath), os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return nil, err
}


@@ -240,7 +240,23 @@ func (c *ConnWithReconnect) StartTLS(*tls.Config) error {
}
// Close implements the ldap.Client interface
func (c *ConnWithReconnect) Close() {}
func (c *ConnWithReconnect) Close() (err error) {
conn, err := c.getConnection()
if err != nil {
return err
}
return conn.Close()
}
func (c *ConnWithReconnect) GetLastError() error {
conn, err := c.getConnection()
if err != nil {
return err
}
return conn.GetLastError()
}
// IsClosing implements the ldap.Client interface
func (c *ConnWithReconnect) IsClosing() bool {
@@ -304,3 +320,8 @@ func (c *ConnWithReconnect) TLSConnectionState() (tls.ConnectionState, bool) {
func (c *ConnWithReconnect) Unbind() error {
return ldap.NewError(ldap.LDAPResultNotSupported, fmt.Errorf("not implemented"))
}
// DirSync implements the ldap.Client interface
func (c *ConnWithReconnect) DirSync(searchRequest *ldap.SearchRequest, flags, maxAttrCount int64, cookie []byte) (*ldap.SearchResult, error) {
return nil, ldap.NewError(ldap.LDAPResultNotSupported, fmt.Errorf("not implemented"))
}
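
These additions track interface growth in go-ldap v3.4.5, where ldap.Client gained Close() error, GetLastError and DirSync. A compile-time assertion, not part of the diff, placed next to ConnWithReconnect would make such drift break the build immediately:

// hedged sketch: belongs in the same package as ConnWithReconnect;
// fails to compile as soon as the type stops satisfying ldap.Client
var _ ldap.Client = (*ConnWithReconnect)(nil)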


@@ -112,6 +112,16 @@ func (s *stream) Publish(topic string, msg interface{}, opts ...events.PublishOp
}
// publish the event to the topic's channel
// publish synchronously if configured
if s.opts.SyncPublish {
_, err := s.natsJetStreamCtx.Publish(event.Topic, bytes)
if err != nil {
err = errors.Wrap(err, "Error publishing message to topic")
}
return err
}
// publish asynchronously by default
if _, err := s.natsJetStreamCtx.PublishAsync(event.Topic, bytes); err != nil {
return errors.Wrap(err, "Error publishing message to topic")
}


@@ -8,11 +8,12 @@ import (
// Options which are used to configure the nats stream.
type Options struct {
ClusterID string
ClientID string
Address string
TLSConfig *tls.Config
Logger logger.Logger
ClusterID string
ClientID string
Address string
TLSConfig *tls.Config
Logger logger.Logger
SyncPublish bool
}
// Option is a function which configures options.
@@ -52,3 +53,10 @@ func Logger(log logger.Logger) Option {
o.Logger = log
}
}
// SynchronousPublish allows using a synchronous publishing instead of the default asynchronous
func SynchronousPublish(sync bool) Option {
return func(o *Options) {
o.SyncPublish = sync
}
}
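
A hedged usage sketch, assuming the plugin's NewStream constructor (the endpoint value is illustrative):

package example

import (
	"github.com/go-micro/plugins/v4/events/natsjs"
	"go-micro.dev/v4/events"
)

// newSyncStream builds a stream whose Publish blocks until JetStream acks,
// matching the SynchronousPublish(true) wiring in NatsFromConfig above.
func newSyncStream(endpoint string) (events.Stream, error) {
	return natsjs.NewStream(
		natsjs.Address(endpoint),
		natsjs.SynchronousPublish(true),
	)
}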

vendor/github.com/google/renameio/v2/.golangci.yml (generated, vendored, new file)

@@ -0,0 +1,5 @@
linters:
disable:
- errcheck
enable:
- gofmt

vendor/github.com/google/renameio/v2/CONTRIBUTING.md (generated, vendored, new file)

@@ -0,0 +1,28 @@
# How to Contribute
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement
Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution;
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.
You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.
## Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult
[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
information on using pull requests.
## Community Guidelines
This project follows [Google's Open Source Community
Guidelines](https://opensource.google.com/conduct/).

vendor/github.com/google/renameio/v2/LICENSE (generated, vendored, new file)

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

74
vendor/github.com/google/renameio/v2/README.md generated vendored Normal file
View File

@@ -0,0 +1,74 @@
[![Build Status](https://github.com/google/renameio/workflows/Test/badge.svg)](https://github.com/google/renameio/actions?query=workflow%3ATest)
[![PkgGoDev](https://pkg.go.dev/badge/github.com/google/renameio)](https://pkg.go.dev/github.com/google/renameio)
[![Go Report Card](https://goreportcard.com/badge/github.com/google/renameio)](https://goreportcard.com/report/github.com/google/renameio)
The `renameio` Go package provides a way to atomically create or replace a file or
symbolic link.
## Atomicity vs durability
`renameio` concerns itself *only* with atomicity, i.e. making sure applications
never see unexpected file content (a half-written file, or a 0-byte file).
As a practical example, consider https://manpages.debian.org/: if there is a
power outage while the site is updating, we are okay with losing the manpages
which were being rendered at the time of the power outage. They will be added in
a later run of the software. We are not okay with having a manpage replaced by a
0-byte file under any circumstances, though.
## Advantages of this package
There are other packages for atomically replacing files, and sometimes ad-hoc
implementations can be found in programs.
A naive approach to the problem is to create a temporary file followed by a call
to `os.Rename()`. However, there are a number of subtleties which make the
correct sequence of operations hard to identify:
* The temporary file should be removed when an error occurs, but a remove must
not be attempted if the rename succeeded, as a new file might have been
created with the same name. This renders a throwaway `defer
os.Remove(t.Name())` insufficient; state must be kept.
* The temporary file must be created on the same file system (same mount point)
for the rename to work, but the TMPDIR environment variable should still be
respected, e.g. to direct temporary files into a separate directory outside of
the webserver's document root but on the same file system.
* On POSIX operating systems, the
[`fsync`](https://manpages.debian.org/stretch/manpages-dev/fsync.2) system
call must be used to ensure that the `os.Rename()` call will not result in a
0-length file.
This package attempts to get all of these details right, provides an intuitive
yet flexible API, and caters to use cases where high performance is required.
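A minimal sketch of the happy path; the target path and payload here are hypothetical:

```go
package main

import (
	"log"

	"github.com/google/renameio/v2"
)

func main() {
	// One call stages the data in a temporary file on the same file
	// system, fsyncs it, and renames it over the target, so concurrent
	// readers see either the old content or the new one, never a
	// half-written or 0-byte file.
	if err := renameio.WriteFile("/srv/www/index.html", []byte("<html>ok</html>\n"), 0o644); err != nil {
		log.Fatal(err)
	}
}
```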
## Major changes in v2
With major version renameio/v2, `renameio.WriteFile` changes the way that
permissions are handled. Before version 2, files were created with the
permissions passed to the function, ignoring the
[umask](https://en.wikipedia.org/wiki/Umask). From version 2 onwards, these
permissions are further modified by the process's umask (usually the user's
preferred umask).
If you were relying on the umask being ignored, add the
`renameio.IgnoreUmask()` option to your `renameio.WriteFile` calls when
upgrading to v2.
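For example (a sketch; the path and data are placeholders), a call that must keep its exact permissions becomes:

```go
package main

import (
	"log"

	"github.com/google/renameio/v2"
)

func main() {
	// IgnoreUmask restores the pre-v2 behaviour: 0o600 is applied
	// verbatim instead of being reduced by the process umask.
	err := renameio.WriteFile("/tmp/secret.token", []byte("s3cr3t\n"), 0o600,
		renameio.IgnoreUmask())
	if err != nil {
		log.Fatal(err)
	}
}
```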
## Windows support
It is [not possible to reliably write files atomically on
Windows](https://github.com/golang/go/issues/22397#issuecomment-498856679), and
[`chmod` is not reliably supported by the Go standard library on
Windows](https://github.com/google/renameio/issues/17).
As it is not possible to provide a correct implementation, this package does not
export any functions on Windows.
## Disclaimer
This is not an official Google product (experimental or otherwise); it
is just code that happens to be owned by Google.
This project is not affiliated with the Go project.

21
vendor/github.com/google/renameio/v2/doc.go generated vendored Normal file
View File

@@ -0,0 +1,21 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package renameio provides a way to atomically create or replace a file or
// symbolic link.
//
// Caveat: this package requires the file system rename(2) implementation to be
// atomic. Notably, this is not the case when using NFS with multiple clients:
// https://stackoverflow.com/a/41396801
package renameio

79
vendor/github.com/google/renameio/v2/option.go generated vendored Normal file
View File

@@ -0,0 +1,79 @@
// Copyright 2021 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows
// +build !windows
package renameio
import "os"
// Option is the interface implemented by all configuration function return
// values.
type Option interface {
apply(*config)
}
type optionFunc func(*config)
func (fn optionFunc) apply(cfg *config) {
fn(cfg)
}
// WithTempDir configures the directory to use for temporary, uncommitted
// files. Suitable for using a cached directory from
// TempDir(filepath.Dir(path)).
func WithTempDir(dir string) Option {
return optionFunc(func(cfg *config) {
cfg.dir = dir
})
}
// WithPermissions sets the permissions for the target file while respecting
// the umask(2). Bits set in the umask are removed from the permissions given
// unless IgnoreUmask is used.
func WithPermissions(perm os.FileMode) Option {
perm &= os.ModePerm
return optionFunc(func(cfg *config) {
cfg.createPerm = perm
})
}
// IgnoreUmask causes the permissions configured using WithPermissions to be
// applied directly without applying the umask.
func IgnoreUmask() Option {
return optionFunc(func(cfg *config) {
cfg.ignoreUmask = true
})
}
// WithStaticPermissions sets the permissions for the target file ignoring the
// umask(2). This is equivalent to calling Chmod() on the file handle or using
// WithPermissions in combination with IgnoreUmask.
func WithStaticPermissions(perm os.FileMode) Option {
perm &= os.ModePerm
return optionFunc(func(cfg *config) {
cfg.chmod = &perm
})
}
// WithExistingPermissions configures the file creation to try to use the
// permissions from an already existing target file. If the target file doesn't
// exist yet or is not a regular file the default permissions are used unless
// overridden using WithPermissions or WithStaticPermissions.
func WithExistingPermissions() Option {
return optionFunc(func(c *config) {
c.attemptPermCopy = true
})
}
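A rough usage sketch for the options above; the path and contents are hypothetical:

```go
package main

import (
	"log"

	"github.com/google/renameio/v2"
)

func main() {
	// WithTempDir pins the temporary file to the target's directory
	// (guaranteeing the same mount point); WithStaticPermissions sets
	// the final mode exactly, equivalent to combining WithPermissions
	// with IgnoreUmask.
	t, err := renameio.NewPendingFile("/etc/myapp/app.conf",
		renameio.WithTempDir("/etc/myapp"),
		renameio.WithStaticPermissions(0o644),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer t.Cleanup() // no-op once the replace has succeeded

	if _, err := t.WriteString("listen = :8080\n"); err != nil {
		log.Fatal(err)
	}
	if err := t.CloseAtomicallyReplace(); err != nil {
		log.Fatal(err)
	}
}
```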

283
vendor/github.com/google/renameio/v2/tempfile.go generated vendored Normal file
View File

@@ -0,0 +1,283 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows
// +build !windows
package renameio
import (
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"strconv"
)
// Default permissions for created files
const defaultPerm os.FileMode = 0o600
// nextrandom is a function generating a random number.
var nextrandom = rand.Int63
// openTempFile creates a randomly named file and returns an open handle. It is
// similar to ioutil.TempFile except that the directory must be given, the file
// permissions can be controlled and patterns in the name are not supported.
// The name is always suffixed with a random number.
func openTempFile(dir, name string, perm os.FileMode) (*os.File, error) {
prefix := filepath.Join(dir, name)
for attempt := 0; ; {
// Generate a reasonably random name which is unlikely to already
// exist. O_EXCL ensures that existing files generate an error.
name := prefix + strconv.FormatInt(nextrandom(), 10)
f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm)
if !os.IsExist(err) {
return f, err
}
if attempt++; attempt > 10000 {
return nil, &os.PathError{
Op: "tempfile",
Path: name,
Err: os.ErrExist,
}
}
}
}
// TempDir checks whether os.TempDir() can be used as a temporary directory for
// later atomically replacing files within dest. If not (i.e. os.TempDir()
// resides on a different mount point), dest is returned.
//
// Note that the returned value ceases to be valid once either os.TempDir()
// changes (e.g. on Linux, once the TMPDIR environment variable changes) or the
// file system is unmounted.
func TempDir(dest string) string {
return tempDir("", filepath.Join(dest, "renameio-TempDir"))
}
func tempDir(dir, dest string) string {
if dir != "" {
return dir // caller-specified directory always wins
}
// Choose the destination directory as the temporary directory so that we
// can definitely rename the file; for the rename to work, the temporary
// and destination files must reside on the same mount point.
fallback := filepath.Dir(dest)
// The user might have overridden the os.TempDir() return value by setting
// the TMPDIR environment variable.
tmpdir := os.TempDir()
testsrc, err := ioutil.TempFile(tmpdir, "."+filepath.Base(dest))
if err != nil {
return fallback
}
cleanup := true
defer func() {
if cleanup {
os.Remove(testsrc.Name())
}
}()
testsrc.Close()
testdest, err := ioutil.TempFile(filepath.Dir(dest), "."+filepath.Base(dest))
if err != nil {
return fallback
}
defer os.Remove(testdest.Name())
testdest.Close()
if err := os.Rename(testsrc.Name(), testdest.Name()); err != nil {
return fallback
}
cleanup = false // testsrc no longer exists
return tmpdir
}
// PendingFile is a pending temporary file, waiting to replace the destination
// path in a call to CloseAtomicallyReplace.
type PendingFile struct {
*os.File
path string
done bool
closed bool
}
// Cleanup is a no-op if CloseAtomicallyReplace succeeded, and otherwise closes
// and removes the temporary file.
//
// This method is not safe for concurrent use by multiple goroutines.
func (t *PendingFile) Cleanup() error {
if t.done {
return nil
}
// An error occurred. Close and remove the tempfile. Errors are returned
// for reporting; there is nothing the caller can recover here.
var closeErr error
if !t.closed {
closeErr = t.Close()
}
if err := os.Remove(t.Name()); err != nil {
return err
}
t.done = true
return closeErr
}
// CloseAtomicallyReplace closes the temporary file and atomically replaces
// the destination file with it, i.e., a concurrent open(2) call will either
// open the file previously located at the destination path (if any), or the
// just written file, but the file will always be present.
//
// This method is not safe for concurrent use by multiple goroutines.
func (t *PendingFile) CloseAtomicallyReplace() error {
// Even on an ordered file system (e.g. ext4 with data=ordered) or file
// systems with write barriers, we cannot skip the fsync(2) call as per
// Theodore Ts'o (ext2/3/4 lead developer):
//
// > data=ordered only guarantees the avoidance of stale data (e.g., the previous
// > contents of a data block showing up after a crash, where the previous data
// > could be someone's love letters, medical records, etc.). Without the fsync(2)
// > a zero-length file is a valid and possible outcome after the rename.
if err := t.Sync(); err != nil {
return err
}
t.closed = true
if err := t.Close(); err != nil {
return err
}
if err := os.Rename(t.Name(), t.path); err != nil {
return err
}
t.done = true
return nil
}
// TempFile creates a temporary file destined to atomically create or
// replace the destination file at path.
//
// If dir is the empty string, TempDir(filepath.Dir(path)) is used. If you are
// going to write a large number of files to the same file system, store the
// result of TempDir(filepath.Dir(path)) and pass it instead of the empty
// string.
//
// The file's permissions will be 0600. You can change these by explicitly
// calling Chmod on the returned PendingFile.
func TempFile(dir, path string) (*PendingFile, error) {
return NewPendingFile(path, WithTempDir(dir), WithStaticPermissions(defaultPerm))
}
type config struct {
dir, path string
createPerm os.FileMode
attemptPermCopy bool
ignoreUmask bool
chmod *os.FileMode
}
// NewPendingFile creates a temporary file destined to atomically create or
// replace the destination file at path.
//
// TempDir(filepath.Dir(path)) is used to store the temporary file. If you are
// going to write a large number of files to the same file system, use the
// result of TempDir(filepath.Dir(path)) with the WithTempDir option.
//
// The file's permissions will be (0600 & ^umask). Use WithPermissions,
// IgnoreUmask, WithStaticPermissions and WithExistingPermissions to control
// them.
func NewPendingFile(path string, opts ...Option) (*PendingFile, error) {
cfg := config{
path: path,
createPerm: defaultPerm,
}
for _, o := range opts {
o.apply(&cfg)
}
if cfg.ignoreUmask && cfg.chmod == nil {
cfg.chmod = &cfg.createPerm
}
if cfg.attemptPermCopy {
// Try to determine permissions from an existing file.
if existing, err := os.Lstat(cfg.path); err == nil && existing.Mode().IsRegular() {
perm := existing.Mode() & os.ModePerm
cfg.chmod = &perm
// Try to create the file with the desired permissions right away;
// at worst, a chmod will be needed afterwards.
cfg.createPerm = perm
} else if err != nil && !os.IsNotExist(err) {
return nil, err
}
}
f, err := openTempFile(tempDir(cfg.dir, cfg.path), "."+filepath.Base(cfg.path), cfg.createPerm)
if err != nil {
return nil, err
}
if cfg.chmod != nil {
if fi, err := f.Stat(); err != nil {
return nil, err
} else if fi.Mode()&os.ModePerm != *cfg.chmod {
if err := f.Chmod(*cfg.chmod); err != nil {
return nil, err
}
}
}
return &PendingFile{File: f, path: cfg.path}, nil
}
// Symlink wraps os.Symlink, replacing an existing symlink with the same name
// atomically (os.Symlink fails when newname already exists, at least on Linux).
func Symlink(oldname, newname string) error {
// Fast path: if newname does not exist yet, we can skip the whole dance
// below.
if err := os.Symlink(oldname, newname); err == nil || !os.IsExist(err) {
return err
}
// We need to use ioutil.TempDir, as we cannot overwrite an ioutil.TempFile,
// and removing+symlinking creates a TOCTOU race.
d, err := ioutil.TempDir(filepath.Dir(newname), "."+filepath.Base(newname))
if err != nil {
return err
}
cleanup := true
defer func() {
if cleanup {
os.RemoveAll(d)
}
}()
symlink := filepath.Join(d, "tmp.symlink")
if err := os.Symlink(oldname, symlink); err != nil {
return err
}
if err := os.Rename(symlink, newname); err != nil {
return err
}
cleanup = false
return os.RemoveAll(d)
}
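A sketch of the pattern the TempFile documentation recommends for writing many files to one file system, with hypothetical paths; TempDir is probed once and its result reused:

```go
package main

import (
	"fmt"
	"log"
	"path/filepath"

	"github.com/google/renameio/v2"
)

func main() {
	dir := "/srv/manpages" // hypothetical destination directory
	tmp := renameio.TempDir(dir)

	for i := 0; i < 3; i++ {
		path := filepath.Join(dir, fmt.Sprintf("page%d.html", i))
		t, err := renameio.TempFile(tmp, path)
		if err != nil {
			log.Fatal(err)
		}
		if _, err := t.WriteString("<html>rendered</html>\n"); err != nil {
			t.Cleanup() // explicit: log.Fatal skips deferred calls
			log.Fatal(err)
		}
		if err := t.CloseAtomicallyReplace(); err != nil {
			t.Cleanup()
			log.Fatal(err)
		}
	}
}
```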

41
vendor/github.com/google/renameio/v2/writefile.go generated vendored Normal file
View File

@@ -0,0 +1,41 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows
// +build !windows
package renameio
import "os"
// WriteFile mirrors ioutil.WriteFile, replacing an existing file with the same
// name atomically.
func WriteFile(filename string, data []byte, perm os.FileMode, opts ...Option) error {
opts = append([]Option{
WithPermissions(perm),
WithExistingPermissions(),
}, opts...)
t, err := NewPendingFile(filename, opts...)
if err != nil {
return err
}
defer t.Cleanup()
if _, err := t.Write(data); err != nil {
return err
}
return t.CloseAtomicallyReplace()
}
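Since the trailing options are forwarded to NewPendingFile (appended last, after the defaults WriteFile prepends), a WriteFile call can be tuned the same way; a short sketch with hypothetical paths:

```go
package main

import (
	"log"

	"github.com/google/renameio/v2"
)

func main() {
	// The variadic options are forwarded to NewPendingFile, so a
	// WriteFile call can e.g. pin the temporary file's directory.
	err := renameio.WriteFile("/var/lib/myapp/state.json",
		[]byte(`{"ok":true}`), 0o600,
		renameio.WithTempDir("/var/lib/myapp"),
	)
	if err != nil {
		log.Fatal(err)
	}
}
```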

8
vendor/modules.txt vendored
View File

@@ -352,7 +352,7 @@ github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1
github.com/cs3org/go-cs3apis/cs3/storage/registry/v1beta1
github.com/cs3org/go-cs3apis/cs3/tx/v1beta1
github.com/cs3org/go-cs3apis/cs3/types/v1beta1
# github.com/cs3org/reva/v2 v2.14.1-0.20230630110658-4d867d522806
# github.com/cs3org/reva/v2 v2.14.1-0.20230711102918-b095db01ac36
## explicit; go 1.20
github.com/cs3org/reva/v2/cmd/revad/internal/grace
github.com/cs3org/reva/v2/cmd/revad/runtime
@@ -658,6 +658,7 @@ github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/migrator
github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/mtimesyncedcache
github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node
github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options
github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/spaceidindex
github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/tree
github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/upload
github.com/cs3org/reva/v2/pkg/storage/utils/downloader
@@ -881,7 +882,7 @@ github.com/go-logr/stdr
# github.com/go-micro/plugins/v4/client/grpc v1.2.0
## explicit; go 1.17
github.com/go-micro/plugins/v4/client/grpc
# github.com/go-micro/plugins/v4/events/natsjs v1.2.0
# github.com/go-micro/plugins/v4/events/natsjs v1.2.1
## explicit; go 1.17
github.com/go-micro/plugins/v4/events/natsjs
# github.com/go-micro/plugins/v4/logger/zerolog v1.2.0
@@ -1052,6 +1053,9 @@ github.com/google/go-tika/tika
# github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1
## explicit; go 1.14
github.com/google/pprof/profile
# github.com/google/renameio/v2 v2.0.0
## explicit; go 1.13
github.com/google/renameio/v2
# github.com/google/uuid v1.3.0
## explicit
github.com/google/uuid