Merge pull request #9879 from owncloud/use-key-to-get-specific-trash-item
use key to get specific trash item
@@ -2,4 +2,5 @@ Enhancement: Bump reva

Bumps reva version

+https://github.com/owncloud/ocis/pull/9879
https://github.com/owncloud/ocis/pull/9860
@@ -0,0 +1,5 @@
+Bugfix: Use key to get specific trash item
+
+The activitylog and clientlog services now fetch only the specific trash item instead of listing everything in the trash and filtering it on their side. This reduces the load on the storage-users service because it no longer has to assemble a full trash listing.
+
+https://github.com/owncloud/ocis/pull/9879
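For illustration, this is the pattern the bugfix adopts, shown as a minimal sketch rather than the exact service code: the caller passes the item's opaque id as the Key of the ListRecycleRequest, so the storage provider returns just that entry instead of the whole trash listing. The names (gwc, ref, itemID) mirror the hunks below; error handling is trimmed.

    // Sketch: fetch a single trash item by key via the CS3 gateway client.
    // gwc is assumed to be a gateway.GatewayAPIClient and itemID a *provider.ResourceId,
    // as in the activitylog/clientlog hunks further down.
    resp, err := gwc.ListRecycle(ctx, &provider.ListRecycleRequest{
        Ref: ref,                  // reference to the space whose trash is queried
        Key: itemID.GetOpaqueId(), // restrict the listing to this one item
    })
    if err != nil {
        return err
    }
    // resp.GetRecycleItems() should now contain at most the requested item.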
go.mod (2 lines changed)
@@ -15,7 +15,7 @@ require (
    github.com/cenkalti/backoff v2.2.1+incompatible
    github.com/coreos/go-oidc/v3 v3.11.0
    github.com/cs3org/go-cs3apis v0.0.0-20240724121416-062c4e3046cb
-   github.com/cs3org/reva/v2 v2.23.1-0.20240823074930-ff4b71b50b7d
+   github.com/cs3org/reva/v2 v2.23.1-0.20240823142954-51e6e33750e7
    github.com/dhowden/tag v0.0.0-20230630033851-978a0926ee25
    github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e
    github.com/egirna/icap-client v0.1.1
go.sum (4 lines changed)
@@ -255,8 +255,8 @@ github.com/crewjam/saml v0.4.14 h1:g9FBNx62osKusnFzs3QTN5L9CVA/Egfgm+stJShzw/c=
github.com/crewjam/saml v0.4.14/go.mod h1:UVSZCf18jJkk6GpWNVqcyQJMD5HsRugBPf4I1nl2mME=
github.com/cs3org/go-cs3apis v0.0.0-20240724121416-062c4e3046cb h1:KmYZDReplv/yfwc1LNYpDcVhVujC3Pasv6WjXx1haSU=
github.com/cs3org/go-cs3apis v0.0.0-20240724121416-062c4e3046cb/go.mod h1:yyP8PRo0EZou3nSH7H4qjlzQwaydPeIRNgX50npQHpE=
-github.com/cs3org/reva/v2 v2.23.1-0.20240823074930-ff4b71b50b7d h1:fnb2A+ClZjlwMTPCBXQ3+NoSf7e5zBWK1XJsIaj0834=
-github.com/cs3org/reva/v2 v2.23.1-0.20240823074930-ff4b71b50b7d/go.mod h1:p7CHBXcg6sSqB+0JMNDfC1S7TSh9FghXkw1kTV3KcJI=
+github.com/cs3org/reva/v2 v2.23.1-0.20240823142954-51e6e33750e7 h1:q5U8sebSA3VqeLuf8Xhg1bVRxc8oJuRjQCjkl8xQPaI=
+github.com/cs3org/reva/v2 v2.23.1-0.20240823142954-51e6e33750e7/go.mod h1:p7CHBXcg6sSqB+0JMNDfC1S7TSh9FghXkw1kTV3KcJI=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
@@ -99,6 +99,7 @@ func WithTrashedResource(ref *provider.Reference, rid *provider.ResourceId) Acti

        resp, err := gwc.ListRecycle(ctx, &provider.ListRecycleRequest{
            Ref: ref,
+           Key: rid.GetOpaqueId(),
        })
        if err != nil {
            return err
@@ -221,6 +221,7 @@ func processShareEvent(ctx context.Context, ref *provider.Reference, gwc gateway
func processItemTrashedEvent(ctx context.Context, ref *provider.Reference, gwc gateway.GatewayAPIClient, initiatorid string, itemID *provider.ResourceId) ([]string, FileEvent, error) {
    resp, err := gwc.ListRecycle(ctx, &provider.ListRecycleRequest{
        Ref: ref,
+       Key: itemID.GetOpaqueId(),
    })
    if err != nil {
        return nil, FileEvent{}, err
@@ -345,7 +345,7 @@ func restoreTrashBindItem(cfg *config.Config) *cli.Command {

func listRecycle(ctx context.Context, client gateway.GatewayAPIClient, ref provider.Reference) (*provider.ListRecycleResponse, error) {
    _retrievingErrorMsg := "trash-bin items retrieving error"
-   res, err := client.ListRecycle(ctx, &provider.ListRecycleRequest{Ref: &ref, Key: "/"})
+   res, err := client.ListRecycle(ctx, &provider.ListRecycleRequest{Ref: &ref, Key: ""})
    if err != nil {
        return nil, fmt.Errorf("%s %w", _retrievingErrorMsg, err)
    }
@@ -126,30 +126,12 @@ The expected failures in this file are from features in the owncloud/ocis repo.

#### [Trying to upload to a locked file gives 500](https://github.com/owncloud/ocis/issues/7638)

- [apiLocks/lockFiles.feature:330](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/lockFiles.feature#L330)
- [apiLocks/lockFiles.feature:331](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/lockFiles.feature#L331)
- [apiLocks/lockFiles.feature:332](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/lockFiles.feature#L332)
- [apiLocks/lockFiles.feature:333](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/lockFiles.feature#L333)
- [apiLocks/lockFiles.feature:334](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/lockFiles.feature#L334)
- [apiLocks/lockFiles.feature:335](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/lockFiles.feature#L335)
- [apiLocks/unlockFiles.feature:87](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/unlockFiles.feature#L87)
- [apiLocks/unlockFiles.feature:88](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/unlockFiles.feature#L88)
- [apiLocks/unlockFiles.feature:89](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/unlockFiles.feature#L89)
- [apiLocks/unlockFiles.feature:90](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/unlockFiles.feature#L90)
- [apiLocks/unlockFiles.feature:91](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/unlockFiles.feature#L91)
- [apiLocks/unlockFiles.feature:92](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/unlockFiles.feature#L92)
- [apiLocks/lockFiles.feature:445](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/lockFiles.feature#L445)
- [apiLocks/lockFiles.feature:446](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/lockFiles.feature#L446)
- [apiLocks/lockFiles.feature:447](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/lockFiles.feature#L447)
- [apiLocks/lockFiles.feature:448](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/lockFiles.feature#L448)
- [apiLocks/lockFiles.feature:449](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/lockFiles.feature#L449)
- [apiLocks/lockFiles.feature:450](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/lockFiles.feature#L450)
- [apiLocks/lockFiles.feature:489](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/lockFiles.feature#L489)
- [apiLocks/lockFiles.feature:490](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/lockFiles.feature#L490)
- [apiLocks/lockFiles.feature:491](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/lockFiles.feature#L491)
- [apiLocks/lockFiles.feature:492](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/lockFiles.feature#L492)
- [apiLocks/lockFiles.feature:493](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/lockFiles.feature#L493)
- [apiLocks/lockFiles.feature:494](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/apiLocks/lockFiles.feature#L494)

#### [Folders can be locked and locking works partially](https://github.com/owncloud/ocis/issues/7641)
@@ -27,6 +27,7 @@ import (
    "path"
    "sort"
    "strconv"
+   "strings"
    "time"

    rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1"
@@ -436,6 +437,8 @@ func (s *Service) InitiateFileUpload(ctx context.Context, req *provider.Initiate
        st = status.NewInsufficientStorage(ctx, err, "insufficient storage")
    case errtypes.PreconditionFailed:
        st = status.NewFailedPrecondition(ctx, err, "failed precondition")
+   case errtypes.Locked:
+       st = status.NewLocked(ctx, "locked")
    default:
        st = status.NewInternal(ctx, "error getting upload id: "+err.Error())
    }
@@ -880,8 +883,11 @@ func (s *Service) ListRecycleStream(req *provider.ListRecycleStreamRequest, ss p
    ctx := ss.Context()
    log := appctx.GetLogger(ctx)

-   key, itemPath := router.ShiftPath(req.Key)
-   items, err := s.Storage.ListRecycle(ctx, req.Ref, key, itemPath)
+   // if no slash is present in the key, do not pass a relative path to the storage
+   // when a path is passed to the storage, it will list the contents of the directory
+   key, relativePath := splitKeyAndPath(req.GetKey())
+   items, err := s.Storage.ListRecycle(ctx, req.Ref, key, relativePath)

    if err != nil {
        var st *rpc.Status
        switch err.(type) {
@@ -924,8 +930,10 @@ func (s *Service) ListRecycleStream(req *provider.ListRecycleStreamRequest, ss p
}

func (s *Service) ListRecycle(ctx context.Context, req *provider.ListRecycleRequest) (*provider.ListRecycleResponse, error) {
-   key, itemPath := router.ShiftPath(req.Key)
-   items, err := s.Storage.ListRecycle(ctx, req.Ref, key, itemPath)
+   // if no slash is present in the key, do not pass a relative path to the storage
+   // when a path is passed to the storage, it will list the contents of the directory
+   key, relativePath := splitKeyAndPath(req.GetKey())
+   items, err := s.Storage.ListRecycle(ctx, req.Ref, key, relativePath)
    if err != nil {
        var st *rpc.Status
        switch err.(type) {
@@ -962,8 +970,8 @@ func (s *Service) RestoreRecycleItem(ctx context.Context, req *provider.RestoreR
    ctx = ctxpkg.ContextSetLockID(ctx, req.LockId)

    // TODO(labkode): CRITICAL: fill recycle info with storage provider.
-   key, itemPath := router.ShiftPath(req.Key)
-   err := s.Storage.RestoreRecycleItem(ctx, req.Ref, key, itemPath, req.RestoreRef)
+   key, relativePath := splitKeyAndPath(req.GetKey())
+   err := s.Storage.RestoreRecycleItem(ctx, req.Ref, key, relativePath, req.RestoreRef)

    res := &provider.RestoreRecycleItemResponse{
        Status: status.NewStatusFromErrType(ctx, "restore recycle item", err),
@@ -980,9 +988,9 @@ func (s *Service) PurgeRecycle(ctx context.Context, req *provider.PurgeRecycleRe
    }

    // if a key was sent as opaque id purge only that item
-   key, itemPath := router.ShiftPath(req.Key)
+   key, relativePath := splitKeyAndPath(req.GetKey())
    if key != "" {
-       if err := s.Storage.PurgeRecycleItem(ctx, req.Ref, key, itemPath); err != nil {
+       if err := s.Storage.PurgeRecycleItem(ctx, req.Ref, key, relativePath); err != nil {
            st := status.NewStatusFromErrType(ctx, "error purging recycle item", err)
            appctx.GetLogger(ctx).
                Error().
@@ -1313,3 +1321,12 @@ func canLockPublicShare(ctx context.Context) bool {
    psr := utils.ReadPlainFromOpaque(u.Opaque, "public-share-role")
    return psr == "" || psr == conversions.RoleEditor
}
+
+// splitKeyAndPath splits a key into a root and a relative path
+func splitKeyAndPath(key string) (string, string) {
+   root, relativePath := router.ShiftPath(key)
+   if relativePath == "/" && !strings.HasSuffix(key, "/") {
+       relativePath = ""
+   }
+   return root, relativePath
+}
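To make the new helper's behaviour concrete, here is a standalone sketch, not the vendored code: it re-implements the first-segment split that router.ShiftPath is assumed to provide (first path segment plus the remainder with a leading slash) and shows how hypothetical trash keys would split.

    // Illustrative sketch of splitKeyAndPath from the hunk above.
    package main

    import (
        "fmt"
        "path"
        "strings"
    )

    // shiftPath mimics the assumed ShiftPath semantics: first segment, then the rest.
    func shiftPath(p string) (head, tail string) {
        p = path.Clean("/" + p)
        if i := strings.Index(p[1:], "/") + 1; i > 0 {
            return p[1:i], p[i:]
        }
        return p[1:], "/"
    }

    // splitKeyAndPath reproduces the helper added in the diff.
    func splitKeyAndPath(key string) (string, string) {
        root, relativePath := shiftPath(key)
        if relativePath == "/" && !strings.HasSuffix(key, "/") {
            relativePath = ""
        }
        return root, relativePath
    }

    func main() {
        for _, key := range []string{"opaque-id", "opaque-id/", "opaque-id/sub/dir"} {
            root, rel := splitKeyAndPath(key)
            fmt.Printf("%q -> root=%q relativePath=%q\n", key, root, rel)
        }
        // Expected under these assumptions:
        //   "opaque-id"         -> root="opaque-id" relativePath=""
        //   "opaque-id/"        -> root="opaque-id" relativePath="/"
        //   "opaque-id/sub/dir" -> root="opaque-id" relativePath="/sub/dir"
    }

A bare opaque id therefore no longer produces a "/" path, which is what lets the storage return the single item instead of listing a directory.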
vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/dav.go (2 lines changed, generated, vendored)
@@ -291,7 +291,7 @@ func (h *DavHandler) Handler(s *svc) http.Handler {
            http.Redirect(w, r, rUrl, http.StatusTemporaryRedirect)
            return
        }
-       log.Debug().Str("token", token).Interface("status", res.Status).Msg("resource id not found")
+       log.Debug().Str("token", token).Interface("status", psRes.Status).Msg("resource id not found")
        w.WriteHeader(http.StatusNotFound)
        return
    }
vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/put.go (2 lines changed, generated, vendored)
@@ -325,8 +325,6 @@ func (s *svc) handlePut(ctx context.Context, w http.ResponseWriter, r *http.Requ
        w.WriteHeader(http.StatusPreconditionFailed)
    case rpc.Code_CODE_FAILED_PRECONDITION:
        w.WriteHeader(http.StatusConflict)
    case rpc.Code_CODE_NOT_FOUND:
        w.WriteHeader(http.StatusNotFound)
    default:
        errors.HandleErrorStatus(&log, w, uRes.Status)
    }
vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/spaces.go (25 lines changed, generated, vendored)
@@ -21,6 +21,7 @@ package ocdav
import (
    "net/http"
    "path"
+   "strings"

    provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
    "github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/config"
@@ -132,8 +133,7 @@ func (h *SpacesHandler) handleSpacesTrashbin(w http.ResponseWriter, r *http.Requ
    ctx := r.Context()
    log := appctx.GetLogger(ctx)

-   var spaceID string
-   spaceID, r.URL.Path = router.ShiftPath(r.URL.Path)
+   spaceID, key := splitSpaceAndKey(r.URL.Path)
    if spaceID == "" {
        // listing is disabled, no auth will change that
        w.WriteHeader(http.StatusMethodNotAllowed)
@@ -146,12 +146,9 @@ func (h *SpacesHandler) handleSpacesTrashbin(w http.ResponseWriter, r *http.Requ
        return
    }

-   var key string
-   key, r.URL.Path = router.ShiftPath(r.URL.Path)
-
    switch r.Method {
    case MethodPropfind:
-       trashbinHandler.listTrashbin(w, r, s, &ref, path.Join(_trashbinPath, spaceID), key, r.URL.Path)
+       trashbinHandler.listTrashbin(w, r, s, &ref, path.Join(_trashbinPath, spaceID), key)
    case MethodMove:
        if key == "" {
            http.Error(w, "501 Not implemented", http.StatusNotImplemented)
@@ -167,15 +164,25 @@ func (h *SpacesHandler) handleSpacesTrashbin(w http.ResponseWriter, r *http.Requ
            w.WriteHeader(http.StatusBadRequest)
            return
        }
-       log.Debug().Str("key", key).Str("path", r.URL.Path).Str("dst", dst).Msg("spaces restore")
+       log.Debug().Str("key", key).Str("dst", dst).Msg("spaces restore")

        dstRef := proto.Clone(&ref).(*provider.Reference)
        dstRef.Path = utils.MakeRelativePath(dst)

-       trashbinHandler.restore(w, r, s, &ref, dstRef, key, r.URL.Path)
+       trashbinHandler.restore(w, r, s, &ref, dstRef, key)
    case http.MethodDelete:
-       trashbinHandler.delete(w, r, s, &ref, key, r.URL.Path)
+       trashbinHandler.delete(w, r, s, &ref, key)
    default:
        http.Error(w, "501 Not implemented", http.StatusNotImplemented)
    }
}

+func splitSpaceAndKey(p string) (space, key string) {
+   p = strings.TrimPrefix(p, "/")
+   parts := strings.SplitN(p, "/", 2)
+   space = parts[0]
+   if len(parts) > 1 {
+       key = parts[1]
+   }
+   return
+}
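As a quick usage illustration of the new splitSpaceAndKey helper, here is a standalone sketch; the example URL paths are made up and only demonstrate the expected splitting of a spaces trash-bin request path into space id and trash key.

    // Standalone sketch of splitSpaceAndKey from the hunk above (example paths are hypothetical).
    package main

    import (
        "fmt"
        "strings"
    )

    func splitSpaceAndKey(p string) (space, key string) {
        p = strings.TrimPrefix(p, "/")
        parts := strings.SplitN(p, "/", 2)
        space = parts[0]
        if len(parts) > 1 {
            key = parts[1]
        }
        return
    }

    func main() {
        for _, p := range []string{
            "/some-space-id",                  // list the whole trash of a space
            "/some-space-id/some-trash-key",   // address one trash item by key
            "/some-space-id/some-trash-key/a", // key plus a path inside the item
        } {
            space, key := splitSpaceAndKey(p)
            fmt.Printf("%s -> space=%q key=%q\n", p, space, key)
        }
    }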
vendor/github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/trashbin.go (127 lines changed, generated, vendored)
@@ -43,7 +43,6 @@ import (
    "github.com/cs3org/reva/v2/pkg/appctx"
    ctxpkg "github.com/cs3org/reva/v2/pkg/ctx"
    rstatus "github.com/cs3org/reva/v2/pkg/rgrpc/status"
-   "github.com/cs3org/reva/v2/pkg/rhttp/router"
    "github.com/cs3org/reva/v2/pkg/utils"
    semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)
@@ -74,7 +73,7 @@ func (h *TrashbinHandler) Handler(s *svc) http.Handler {
        }

        var username string
-       username, r.URL.Path = router.ShiftPath(r.URL.Path)
+       username, r.URL.Path = splitSpaceAndKey(r.URL.Path)
        if username == "" {
            // listing is disabled, no auth will change that
            w.WriteHeader(http.StatusMethodNotAllowed)
@@ -131,13 +130,12 @@ func (h *TrashbinHandler) Handler(s *svc) http.Handler {
        }
        ref := spacelookup.MakeRelativeReference(space, ".", false)

-       // key will be a base64 encoded cs3 path, it uniquely identifies a trash item & storage
-       var key string
-       key, r.URL.Path = router.ShiftPath(r.URL.Path)
+       // key will be a base64 encoded cs3 path, it uniquely identifies a trash item with an opaque id and an optional path
+       key := r.URL.Path

        switch r.Method {
        case MethodPropfind:
-           h.listTrashbin(w, r, s, ref, user.Username, key, r.URL.Path)
+           h.listTrashbin(w, r, s, ref, user.Username, key)
        case MethodMove:
            if key == "" {
                http.Error(w, "501 Not implemented", http.StatusNotImplemented)
@@ -172,50 +170,55 @@ func (h *TrashbinHandler) Handler(s *svc) http.Handler {
        dstRef := spacelookup.MakeRelativeReference(space, p, false)

        log.Debug().Str("key", key).Str("dst", dst).Msg("restore")
-       h.restore(w, r, s, ref, dstRef, key, r.URL.Path)
+       h.restore(w, r, s, ref, dstRef, key)
    case http.MethodDelete:
-       h.delete(w, r, s, ref, key, r.URL.Path)
+       h.delete(w, r, s, ref, key)
    default:
        http.Error(w, "501 Not implemented", http.StatusNotImplemented)
    }
    })
}

-func (h *TrashbinHandler) listTrashbin(w http.ResponseWriter, r *http.Request, s *svc, ref *provider.Reference, refBase, key, itemPath string) {
+func (h *TrashbinHandler) getDepth(r *http.Request) (net.Depth, error) {
+   dh := r.Header.Get(net.HeaderDepth)
+   depth, err := net.ParseDepth(dh)
+   if err != nil || depth == net.DepthInfinity && !h.allowPropfindDepthInfinitiy {
+       return "", errors.ErrInvalidDepth
+   }
+   return depth, nil
+}
+
+func (h *TrashbinHandler) listTrashbin(w http.ResponseWriter, r *http.Request, s *svc, ref *provider.Reference, refBase, key string) {
    ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "list_trashbin")
    defer span.End()

    sublog := appctx.GetLogger(ctx).With().Logger()

-   dh := r.Header.Get(net.HeaderDepth)
-   depth, err := net.ParseDepth(dh)
+   depth, err := h.getDepth(r)
    if err != nil {
        span.RecordError(err)
        span.SetStatus(codes.Error, "Invalid Depth header value")
        span.SetAttributes(semconv.HTTPStatusCodeKey.Int(http.StatusBadRequest))
-       sublog.Debug().Str("depth", dh).Msg(err.Error())
+       sublog.Debug().Str("depth", r.Header.Get(net.HeaderDepth)).Msg(err.Error())
        w.WriteHeader(http.StatusBadRequest)
-       m := fmt.Sprintf("Invalid Depth header value: %v", dh)
+       m := fmt.Sprintf("Invalid Depth header value: %v", r.Header.Get(net.HeaderDepth))
        b, err := errors.Marshal(http.StatusBadRequest, m, "", "")
        errors.HandleWebdavError(&sublog, w, b, err)
        return
    }

-   if depth == net.DepthInfinity && !h.allowPropfindDepthInfinitiy {
-       span.RecordError(errors.ErrInvalidDepth)
-       span.SetStatus(codes.Error, "DEPTH: infinity is not supported")
-       span.SetAttributes(semconv.HTTPStatusCodeKey.Int(http.StatusBadRequest))
-       sublog.Debug().Str("depth", dh).Msg(errors.ErrInvalidDepth.Error())
-       w.WriteHeader(http.StatusBadRequest)
-       m := fmt.Sprintf("Invalid Depth header value: %v", dh)
-       b, err := errors.Marshal(http.StatusBadRequest, m, "", "")
-       errors.HandleWebdavError(&sublog, w, b, err)
+   pf, status, err := propfind.ReadPropfind(r.Body)
+   if err != nil {
+       sublog.Debug().Err(err).Msg("error reading propfind request")
+       w.WriteHeader(status)
        return
    }

-   if depth == net.DepthZero {
-       rootHref := path.Join(refBase, key, itemPath)
-       propRes, err := h.formatTrashPropfind(ctx, s, ref.ResourceId.SpaceId, refBase, rootHref, nil, nil)
+   if key == "" && depth == net.DepthZero {
+       // we are listing the trash root, but without children
+       // so we just fake a root element without actually querying the gateway
+       rootHref := path.Join(refBase, key)
+       propRes, err := h.formatTrashPropfind(ctx, s, ref.ResourceId.SpaceId, refBase, rootHref, &pf, nil, true)
        if err != nil {
            sublog.Error().Err(err).Msg("error formatting propfind")
            w.WriteHeader(http.StatusInternalServerError)
@@ -232,11 +235,9 @@ func (h *TrashbinHandler) listTrashbin(w http.ResponseWriter, r *http.Request, s
        return
    }

-   pf, status, err := propfind.ReadPropfind(r.Body)
-   if err != nil {
-       sublog.Debug().Err(err).Msg("error reading propfind request")
-       w.WriteHeader(status)
-       return
+   if depth == net.DepthOne && key != "" && !strings.HasSuffix(key, "/") {
+       // when a key is provided and the depth is 1 we need to append a / to the key to list the children
+       key += "/"
    }

    client, err := s.gatewaySelector.Next()
@@ -246,7 +247,7 @@ func (h *TrashbinHandler) listTrashbin(w http.ResponseWriter, r *http.Request, s
        return
    }
    // ask gateway for recycle items
-   getRecycleRes, err := client.ListRecycle(ctx, &provider.ListRecycleRequest{Ref: ref, Key: path.Join(key, itemPath)})
+   getRecycleRes, err := client.ListRecycle(ctx, &provider.ListRecycleRequest{Ref: ref, Key: key})
    if err != nil {
        sublog.Error().Err(err).Msg("error calling ListRecycle")
        w.WriteHeader(http.StatusInternalServerError)
@@ -270,7 +271,7 @@ func (h *TrashbinHandler) listTrashbin(w http.ResponseWriter, r *http.Request, s
    for i := len(items) - 1; i >= 0; i-- {
        // for i := range res.Infos {
        if items[i].Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER {
-           stack = append(stack, items[i].Key)
+           stack = append(stack, items[i].Key+"/") // fetch children of the item
        }
    }

@@ -304,8 +305,8 @@ func (h *TrashbinHandler) listTrashbin(w http.ResponseWriter, r *http.Request, s
        }
    }

-   rootHref := path.Join(refBase, key, itemPath)
-   propRes, err := h.formatTrashPropfind(ctx, s, ref.ResourceId.SpaceId, refBase, rootHref, &pf, items)
+   rootHref := path.Join(refBase, key)
+   propRes, err := h.formatTrashPropfind(ctx, s, ref.ResourceId.SpaceId, refBase, rootHref, &pf, items, depth != net.DepthZero)
    if err != nil {
        sublog.Error().Err(err).Msg("error formatting propfind")
        w.WriteHeader(http.StatusInternalServerError)
@@ -321,29 +322,30 @@ func (h *TrashbinHandler) listTrashbin(w http.ResponseWriter, r *http.Request, s
    }
}

-func (h *TrashbinHandler) formatTrashPropfind(ctx context.Context, s *svc, spaceID, refBase, rootHref string, pf *propfind.XML, items []*provider.RecycleItem) ([]byte, error) {
+func (h *TrashbinHandler) formatTrashPropfind(ctx context.Context, s *svc, spaceID, refBase, rootHref string, pf *propfind.XML, items []*provider.RecycleItem, fakeRoot bool) ([]byte, error) {
    responses := make([]*propfind.ResponseXML, 0, len(items)+1)
    // add trashbin dir . entry
-   responses = append(responses, &propfind.ResponseXML{
-       Href: net.EncodePath(path.Join(ctx.Value(net.CtxKeyBaseURI).(string), rootHref) + "/"), // url encode response.Href TODO
-       Propstat: []propfind.PropstatXML{
-           {
-               Status: "HTTP/1.1 200 OK",
-               Prop: []prop.PropertyXML{
-                   prop.Raw("d:resourcetype", "<d:collection/>"),
-               },
-           },
-           {
-               Status: "HTTP/1.1 404 Not Found",
-               Prop: []prop.PropertyXML{
-                   prop.NotFound("oc:trashbin-original-filename"),
-                   prop.NotFound("oc:trashbin-original-location"),
-                   prop.NotFound("oc:trashbin-delete-datetime"),
-                   prop.NotFound("d:getcontentlength"),
-               },
-           },
-       },
-   })
+   if fakeRoot {
+       responses = append(responses, &propfind.ResponseXML{
+           Href: net.EncodePath(path.Join(ctx.Value(net.CtxKeyBaseURI).(string), rootHref) + "/"), // url encode response.Href TODO
+           Propstat: []propfind.PropstatXML{
+               {
+                   Status: "HTTP/1.1 200 OK",
+                   Prop: []prop.PropertyXML{
+                       prop.Raw("d:resourcetype", "<d:collection/>"),
+                   },
+               },
+               {
+                   Status: "HTTP/1.1 404 Not Found",
+                   Prop: []prop.PropertyXML{
+                       prop.NotFound("oc:trashbin-original-filename"),
+                       prop.NotFound("oc:trashbin-original-location"),
+                       prop.NotFound("oc:trashbin-delete-datetime"),
+                       prop.NotFound("d:getcontentlength"),
+                   },
+               },
+           },
+       })
+   }

    for i := range items {
        res, err := h.itemToPropResponse(ctx, s, spaceID, refBase, pf, items[i])
@@ -401,7 +403,7 @@ func (h *TrashbinHandler) itemToPropResponse(ctx context.Context, s *svc, spaceI
    propstatOK.Prop = append(propstatOK.Prop, prop.Escaped("oc:trashbin-delete-datetime", dTime))
    if item.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER {
        propstatOK.Prop = append(propstatOK.Prop, prop.Raw("d:resourcetype", "<d:collection/>"))
-       // TODO(jfd): decide if we can and want to list oc:size for folders
+       propstatOK.Prop = append(propstatOK.Prop, prop.Raw("oc:size", size))
    } else {
        propstatOK.Prop = append(propstatOK.Prop,
            prop.Escaped("d:resourcetype", ""),
@@ -426,7 +428,7 @@ func (h *TrashbinHandler) itemToPropResponse(ctx context.Context, s *svc, spaceI
    switch pf.Prop[i].Local {
    case "oc:size":
        if item.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER {
-           propstatOK.Prop = append(propstatOK.Prop, prop.Escaped("d:getcontentlength", size))
+           propstatOK.Prop = append(propstatOK.Prop, prop.Escaped("oc:size", size))
        } else {
            propstatNotFound.Prop = append(propstatNotFound.Prop, prop.NotFound("oc:size"))
        }
@@ -480,7 +482,7 @@ func (h *TrashbinHandler) itemToPropResponse(ctx context.Context, s *svc, spaceI
    return &response, nil
}

-func (h *TrashbinHandler) restore(w http.ResponseWriter, r *http.Request, s *svc, ref, dst *provider.Reference, key, itemPath string) {
+func (h *TrashbinHandler) restore(w http.ResponseWriter, r *http.Request, s *svc, ref, dst *provider.Reference, key string) {
    ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "restore")
    defer span.End()

@@ -566,7 +568,7 @@ func (h *TrashbinHandler) restore(w http.ResponseWriter, r *http.Request, s *svc

    req := &provider.RestoreRecycleItemRequest{
        Ref: ref,
-       Key: path.Join(key, itemPath),
+       Key: key,
        RestoreRef: dst,
    }

@@ -608,16 +610,15 @@ func (h *TrashbinHandler) restore(w http.ResponseWriter, r *http.Request, s *svc
}

// delete has only a key
-func (h *TrashbinHandler) delete(w http.ResponseWriter, r *http.Request, s *svc, ref *provider.Reference, key, itemPath string) {
+func (h *TrashbinHandler) delete(w http.ResponseWriter, r *http.Request, s *svc, ref *provider.Reference, key string) {
    ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(r.Context(), "erase")
    defer span.End()

-   sublog := appctx.GetLogger(ctx).With().Interface("reference", ref).Str("key", key).Str("item_path", itemPath).Logger()
+   sublog := appctx.GetLogger(ctx).With().Interface("reference", ref).Str("key", key).Logger()

-   trashPath := path.Join(key, itemPath)
    req := &provider.PurgeRecycleRequest{
        Ref: ref,
-       Key: trashPath,
+       Key: key,
    }

    client, err := s.gatewaySelector.Next()
@@ -638,7 +639,7 @@ func (h *TrashbinHandler) delete(w http.ResponseWriter, r *http.Request, s *svc,
    case rpc.Code_CODE_NOT_FOUND:
        sublog.Debug().Interface("status", res.Status).Msg("resource not found")
        w.WriteHeader(http.StatusConflict)
-       m := fmt.Sprintf("path %s not found", trashPath)
+       m := fmt.Sprintf("key %s not found", key)
        b, err := errors.Marshal(http.StatusConflict, m, "", "")
        errors.HandleWebdavError(&sublog, w, b, err)
    case rpc.Code_CODE_PERMISSION_DENIED:
vendor/github.com/cs3org/reva/v2/pkg/storage/fs/nextcloud/nextcloud_server_mock.go (10 lines changed, generated, vendored)
@@ -129,8 +129,8 @@ var responses = map[string]Response{
    `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/ListGrants {"path":"/subdir"} GRANT-UPDATED`: {200, `[{"grantee":{"type":1,"Id":{"UserId":{"idp":"some-idp","opaque_id":"some-opaque-id","type":1}}},"permissions":{"add_grant":true,"create_container":true,"delete":true,"get_path":true,"get_quota":true,"initiate_file_download":true,"initiate_file_upload":true,"list_grants":true,"list_container":true,"list_file_versions":true,"list_recycle":true,"move":true,"remove_grant":true,"purge_recycle":true,"restore_file_version":true,"restore_recycle_item":true,"stat":true,"update_grant":true,"deny_grant":true}}]`, serverStateEmpty},
    `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/ListGrants {"path":"/subdir"} GRANT-REMOVED`: {200, `[]`, serverStateEmpty},

-   `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/ListRecycle {"key":"","path":"/"} EMPTY`: {200, `[]`, serverStateEmpty},
-   `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/ListRecycle {"key":"","path":"/"} RECYCLE`: {200, `[{"opaque":{},"key":"some-deleted-version","ref":{"resource_id":{},"path":"/subdir"},"size":12345,"deletion_time":{"seconds":1234567890}}]`, serverStateRecycle},
+   `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/ListRecycle {"key":"","path":""} EMPTY`: {200, `[]`, serverStateEmpty},
+   `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/ListRecycle {"key":"","path":""} RECYCLE`: {200, `[{"opaque":{},"key":"some-deleted-version","ref":{"resource_id":{},"path":"/subdir"},"size":12345,"deletion_time":{"seconds":1234567890}}]`, serverStateRecycle},

    `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/ListRevisions {"path":"/versionedFile"} EMPTY`: {200, `[{"opaque":{"map":{"some":{"value":"ZGF0YQ=="}}},"key":"version-12","size":1,"mtime":1234567890,"etag":"deadb00f"}]`, serverStateEmpty},
    `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/ListRevisions {"path":"/versionedFile"} FILE-RESTORED`: {200, `[{"opaque":{"map":{"some":{"value":"ZGF0YQ=="}}},"key":"version-12","size":1,"mtime":1234567890,"etag":"deadb00f"},{"opaque":{"map":{"different":{"value":"c3R1ZmY="}}},"key":"asdf","size":2,"mtime":1234567890,"etag":"deadbeef"}]`, serverStateFileRestored},
@@ -139,9 +139,9 @@ var responses = map[string]Response{

    `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/RemoveGrant {"path":"/subdir"} GRANT-ADDED`: {200, ``, serverStateGrantRemoved},

-   `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/RestoreRecycleItem null`: {200, ``, serverStateSubdir},
-   `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/RestoreRecycleItem {"key":"some-deleted-version","path":"/","restoreRef":{"path":"/subdirRestored"}}`: {200, ``, serverStateFileRestored},
-   `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/RestoreRecycleItem {"key":"some-deleted-version","path":"/","restoreRef":null}`: {200, ``, serverStateFileRestored},
+   `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/RestoreRecycleItem null`: {200, ``, serverStateSubdir},
+   `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/RestoreRecycleItem {"key":"some-deleted-version","path":"","restoreRef":{"path":"/subdirRestored"}}`: {200, ``, serverStateFileRestored},
+   `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/RestoreRecycleItem {"key":"some-deleted-version","path":"","restoreRef":null}`: {200, ``, serverStateFileRestored},

    `POST /apps/sciencemesh/~f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c/api/storage/RestoreRevision {"ref":{"path":"/versionedFile"},"key":"version-12"}`: {200, ``, serverStateFileRestored},
vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/recycle.go (51 lines changed, generated, vendored)
@@ -23,7 +23,6 @@ import (
    iofs "io/fs"
    "os"
    "path/filepath"
-   "strconv"
    "strings"
    "time"

@@ -55,6 +54,9 @@ func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference
    if ref == nil || ref.ResourceId == nil || ref.ResourceId.OpaqueId == "" {
        return nil, errtypes.BadRequest("spaceid required")
    }
+   if key == "" && relativePath != "" {
+       return nil, errtypes.BadRequest("key is required when navigating with a path")
+   }
    spaceID := ref.ResourceId.OpaqueId

    sublog := appctx.GetLogger(ctx).With().Str("spaceid", spaceID).Str("key", key).Str("relative_path", relativePath).Logger()
@@ -75,7 +77,7 @@ func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference
        return nil, errtypes.NotFound(key)
    }

-   if key == "" && relativePath == "/" {
+   if key == "" && relativePath == "" {
        return fs.listTrashRoot(ctx, spaceID)
    }

@@ -113,16 +115,25 @@ func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference
        sublog.Error().Err(err).Msg("could not parse time format, ignoring")
    }

-   nodeType := fs.lu.TypeFromPath(ctx, originalPath)
-   if nodeType != provider.ResourceType_RESOURCE_TYPE_CONTAINER {
+   var size int64
+   if relativePath == "" {
        // this is the case when we want to directly list a file in the trashbin
-       blobsize, err := strconv.ParseInt(string(attrs[prefixes.BlobsizeAttr]), 10, 64)
-       if err != nil {
-           return items, err
+       nodeType := fs.lu.TypeFromPath(ctx, originalPath)
+       switch nodeType {
+       case provider.ResourceType_RESOURCE_TYPE_FILE:
+           size, err = fs.lu.ReadBlobSizeAttr(ctx, originalPath)
+           if err != nil {
+               return items, err
+           }
+       case provider.ResourceType_RESOURCE_TYPE_CONTAINER:
+           size, err = fs.lu.MetadataBackend().GetInt64(ctx, originalPath, prefixes.TreesizeAttr)
+           if err != nil {
+               return items, err
+           }
        }
        item := &provider.RecycleItem{
-           Type:         nodeType,
-           Size:         uint64(blobsize),
+           Type:         fs.lu.TypeFromPath(ctx, originalPath),
+           Size:         uint64(size),
            Key:          filepath.Join(key, relativePath),
            DeletionTime: deletionTime,
            Ref: &provider.Reference{
@@ -134,9 +145,6 @@ func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference
    }

    // we have to read the names and stat the path to follow the symlinks
-   if err != nil {
-       return nil, err
-   }
    childrenPath := filepath.Join(originalPath, relativePath)
    childrenDir, err := os.Open(childrenPath)
    if err != nil {
@@ -154,9 +162,10 @@ func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference
            continue
        }

-       size := int64(0)
+       // reset size
+       size = 0

-       nodeType = fs.lu.TypeFromPath(ctx, resolvedChildPath)
+       nodeType := fs.lu.TypeFromPath(ctx, resolvedChildPath)
        switch nodeType {
        case provider.ResourceType_RESOURCE_TYPE_FILE:
            size, err = fs.lu.ReadBlobSizeAttr(ctx, resolvedChildPath)
@@ -165,12 +174,7 @@ func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference
            continue
        }
    case provider.ResourceType_RESOURCE_TYPE_CONTAINER:
-       attr, err := fs.lu.MetadataBackend().Get(ctx, resolvedChildPath, prefixes.TreesizeAttr)
-       if err != nil {
-           sublog.Error().Err(err).Str("name", name).Msg("invalid tree size, skipping")
-           continue
-       }
-       size, err = strconv.ParseInt(string(attr), 10, 64)
+       size, err = fs.lu.MetadataBackend().GetInt64(ctx, resolvedChildPath, prefixes.TreesizeAttr)
        if err != nil {
            sublog.Error().Err(err).Str("name", name).Msg("invalid tree size, skipping")
            continue
@@ -217,7 +221,7 @@ func readTrashLink(path string) (string, string, string, error) {
func (fs *Decomposedfs) listTrashRoot(ctx context.Context, spaceID string) ([]*provider.RecycleItem, error) {
    log := appctx.GetLogger(ctx)
    trashRoot := fs.getRecycleRoot(spaceID)

    items := []*provider.RecycleItem{}
    subTrees, err := filepath.Glob(trashRoot + "/*")
    if err != nil {
        return nil, err
@@ -256,6 +260,7 @@ func (fs *Decomposedfs) listTrashRoot(ctx context.Context, spaceID string) ([]*p
    }

    for _, itemPath := range matches {
        // TODO can we encode this in the path instead of reading the link?
        nodePath, nodeID, timeSuffix, err := readTrashLink(itemPath)
        if err != nil {
            log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Msg("error reading trash link, skipping")
@@ -300,6 +305,7 @@ func (fs *Decomposedfs) listTrashRoot(ctx context.Context, spaceID string) ([]*p
    } else {
        log.Error().Str("trashRoot", trashRoot).Str("item", itemPath).Str("spaceid", spaceID).Str("nodeid", nodeID).Str("dtime", timeSuffix).Msg("could not read origin path")
    }

    select {
    case results <- item:
    case <-ctx.Done():
@@ -318,7 +324,6 @@ func (fs *Decomposedfs) listTrashRoot(ctx context.Context, spaceID string) ([]*p
    }()

    // Collect results
    items := []*provider.RecycleItem{}
    for ri := range results {
        items = append(items, ri)
    }
@@ -414,7 +419,7 @@ func (fs *Decomposedfs) EmptyRecycle(ctx context.Context, ref *provider.Referenc
        return errtypes.BadRequest("spaceid must be set")
    }

-   items, err := fs.ListRecycle(ctx, ref, "", "/")
+   items, err := fs.ListRecycle(ctx, ref, "", "")
    if err != nil {
        return err
    }
vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/tree/tree.go (6 lines changed, generated, vendored)
@@ -584,10 +584,8 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa

    attrs := node.Attributes{}
    attrs.SetString(prefixes.NameAttr, targetNode.Name)
-   if trashPath != "" {
-       // set ParentidAttr to restorePath's node parent id
-       attrs.SetString(prefixes.ParentidAttr, targetNode.ParentID)
-   }
+   // set ParentidAttr to restorePath's node parent id
+   attrs.SetString(prefixes.ParentidAttr, targetNode.ParentID)

    if err = recycleNode.SetXattrsWithContext(ctx, attrs, true); err != nil {
        return errors.Wrap(err, "Decomposedfs: could not update recycle node")
vendor/github.com/tus/tusd/v2/pkg/handler/unrouted_handler.go (20 lines changed, generated, vendored)
@@ -199,7 +199,6 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
    if origin := r.Header.Get("Origin"); !cors.Disable && origin != "" {
        originIsAllowed := cors.AllowOrigin.MatchString(origin)
        if !originIsAllowed {
-           fmt.Println("ORIGIN IS NOT ALLOWED", origin)
            handler.sendError(c, ErrOriginNotAllowed)
            return
        }
@@ -692,14 +691,12 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request)
// PatchFile adds a chunk to an upload. This operation is only allowed
// if enough space in the upload is left.
func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request) {
-   fmt.Println("PATCH FILE")
    c := handler.getContext(w, r)

    isTusV1 := !handler.usesIETFDraft(r)

    // Check for presence of application/offset+octet-stream
    if isTusV1 && r.Header.Get("Content-Type") != "application/offset+octet-stream" {
-       fmt.Println("WRONG CONTENT TYPE")
        handler.sendError(c, ErrInvalidContentType)
        return
    }
@@ -707,14 +704,12 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
    // Check for presence of a valid Upload-Offset Header
    offset, err := strconv.ParseInt(r.Header.Get("Upload-Offset"), 10, 64)
    if err != nil || offset < 0 {
-       fmt.Println("WRONG OFFSET")
        handler.sendError(c, ErrInvalidOffset)
        return
    }

    id, err := extractIDFromPath(r.URL.Path)
    if err != nil {
-       fmt.Println("WRONG ID")
        handler.sendError(c, err)
        return
    }
@@ -723,7 +718,6 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
    if handler.composer.UsesLocker {
        lock, err := handler.lockUpload(c, id)
        if err != nil {
-           fmt.Println("WRONG LOCK")
            handler.sendError(c, err)
            return
        }
@@ -733,27 +727,23 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request

    upload, err := handler.composer.Core.GetUpload(c, id)
    if err != nil {
-       fmt.Println("WRONG UPLOAD")
        handler.sendError(c, err)
        return
    }

    info, err := upload.GetInfo(c)
    if err != nil {
-       fmt.Println("WRONG INFO")
        handler.sendError(c, err)
        return
    }

    // Modifying a final upload is not allowed
    if info.IsFinal {
-       fmt.Println("WRONG FINAL")
        handler.sendError(c, ErrModifyFinal)
        return
    }

    if offset != info.Offset {
-       fmt.Println("WRONG INFO OFFSET")
        handler.sendError(c, ErrMismatchOffset)
        return
    }
@@ -770,32 +760,27 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
    // Do not proxy the call to the data store if the upload is already completed
    if !info.SizeIsDeferred && info.Offset == info.Size {
        resp.Header["Upload-Offset"] = strconv.FormatInt(offset, 10)
-       fmt.Println("UPLOAD ALREADY COMPLETED")
        handler.sendResp(c, resp)
        return
    }

    if r.Header.Get("Upload-Length") != "" {
        if !handler.composer.UsesLengthDeferrer {
-           fmt.Println("UPLOAD LENGTH DEFERRER")
            handler.sendError(c, ErrNotImplemented)
            return
        }
        if !info.SizeIsDeferred {
-           fmt.Println("UPLOAD LENGTH NOT DEFERED")
            handler.sendError(c, ErrInvalidUploadLength)
            return
        }
        uploadLength, err := strconv.ParseInt(r.Header.Get("Upload-Length"), 10, 64)
        if err != nil || uploadLength < 0 || uploadLength < info.Offset || (handler.config.MaxSize > 0 && uploadLength > handler.config.MaxSize) {
-           fmt.Println("UPLOAD LENGTH INVALID")
            handler.sendError(c, ErrInvalidUploadLength)
            return
        }

        lengthDeclarableUpload := handler.composer.LengthDeferrer.AsLengthDeclarableUpload(upload)
        if err := lengthDeclarableUpload.DeclareLength(c, uploadLength); err != nil {
-           fmt.Println("UPLOAD LENGTH DECLARED")
            handler.sendError(c, err)
            return
        }
@@ -806,7 +791,6 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request

    resp, err = handler.writeChunk(c, resp, upload, info)
    if err != nil {
-       fmt.Println("CANT WRITE CHUNK")
        handler.sendError(c, err)
        return
    }
@@ -815,7 +799,6 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
    if willCompleteUpload && info.SizeIsDeferred {
        info, err = upload.GetInfo(c)
        if err != nil {
-           fmt.Println("CANT GET INFO")
            handler.sendError(c, err)
            return
        }
@@ -824,7 +807,6 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request

    lengthDeclarableUpload := handler.composer.LengthDeferrer.AsLengthDeclarableUpload(upload)
    if err := lengthDeclarableUpload.DeclareLength(c, uploadLength); err != nil {
-       fmt.Println("CANT UPLOAD LENGTH")
        handler.sendError(c, err)
        return
    }
@@ -834,14 +816,12 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request

        resp, err = handler.finishUploadIfComplete(c, resp, upload, info)
        if err != nil {
-           fmt.Println("CANT COMPLETE")
            handler.sendError(c, err)
            return
        }
    }

    handler.sendResp(c, resp)
-   fmt.Println("PATCH COMPLETE")
}

// writeChunk reads the body from the requests r and appends it to the upload
vendor/modules.txt (2 lines changed, vendored)
@@ -367,7 +367,7 @@ github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1
github.com/cs3org/go-cs3apis/cs3/storage/registry/v1beta1
github.com/cs3org/go-cs3apis/cs3/tx/v1beta1
github.com/cs3org/go-cs3apis/cs3/types/v1beta1
-# github.com/cs3org/reva/v2 v2.23.1-0.20240823074930-ff4b71b50b7d
+# github.com/cs3org/reva/v2 v2.23.1-0.20240823142954-51e6e33750e7
## explicit; go 1.21
github.com/cs3org/reva/v2/cmd/revad/internal/grace
github.com/cs3org/reva/v2/cmd/revad/runtime