Merge pull request #9852 from owncloud/bump-reva-to-2.23.0
[full-ci] chore: bump reva to 2.23.0
@@ -1,5 +1,22 @@
Enhancement: Bump reva
Enhancement: Bump reva to 2.23.0

* Fix [cs3org/reva#4741](https://github.com/cs3org/reva/pull/4741): Always find unique providers
* Fix [cs3org/reva#4762](https://github.com/cs3org/reva/pull/4762): Blanks in dav Content-Disposition header
* Fix [cs3org/reva#4775](https://github.com/cs3org/reva/pull/4775): Fixed the response code when copying a shared file to the personal space
* Fix [cs3org/reva#4633](https://github.com/cs3org/reva/pull/4633): Allow all users to create internal links
* Fix [cs3org/reva#4771](https://github.com/cs3org/reva/pull/4771): Deleting resources via their id
* Fix [cs3org/reva#4768](https://github.com/cs3org/reva/pull/4768): Fixed the file name validation if nodeid is used
* Fix [cs3org/reva#4758](https://github.com/cs3org/reva/pull/4758): Fix moving locked files, enable handling locked files via ocdav
* Fix [cs3org/reva#4774](https://github.com/cs3org/reva/pull/4774): Fix micro ocdav service init and registration
* Fix [cs3org/reva#4776](https://github.com/cs3org/reva/pull/4776): Fix response code for DEL file that is in postprocessing
* Fix [cs3org/reva#4746](https://github.com/cs3org/reva/pull/4746): Uploading the same file multiple times leads to orphaned blobs
* Fix [cs3org/reva#4778](https://github.com/cs3org/reva/pull/4778): Zero byte uploads
* Chg [cs3org/reva#4759](https://github.com/cs3org/reva/pull/4759): Updated to the latest version of the go-cs3apis
* Chg [cs3org/reva#4773](https://github.com/cs3org/reva/pull/4773): Ocis bumped
* Enh [cs3org/reva#4766](https://github.com/cs3org/reva/pull/4766): Set archiver output format via query parameter
* Enh [cs3org/reva#4763](https://github.com/cs3org/reva/pull/4763): Improve posixfs storage driver

https://github.com/owncloud/ocis/pull/9852
https://github.com/owncloud/ocis/pull/9763
https://github.com/owncloud/ocis/pull/9714
https://github.com/owncloud/ocis/pull/9715
go.mod (2 changes)
@@ -15,7 +15,7 @@ require (
	github.com/cenkalti/backoff v2.2.1+incompatible
	github.com/coreos/go-oidc/v3 v3.11.0
	github.com/cs3org/go-cs3apis v0.0.0-20240724121416-062c4e3046cb
	github.com/cs3org/reva/v2 v2.22.1-0.20240809114512-56b26ddd82cc
	github.com/cs3org/reva/v2 v2.23.0
	github.com/dhowden/tag v0.0.0-20230630033851-978a0926ee25
	github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e
	github.com/egirna/icap-client v0.1.1
go.sum (4 changes)
@@ -255,8 +255,8 @@ github.com/crewjam/saml v0.4.14 h1:g9FBNx62osKusnFzs3QTN5L9CVA/Egfgm+stJShzw/c=
github.com/crewjam/saml v0.4.14/go.mod h1:UVSZCf18jJkk6GpWNVqcyQJMD5HsRugBPf4I1nl2mME=
github.com/cs3org/go-cs3apis v0.0.0-20240724121416-062c4e3046cb h1:KmYZDReplv/yfwc1LNYpDcVhVujC3Pasv6WjXx1haSU=
github.com/cs3org/go-cs3apis v0.0.0-20240724121416-062c4e3046cb/go.mod h1:yyP8PRo0EZou3nSH7H4qjlzQwaydPeIRNgX50npQHpE=
github.com/cs3org/reva/v2 v2.22.1-0.20240809114512-56b26ddd82cc h1:ctPVsRj/QeWhYpNDAkUFXsBgtcR/PPsehdk8AIMLHok=
github.com/cs3org/reva/v2 v2.22.1-0.20240809114512-56b26ddd82cc/go.mod h1:p7CHBXcg6sSqB+0JMNDfC1S7TSh9FghXkw1kTV3KcJI=
github.com/cs3org/reva/v2 v2.23.0 h1:tRa+q6usndTQ6LbaxtfEub3UsKVruJ1l7HY6K+ZKS9s=
github.com/cs3org/reva/v2 v2.23.0/go.mod h1:p7CHBXcg6sSqB+0JMNDfC1S7TSh9FghXkw1kTV3KcJI=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
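For context, a vendored dependency bump like this is normally produced by pointing go.mod at the new tag and re-vendoring, roughly `go get github.com/cs3org/reva/v2@v2.23.0` followed by `go mod tidy` and `go mod vendor`; the changes to files under `vendor/` further down are the regenerated output of that step rather than hand edits.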
@@ -25,6 +25,7 @@ import (
	"strings"

	"github.com/cs3org/reva/v2/pkg/storagespace"
	"golang.org/x/sync/errgroup"
	"google.golang.org/genproto/protobuf/field_mask"
	"google.golang.org/grpc"
	codes "google.golang.org/grpc/codes"
@@ -60,11 +61,13 @@ func init() {
type config struct {
	GatewayAddr               string `mapstructure:"gateway_addr"`
	UserShareProviderEndpoint string `mapstructure:"usershareprovidersvc"`
	MaxConcurrency            int    `mapstructure:"max_concurrency"`
}

type service struct {
	gatewaySelector              pool.Selectable[gateway.GatewayAPIClient]
	sharingCollaborationSelector pool.Selectable[collaboration.CollaborationAPIClient]
	maxConcurrency               int
}

func (s *service) Close() error {
@@ -98,14 +101,19 @@ func NewDefault(m map[string]interface{}, _ *grpc.Server) (rgrpc.Service, error)
		return nil, errors.Wrap(err, "sharesstorageprovider: error getting UserShareProvider client")
	}

	return New(gatewaySelector, sharingCollaborationSelector)
	if c.MaxConcurrency <= 0 {
		c.MaxConcurrency = 5
	}

	return New(gatewaySelector, sharingCollaborationSelector, c.MaxConcurrency)
}

// New returns a new instance of the SharesStorageProvider service
func New(gatewaySelector pool.Selectable[gateway.GatewayAPIClient], sharingCollaborationSelector pool.Selectable[collaboration.CollaborationAPIClient]) (rgrpc.Service, error) {
func New(gatewaySelector pool.Selectable[gateway.GatewayAPIClient], sharingCollaborationSelector pool.Selectable[collaboration.CollaborationAPIClient], maxConcurrency int) (rgrpc.Service, error) {
	s := &service{
		gatewaySelector:              gatewaySelector,
		sharingCollaborationSelector: sharingCollaborationSelector,
		maxConcurrency:               maxConcurrency,
	}
	return s, nil
}
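The new `max_concurrency` option follows the usual pattern for this kind of service config: decode the generic option map into a typed struct via the mapstructure tags, then clamp missing or invalid values to a sane default before wiring the service. A minimal sketch of that pattern, assuming github.com/mitchellh/mapstructure for decoding (reva's actual decode helper is not shown in this diff); the `Config` type and the default of 5 mirror the hunk above but are otherwise illustrative:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Config mirrors the shape of the service config shown above; the field set
// here is illustrative, not the exact reva type.
type Config struct {
	GatewayAddr    string `mapstructure:"gateway_addr"`
	MaxConcurrency int    `mapstructure:"max_concurrency"`
}

func parseConfig(m map[string]interface{}) (*Config, error) {
	c := &Config{}
	if err := mapstructure.Decode(m, c); err != nil {
		return nil, err
	}
	// Guard against unset or nonsensical values, as the diff above does.
	if c.MaxConcurrency <= 0 {
		c.MaxConcurrency = 5
	}
	return c, nil
}

func main() {
	c, _ := parseConfig(map[string]interface{}{"gateway_addr": "localhost:9142"})
	fmt.Println(c.MaxConcurrency) // 5, because the option was not set
}
```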
@@ -399,7 +407,7 @@ func (s *service) ListStorageSpaces(ctx context.Context, req *provider.ListStora
	var shareInfo map[string]*provider.ResourceInfo
	var err error
	if fetchShares {
		receivedShares, shareInfo, err = s.fetchShares(ctx, req.Opaque, []string{}, &fieldmaskpb.FieldMask{ /*TODO mtime and etag only?*/ })
		receivedShares, shareInfo, err = s.fetchAcceptedShares(ctx, req.Opaque, []string{}, &fieldmaskpb.FieldMask{ /*TODO mtime and etag only?*/ })
		if err != nil {
			return nil, errors.Wrap(err, "sharesstorageprovider: error calling ListReceivedSharesRequest")
		}
@@ -710,7 +718,7 @@ func (s *service) Stat(ctx context.Context, req *provider.StatRequest) (*provide
	if !ok {
		return nil, fmt.Errorf("missing user in context")
	}
	receivedShares, shareMd, err := s.fetchShares(ctx, req.Opaque, req.ArbitraryMetadataKeys, req.FieldMask)
	receivedShares, shareMd, err := s.fetchAcceptedShares(ctx, req.Opaque, req.ArbitraryMetadataKeys, req.FieldMask)
	if err != nil {
		return nil, err
	}
@@ -806,7 +814,7 @@ func (s *service) ListContainer(ctx context.Context, req *provider.ListContainer
	// The root is empty, it is filled by mountpoints
	// so, when accessing the root via /dav/spaces, we need to list the accepted shares with their mountpoint

	receivedShares, shareMd, err := s.fetchShares(ctx, req.Opaque, req.ArbitraryMetadataKeys, req.FieldMask)
	receivedShares, shareMd, err := s.fetchAcceptedShares(ctx, req.Opaque, req.ArbitraryMetadataKeys, req.FieldMask)
	if err != nil {
		return nil, errors.Wrap(err, "sharesstorageprovider: error calling ListReceivedSharesRequest")
	}
@@ -1143,14 +1151,21 @@ func (s *service) rejectReceivedShare(ctx context.Context, receivedShare *collab
		return errtypes.NewErrtypeFromStatus(res.Status)
	}

func (s *service) fetchShares(ctx context.Context, opaque *typesv1beta1.Opaque, arbitraryMetadataKeys []string, fieldMask *field_mask.FieldMask) ([]*collaboration.ReceivedShare, map[string]*provider.ResourceInfo, error) {
func (s *service) fetchAcceptedShares(ctx context.Context, opaque *typesv1beta1.Opaque, arbitraryMetadataKeys []string, fieldMask *field_mask.FieldMask) ([]*collaboration.ReceivedShare, map[string]*provider.ResourceInfo, error) {
	sharingCollaborationClient, err := s.sharingCollaborationSelector.Next()
	if err != nil {
		return nil, nil, err
	}

	lsRes, err := sharingCollaborationClient.ListReceivedShares(ctx, &collaboration.ListReceivedSharesRequest{
		// FIXME filter by received shares for resource id - listing all shares is tooo expensive!
		Filters: []*collaboration.Filter{
			{
				Type: collaboration.Filter_TYPE_STATE,
				Term: &collaboration.Filter_State{
					State: collaboration.ShareState_SHARE_STATE_ACCEPTED,
				},
			},
		},
	})
	if err != nil {
		return nil, nil, errors.Wrap(err, "sharesstorageprovider: error calling ListReceivedSharesRequest")
@@ -1159,42 +1174,98 @@ func (s *service) fetchShares(ctx context.Context, opaque *typesv1beta1.Opaque,
		return nil, nil, fmt.Errorf("sharesstorageprovider: error calling ListReceivedSharesRequest")
	}

	gatewayClient, err := s.gatewaySelector.Next()
	if err != nil {
		return nil, nil, err
	numWorkers := s.maxConcurrency
	if len(lsRes.Shares) < numWorkers {
		numWorkers = len(lsRes.Shares)
	}
	type res struct {
		shareid string
		info    *provider.ResourceInfo
	}
	work := make(chan *collaboration.ReceivedShare, len(lsRes.Shares))
	results := make(chan res, len(lsRes.Shares))

	g, ctx := errgroup.WithContext(ctx)

	// Distribute work
	g.Go(func() error {
		defer close(work)
		for _, share := range lsRes.Shares {
			select {
			case work <- share:
			case <-ctx.Done():
				return ctx.Err()
			}
		}
		return nil
	})

	// Spawn workers that'll concurrently work the queue
	for i := 0; i < numWorkers; i++ {
		g.Go(func() error {
			for rs := range work {

				// only stat accepted shares
				if rs.State != collaboration.ShareState_SHARE_STATE_ACCEPTED {
					continue
				}
				if rs.Share.ResourceId.SpaceId == "" {
					// convert backwards compatible share id
					rs.Share.ResourceId.StorageId, rs.Share.ResourceId.SpaceId = storagespace.SplitStorageID(rs.Share.ResourceId.StorageId)
				}

				gatewayClient, err := s.gatewaySelector.Next()
				if err != nil {
					appctx.GetLogger(ctx).Error().
						Err(err).
						Interface("resourceID", rs.Share.ResourceId).
						Msg("ListRecievedShares: failed to select next gateway client")
					return err
				}
				sRes, err := gatewayClient.Stat(ctx, &provider.StatRequest{
					Opaque:                opaque,
					Ref:                   &provider.Reference{ResourceId: rs.Share.ResourceId},
					ArbitraryMetadataKeys: arbitraryMetadataKeys,
					FieldMask:             fieldMask,
				})
				if err != nil {
					appctx.GetLogger(ctx).Error().
						Err(err).
						Interface("resourceID", rs.Share.ResourceId).
						Msg("ListRecievedShares: failed to make stat call")
					return err
				}
				if sRes.Status.Code != rpc.Code_CODE_OK {
					appctx.GetLogger(ctx).Debug().
						Interface("resourceID", rs.Share.ResourceId).
						Interface("status", sRes.Status).
						Msg("ListRecievedShares: failed to stat the resource")
					continue
				}
				select {
				case results <- res{shareid: rs.Share.Id.OpaqueId, info: sRes.Info}:
				case <-ctx.Done():
					return ctx.Err()
				}
			}
			return nil
		})
	}

	shareMetaData := make(map[string]*provider.ResourceInfo, len(lsRes.Shares))
	for _, rs := range lsRes.Shares {
		// only stat accepted shares
		if rs.State != collaboration.ShareState_SHARE_STATE_ACCEPTED {
			continue
		}
		if rs.Share.ResourceId.SpaceId == "" {
			// convert backwards compatible share id
			rs.Share.ResourceId.StorageId, rs.Share.ResourceId.SpaceId = storagespace.SplitStorageID(rs.Share.ResourceId.StorageId)
		}
		sRes, err := gatewayClient.Stat(ctx, &provider.StatRequest{
			Opaque:                opaque,
			Ref:                   &provider.Reference{ResourceId: rs.Share.ResourceId},
			ArbitraryMetadataKeys: arbitraryMetadataKeys,
			FieldMask:             fieldMask,
		})
		if err != nil {
			appctx.GetLogger(ctx).Error().
				Err(err).
				Interface("resourceID", rs.Share.ResourceId).
				Msg("ListRecievedShares: failed to make stat call")
			continue
		}
		if sRes.Status.Code != rpc.Code_CODE_OK {
			appctx.GetLogger(ctx).Debug().
				Interface("resourceID", rs.Share.ResourceId).
				Interface("status", sRes.Status).
				Msg("ListRecievedShares: failed to stat the resource")
			continue
		}
		shareMetaData[rs.Share.Id.OpaqueId] = sRes.Info
	// Wait for things to settle down, then close results chan
	go func() {
		_ = g.Wait() // error is checked later
		close(results)
	}()

	// some results might have been skipped, so we cannot preallocate the map
	shareMetaData := make(map[string]*provider.ResourceInfo)
	for r := range results {
		shareMetaData[r.shareid] = r.info
	}

	if err := g.Wait(); err != nil {
		return nil, nil, err
	}

	return lsRes.Shares, shareMetaData, nil
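The rewritten fetch loop is a standard bounded fan-out: one goroutine feeds a buffered work channel, up to `maxConcurrency` workers stat shares and push results, and a final goroutine closes the results channel once the errgroup has drained so the collector loop can finish. The following self-contained sketch shows the same pattern with stand-in `share`/`info` types and a `stat` callback in place of the gateway client; the names are illustrative, not the reva API:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

type share struct{ id string }
type info struct{ size int }

// statAll fans stat calls out over a bounded number of workers, mirroring the
// structure of fetchAcceptedShares above. It assumes maxConcurrency > 0 (the
// service defaults it to 5 before calling into this path).
func statAll(ctx context.Context, shares []share, maxConcurrency int,
	stat func(context.Context, share) (info, error)) (map[string]info, error) {

	numWorkers := maxConcurrency
	if len(shares) < numWorkers {
		numWorkers = len(shares)
	}

	type res struct {
		id   string
		info info
	}
	work := make(chan share, len(shares))
	results := make(chan res, len(shares))

	g, ctx := errgroup.WithContext(ctx)

	// Producer: queue all shares, stop early if the context is cancelled.
	g.Go(func() error {
		defer close(work)
		for _, s := range shares {
			select {
			case work <- s:
			case <-ctx.Done():
				return ctx.Err()
			}
		}
		return nil
	})

	// Workers: stat the queued items concurrently.
	for i := 0; i < numWorkers; i++ {
		g.Go(func() error {
			for s := range work {
				inf, err := stat(ctx, s)
				if err != nil {
					return err
				}
				select {
				case results <- res{id: s.id, info: inf}:
				case <-ctx.Done():
					return ctx.Err()
				}
			}
			return nil
		})
	}

	// Close results once every worker is done; the error is checked below.
	go func() {
		_ = g.Wait()
		close(results)
	}()

	out := make(map[string]info)
	for r := range results {
		out[r.id] = r.info
	}
	return out, g.Wait()
}

func main() {
	sizes, err := statAll(context.Background(), []share{{id: "a"}, {id: "bb"}}, 5,
		func(_ context.Context, s share) (info, error) { return info{size: len(s.id)}, nil })
	fmt.Println(sizes, err)
}
```

Buffering both channels to `len(shares)` keeps the producer and the workers from blocking each other, and calling `g.Wait()` twice is safe: the goroutine that closes `results` ignores the error, and the final call returns it to the caller.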
vendor/github.com/cs3org/reva/v2/pkg/storage/fs/owncloudsql/owncloudsql.go (generated, vendored; 29 changes)
@@ -309,6 +309,17 @@ func (fs *owncloudsqlfs) toDatabasePath(ip string) string {
	return p
}

func (fs *owncloudsqlfs) toResourcePath(ip, owner string) string {
	trim := filepath.Join(fs.c.DataDirectory, owner, "files")
	p := strings.TrimPrefix(ip, trim)
	p = strings.TrimPrefix(p, "/")
	// root directory
	if p == "" {
		p = "."
	}
	return p
}

func (fs *owncloudsqlfs) toStoragePath(ctx context.Context, ip string) (sp string) {
	if fs.c.EnableHome {
		u := ctxpkg.ContextMustGetUser(ctx)
@@ -523,14 +534,15 @@ func (fs *owncloudsqlfs) convertToResourceInfo(ctx context.Context, entry *filec
	if _, ok := mdKeysMap["*"]; len(mdKeys) == 0 || ok {
		returnAllKeys = true
	}

	owner := fs.getOwner(ip)
	path := fs.toResourcePath(ip, owner)
	isDir := entry.MimeTypeString == "httpd/unix-directory"
	ri := &provider.ResourceInfo{
		Id: &provider.ResourceId{
			// return ownclouds numeric storage id as the space id!
			SpaceId: strconv.Itoa(entry.Storage), OpaqueId: strconv.Itoa(entry.ID),
		},
		Path:     filepath.Base(ip),
		Path:     filepath.Base(path), // we currently only return the name, decomposedfs returns the path if the request was path based. is that even still possible?
		Type:     getResourceType(isDir),
		Etag:     entry.Etag,
		MimeType: entry.MimeTypeString,
@@ -542,8 +554,16 @@ func (fs *owncloudsqlfs) convertToResourceInfo(ctx context.Context, entry *filec
			Metadata: map[string]string{}, // TODO aduffeck: which metadata needs to go in here?
		},
	}
	// omit parentid for root
	if path != "." {
		ri.Name = entry.Name
		ri.ParentId = &provider.ResourceId{
			// return ownclouds numeric storage id as the space id!
			SpaceId: strconv.Itoa(entry.Storage), OpaqueId: strconv.Itoa(entry.Parent),
		}
	}

	if owner, err := fs.getUser(ctx, fs.getOwner(ip)); err == nil {
	if owner, err := fs.getUser(ctx, owner); err == nil {
		ri.Owner = owner.Id
	} else {
		appctx.GetLogger(ctx).Error().Err(err).Msg("error getting owner")
@@ -1419,9 +1439,6 @@ func (fs *owncloudsqlfs) listWithNominalHome(ctx context.Context, ip string, mdK
	finfos := []*provider.ResourceInfo{}
	for _, entry := range entries {
		cp := filepath.Join(fs.c.DataDirectory, owner, entry.Path)
		if err != nil {
			return nil, err
		}
		m, err := fs.convertToResourceInfo(ctx, entry, cp, mdKeys)
		if err != nil {
			appctx.GetLogger(ctx).Error().Err(err).Str("path", cp).Msg("error converting to a resource info")
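`toResourcePath` is plain prefix trimming: strip `<DataDirectory>/<owner>/files` from the internal path and fall back to `"."` for the space root, which is what lets `convertToResourceInfo` above skip `Name` and `ParentId` for the root resource. A small stand-alone sketch of that conversion (same logic, but as a free function with explicit parameters rather than the `owncloudsqlfs` method):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// toResourcePath converts an internal path such as
// /data/alice/files/photos/x.jpg into the user-visible path photos/x.jpg,
// returning "." for the space root.
func toResourcePath(dataDirectory, owner, internalPath string) string {
	trim := filepath.Join(dataDirectory, owner, "files")
	p := strings.TrimPrefix(internalPath, trim)
	p = strings.TrimPrefix(p, "/")
	if p == "" {
		return "." // space root
	}
	return p
}

func main() {
	fmt.Println(toResourcePath("/data", "alice", "/data/alice/files"))            // "."
	fmt.Println(toResourcePath("/data", "alice", "/data/alice/files/docs/a.txt")) // "docs/a.txt"
}
```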
vendor/github.com/cs3org/reva/v2/pkg/storage/fs/owncloudsql/spaces.go (generated, vendored; 11 changes)
@@ -31,12 +31,14 @@ import (
	"github.com/cs3org/reva/v2/pkg/errtypes"
	"github.com/cs3org/reva/v2/pkg/storage/fs/owncloudsql/filecache"
	"github.com/cs3org/reva/v2/pkg/storagespace"
	"github.com/cs3org/reva/v2/pkg/utils"
)

// ListStorageSpaces lists storage spaces according to the provided filters
func (fs *owncloudsqlfs) ListStorageSpaces(ctx context.Context, filter []*provider.ListStorageSpacesRequest_Filter, unrestricted bool) ([]*provider.StorageSpace, error) {
	var (
		spaceID = "*"
		// uid *userpb.UserId
	)

	filteringUnsupportedSpaceTypes := false
@@ -49,7 +51,7 @@ func (fs *owncloudsqlfs) ListStorageSpaces(ctx context.Context, filter []*provid
		case provider.ListStorageSpacesRequest_Filter_TYPE_ID:
			_, spaceID, _, _ = storagespace.SplitID(filter[i].GetId().OpaqueId)
		case provider.ListStorageSpacesRequest_Filter_TYPE_USER:
			_, spaceID, _, _ = storagespace.SplitID(filter[i].GetId().OpaqueId)
			// uid = filter[i].GetUser()
		}
	}
	if filteringUnsupportedSpaceTypes {
@@ -63,6 +65,9 @@ func (fs *owncloudsqlfs) ListStorageSpaces(ctx context.Context, filter []*provid
		if !ok {
			return nil, errtypes.UserRequired("error getting user from context")
		}
		// if uid != nil && utils.UserIDEqual(uid, u.Id) {
		// 	return nil, errtypes.PermissionDenied("cannot access space of other user?")
		// }
		space, err := fs.getPersonalSpace(ctx, u)
		if err != nil {
			return nil, err
@@ -141,6 +146,8 @@ func (fs *owncloudsqlfs) getPersonalSpace(ctx context.Context, owner *userpb.Use
		Mtime: &types.Timestamp{Seconds: uint64(root.MTime)},
		Owner: owner,
	}
	space.Opaque = utils.AppendPlainToOpaque(space.Opaque, "spaceAlias", "personal/"+owner.Username)
	space.Opaque = utils.AppendPlainToOpaque(space.Opaque, "etag", fmt.Sprintf(`"%s"`, root.Etag))
	return space, nil
}
@@ -179,5 +186,7 @@ func (fs *owncloudsqlfs) storageToSpace(ctx context.Context, storage *filecache.
		Mtime: &types.Timestamp{Seconds: uint64(root.MTime)},
		Owner: owner,
	}
	space.Opaque = utils.AppendPlainToOpaque(space.Opaque, "spaceAlias", "personal/"+owner.Username)
	space.Opaque = utils.AppendPlainToOpaque(space.Opaque, "etag", fmt.Sprintf(`"%s"`, root.Etag))
	return space, nil
}
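Both space constructors now stamp the root etag into the space's opaque map via `utils.AppendPlainToOpaque`, next to the existing `spaceAlias` entry. The sketch below shows what such a helper amounts to on the CS3 `Opaque`/`OpaqueEntry` types from go-cs3apis (vendored above); the helper itself is a local stand-in rather than reva's implementation, and the etag value in `main` is made up:

```go
package main

import (
	"fmt"

	typespb "github.com/cs3org/go-cs3apis/cs3/types/v1beta1"
)

// appendPlainToOpaque stores a plain-text value under the given key in a CS3
// Opaque map, allocating the map on first use.
func appendPlainToOpaque(o *typespb.Opaque, key, value string) *typespb.Opaque {
	if o == nil {
		o = &typespb.Opaque{}
	}
	if o.Map == nil {
		o.Map = map[string]*typespb.OpaqueEntry{}
	}
	o.Map[key] = &typespb.OpaqueEntry{
		Decoder: "plain",
		Value:   []byte(value),
	}
	return o
}

func main() {
	var o *typespb.Opaque
	o = appendPlainToOpaque(o, "spaceAlias", "personal/alice")
	o = appendPlainToOpaque(o, "etag", `"5bfe4a92f8e7"`) // hypothetical etag
	fmt.Println(string(o.Map["etag"].Value))
}
```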
vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node/xattrs.go (generated, vendored; 14 changes)
@@ -21,6 +21,7 @@ package node
import (
	"context"
	"io"
	"io/fs"
	"strconv"
	"time"
@@ -144,14 +145,21 @@ func (n *Node) Xattrs(ctx context.Context) (Attributes, error) {
// Xattr returns an extended attribute of the node. If the attributes have already
// been cached it is not read from disk again.
func (n *Node) Xattr(ctx context.Context, key string) ([]byte, error) {
	path := n.InternalPath()

	if path == "" {
		// Do not try to read the attribute of an non-existing node
		return []byte{}, fs.ErrNotExist
	}

	if n.ID == "" {
		// Do not try to read the attribute of an empty node. The InternalPath points to the
		// base nodes directory in this case.
		return []byte{}, &xattr.Error{Op: "node.Xattr", Path: n.InternalPath(), Name: key, Err: xattr.ENOATTR}
		return []byte{}, &xattr.Error{Op: "node.Xattr", Path: path, Name: key, Err: xattr.ENOATTR}
	}

	if n.xattrsCache == nil {
		attrs, err := n.lu.MetadataBackend().All(ctx, n.InternalPath())
		attrs, err := n.lu.MetadataBackend().All(ctx, path)
		if err != nil {
			return []byte{}, err
		}
@@ -162,7 +170,7 @@ func (n *Node) Xattr(ctx context.Context, key string) ([]byte, error) {
		return val, nil
	}
	// wrap the error as xattr does
	return []byte{}, &xattr.Error{Op: "node.Xattr", Path: n.InternalPath(), Name: key, Err: xattr.ENOATTR}
	return []byte{}, &xattr.Error{Op: "node.Xattr", Path: path, Name: key, Err: xattr.ENOATTR}
}

// XattrString returns the string representation of an attribute
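The `Xattr` change resolves the internal path once and then distinguishes two failure modes: no path at all means the node does not exist (`fs.ErrNotExist`), while an existing node without the requested attribute gets an `*xattr.Error` wrapping `ENOATTR`, matching what the xattr library itself would return. A hedged sketch of that guard, assuming github.com/pkg/xattr (which is where the `xattr.Error` and `xattr.ENOATTR` values above appear to come from) and a simplified in-memory cache instead of the metadata backend:

```go
package main

import (
	"fmt"
	"io/fs"

	"github.com/pkg/xattr"
)

// node is a stand-in for the decomposedfs node: path is its resolved internal
// path, attrs a cached attribute map (nil means "not cached yet").
type node struct {
	path  string
	attrs map[string][]byte
}

// Xattr mirrors the guard logic in the diff above: refuse to touch
// non-existing nodes, and report a missing attribute the same way the
// xattr package does.
func (n *node) Xattr(key string) ([]byte, error) {
	if n.path == "" {
		// Do not try to read attributes of a non-existing node.
		return nil, fs.ErrNotExist
	}
	if n.attrs == nil {
		names, err := xattr.List(n.path)
		if err != nil {
			return nil, err
		}
		n.attrs = map[string][]byte{}
		for _, name := range names {
			if v, err := xattr.Get(n.path, name); err == nil {
				n.attrs[name] = v
			}
		}
	}
	if v, ok := n.attrs[key]; ok {
		return v, nil
	}
	// Wrap the error the way the xattr package does.
	return nil, &xattr.Error{Op: "node.Xattr", Path: n.path, Name: key, Err: xattr.ENOATTR}
}

func main() {
	n := &node{} // unresolved node, no internal path yet
	_, err := n.Xattr("user.test")
	fmt.Println(err) // file does not exist
}
```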
vendor/github.com/cs3org/reva/v2/pkg/user/manager/owncloudsql/owncloudsql.go (generated, vendored; 5 changes)
@@ -167,6 +167,11 @@ func (m *manager) convertToCS3User(ctx context.Context, a *accounts.Account, ski
		GidNumber: m.c.Nobody,
		UidNumber: m.c.Nobody,
	}
	// https://github.com/cs3org/reva/pull/4135
	// fall back to userid
	if u.Id.OpaqueId == "" {
		u.Id.OpaqueId = a.UserID
	}
	if u.Username == "" {
		u.Username = u.Id.OpaqueId
	}
vendor/modules.txt (vendored; 2 changes)
@@ -367,7 +367,7 @@ github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1
github.com/cs3org/go-cs3apis/cs3/storage/registry/v1beta1
github.com/cs3org/go-cs3apis/cs3/tx/v1beta1
github.com/cs3org/go-cs3apis/cs3/types/v1beta1
# github.com/cs3org/reva/v2 v2.22.1-0.20240809114512-56b26ddd82cc
# github.com/cs3org/reva/v2 v2.23.0
## explicit; go 1.21
github.com/cs3org/reva/v2/cmd/revad/internal/grace
github.com/cs3org/reva/v2/cmd/revad/runtime