mirror of
https://github.com/opencloud-eu/opencloud.git
synced 2026-03-13 18:00:34 -05:00
Merge pull request #8544 from rhafer/backport/5.0/7885
Backport/5.0/7885
This commit is contained in:
6
changelog/unreleased/sharing-ng-mount.md
Normal file
6
changelog/unreleased/sharing-ng-mount.md
Normal file
@@ -0,0 +1,6 @@
|
||||
Enhancement: graph endpoints for mounting and unmounting shares
|
||||
|
||||
Functionality for mounting (accepting) and unmounting (rejecting) received
|
||||
shares has been added to the graph API.
|
||||
|
||||
https://github.com/owncloud/ocis/pull/7885
|
||||
@@ -5,12 +5,20 @@ outpkg: "mocks"
|
||||
packages:
|
||||
github.com/owncloud/ocis/v2/services/graph/pkg/service/v0:
|
||||
config:
|
||||
dir: "mocks"
|
||||
dir: "mocks"
|
||||
interfaces:
|
||||
DrivesDriveItemProvider:
|
||||
HTTPClient:
|
||||
Permissions:
|
||||
Publisher:
|
||||
RoleService:
|
||||
github.com/cs3org/reva/v2/pkg/rgrpc/todo/pool:
|
||||
config:
|
||||
dir: "mocks"
|
||||
interfaces:
|
||||
Selectable:
|
||||
config:
|
||||
filename: "gateway_selector.go"
|
||||
github.com/owncloud/ocis/v2/services/graph/pkg/identity:
|
||||
config:
|
||||
dir: "pkg/identity/mocks"
|
||||
|
||||
144
services/graph/mocks/drives_drive_item_provider.go
Normal file
144
services/graph/mocks/drives_drive_item_provider.go
Normal file
@@ -0,0 +1,144 @@
|
||||
// Code generated by mockery v2.40.1. DO NOT EDIT.
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
libregraph "github.com/owncloud/libre-graph-api-go"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
providerv1beta1 "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
|
||||
)
|
||||
|
||||
// DrivesDriveItemProvider is an autogenerated mock type for the DrivesDriveItemProvider type
|
||||
type DrivesDriveItemProvider struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type DrivesDriveItemProvider_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *DrivesDriveItemProvider) EXPECT() *DrivesDriveItemProvider_Expecter {
|
||||
return &DrivesDriveItemProvider_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// MountShare provides a mock function with given fields: ctx, resourceID, name
|
||||
func (_m *DrivesDriveItemProvider) MountShare(ctx context.Context, resourceID providerv1beta1.ResourceId, name string) (libregraph.DriveItem, error) {
|
||||
ret := _m.Called(ctx, resourceID, name)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for MountShare")
|
||||
}
|
||||
|
||||
var r0 libregraph.DriveItem
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, providerv1beta1.ResourceId, string) (libregraph.DriveItem, error)); ok {
|
||||
return rf(ctx, resourceID, name)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, providerv1beta1.ResourceId, string) libregraph.DriveItem); ok {
|
||||
r0 = rf(ctx, resourceID, name)
|
||||
} else {
|
||||
r0 = ret.Get(0).(libregraph.DriveItem)
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, providerv1beta1.ResourceId, string) error); ok {
|
||||
r1 = rf(ctx, resourceID, name)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// DrivesDriveItemProvider_MountShare_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MountShare'
|
||||
type DrivesDriveItemProvider_MountShare_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// MountShare is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - resourceID providerv1beta1.ResourceId
|
||||
// - name string
|
||||
func (_e *DrivesDriveItemProvider_Expecter) MountShare(ctx interface{}, resourceID interface{}, name interface{}) *DrivesDriveItemProvider_MountShare_Call {
|
||||
return &DrivesDriveItemProvider_MountShare_Call{Call: _e.mock.On("MountShare", ctx, resourceID, name)}
|
||||
}
|
||||
|
||||
func (_c *DrivesDriveItemProvider_MountShare_Call) Run(run func(ctx context.Context, resourceID providerv1beta1.ResourceId, name string)) *DrivesDriveItemProvider_MountShare_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(providerv1beta1.ResourceId), args[2].(string))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *DrivesDriveItemProvider_MountShare_Call) Return(_a0 libregraph.DriveItem, _a1 error) *DrivesDriveItemProvider_MountShare_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *DrivesDriveItemProvider_MountShare_Call) RunAndReturn(run func(context.Context, providerv1beta1.ResourceId, string) (libregraph.DriveItem, error)) *DrivesDriveItemProvider_MountShare_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// UnmountShare provides a mock function with given fields: ctx, resourceID
|
||||
func (_m *DrivesDriveItemProvider) UnmountShare(ctx context.Context, resourceID providerv1beta1.ResourceId) error {
|
||||
ret := _m.Called(ctx, resourceID)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for UnmountShare")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, providerv1beta1.ResourceId) error); ok {
|
||||
r0 = rf(ctx, resourceID)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// DrivesDriveItemProvider_UnmountShare_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UnmountShare'
|
||||
type DrivesDriveItemProvider_UnmountShare_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// UnmountShare is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - resourceID providerv1beta1.ResourceId
|
||||
func (_e *DrivesDriveItemProvider_Expecter) UnmountShare(ctx interface{}, resourceID interface{}) *DrivesDriveItemProvider_UnmountShare_Call {
|
||||
return &DrivesDriveItemProvider_UnmountShare_Call{Call: _e.mock.On("UnmountShare", ctx, resourceID)}
|
||||
}
|
||||
|
||||
func (_c *DrivesDriveItemProvider_UnmountShare_Call) Run(run func(ctx context.Context, resourceID providerv1beta1.ResourceId)) *DrivesDriveItemProvider_UnmountShare_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(providerv1beta1.ResourceId))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *DrivesDriveItemProvider_UnmountShare_Call) Return(_a0 error) *DrivesDriveItemProvider_UnmountShare_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *DrivesDriveItemProvider_UnmountShare_Call) RunAndReturn(run func(context.Context, providerv1beta1.ResourceId) error) *DrivesDriveItemProvider_UnmountShare_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewDrivesDriveItemProvider creates a new instance of DrivesDriveItemProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewDrivesDriveItemProvider(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *DrivesDriveItemProvider {
|
||||
mock := &DrivesDriveItemProvider{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
104
services/graph/mocks/gateway_selector.go
Normal file
104
services/graph/mocks/gateway_selector.go
Normal file
@@ -0,0 +1,104 @@
|
||||
// Code generated by mockery v2.40.1. DO NOT EDIT.
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
pool "github.com/cs3org/reva/v2/pkg/rgrpc/todo/pool"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// Selectable is an autogenerated mock type for the Selectable type
|
||||
type Selectable[T interface{}] struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type Selectable_Expecter[T interface{}] struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *Selectable[T]) EXPECT() *Selectable_Expecter[T] {
|
||||
return &Selectable_Expecter[T]{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// Next provides a mock function with given fields: opts
|
||||
func (_m *Selectable[T]) Next(opts ...pool.Option) (T, error) {
|
||||
_va := make([]interface{}, len(opts))
|
||||
for _i := range opts {
|
||||
_va[_i] = opts[_i]
|
||||
}
|
||||
var _ca []interface{}
|
||||
_ca = append(_ca, _va...)
|
||||
ret := _m.Called(_ca...)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Next")
|
||||
}
|
||||
|
||||
var r0 T
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(...pool.Option) (T, error)); ok {
|
||||
return rf(opts...)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(...pool.Option) T); ok {
|
||||
r0 = rf(opts...)
|
||||
} else {
|
||||
r0 = ret.Get(0).(T)
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(...pool.Option) error); ok {
|
||||
r1 = rf(opts...)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// Selectable_Next_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Next'
|
||||
type Selectable_Next_Call[T interface{}] struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Next is a helper method to define mock.On call
|
||||
// - opts ...pool.Option
|
||||
func (_e *Selectable_Expecter[T]) Next(opts ...interface{}) *Selectable_Next_Call[T] {
|
||||
return &Selectable_Next_Call[T]{Call: _e.mock.On("Next",
|
||||
append([]interface{}{}, opts...)...)}
|
||||
}
|
||||
|
||||
func (_c *Selectable_Next_Call[T]) Run(run func(opts ...pool.Option)) *Selectable_Next_Call[T] {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
variadicArgs := make([]pool.Option, len(args)-0)
|
||||
for i, a := range args[0:] {
|
||||
if a != nil {
|
||||
variadicArgs[i] = a.(pool.Option)
|
||||
}
|
||||
}
|
||||
run(variadicArgs...)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *Selectable_Next_Call[T]) Return(_a0 T, _a1 error) *Selectable_Next_Call[T] {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *Selectable_Next_Call[T]) RunAndReturn(run func(...pool.Option) (T, error)) *Selectable_Next_Call[T] {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewSelectable creates a new instance of Selectable. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewSelectable[T interface{}](t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *Selectable[T] {
|
||||
mock := &Selectable[T]{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
313
services/graph/pkg/service/v0/api_drives_drive_item.go
Normal file
313
services/graph/pkg/service/v0/api_drives_drive_item.go
Normal file
@@ -0,0 +1,313 @@
|
||||
package svc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
|
||||
gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1"
|
||||
collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1"
|
||||
storageprovider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
|
||||
"github.com/go-chi/render"
|
||||
libregraph "github.com/owncloud/libre-graph-api-go"
|
||||
"google.golang.org/protobuf/types/known/fieldmaskpb"
|
||||
|
||||
"github.com/cs3org/reva/v2/pkg/rgrpc/todo/pool"
|
||||
"github.com/cs3org/reva/v2/pkg/storagespace"
|
||||
|
||||
"github.com/owncloud/ocis/v2/ocis-pkg/log"
|
||||
"github.com/owncloud/ocis/v2/services/graph/pkg/errorcode"
|
||||
"github.com/owncloud/ocis/v2/services/graph/pkg/identity"
|
||||
)
|
||||
|
||||
const (
	// _fieldMaskPathState is the FieldMask path selecting the share state field
	// in UpdateReceivedShare requests.
	_fieldMaskPathState = "state"
	// _fieldMaskPathMountPoint is the FieldMask path selecting the mount point
	// field in UpdateReceivedShare requests.
	_fieldMaskPathMountPoint = "mount_point"
)
|
||||
|
||||
// DrivesDriveItemProvider is the interface that needs to be implemented by the individual space service.
type DrivesDriveItemProvider interface {
	// MountShare accepts a received share identified by resourceID and mounts it
	// under the given name, returning the resulting driveItem.
	MountShare(ctx context.Context, resourceID storageprovider.ResourceId, name string) (libregraph.DriveItem, error)
	// UnmountShare rejects (unmounts) the received share identified by resourceID.
	UnmountShare(ctx context.Context, resourceID storageprovider.ResourceId) error
}
|
||||
|
||||
// DrivesDriveItemService contains the production business logic for everything that relates to drives.
type DrivesDriveItemService struct {
	// logger is a scoped logger for this service.
	logger log.Logger
	// gatewaySelector yields CS3 gateway clients used for all share operations.
	gatewaySelector pool.Selectable[gateway.GatewayAPIClient]
	// identityCache resolves user/group identities when converting shares to driveItems.
	identityCache identity.IdentityCache
	// resharingEnabled is forwarded to the share-to-driveItem conversion.
	resharingEnabled bool
}
|
||||
|
||||
// NewDrivesDriveItemService creates a new DrivesDriveItemService
|
||||
func NewDrivesDriveItemService(logger log.Logger, gatewaySelector pool.Selectable[gateway.GatewayAPIClient], identityCache identity.IdentityCache, resharing bool) (DrivesDriveItemService, error) {
|
||||
return DrivesDriveItemService{
|
||||
logger: log.Logger{Logger: logger.With().Str("graph api", "DrivesDriveItemService").Logger()},
|
||||
gatewaySelector: gatewaySelector,
|
||||
identityCache: identityCache,
|
||||
resharingEnabled: resharing,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// UnmountShare unmounts (rejects) a received share from the sharejail.
//
// The given resourceID is expected to carry the share ID in its opaque ID; the
// share is looked up, all accepted received shares for the same underlying
// resource are collected, and each of them is set to the rejected state.
// Returns a joined error of all failed rejections (nil when all succeed).
func (s DrivesDriveItemService) UnmountShare(ctx context.Context, resourceID storageprovider.ResourceId) error {
	gatewayClient, err := s.gatewaySelector.Next()
	if err != nil {
		return err
	}

	// This is a bit of a hack: we rely on the item id carrying the share ID in
	// its opaque part. Currently there is no other way to get the ShareID.
	shareId := resourceID.GetOpaqueId()

	// Resolve the share to find out the resourceID of the shared resource.
	getReceivedShareResponse, err := gatewayClient.GetReceivedShare(ctx,
		&collaboration.GetReceivedShareRequest{
			Ref: &collaboration.ShareReference{
				Spec: &collaboration.ShareReference_Id{
					Id: &collaboration.ShareId{
						OpaqueId: shareId,
					},
				},
			},
		},
	)
	// FromCS3Status folds the transport error and the CS3 status into one error.
	if errCode := errorcode.FromCS3Status(getReceivedShareResponse.GetStatus(), err); errCode != nil {
		s.logger.Debug().Err(errCode).
			Str("shareid", shareId).
			Msg("failed to read share")
		return errCode
	}

	// Find all accepted shares for this resource; there may be more than one.
	receivedSharesResponse, err := gatewayClient.ListReceivedShares(ctx, &collaboration.ListReceivedSharesRequest{
		Filters: []*collaboration.Filter{
			{
				Type: collaboration.Filter_TYPE_STATE,
				Term: &collaboration.Filter_State{
					State: collaboration.ShareState_SHARE_STATE_ACCEPTED,
				},
			},
			{
				Type: collaboration.Filter_TYPE_RESOURCE_ID,
				Term: &collaboration.Filter_ResourceId{
					ResourceId: getReceivedShareResponse.GetShare().GetShare().GetResourceId(),
				},
			},
		},
	})
	if err != nil {
		return err
	}

	var errs []error

	// Reject all the shares for this resource, collecting per-share failures
	// instead of aborting on the first one.
	for _, receivedShare := range receivedSharesResponse.GetShares() {
		receivedShare.State = collaboration.ShareState_SHARE_STATE_REJECTED

		updateReceivedShareRequest := &collaboration.UpdateReceivedShareRequest{
			Share:      receivedShare,
			UpdateMask: &fieldmaskpb.FieldMask{Paths: []string{_fieldMaskPathState}},
		}

		_, err := gatewayClient.UpdateReceivedShare(ctx, updateReceivedShareRequest)
		if err != nil {
			errs = append(errs, err)
			continue
		}
	}

	// errors.Join returns nil when errs is empty.
	return errors.Join(errs...)
}
|
||||
|
||||
// MountShare mounts a share
|
||||
func (s DrivesDriveItemService) MountShare(ctx context.Context, resourceID storageprovider.ResourceId, name string) (libregraph.DriveItem, error) {
|
||||
if filepath.IsAbs(name) {
|
||||
return libregraph.DriveItem{}, errorcode.New(errorcode.InvalidRequest, "name cannot be an absolute path")
|
||||
}
|
||||
name = filepath.Clean(name)
|
||||
|
||||
gatewayClient, err := s.gatewaySelector.Next()
|
||||
if err != nil {
|
||||
return libregraph.DriveItem{}, err
|
||||
}
|
||||
|
||||
// Get all shares that the user has received for this resource. There might be multiple
|
||||
receivedSharesResponse, err := gatewayClient.ListReceivedShares(ctx, &collaboration.ListReceivedSharesRequest{
|
||||
Filters: []*collaboration.Filter{
|
||||
{
|
||||
Type: collaboration.Filter_TYPE_STATE,
|
||||
Term: &collaboration.Filter_State{
|
||||
State: collaboration.ShareState_SHARE_STATE_PENDING,
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: collaboration.Filter_TYPE_STATE,
|
||||
Term: &collaboration.Filter_State{
|
||||
State: collaboration.ShareState_SHARE_STATE_REJECTED,
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: collaboration.Filter_TYPE_RESOURCE_ID,
|
||||
Term: &collaboration.Filter_ResourceId{
|
||||
ResourceId: &resourceID,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return libregraph.DriveItem{}, err
|
||||
}
|
||||
|
||||
var errs []error
|
||||
|
||||
var acceptedShares []*collaboration.ReceivedShare
|
||||
|
||||
// try to accept all of the received shares for this resource. So that the stat is in sync across all
|
||||
// shares
|
||||
for _, receivedShare := range receivedSharesResponse.GetShares() {
|
||||
updateMask := &fieldmaskpb.FieldMask{Paths: []string{_fieldMaskPathState}}
|
||||
receivedShare.State = collaboration.ShareState_SHARE_STATE_ACCEPTED
|
||||
|
||||
// only update if mountPoint name is not empty and the path has changed
|
||||
if name != "" {
|
||||
mountPoint := receivedShare.GetMountPoint()
|
||||
if mountPoint == nil {
|
||||
mountPoint = &storageprovider.Reference{}
|
||||
}
|
||||
|
||||
if filepath.Clean(mountPoint.GetPath()) != name {
|
||||
mountPoint.Path = name
|
||||
receivedShare.MountPoint = mountPoint
|
||||
updateMask.Paths = append(updateMask.Paths, _fieldMaskPathMountPoint)
|
||||
}
|
||||
}
|
||||
|
||||
updateReceivedShareRequest := &collaboration.UpdateReceivedShareRequest{
|
||||
Share: receivedShare,
|
||||
UpdateMask: updateMask,
|
||||
}
|
||||
|
||||
updateReceivedShareResponse, err := gatewayClient.UpdateReceivedShare(ctx, updateReceivedShareRequest)
|
||||
switch errCode := errorcode.FromCS3Status(updateReceivedShareResponse.GetStatus(), err); {
|
||||
case errCode == nil:
|
||||
acceptedShares = append(acceptedShares, updateReceivedShareResponse.GetShare())
|
||||
default:
|
||||
// Just log at debug level here. If a single accept for any of the received shares failed this
|
||||
// is not a critical problem. We mainly need to handle the case where all accepts fail. (Outside
|
||||
// the loop)
|
||||
s.logger.Debug().Err(errCode).
|
||||
Str("shareid", receivedShare.GetShare().GetId().String()).
|
||||
Str("resourceid", receivedShare.GetShare().GetResourceId().String()).
|
||||
Msg("failed to accept share")
|
||||
errs = append(errs, errCode)
|
||||
}
|
||||
}
|
||||
|
||||
if len(receivedSharesResponse.GetShares()) == len(errs) {
|
||||
// none of the received shares could be accepted. This is an error. Return it.
|
||||
return libregraph.DriveItem{}, errors.Join(errs...)
|
||||
}
|
||||
|
||||
// As the accepted shares are all for the same resource they should collapse to a single driveitem
|
||||
items, err := cs3ReceivedSharesToDriveItems(ctx, &s.logger, gatewayClient, s.identityCache, s.resharingEnabled, acceptedShares)
|
||||
switch {
|
||||
case err != nil:
|
||||
return libregraph.DriveItem{}, nil
|
||||
case len(items) != 1:
|
||||
return libregraph.DriveItem{}, errorcode.New(errorcode.GeneralException, "failed to convert accepted shares into driveitem")
|
||||
}
|
||||
return items[0], nil
|
||||
}
|
||||
|
||||
// DrivesDriveItemApi is the api that registers the http endpoints which expose needed operation to the graph api.
// The business logic is delegated to the space service and further down to the cs3 client.
type DrivesDriveItemApi struct {
	// logger is a scoped logger for this api layer.
	logger log.Logger
	// drivesDriveItemService performs the actual mount/unmount operations.
	drivesDriveItemService DrivesDriveItemProvider
}
|
||||
|
||||
// NewDrivesDriveItemApi creates a new DrivesDriveItemApi
|
||||
func NewDrivesDriveItemApi(drivesDriveItemService DrivesDriveItemProvider, logger log.Logger) (DrivesDriveItemApi, error) {
|
||||
return DrivesDriveItemApi{
|
||||
logger: log.Logger{Logger: logger.With().Str("graph api", "DrivesDriveItemApi").Logger()},
|
||||
drivesDriveItemService: drivesDriveItemService,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DeleteDriveItem deletes a drive item.
//
// It validates the drive/item IDs from the request, requires the drive to be
// the share jail, and then unmounts (rejects) the corresponding received
// share via the service. Responds 422 for invalid input and 424 when the
// unmount itself fails.
func (api DrivesDriveItemApi) DeleteDriveItem(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	driveID, itemID, err := GetDriveAndItemIDParam(r, &api.logger)
	if err != nil {
		msg := "invalid driveID or itemID"
		api.logger.Debug().Err(err).Msg(msg)
		errorcode.InvalidRequest.Render(w, r, http.StatusUnprocessableEntity, msg)
		return
	}

	// Mount/unmount operations only make sense inside the share jail drive.
	if !IsShareJail(driveID) {
		msg := "invalid driveID, must be share jail"
		api.logger.Debug().Interface("driveID", driveID).Msg(msg)
		errorcode.InvalidRequest.Render(w, r, http.StatusUnprocessableEntity, msg)
		return
	}

	if err := api.drivesDriveItemService.UnmountShare(ctx, itemID); err != nil {
		msg := "unmounting share failed"
		api.logger.Debug().Err(err).Msg(msg)
		// 424: the failure happened in the downstream share service.
		errorcode.InvalidRequest.Render(w, r, http.StatusFailedDependency, msg)
		return
	}

	render.Status(r, http.StatusOK)
}
|
||||
|
||||
// CreateDriveItem creates a drive item
|
||||
func (api DrivesDriveItemApi) CreateDriveItem(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
driveID, err := parseIDParam(r, "driveID")
|
||||
if err != nil {
|
||||
api.logger.Debug().Err(err).Msg("invlid driveID")
|
||||
errorcode.InvalidRequest.Render(w, r, http.StatusUnprocessableEntity, "invalid driveID")
|
||||
return
|
||||
}
|
||||
|
||||
if !IsShareJail(driveID) {
|
||||
msg := "invalid driveID, must be share jail"
|
||||
api.logger.Debug().Interface("driveID", driveID).Msg(msg)
|
||||
errorcode.InvalidRequest.Render(w, r, http.StatusUnprocessableEntity, msg)
|
||||
return
|
||||
}
|
||||
|
||||
requestDriveItem := libregraph.DriveItem{}
|
||||
if err := StrictJSONUnmarshal(r.Body, &requestDriveItem); err != nil {
|
||||
msg := "invalid request body"
|
||||
api.logger.Debug().Err(err).Msg(msg)
|
||||
errorcode.InvalidRequest.Render(w, r, http.StatusUnprocessableEntity, msg)
|
||||
return
|
||||
}
|
||||
|
||||
remoteItem := requestDriveItem.GetRemoteItem()
|
||||
resourceId, err := storagespace.ParseID(remoteItem.GetId())
|
||||
if err != nil {
|
||||
msg := "invalid remote item id"
|
||||
api.logger.Debug().Err(err).Msg(msg)
|
||||
errorcode.InvalidRequest.Render(w, r, http.StatusUnprocessableEntity, msg)
|
||||
return
|
||||
}
|
||||
|
||||
mountShareResponse, err := api.drivesDriveItemService.
|
||||
MountShare(ctx, resourceId, requestDriveItem.GetName())
|
||||
if err != nil {
|
||||
msg := "mounting share failed"
|
||||
api.logger.Debug().Err(err).Msg(msg)
|
||||
errorcode.InvalidRequest.Render(w, r, http.StatusFailedDependency, msg)
|
||||
return
|
||||
}
|
||||
|
||||
render.Status(r, http.StatusCreated)
|
||||
render.JSON(w, r, mountShareResponse)
|
||||
}
|
||||
716
services/graph/pkg/service/v0/api_drives_drive_item_test.go
Normal file
716
services/graph/pkg/service/v0/api_drives_drive_item_test.go
Normal file
@@ -0,0 +1,716 @@
|
||||
package svc_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strconv"
|
||||
|
||||
gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1"
|
||||
collaborationv1beta1 "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1"
|
||||
storageprovider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
|
||||
"github.com/go-chi/chi/v5"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
libregraph "github.com/owncloud/libre-graph-api-go"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/tidwall/gjson"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/cs3org/reva/v2/pkg/rgrpc/status"
|
||||
"github.com/cs3org/reva/v2/pkg/storagespace"
|
||||
cs3mocks "github.com/cs3org/reva/v2/tests/cs3mocks/mocks"
|
||||
"github.com/owncloud/ocis/v2/ocis-pkg/log"
|
||||
"github.com/owncloud/ocis/v2/services/graph/mocks"
|
||||
"github.com/owncloud/ocis/v2/services/graph/pkg/errorcode"
|
||||
"github.com/owncloud/ocis/v2/services/graph/pkg/identity"
|
||||
svc "github.com/owncloud/ocis/v2/services/graph/pkg/service/v0"
|
||||
)
|
||||
|
||||
var _ = Describe("DrivesDriveItemService", func() {
|
||||
var (
|
||||
drivesDriveItemService svc.DrivesDriveItemService
|
||||
gatewayClient *cs3mocks.GatewayAPIClient
|
||||
gatewaySelector *mocks.Selectable[gateway.GatewayAPIClient]
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
logger := log.NewLogger()
|
||||
gatewayClient = cs3mocks.NewGatewayAPIClient(GinkgoT())
|
||||
|
||||
gatewaySelector = mocks.NewSelectable[gateway.GatewayAPIClient](GinkgoT())
|
||||
gatewaySelector.On("Next").Return(gatewayClient, nil)
|
||||
|
||||
cache := identity.NewIdentityCache(identity.IdentityCacheWithGatewaySelector(gatewaySelector))
|
||||
|
||||
service, err := svc.NewDrivesDriveItemService(logger, gatewaySelector, cache, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
drivesDriveItemService = service
|
||||
})
|
||||
|
||||
Describe("UnmountShare", func() {
|
||||
It("handles gateway selector related errors", func() {
|
||||
gatewaySelector.ExpectedCalls = nil
|
||||
|
||||
expectedError := errors.New("obtaining next gatewayClient failed")
|
||||
gatewaySelector.On("Next").Return(gatewayClient, expectedError)
|
||||
|
||||
_, err := drivesDriveItemService.MountShare(context.Background(), storageprovider.ResourceId{}, "")
|
||||
Expect(err).To(MatchError(expectedError))
|
||||
})
|
||||
|
||||
Describe("gateway client share listing", func() {
|
||||
It("handles share listing errors", func() {
|
||||
expectedError := errors.New("listing shares failed")
|
||||
gatewayClient.
|
||||
On("ListReceivedShares", mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(&collaborationv1beta1.ListReceivedSharesResponse{}, expectedError)
|
||||
|
||||
_, err := drivesDriveItemService.MountShare(context.Background(), storageprovider.ResourceId{}, "")
|
||||
Expect(err).To(MatchError(expectedError))
|
||||
})
|
||||
|
||||
It("uses the correct filters to get the shares", func() {
|
||||
expectedResourceID := storageprovider.ResourceId{
|
||||
StorageId: "1",
|
||||
OpaqueId: "2",
|
||||
SpaceId: "3",
|
||||
}
|
||||
gatewayClient.
|
||||
On("ListReceivedShares", mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(func(ctx context.Context, in *collaborationv1beta1.ListReceivedSharesRequest, opts ...grpc.CallOption) (*collaborationv1beta1.ListReceivedSharesResponse, error) {
|
||||
Expect(in.Filters).To(HaveLen(3))
|
||||
|
||||
var shareStates []collaborationv1beta1.ShareState
|
||||
var resourceIDs []*storageprovider.ResourceId
|
||||
|
||||
for _, filter := range in.Filters {
|
||||
switch filter.Term.(type) {
|
||||
case *collaborationv1beta1.Filter_State:
|
||||
shareStates = append(shareStates, filter.GetState())
|
||||
case *collaborationv1beta1.Filter_ResourceId:
|
||||
resourceIDs = append(resourceIDs, filter.GetResourceId())
|
||||
}
|
||||
}
|
||||
|
||||
Expect(shareStates).To(HaveLen(2))
|
||||
Expect(shareStates).To(ContainElements(
|
||||
collaborationv1beta1.ShareState_SHARE_STATE_PENDING,
|
||||
collaborationv1beta1.ShareState_SHARE_STATE_REJECTED,
|
||||
))
|
||||
|
||||
Expect(resourceIDs).To(HaveLen(1))
|
||||
Expect(resourceIDs[0]).To(Equal(&expectedResourceID))
|
||||
|
||||
return nil, nil
|
||||
})
|
||||
|
||||
_, err := drivesDriveItemService.MountShare(context.Background(), expectedResourceID, "")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
||||
Describe("gateway client share update", func() {
|
||||
It("updates the share state to be accepted", func() {
|
||||
expectedShareID := collaborationv1beta1.ShareId{
|
||||
OpaqueId: "1:2:3",
|
||||
}
|
||||
expectedResourceID := storageprovider.ResourceId{
|
||||
StorageId: "1",
|
||||
SpaceId: "2",
|
||||
OpaqueId: "3",
|
||||
}
|
||||
|
||||
gatewayClient.
|
||||
On("ListReceivedShares", mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(func(ctx context.Context, in *collaborationv1beta1.ListReceivedSharesRequest, opts ...grpc.CallOption) (*collaborationv1beta1.ListReceivedSharesResponse, error) {
|
||||
return &collaborationv1beta1.ListReceivedSharesResponse{
|
||||
Shares: []*collaborationv1beta1.ReceivedShare{
|
||||
{
|
||||
State: collaborationv1beta1.ShareState_SHARE_STATE_PENDING,
|
||||
Share: &collaborationv1beta1.Share{
|
||||
Id: &expectedShareID,
|
||||
},
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
})
|
||||
|
||||
gatewayClient.
|
||||
On("UpdateReceivedShare", mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(func(ctx context.Context, in *collaborationv1beta1.UpdateReceivedShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.UpdateReceivedShareResponse, error) {
|
||||
Expect(in.GetUpdateMask().GetPaths()).To(Equal([]string{"state"}))
|
||||
Expect(in.GetShare().GetState()).To(Equal(collaborationv1beta1.ShareState_SHARE_STATE_ACCEPTED))
|
||||
Expect(in.GetShare().GetShare().GetId().GetOpaqueId()).To(Equal(expectedShareID.GetOpaqueId()))
|
||||
return &collaborationv1beta1.UpdateReceivedShareResponse{
|
||||
Status: status.NewOK(ctx),
|
||||
Share: &collaborationv1beta1.ReceivedShare{
|
||||
State: collaborationv1beta1.ShareState_SHARE_STATE_ACCEPTED,
|
||||
Share: &collaborationv1beta1.Share{
|
||||
Id: &expectedShareID,
|
||||
ResourceId: &expectedResourceID,
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
})
|
||||
gatewayClient.
|
||||
On("Stat", mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(func(ctx context.Context, in *storageprovider.StatRequest, opts ...grpc.CallOption) (*storageprovider.StatResponse, error) {
|
||||
return &storageprovider.StatResponse{
|
||||
Status: status.NewOK(ctx),
|
||||
Info: &storageprovider.ResourceInfo{
|
||||
Id: &expectedResourceID,
|
||||
Name: "name",
|
||||
},
|
||||
}, nil
|
||||
})
|
||||
_, err := drivesDriveItemService.MountShare(context.Background(), storageprovider.ResourceId{}, "")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("updates the mountPoint", func() {
|
||||
expectedShareID := collaborationv1beta1.ShareId{
|
||||
OpaqueId: "1:2:3",
|
||||
}
|
||||
expectedResourceID := storageprovider.ResourceId{
|
||||
StorageId: "1",
|
||||
SpaceId: "2",
|
||||
OpaqueId: "3",
|
||||
}
|
||||
|
||||
gatewayClient.
|
||||
On("ListReceivedShares", mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(func(ctx context.Context, in *collaborationv1beta1.ListReceivedSharesRequest, opts ...grpc.CallOption) (*collaborationv1beta1.ListReceivedSharesResponse, error) {
|
||||
return &collaborationv1beta1.ListReceivedSharesResponse{
|
||||
Shares: []*collaborationv1beta1.ReceivedShare{
|
||||
{},
|
||||
},
|
||||
}, nil
|
||||
})
|
||||
|
||||
gatewayClient.
|
||||
On("UpdateReceivedShare", mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(func(ctx context.Context, in *collaborationv1beta1.UpdateReceivedShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.UpdateReceivedShareResponse, error) {
|
||||
Expect(in.GetUpdateMask().GetPaths()).To(HaveLen(2))
|
||||
Expect(in.GetUpdateMask().GetPaths()).To(ContainElements("mount_point"))
|
||||
Expect(in.GetShare().GetMountPoint().GetPath()).To(Equal("new name"))
|
||||
return &collaborationv1beta1.UpdateReceivedShareResponse{
|
||||
Status: status.NewOK(ctx),
|
||||
Share: &collaborationv1beta1.ReceivedShare{
|
||||
State: collaborationv1beta1.ShareState_SHARE_STATE_ACCEPTED,
|
||||
Share: &collaborationv1beta1.Share{
|
||||
Id: &expectedShareID,
|
||||
ResourceId: &expectedResourceID,
|
||||
},
|
||||
MountPoint: &storageprovider.Reference{
|
||||
Path: "new name",
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
})
|
||||
gatewayClient.
|
||||
On("Stat", mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(func(ctx context.Context, in *storageprovider.StatRequest, opts ...grpc.CallOption) (*storageprovider.StatResponse, error) {
|
||||
return &storageprovider.StatResponse{
|
||||
Status: status.NewOK(ctx),
|
||||
Info: &storageprovider.ResourceInfo{
|
||||
Id: &expectedResourceID,
|
||||
Name: "name",
|
||||
},
|
||||
}, nil
|
||||
})
|
||||
|
||||
di, err := drivesDriveItemService.MountShare(context.Background(), storageprovider.ResourceId{}, "new name")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(di.GetName()).To(Equal("new name"))
|
||||
})
|
||||
|
||||
It("succeeds when any of the shares was accepted", func() {
	expectedResourceID := storageprovider.ResourceId{
		StorageId: "1",
		SpaceId:   "2",
		OpaqueId:  "3",
	}

	// Three received shares for the same resource are reported.
	gatewayClient.
		On("ListReceivedShares", mock.Anything, mock.Anything, mock.Anything).
		Return(func(ctx context.Context, in *collaborationv1beta1.ListReceivedSharesRequest, opts ...grpc.CallOption) (*collaborationv1beta1.ListReceivedSharesResponse, error) {
			return &collaborationv1beta1.ListReceivedSharesResponse{
				Shares: []*collaborationv1beta1.ReceivedShare{
					{}, {}, {},
				},
			}, nil
		})

	// The first two update attempts fail; the third one succeeds. The
	// service must tolerate partial failures as long as one share is
	// eventually accepted.
	var calls int
	gatewayClient.
		On("UpdateReceivedShare", mock.Anything, mock.Anything, mock.Anything).
		Return(func(ctx context.Context, in *collaborationv1beta1.UpdateReceivedShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.UpdateReceivedShareResponse, error) {
			calls++
			Expect(calls).To(BeNumerically("<=", 3))

			if calls <= 2 {
				return nil, fmt.Errorf("error %d", calls)
			}

			return &collaborationv1beta1.UpdateReceivedShareResponse{
				Status: status.NewOK(ctx),
				Share: &collaborationv1beta1.ReceivedShare{
					State: collaborationv1beta1.ShareState_SHARE_STATE_ACCEPTED,
					Share: &collaborationv1beta1.Share{
						Id: &collaborationv1beta1.ShareId{
							OpaqueId: strconv.Itoa(calls),
						},
						ResourceId: &expectedResourceID,
					},
				},
			}, nil
		})
	gatewayClient.
		On("Stat", mock.Anything, mock.Anything, mock.Anything).
		Return(func(ctx context.Context, in *storageprovider.StatRequest, opts ...grpc.CallOption) (*storageprovider.StatResponse, error) {
			return &storageprovider.StatResponse{
				Status: status.NewOK(ctx),
				Info: &storageprovider.ResourceInfo{
					Id:   &expectedResourceID,
					Name: "name",
				},
			}, nil
		})

	di, err := drivesDriveItemService.MountShare(context.Background(), storageprovider.ResourceId{}, "new name")
	Expect(err).To(BeNil())
	Expect(di.GetId()).ToNot(BeEmpty())
})
|
||||
It("errors when none of the shares can be accepted", func() {
	// Three received shares exist for the resource.
	gatewayClient.
		On("ListReceivedShares", mock.Anything, mock.Anything, mock.Anything).
		Return(func(ctx context.Context, in *collaborationv1beta1.ListReceivedSharesRequest, opts ...grpc.CallOption) (*collaborationv1beta1.ListReceivedSharesResponse, error) {
			return &collaborationv1beta1.ListReceivedSharesResponse{
				Shares: []*collaborationv1beta1.ReceivedShare{
					{}, {}, {},
				},
			}, nil
		})

	// Every single update attempt fails; the aggregated error must
	// carry all three individual failures.
	var calls int
	gatewayClient.
		On("UpdateReceivedShare", mock.Anything, mock.Anything, mock.Anything).
		Return(func(ctx context.Context, in *collaborationv1beta1.UpdateReceivedShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.UpdateReceivedShareResponse, error) {
			calls++
			Expect(calls).To(BeNumerically("<=", 3))
			return nil, fmt.Errorf("error %d", calls)
		})

	_, err := drivesDriveItemService.MountShare(context.Background(), storageprovider.ResourceId{}, "new name")
	Expect(fmt.Sprint(err)).To(ContainSubstring("error 1"))
	Expect(fmt.Sprint(err)).To(ContainSubstring("error 2"))
	Expect(fmt.Sprint(err)).To(ContainSubstring("error 3"))
})
|
||||
})
|
||||
})
|
||||
|
||||
// UnmountShare rejects (unmounts) all received shares that point at the
// same resource as the given share-jail item.
Describe("UnmountShare", func() {
	It("handles gateway selector related errors", func() {
		gatewaySelector.ExpectedCalls = nil

		expectedError := errors.New("obtaining next gatewayClient failed")
		gatewaySelector.On("Next").Return(gatewayClient, expectedError)

		err := drivesDriveItemService.UnmountShare(context.Background(), storageprovider.ResourceId{})
		Expect(err).To(MatchError(expectedError))
	})

	Describe("gateway client share listing", func() {
		It("handles share listing errors", func() {
			expectedError := errorcode.New(errorcode.GeneralException, "listing shares failed")
			gatewayClient.
				On("GetReceivedShare", mock.Anything, mock.Anything, mock.Anything).
				Return(&collaborationv1beta1.GetReceivedShareResponse{}, errors.New("listing shares failed"))

			err := drivesDriveItemService.UnmountShare(context.Background(), storageprovider.ResourceId{})
			Expect(err).To(MatchError(&expectedError))
		})

		It("uses the correct filters to get the shares", func() {
			// The share-jail item ID encodes the shared resource ID in
			// its opaque part ("3:4:5" -> storage 3, space 4, opaque 5).
			driveItemResourceID := storageprovider.ResourceId{
				StorageId: "1",
				SpaceId:   "2",
				OpaqueId:  "3:4:5",
			}
			expectedResourceID := storageprovider.ResourceId{
				StorageId: "3",
				SpaceId:   "4",
				OpaqueId:  "5",
			}
			gatewayClient.
				On("GetReceivedShare", mock.Anything, mock.Anything, mock.Anything).
				Return(func(ctx context.Context, in *collaborationv1beta1.GetReceivedShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.GetReceivedShareResponse, error) {
					Expect(in.Ref.GetId().GetOpaqueId()).To(Equal(driveItemResourceID.GetOpaqueId()))
					return &collaborationv1beta1.GetReceivedShareResponse{
						Status: status.NewOK(ctx),
						Share: &collaborationv1beta1.ReceivedShare{
							State: collaborationv1beta1.ShareState_SHARE_STATE_ACCEPTED,
							Share: &collaborationv1beta1.Share{
								Id: &collaborationv1beta1.ShareId{
									OpaqueId: driveItemResourceID.GetOpaqueId(),
								},
								ResourceId: &expectedResourceID,
							},
						},
					}, nil
				})

			gatewayClient.
				On("ListReceivedShares", mock.Anything, mock.Anything, mock.Anything).
				Return(func(ctx context.Context, in *collaborationv1beta1.ListReceivedSharesRequest, opts ...grpc.CallOption) (*collaborationv1beta1.ListReceivedSharesResponse, error) {
					// Exactly two filters are expected: one restricting
					// the state to accepted, one pinning the resource ID.
					Expect(in.Filters).To(HaveLen(2))

					var shareStates []collaborationv1beta1.ShareState
					var resourceIDs []*storageprovider.ResourceId
					for _, filter := range in.Filters {
						switch filter.Term.(type) {
						case *collaborationv1beta1.Filter_State:
							shareStates = append(shareStates, filter.GetState())
						case *collaborationv1beta1.Filter_ResourceId:
							resourceIDs = append(resourceIDs, filter.GetResourceId())
						}
					}

					Expect(shareStates).To(HaveLen(1))
					Expect(shareStates).To(ContainElements(
						collaborationv1beta1.ShareState_SHARE_STATE_ACCEPTED,
					))

					Expect(resourceIDs).To(HaveLen(1))
					Expect(resourceIDs[0]).To(Equal(&expectedResourceID))

					return nil, nil
				})

			err := drivesDriveItemService.UnmountShare(context.Background(), driveItemResourceID)
			Expect(err).ToNot(HaveOccurred())
		})
	})

	Describe("gateway client share update", func() {
		It("updates the share state to be rejected", func() {
			expectedShareID := collaborationv1beta1.ShareId{
				OpaqueId: "1$2!3",
			}
			gatewayClient.
				On("GetReceivedShare", mock.Anything, mock.Anything, mock.Anything).
				Return(func(ctx context.Context, in *collaborationv1beta1.GetReceivedShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.GetReceivedShareResponse, error) {
					return &collaborationv1beta1.GetReceivedShareResponse{
						Status: status.NewOK(ctx),
						Share: &collaborationv1beta1.ReceivedShare{
							State: collaborationv1beta1.ShareState_SHARE_STATE_ACCEPTED,
							Share: &collaborationv1beta1.Share{
								Id: &expectedShareID,
							},
						},
					}, nil
				})
			gatewayClient.
				On("ListReceivedShares", mock.Anything, mock.Anything, mock.Anything).
				Return(func(ctx context.Context, in *collaborationv1beta1.ListReceivedSharesRequest, opts ...grpc.CallOption) (*collaborationv1beta1.ListReceivedSharesResponse, error) {
					return &collaborationv1beta1.ListReceivedSharesResponse{
						Shares: []*collaborationv1beta1.ReceivedShare{
							{
								State: collaborationv1beta1.ShareState_SHARE_STATE_PENDING,
								Share: &collaborationv1beta1.Share{
									Id: &expectedShareID,
								},
							},
						},
					}, nil
				})

			gatewayClient.
				On("UpdateReceivedShare", mock.Anything, mock.Anything, mock.Anything).
				Return(func(ctx context.Context, in *collaborationv1beta1.UpdateReceivedShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.UpdateReceivedShareResponse, error) {
					// Only the state field may be touched, and it must
					// flip to rejected for the expected share.
					Expect(in.GetUpdateMask().GetPaths()).To(Equal([]string{"state"}))
					Expect(in.GetShare().GetState()).To(Equal(collaborationv1beta1.ShareState_SHARE_STATE_REJECTED))
					Expect(in.GetShare().GetShare().GetId().GetOpaqueId()).To(Equal(expectedShareID.GetOpaqueId()))
					return &collaborationv1beta1.UpdateReceivedShareResponse{}, nil
				})

			err := drivesDriveItemService.UnmountShare(context.Background(), storageprovider.ResourceId{})
			Expect(err).ToNot(HaveOccurred())
		})
		It("succeeds when all shares could be rejected", func() {
			gatewayClient.
				On("GetReceivedShare", mock.Anything, mock.Anything, mock.Anything).
				Return(func(ctx context.Context, in *collaborationv1beta1.GetReceivedShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.GetReceivedShareResponse, error) {
					return &collaborationv1beta1.GetReceivedShareResponse{
						Status: status.NewOK(ctx),
						Share: &collaborationv1beta1.ReceivedShare{
							State: collaborationv1beta1.ShareState_SHARE_STATE_ACCEPTED,
						},
					}, nil
				})
			gatewayClient.
				On("ListReceivedShares", mock.Anything, mock.Anything, mock.Anything).
				Return(func(ctx context.Context, in *collaborationv1beta1.ListReceivedSharesRequest, opts ...grpc.CallOption) (*collaborationv1beta1.ListReceivedSharesResponse, error) {
					return &collaborationv1beta1.ListReceivedSharesResponse{
						Shares: []*collaborationv1beta1.ReceivedShare{
							{}, {}, {},
						},
					}, nil
				})

			// Every listed share must be rejected exactly once.
			var calls int
			gatewayClient.
				On("UpdateReceivedShare", mock.Anything, mock.Anything, mock.Anything).
				Return(func(ctx context.Context, in *collaborationv1beta1.UpdateReceivedShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.UpdateReceivedShareResponse, error) {
					calls++
					return &collaborationv1beta1.UpdateReceivedShareResponse{}, nil
				})

			err := drivesDriveItemService.UnmountShare(context.Background(), storageprovider.ResourceId{})
			Expect(calls).To(Equal(3))
			Expect(err).ToNot(HaveOccurred())
		})

		It("bubbles errors when any share fails rejecting", func() {
			gatewayClient.
				On("GetReceivedShare", mock.Anything, mock.Anything, mock.Anything).
				Return(func(ctx context.Context, in *collaborationv1beta1.GetReceivedShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.GetReceivedShareResponse, error) {
					return &collaborationv1beta1.GetReceivedShareResponse{
						Status: status.NewOK(ctx),
						Share: &collaborationv1beta1.ReceivedShare{
							State: collaborationv1beta1.ShareState_SHARE_STATE_ACCEPTED,
						},
					}, nil
				})
			gatewayClient.
				On("ListReceivedShares", mock.Anything, mock.Anything, mock.Anything).
				Return(func(ctx context.Context, in *collaborationv1beta1.ListReceivedSharesRequest, opts ...grpc.CallOption) (*collaborationv1beta1.ListReceivedSharesResponse, error) {
					return &collaborationv1beta1.ListReceivedSharesResponse{
						Shares: []*collaborationv1beta1.ReceivedShare{
							{}, {}, {},
						},
					}, nil
				})

			// The first two rejections fail, the last succeeds; both
			// failures must be joined into the returned error.
			var calls int
			gatewayClient.
				On("UpdateReceivedShare", mock.Anything, mock.Anything, mock.Anything).
				Return(func(ctx context.Context, in *collaborationv1beta1.UpdateReceivedShareRequest, opts ...grpc.CallOption) (*collaborationv1beta1.UpdateReceivedShareResponse, error) {
					calls++
					Expect(calls).To(BeNumerically("<=", 3))

					if calls <= 2 {
						return nil, fmt.Errorf("error %d", calls)
					}

					return &collaborationv1beta1.UpdateReceivedShareResponse{}, nil
				})

			err := drivesDriveItemService.UnmountShare(context.Background(), storageprovider.ResourceId{})
			Expect(fmt.Sprint(err)).To(Equal("error 1\nerror 2"))
		})
	})
})
|
||||
})
|
||||
|
||||
var _ = Describe("DrivesDriveItemApi", func() {
|
||||
var (
|
||||
mockProvider *mocks.DrivesDriveItemProvider
|
||||
httpAPI svc.DrivesDriveItemApi
|
||||
rCTX *chi.Context
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
logger := log.NewLogger()
|
||||
|
||||
mockProvider = mocks.NewDrivesDriveItemProvider(GinkgoT())
|
||||
api, err := svc.NewDrivesDriveItemApi(mockProvider, logger)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
httpAPI = api
|
||||
|
||||
rCTX = chi.NewRouteContext()
|
||||
rCTX.URLParams.Add("driveID", "a0ca6a90-a365-4782-871e-d44447bbc668$a0ca6a90-a365-4782-871e-d44447bbc668")
|
||||
})
|
||||
|
||||
checkDriveIDAndItemIDValidation := func(handler http.HandlerFunc) {
|
||||
rCTX.URLParams.Add("driveID", "1$2")
|
||||
rCTX.URLParams.Add("itemID", "3$4!5")
|
||||
|
||||
responseRecorder := httptest.NewRecorder()
|
||||
request := httptest.NewRequest(http.MethodPost, "/", nil).
|
||||
WithContext(
|
||||
context.WithValue(context.Background(), chi.RouteCtxKey, rCTX),
|
||||
)
|
||||
|
||||
handler(responseRecorder, request)
|
||||
|
||||
Expect(responseRecorder.Code).To(Equal(http.StatusUnprocessableEntity))
|
||||
|
||||
jsonData := gjson.Get(responseRecorder.Body.String(), "error")
|
||||
Expect(jsonData.Get("message").String()).To(Equal("invalid driveID or itemID"))
|
||||
}
|
||||
|
||||
Describe("DeleteDriveItem", func() {
|
||||
It("validates the driveID and itemID url param", func() {
|
||||
checkDriveIDAndItemIDValidation(httpAPI.DeleteDriveItem)
|
||||
})
|
||||
|
||||
It("uses the UnmountShare provider implementation", func() {
|
||||
rCTX.URLParams.Add("itemID", "a0ca6a90-a365-4782-871e-d44447bbc668$a0ca6a90-a365-4782-871e-d44447bbc668!a0ca6a90-a365-4782-871e-d44447bbc668")
|
||||
responseRecorder := httptest.NewRecorder()
|
||||
|
||||
request := httptest.NewRequest(http.MethodDelete, "/", nil).
|
||||
WithContext(
|
||||
context.WithValue(context.Background(), chi.RouteCtxKey, rCTX),
|
||||
)
|
||||
|
||||
onUnmountShare := mockProvider.On("UnmountShare", mock.Anything, mock.Anything)
|
||||
onUnmountShare.
|
||||
Return(func(ctx context.Context, resourceID storageprovider.ResourceId) error {
|
||||
return errors.New("any")
|
||||
}).Once()
|
||||
|
||||
httpAPI.DeleteDriveItem(responseRecorder, request)
|
||||
|
||||
Expect(responseRecorder.Code).To(Equal(http.StatusFailedDependency))
|
||||
|
||||
jsonData := gjson.Get(responseRecorder.Body.String(), "error")
|
||||
Expect(jsonData.Get("message").String()).To(Equal("unmounting share failed"))
|
||||
|
||||
// happy path
|
||||
responseRecorder = httptest.NewRecorder()
|
||||
|
||||
onUnmountShare.
|
||||
Return(func(ctx context.Context, resourceID storageprovider.ResourceId) error {
|
||||
Expect(storagespace.FormatResourceID(resourceID)).To(Equal("a0ca6a90-a365-4782-871e-d44447bbc668$a0ca6a90-a365-4782-871e-d44447bbc668!a0ca6a90-a365-4782-871e-d44447bbc668"))
|
||||
return nil
|
||||
}).Once()
|
||||
|
||||
httpAPI.DeleteDriveItem(responseRecorder, request)
|
||||
|
||||
Expect(responseRecorder.Code).To(Equal(http.StatusOK))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("CreateDriveItem", func() {
|
||||
It("checks if the idemID and driveID is in share jail", func() {
|
||||
rCTX.URLParams.Add("driveID", "1$2")
|
||||
|
||||
responseRecorder := httptest.NewRecorder()
|
||||
request := httptest.NewRequest(http.MethodPost, "/", nil).
|
||||
WithContext(
|
||||
context.WithValue(context.Background(), chi.RouteCtxKey, rCTX),
|
||||
)
|
||||
|
||||
httpAPI.CreateDriveItem(responseRecorder, request)
|
||||
|
||||
Expect(responseRecorder.Code).To(Equal(http.StatusUnprocessableEntity))
|
||||
|
||||
jsonData := gjson.Get(responseRecorder.Body.String(), "error")
|
||||
Expect(jsonData.Get("message").String()).To(ContainSubstring("must be share jail"))
|
||||
})
|
||||
|
||||
It("checks that the request body is valid", func() {
|
||||
responseRecorder := httptest.NewRecorder()
|
||||
request := httptest.NewRequest(http.MethodPost, "/", nil).
|
||||
WithContext(
|
||||
context.WithValue(context.Background(), chi.RouteCtxKey, rCTX),
|
||||
)
|
||||
|
||||
httpAPI.CreateDriveItem(responseRecorder, request)
|
||||
|
||||
Expect(responseRecorder.Code).To(Equal(http.StatusUnprocessableEntity))
|
||||
|
||||
jsonData := gjson.Get(responseRecorder.Body.String(), "error")
|
||||
Expect(jsonData.Get("message").String()).To(Equal("invalid request body"))
|
||||
|
||||
// valid drive item, but invalid remote item id
|
||||
driveItem := libregraph.DriveItem{}
|
||||
|
||||
driveItemJson, err := json.Marshal(driveItem)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
responseRecorder = httptest.NewRecorder()
|
||||
|
||||
request = httptest.NewRequest(http.MethodPost, "/", bytes.NewBuffer(driveItemJson)).
|
||||
WithContext(
|
||||
context.WithValue(context.Background(), chi.RouteCtxKey, rCTX),
|
||||
)
|
||||
|
||||
httpAPI.CreateDriveItem(responseRecorder, request)
|
||||
|
||||
Expect(responseRecorder.Code).To(Equal(http.StatusUnprocessableEntity))
|
||||
|
||||
jsonData = gjson.Get(responseRecorder.Body.String(), "error")
|
||||
Expect(jsonData.Get("message").String()).To(Equal("invalid remote item id"))
|
||||
})
|
||||
|
||||
It("uses the MountShare provider implementation", func() {
|
||||
driveItemName := "a name"
|
||||
remoteItemID := "d66d28d8-3558-4f0f-ba2a-34a7185b806d$831997cf-a531-491b-ae72-9037739f04e9!c131a84c-7506-46b4-8e5e-60c56382da3b"
|
||||
driveItem := libregraph.DriveItem{
|
||||
Name: &driveItemName,
|
||||
RemoteItem: &libregraph.RemoteItem{
|
||||
Id: &remoteItemID,
|
||||
},
|
||||
}
|
||||
|
||||
driveItemJson, err := json.Marshal(driveItem)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
responseRecorder := httptest.NewRecorder()
|
||||
|
||||
request := httptest.NewRequest(http.MethodPost, "/", bytes.NewBuffer(driveItemJson)).
|
||||
WithContext(
|
||||
context.WithValue(context.Background(), chi.RouteCtxKey, rCTX),
|
||||
)
|
||||
|
||||
onMountShare := mockProvider.On("MountShare", mock.Anything, mock.Anything, mock.Anything)
|
||||
onMountShare.
|
||||
Return(func(ctx context.Context, resourceID storageprovider.ResourceId, name string) (libregraph.DriveItem, error) {
|
||||
return libregraph.DriveItem{}, errors.New("any")
|
||||
}).Once()
|
||||
|
||||
httpAPI.CreateDriveItem(responseRecorder, request)
|
||||
|
||||
Expect(responseRecorder.Code).To(Equal(http.StatusFailedDependency))
|
||||
|
||||
jsonData := gjson.Get(responseRecorder.Body.String(), "error")
|
||||
Expect(jsonData.Get("message").String()).To(Equal("mounting share failed"))
|
||||
|
||||
// happy path
|
||||
responseRecorder = httptest.NewRecorder()
|
||||
|
||||
request = httptest.NewRequest(http.MethodPost, "/", bytes.NewBuffer(driveItemJson)).
|
||||
WithContext(
|
||||
context.WithValue(context.Background(), chi.RouteCtxKey, rCTX),
|
||||
)
|
||||
|
||||
onMountShare.
|
||||
Return(func(ctx context.Context, resourceID storageprovider.ResourceId, name string) (libregraph.DriveItem, error) {
|
||||
Expect(storagespace.FormatResourceID(resourceID)).To(Equal(remoteItemID))
|
||||
Expect(driveItemName).To(Equal(name))
|
||||
return libregraph.DriveItem{}, nil
|
||||
}).Once()
|
||||
|
||||
httpAPI.CreateDriveItem(responseRecorder, request)
|
||||
|
||||
Expect(responseRecorder.Code).To(Equal(http.StatusCreated))
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -366,7 +366,7 @@ func (g Graph) ListPermissions(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
_, itemID, err := g.GetDriveAndItemIDParam(r)
|
||||
_, itemID, err := GetDriveAndItemIDParam(r, g.logger)
|
||||
if err != nil {
|
||||
errorcode.RenderError(w, r, err)
|
||||
return
|
||||
@@ -439,7 +439,7 @@ func (g Graph) Invite(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
_, itemID, err := g.GetDriveAndItemIDParam(r)
|
||||
_, itemID, err := GetDriveAndItemIDParam(r, g.logger)
|
||||
if err != nil {
|
||||
errorcode.RenderError(w, r, err)
|
||||
return
|
||||
@@ -580,7 +580,7 @@ func (g Graph) Invite(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// UpdatePermission updates a Permission of a Drive item
|
||||
func (g Graph) UpdatePermission(w http.ResponseWriter, r *http.Request) {
|
||||
_, itemID, err := g.GetDriveAndItemIDParam(r)
|
||||
_, itemID, err := GetDriveAndItemIDParam(r, g.logger)
|
||||
if err != nil {
|
||||
errorcode.RenderError(w, r, err)
|
||||
return
|
||||
@@ -646,7 +646,7 @@ func (g Graph) UpdatePermission(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// DeletePermission removes a Permission from a Drive item
|
||||
func (g Graph) DeletePermission(w http.ResponseWriter, r *http.Request) {
|
||||
_, itemID, err := g.GetDriveAndItemIDParam(r)
|
||||
_, itemID, err := GetDriveAndItemIDParam(r, g.logger)
|
||||
if err != nil {
|
||||
errorcode.RenderError(w, r, err)
|
||||
return
|
||||
|
||||
@@ -932,17 +932,17 @@ func (g Graph) cs3PermissionsToLibreGraph(ctx context.Context, space *storagepro
|
||||
tmp := id
|
||||
var identitySet libregraph.IdentitySet
|
||||
if _, ok := groupsMap[id]; ok {
|
||||
group, err := g.identityCache.GetGroup(ctx, tmp)
|
||||
identity, err := groupIdToIdentity(ctx, g.identityCache, tmp)
|
||||
if err != nil {
|
||||
g.logger.Warn().Str("groupid", tmp).Msg("Group not found by id")
|
||||
}
|
||||
identitySet = libregraph.IdentitySet{Group: &libregraph.Identity{Id: &tmp, DisplayName: group.GetDisplayName()}}
|
||||
identitySet = libregraph.IdentitySet{Group: &identity}
|
||||
} else {
|
||||
user, err := g.identityCache.GetUser(ctx, tmp)
|
||||
identity, err := userIdToIdentity(ctx, g.identityCache, tmp)
|
||||
if err != nil {
|
||||
g.logger.Warn().Str("userid", tmp).Msg("User not found by id")
|
||||
}
|
||||
identitySet = libregraph.IdentitySet{User: &libregraph.Identity{Id: &tmp, DisplayName: user.GetDisplayName()}}
|
||||
identitySet = libregraph.IdentitySet{User: &identity}
|
||||
}
|
||||
|
||||
p := libregraph.Permission{
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/render"
|
||||
libregraph "github.com/owncloud/libre-graph-api-go"
|
||||
|
||||
"github.com/owncloud/ocis/v2/services/graph/pkg/errorcode"
|
||||
"github.com/owncloud/ocis/v2/services/graph/pkg/linktype"
|
||||
)
|
||||
@@ -27,7 +28,7 @@ func (g Graph) CreateLink(w http.ResponseWriter, r *http.Request) {
|
||||
logger := g.logger.SubloggerWithRequestID(r.Context())
|
||||
logger.Info().Msg("calling create link")
|
||||
|
||||
_, driveItemID, err := g.GetDriveAndItemIDParam(r)
|
||||
_, driveItemID, err := GetDriveAndItemIDParam(r, g.logger)
|
||||
if err != nil {
|
||||
errorcode.RenderError(w, r, err)
|
||||
return
|
||||
@@ -60,7 +61,7 @@ func (g Graph) CreateLink(w http.ResponseWriter, r *http.Request) {
|
||||
// SetLinkPassword sets public link password on the cs3 api
|
||||
func (g Graph) SetLinkPassword(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
_, itemID, err := g.GetDriveAndItemIDParam(r)
|
||||
_, itemID, err := GetDriveAndItemIDParam(r, g.logger)
|
||||
if err != nil {
|
||||
errorcode.RenderError(w, r, err)
|
||||
return
|
||||
|
||||
@@ -204,8 +204,19 @@ func NewService(opts ...Option) (Graph, error) {
|
||||
requireAdmin = options.RequireAdminMiddleware
|
||||
}
|
||||
|
||||
drivesDriveItemService, err := NewDrivesDriveItemService(options.Logger, options.GatewaySelector, identityCache, options.Config.FilesSharing.EnableResharing)
|
||||
if err != nil {
|
||||
return svc, err
|
||||
}
|
||||
|
||||
drivesDriveItemApi, err := NewDrivesDriveItemApi(drivesDriveItemService, options.Logger)
|
||||
if err != nil {
|
||||
return svc, err
|
||||
}
|
||||
|
||||
m.Route(options.Config.HTTP.Root, func(r chi.Router) {
|
||||
r.Use(middleware.StripSlashes)
|
||||
|
||||
r.Route("/v1beta1", func(r chi.Router) {
|
||||
r.Route("/me", func(r chi.Router) {
|
||||
r.Get("/drives", svc.GetDrives(APIVersion_1_Beta_1))
|
||||
@@ -214,29 +225,25 @@ func NewService(opts ...Option) (Graph, error) {
|
||||
r.Get("/sharedWithMe", svc.ListSharedWithMe)
|
||||
})
|
||||
})
|
||||
r.Route("/drives/{driveID}/items/{itemID}", func(r chi.Router) {
|
||||
r.Post("/invite", svc.Invite)
|
||||
r.Route("/permissions", func(r chi.Router) {
|
||||
r.Get("/", svc.ListPermissions)
|
||||
r.Route("/{permissionID}", func(r chi.Router) {
|
||||
r.Delete("/", svc.DeletePermission)
|
||||
r.Patch("/", svc.UpdatePermission)
|
||||
r.Post("/setPassword", svc.SetLinkPassword)
|
||||
})
|
||||
})
|
||||
r.Post("/createLink", svc.CreateLink)
|
||||
})
|
||||
|
||||
r.Route("/drives", func(r chi.Router) {
|
||||
r.Get("/", svc.GetAllDrives(APIVersion_1_Beta_1))
|
||||
r.Route("/{driveID}/items/{itemID}", func(r chi.Router) {
|
||||
r.Post("/invite", svc.Invite)
|
||||
r.Get("/permissions", svc.ListPermissions)
|
||||
r.Delete("/permissions/{permissionID}", svc.DeletePermission)
|
||||
r.Post("/createLink", svc.CreateLink)
|
||||
r.Route("/{driveID}", func(r chi.Router) {
|
||||
r.Post("/root/children", drivesDriveItemApi.CreateDriveItem)
|
||||
r.Route("/items/{itemID}", func(r chi.Router) {
|
||||
r.Delete("/", drivesDriveItemApi.DeleteDriveItem)
|
||||
r.Post("/invite", svc.Invite)
|
||||
r.Route("/permissions", func(r chi.Router) {
|
||||
r.Get("/", svc.ListPermissions)
|
||||
r.Route("/{permissionID}", func(r chi.Router) {
|
||||
r.Delete("/", svc.DeletePermission)
|
||||
r.Patch("/", svc.UpdatePermission)
|
||||
r.Post("/setPassword", svc.SetLinkPassword)
|
||||
})
|
||||
})
|
||||
r.Post("/createLink", svc.CreateLink)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
r.Route("/roleManagement/permissions/roleDefinitions", func(r chi.Router) {
|
||||
r.Get("/", svc.GetRoleDefinitions)
|
||||
r.Get("/{roleID}", svc.GetRoleDefinition)
|
||||
|
||||
@@ -145,10 +145,9 @@ func (g Graph) cs3UserShareToPermission(ctx context.Context, share *collaboratio
|
||||
perm.SetRoles([]string{})
|
||||
perm.SetId(share.Id.OpaqueId)
|
||||
grantedTo := libregraph.SharePointIdentitySet{}
|
||||
var li libregraph.Identity
|
||||
switch share.GetGrantee().GetType() {
|
||||
case storageprovider.GranteeType_GRANTEE_TYPE_USER:
|
||||
user, err := g.identityCache.GetUser(ctx, share.Grantee.GetUserId().GetOpaqueId())
|
||||
user, err := cs3UserIdToIdentity(ctx, g.identityCache, share.Grantee.GetUserId())
|
||||
switch {
|
||||
case errors.Is(err, identity.ErrNotFound):
|
||||
g.logger.Warn().Str("userid", share.Grantee.GetUserId().GetOpaqueId()).Msg("User not found by id")
|
||||
@@ -157,12 +156,10 @@ func (g Graph) cs3UserShareToPermission(ctx context.Context, share *collaboratio
|
||||
case err != nil:
|
||||
return nil, errorcode.New(errorcode.GeneralException, err.Error())
|
||||
default:
|
||||
li.SetDisplayName(user.GetDisplayName())
|
||||
li.SetId(user.GetId())
|
||||
grantedTo.SetUser(li)
|
||||
grantedTo.SetUser(user)
|
||||
}
|
||||
case storageprovider.GranteeType_GRANTEE_TYPE_GROUP:
|
||||
group, err := g.identityCache.GetGroup(ctx, share.Grantee.GetGroupId().GetOpaqueId())
|
||||
group, err := groupIdToIdentity(ctx, g.identityCache, share.Grantee.GetGroupId().GetOpaqueId())
|
||||
switch {
|
||||
case errors.Is(err, identity.ErrNotFound):
|
||||
g.logger.Warn().Str("groupid", share.Grantee.GetGroupId().GetOpaqueId()).Msg("Group not found by id")
|
||||
@@ -171,9 +168,7 @@ func (g Graph) cs3UserShareToPermission(ctx context.Context, share *collaboratio
|
||||
case err != nil:
|
||||
return nil, errorcode.New(errorcode.GeneralException, err.Error())
|
||||
default:
|
||||
li.SetDisplayName(group.GetDisplayName())
|
||||
li.SetId(group.GetId())
|
||||
grantedTo.SetGroup(li)
|
||||
grantedTo.SetGroup(group)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -3,20 +3,12 @@ package svc
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"reflect"
|
||||
|
||||
cs3User "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
|
||||
collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1"
|
||||
storageprovider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
|
||||
"github.com/go-chi/render"
|
||||
libregraph "github.com/owncloud/libre-graph-api-go"
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"github.com/cs3org/reva/v2/pkg/storagespace"
|
||||
"github.com/cs3org/reva/v2/pkg/utils"
|
||||
|
||||
"github.com/owncloud/ocis/v2/services/graph/pkg/errorcode"
|
||||
"github.com/owncloud/ocis/v2/services/graph/pkg/unifiedrole"
|
||||
)
|
||||
|
||||
// ListSharedWithMe lists the files shared with the current user.
|
||||
@@ -47,334 +39,5 @@ func (g Graph) listSharedWithMe(ctx context.Context) ([]libregraph.DriveItem, er
|
||||
return nil, *errCode
|
||||
}
|
||||
|
||||
return g.cs3ReceivedSharesToDriveItems(ctx, listReceivedSharesResponse.GetShares())
|
||||
}
|
||||
|
||||
func (g Graph) cs3ReceivedSharesToDriveItems(ctx context.Context, receivedShares []*collaboration.ReceivedShare) ([]libregraph.DriveItem, error) {
|
||||
gatewayClient, err := g.gatewaySelector.Next()
|
||||
if err != nil {
|
||||
g.logger.Error().Err(err).Msg("could not select next gateway client")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// doStat is a helper function that stat a resource.
|
||||
doStat := func(resourceId *storageprovider.ResourceId) (*storageprovider.StatResponse, error) {
|
||||
shareStat, err := gatewayClient.Stat(ctx, &storageprovider.StatRequest{
|
||||
Ref: &storageprovider.Reference{ResourceId: resourceId},
|
||||
})
|
||||
switch errCode := errorcode.FromCS3Status(shareStat.GetStatus(), err); {
|
||||
case errCode == nil:
|
||||
break
|
||||
// skip ItemNotFound shares, they might have been deleted in the meantime or orphans.
|
||||
case errCode.GetCode() == errorcode.ItemNotFound:
|
||||
return nil, nil
|
||||
default:
|
||||
g.logger.Error().Err(errCode).Msg("could not stat")
|
||||
return nil, errCode
|
||||
}
|
||||
|
||||
return shareStat, nil
|
||||
}
|
||||
|
||||
ch := make(chan libregraph.DriveItem)
|
||||
group := new(errgroup.Group)
|
||||
// Set max concurrency
|
||||
group.SetLimit(10)
|
||||
|
||||
receivedSharesByResourceID := make(map[string][]*collaboration.ReceivedShare, len(receivedShares))
|
||||
for _, receivedShare := range receivedShares {
|
||||
rIDStr := storagespace.FormatResourceID(*receivedShare.GetShare().GetResourceId())
|
||||
receivedSharesByResourceID[rIDStr] = append(receivedSharesByResourceID[rIDStr], receivedShare)
|
||||
}
|
||||
|
||||
for _, receivedSharesForResource := range receivedSharesByResourceID {
|
||||
receivedShares := receivedSharesForResource
|
||||
|
||||
group.Go(func() error {
|
||||
var err error // redeclare
|
||||
shareStat, err := doStat(receivedShares[0].GetShare().GetResourceId())
|
||||
if shareStat == nil || err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
driveItem := libregraph.NewDriveItem()
|
||||
|
||||
permissions := make([]libregraph.Permission, 0, len(receivedShares))
|
||||
|
||||
var oldestReceivedShare *collaboration.ReceivedShare
|
||||
for _, receivedShare := range receivedShares {
|
||||
switch {
|
||||
case oldestReceivedShare == nil:
|
||||
fallthrough
|
||||
case utils.TSToTime(receivedShare.GetShare().GetCtime()).Before(utils.TSToTime(oldestReceivedShare.GetShare().GetCtime())):
|
||||
oldestReceivedShare = receivedShare
|
||||
}
|
||||
|
||||
permission, err := g.cs3ReceivedShareToLibreGraphPermissions(ctx, receivedShare)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If at least one of the shares was accepted, we consider the driveItem's synchronized
|
||||
// flag enabled.
|
||||
// Also we use the Mountpoint name of the first accepted mountpoint as the name of
|
||||
// of the driveItem
|
||||
if receivedShare.GetState() == collaboration.ShareState_SHARE_STATE_ACCEPTED {
|
||||
driveItem.SetClientSynchronize(true)
|
||||
if name := receivedShare.GetMountPoint().GetPath(); name != "" && driveItem.GetName() == "" {
|
||||
driveItem.SetName(receivedShare.GetMountPoint().GetPath())
|
||||
}
|
||||
}
|
||||
|
||||
// if at least one share is marked as hidden, consider the whole driveItem to be hidden
|
||||
if receivedShare.GetHidden() {
|
||||
driveItem.SetUIHidden(true)
|
||||
}
|
||||
|
||||
if userID := receivedShare.GetShare().GetCreator(); userID != nil {
|
||||
identity, err := g.cs3UserIdToIdentity(ctx, userID)
|
||||
if err != nil {
|
||||
g.logger.Warn().Err(err).Str("userid", userID.String()).Msg("could not get creator of the share")
|
||||
}
|
||||
|
||||
permission.SetInvitation(
|
||||
libregraph.SharingInvitation{
|
||||
InvitedBy: &libregraph.IdentitySet{
|
||||
User: &identity,
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
permissions = append(permissions, *permission)
|
||||
|
||||
}
|
||||
|
||||
// To stay compatible with the usershareprovider and the webdav
|
||||
// service the id of the driveItem is composed of the StorageID and
|
||||
// SpaceID of the sharestorage appended with the opaque ID of
|
||||
// the oldest share for the resource:
|
||||
// '<sharestorageid>$<sharespaceid>!<share-opaque-id>
|
||||
// Note: This means that the driveitem ID will change when the oldest
|
||||
// shared is removed. It would be good to have are more stable ID here (e.g.
|
||||
// derived from the shared resource's ID. But as we need to use the same
|
||||
// ID across all services this means we needed to make similar adjustments
|
||||
// to the sharejail (usershareprovider, webdav). Which we can't currently do
|
||||
// as some clients rely on the IDs used there having a special format.
|
||||
driveItem.SetId(storagespace.FormatResourceID(storageprovider.ResourceId{
|
||||
StorageId: utils.ShareStorageProviderID,
|
||||
OpaqueId: oldestReceivedShare.GetShare().GetId().GetOpaqueId(),
|
||||
SpaceId: utils.ShareStorageSpaceID,
|
||||
}))
|
||||
|
||||
if !driveItem.HasUIHidden() {
|
||||
driveItem.SetUIHidden(false)
|
||||
}
|
||||
if !driveItem.HasClientSynchronize() {
|
||||
driveItem.SetClientSynchronize(false)
|
||||
if name := shareStat.GetInfo().GetName(); name != "" {
|
||||
driveItem.SetName(name)
|
||||
}
|
||||
}
|
||||
|
||||
remoteItem := libregraph.NewRemoteItem()
|
||||
{
|
||||
if id := shareStat.GetInfo().GetId(); id != nil {
|
||||
remoteItem.SetId(storagespace.FormatResourceID(*id))
|
||||
}
|
||||
|
||||
if name := shareStat.GetInfo().GetName(); name != "" {
|
||||
remoteItem.SetName(name)
|
||||
}
|
||||
|
||||
if etag := shareStat.GetInfo().GetEtag(); etag != "" {
|
||||
remoteItem.SetETag(etag)
|
||||
}
|
||||
|
||||
if mTime := shareStat.GetInfo().GetMtime(); mTime != nil {
|
||||
remoteItem.SetLastModifiedDateTime(cs3TimestampToTime(mTime))
|
||||
}
|
||||
|
||||
if size := shareStat.GetInfo().GetSize(); size != 0 {
|
||||
remoteItem.SetSize(int64(size))
|
||||
}
|
||||
|
||||
parentReference := libregraph.NewItemReference()
|
||||
if spaceType := shareStat.GetInfo().GetSpace().GetSpaceType(); spaceType != "" {
|
||||
parentReference.SetDriveType(spaceType)
|
||||
}
|
||||
|
||||
if root := shareStat.GetInfo().GetSpace().GetRoot(); root != nil {
|
||||
parentReference.SetDriveId(storagespace.FormatResourceID(*root))
|
||||
}
|
||||
if !reflect.ValueOf(*parentReference).IsZero() {
|
||||
remoteItem.ParentReference = parentReference
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// the parentReference of the outer driveItem should be the drive
|
||||
// containing the mountpoint i.e. the share jail
|
||||
driveItem.ParentReference = libregraph.NewItemReference()
|
||||
driveItem.ParentReference.SetDriveType("virtual")
|
||||
driveItem.ParentReference.SetDriveId(storagespace.FormatStorageID(utils.ShareStorageProviderID, utils.ShareStorageSpaceID))
|
||||
driveItem.ParentReference.SetId(storagespace.FormatResourceID(storageprovider.ResourceId{
|
||||
StorageId: utils.ShareStorageProviderID,
|
||||
OpaqueId: utils.ShareStorageSpaceID,
|
||||
SpaceId: utils.ShareStorageSpaceID,
|
||||
}))
|
||||
if etag := shareStat.GetInfo().GetEtag(); etag != "" {
|
||||
driveItem.SetETag(etag)
|
||||
}
|
||||
|
||||
// connect the dots
|
||||
{
|
||||
if mTime := shareStat.GetInfo().GetMtime(); mTime != nil {
|
||||
t := cs3TimestampToTime(mTime)
|
||||
|
||||
driveItem.SetLastModifiedDateTime(t)
|
||||
remoteItem.SetLastModifiedDateTime(t)
|
||||
}
|
||||
|
||||
if size := shareStat.GetInfo().GetSize(); size != 0 {
|
||||
s := int64(size)
|
||||
|
||||
driveItem.SetSize(s)
|
||||
remoteItem.SetSize(s)
|
||||
}
|
||||
|
||||
if userID := shareStat.GetInfo().GetOwner(); userID != nil && userID.Type != cs3User.UserType_USER_TYPE_SPACE_OWNER {
|
||||
identity, err := g.cs3UserIdToIdentity(ctx, userID)
|
||||
if err != nil {
|
||||
// TODO: define a proper error behavior here. We don't
|
||||
// want the whole request to fail just because a single
|
||||
// resource owner couldn't be resolved. But, should be
|
||||
// really return the affect share in the response?
|
||||
// For now we just log a warning. The returned
|
||||
// identitySet will just contain the userid.
|
||||
g.logger.Warn().Err(err).Str("userid", userID.String()).Msg("could not get owner of shared resource")
|
||||
}
|
||||
|
||||
remoteItem.SetCreatedBy(libregraph.IdentitySet{User: &identity})
|
||||
driveItem.SetCreatedBy(libregraph.IdentitySet{User: &identity})
|
||||
}
|
||||
switch info := shareStat.GetInfo(); {
|
||||
case info.GetType() == storageprovider.ResourceType_RESOURCE_TYPE_CONTAINER:
|
||||
folder := libregraph.NewFolder()
|
||||
|
||||
remoteItem.Folder = folder
|
||||
driveItem.Folder = folder
|
||||
case info.GetType() == storageprovider.ResourceType_RESOURCE_TYPE_FILE:
|
||||
file := libregraph.NewOpenGraphFile()
|
||||
|
||||
if mimeType := info.GetMimeType(); mimeType != "" {
|
||||
file.MimeType = &mimeType
|
||||
}
|
||||
|
||||
remoteItem.File = file
|
||||
driveItem.File = file
|
||||
}
|
||||
|
||||
if len(permissions) > 0 {
|
||||
remoteItem.Permissions = permissions
|
||||
}
|
||||
|
||||
if !reflect.ValueOf(*remoteItem).IsZero() {
|
||||
driveItem.RemoteItem = remoteItem
|
||||
}
|
||||
}
|
||||
|
||||
ch <- *driveItem
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// wait for concurrent requests to finish
|
||||
go func() {
|
||||
err = group.Wait()
|
||||
close(ch)
|
||||
}()
|
||||
|
||||
driveItems := make([]libregraph.DriveItem, 0, len(receivedSharesByResourceID))
|
||||
for di := range ch {
|
||||
driveItems = append(driveItems, di)
|
||||
}
|
||||
|
||||
return driveItems, err
|
||||
}
|
||||
|
||||
// cs3ReceivedShareToLibreGraphPermissions converts a received CS3 share into a
// libregraph.Permission. It copies the share id and expiration, maps the CS3
// permission set onto a unified role (or, when no role matches, onto raw
// libregraph actions), and resolves the grantee (user or group) through the
// identity cache. A failed grantee lookup aborts the conversion with an error;
// an unknown grantee type simply leaves GrantedToV2 unset.
func (g Graph) cs3ReceivedShareToLibreGraphPermissions(ctx context.Context, receivedShare *collaboration.ReceivedShare) (*libregraph.Permission, error) {
	permission := libregraph.NewPermission()
	if id := receivedShare.GetShare().GetId().GetOpaqueId(); id != "" {
		permission.SetId(id)
	}

	if expiration := receivedShare.GetShare().GetExpiration(); expiration != nil {
		permission.SetExpirationDateTime(cs3TimestampToTime(expiration))
	}

	if permissionSet := receivedShare.GetShare().GetPermissions().GetPermissions(); permissionSet != nil {
		// Try to collapse the CS3 permission set into one of the predefined
		// unified roles; resharing support changes which roles are eligible.
		role := unifiedrole.CS3ResourcePermissionsToUnifiedRole(
			*permissionSet,
			unifiedrole.UnifiedRoleConditionGrantee,
			g.config.FilesSharing.EnableResharing,
		)

		if role != nil {
			permission.SetRoles([]string{role.GetId()})
		}

		actions := unifiedrole.CS3ResourcePermissionsToLibregraphActions(*permissionSet)

		// actions only make sense if no role is set
		if role == nil && len(actions) > 0 {
			permission.SetLibreGraphPermissionsActions(actions)
		}
	}

	// Resolve the grantee into a libregraph identity. Only user and group
	// grantees are handled; other types leave GrantedToV2 empty.
	switch grantee := receivedShare.GetShare().GetGrantee(); {
	case grantee.GetType() == storageprovider.GranteeType_GRANTEE_TYPE_USER:
		user, err := g.identityCache.GetUser(ctx, grantee.GetUserId().GetOpaqueId())
		if err != nil {
			g.logger.Error().Err(err).Msg("could not get user")
			return nil, err
		}

		permission.SetGrantedToV2(libregraph.SharePointIdentitySet{
			User: &libregraph.Identity{
				DisplayName: user.GetDisplayName(),
				Id:          user.Id,
			},
		})
	case grantee.GetType() == storageprovider.GranteeType_GRANTEE_TYPE_GROUP:
		group, err := g.identityCache.GetGroup(ctx, grantee.GetGroupId().GetOpaqueId())
		if err != nil {
			g.logger.Error().Err(err).Msg("could not get group")
			return nil, err
		}

		permission.SetGrantedToV2(libregraph.SharePointIdentitySet{
			Group: &libregraph.Identity{
				DisplayName: group.GetDisplayName(),
				Id:          group.Id,
			},
		})
	}

	return permission, nil
}
|
||||
|
||||
func (g Graph) cs3UserIdToIdentity(ctx context.Context, cs3UserID *cs3User.UserId) (libregraph.Identity, error) {
|
||||
identity := libregraph.Identity{
|
||||
Id: libregraph.PtrString(cs3UserID.GetOpaqueId()),
|
||||
}
|
||||
var err error
|
||||
if cs3UserID.GetType() != cs3User.UserType_USER_TYPE_SPACE_OWNER {
|
||||
var user libregraph.User
|
||||
user, err = g.identityCache.GetUser(ctx, cs3UserID.GetOpaqueId())
|
||||
if err == nil {
|
||||
identity.SetDisplayName(user.GetDisplayName())
|
||||
}
|
||||
}
|
||||
return identity, err
|
||||
return cs3ReceivedSharesToDriveItems(ctx, g.logger, gatewayClient, g.identityCache, g.config.FilesSharing.EnableResharing, listReceivedSharesResponse.GetShares())
|
||||
}
|
||||
|
||||
@@ -1,14 +1,25 @@
|
||||
package svc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"reflect"
|
||||
|
||||
gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1"
|
||||
cs3User "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
|
||||
collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1"
|
||||
storageprovider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
|
||||
"github.com/cs3org/reva/v2/pkg/storagespace"
|
||||
"github.com/cs3org/reva/v2/pkg/utils"
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
libregraph "github.com/owncloud/libre-graph-api-go"
|
||||
"github.com/owncloud/ocis/v2/ocis-pkg/log"
|
||||
"github.com/owncloud/ocis/v2/services/graph/pkg/errorcode"
|
||||
"github.com/owncloud/ocis/v2/services/graph/pkg/identity"
|
||||
"github.com/owncloud/ocis/v2/services/graph/pkg/unifiedrole"
|
||||
)
|
||||
|
||||
// StrictJSONUnmarshal is a wrapper around json.Unmarshal that returns an error if the json contains unknown fields.
|
||||
@@ -32,28 +43,28 @@ func IsSpaceRoot(rid *storageprovider.ResourceId) bool {
|
||||
|
||||
// GetDriveAndItemIDParam parses the driveID and itemID from the request,
|
||||
// validates the common fields and returns the parsed IDs if ok.
|
||||
func (g Graph) GetDriveAndItemIDParam(r *http.Request) (storageprovider.ResourceId, storageprovider.ResourceId, error) {
|
||||
func GetDriveAndItemIDParam(r *http.Request, logger *log.Logger) (storageprovider.ResourceId, storageprovider.ResourceId, error) {
|
||||
empty := storageprovider.ResourceId{}
|
||||
|
||||
driveID, err := parseIDParam(r, "driveID")
|
||||
if err != nil {
|
||||
g.logger.Debug().Err(err).Msg("could not parse driveID")
|
||||
logger.Debug().Err(err).Msg("could not parse driveID")
|
||||
return empty, empty, errorcode.New(errorcode.InvalidRequest, "invalid driveID")
|
||||
}
|
||||
|
||||
itemID, err := parseIDParam(r, "itemID")
|
||||
if err != nil {
|
||||
g.logger.Debug().Err(err).Msg("could not parse itemID")
|
||||
logger.Debug().Err(err).Msg("could not parse itemID")
|
||||
return empty, empty, errorcode.New(errorcode.InvalidRequest, "invalid itemID")
|
||||
}
|
||||
|
||||
if itemID.GetOpaqueId() == "" {
|
||||
g.logger.Debug().Interface("driveID", driveID).Interface("itemID", itemID).Msg("empty item opaqueID")
|
||||
logger.Debug().Interface("driveID", driveID).Interface("itemID", itemID).Msg("empty item opaqueID")
|
||||
return empty, empty, errorcode.New(errorcode.InvalidRequest, "invalid itemID")
|
||||
}
|
||||
|
||||
if driveID.GetStorageId() != itemID.GetStorageId() || driveID.GetSpaceId() != itemID.GetSpaceId() {
|
||||
g.logger.Debug().Interface("driveID", driveID).Interface("itemID", itemID).Msg("driveID and itemID do not match")
|
||||
logger.Debug().Interface("driveID", driveID).Interface("itemID", itemID).Msg("driveID and itemID do not match")
|
||||
return empty, empty, errorcode.New(errorcode.ItemNotFound, "driveID and itemID do not match")
|
||||
}
|
||||
|
||||
@@ -71,3 +82,346 @@ func (g Graph) GetGatewayClient(w http.ResponseWriter, r *http.Request) (gateway
|
||||
|
||||
return gatewayClient, true
|
||||
}
|
||||
|
||||
// IsShareJail returns true if given id is a share jail id.
|
||||
func IsShareJail(id storageprovider.ResourceId) bool {
|
||||
return id.GetStorageId() == utils.ShareStorageProviderID && id.GetSpaceId() == utils.ShareStorageSpaceID
|
||||
}
|
||||
|
||||
// userIdToIdentity looks the user for the supplied id using the cache and returns it
|
||||
// as a libregraph.Identity
|
||||
func userIdToIdentity(ctx context.Context, cache identity.IdentityCache, userID string) (libregraph.Identity, error) {
|
||||
identity := libregraph.Identity{
|
||||
Id: libregraph.PtrString(userID),
|
||||
}
|
||||
user, err := cache.GetUser(ctx, userID)
|
||||
if err == nil {
|
||||
identity.SetDisplayName(user.GetDisplayName())
|
||||
}
|
||||
return identity, err
|
||||
}
|
||||
|
||||
// cs3UserIdToIdentity looks up the user for the supplied cs3 userid using the cache and returns it
|
||||
// as a libregraph.Identity. Skips the user lookup if the id type is USER_TYPE_SPACE_OWNER
|
||||
func cs3UserIdToIdentity(ctx context.Context, cache identity.IdentityCache, cs3UserID *cs3User.UserId) (libregraph.Identity, error) {
|
||||
if cs3UserID.GetType() != cs3User.UserType_USER_TYPE_SPACE_OWNER {
|
||||
return userIdToIdentity(ctx, cache, cs3UserID.GetOpaqueId())
|
||||
}
|
||||
return libregraph.Identity{Id: libregraph.PtrString(cs3UserID.GetOpaqueId())}, nil
|
||||
}
|
||||
|
||||
// groupIdToIdentity looks up the group for the supplied cs3 groupid using the cache and returns it
|
||||
// as a libregraph.Identity.
|
||||
func groupIdToIdentity(ctx context.Context, cache identity.IdentityCache, groupID string) (libregraph.Identity, error) {
|
||||
identity := libregraph.Identity{
|
||||
Id: libregraph.PtrString(groupID),
|
||||
}
|
||||
group, err := cache.GetGroup(ctx, groupID)
|
||||
if err == nil {
|
||||
identity.SetDisplayName(group.GetDisplayName())
|
||||
}
|
||||
return identity, err
|
||||
}
|
||||
|
||||
// cs3ReceivedSharesToDriveItems converts a list of received CS3 shares into
// libregraph DriveItems. Shares pointing at the same resource are grouped
// into a single DriveItem that carries one Permission per share. For every
// group the shared resource is stat'ed (concurrently, at most 10 in flight);
// resources that no longer exist are silently skipped. The returned slice is
// in nondeterministic order because results are collected from a channel.
func cs3ReceivedSharesToDriveItems(ctx context.Context,
	logger *log.Logger,
	gatewayClient gateway.GatewayAPIClient,
	identityCache identity.IdentityCache,
	resharing bool,
	receivedShares []*collaboration.ReceivedShare) ([]libregraph.DriveItem, error) {

	// doStat is a helper function that stats a resource. It returns
	// (nil, nil) for resources that are gone so callers can skip them.
	doStat := func(resourceId *storageprovider.ResourceId) (*storageprovider.StatResponse, error) {
		shareStat, err := gatewayClient.Stat(ctx, &storageprovider.StatRequest{
			Ref: &storageprovider.Reference{ResourceId: resourceId},
		})
		switch errCode := errorcode.FromCS3Status(shareStat.GetStatus(), err); {
		case errCode == nil:
			break
		// skip ItemNotFound shares, they might have been deleted in the meantime or orphans.
		case errCode.GetCode() == errorcode.ItemNotFound:
			return nil, nil
		default:
			logger.Error().Err(errCode).Msg("could not stat")
			return nil, errCode
		}

		return shareStat, nil
	}

	ch := make(chan libregraph.DriveItem)
	group := new(errgroup.Group)
	// Set max concurrency
	group.SetLimit(10)

	// Group the received shares by the resource they point at: one DriveItem
	// is produced per distinct resource, not per share.
	receivedSharesByResourceID := make(map[string][]*collaboration.ReceivedShare, len(receivedShares))
	for _, receivedShare := range receivedShares {
		rIDStr := storagespace.FormatResourceID(*receivedShare.GetShare().GetResourceId())
		receivedSharesByResourceID[rIDStr] = append(receivedSharesByResourceID[rIDStr], receivedShare)
	}

	for _, receivedSharesForResource := range receivedSharesByResourceID {
		// Capture the loop variable for the goroutine below (pre-Go-1.22
		// loop-variable semantics).
		receivedShares := receivedSharesForResource

		group.Go(func() error {
			var err error // redeclare
			// All shares in the group target the same resource, so stating
			// the first one is sufficient.
			shareStat, err := doStat(receivedShares[0].GetShare().GetResourceId())
			if shareStat == nil || err != nil {
				// shareStat == nil with a nil err means the resource vanished;
				// skip it without failing the whole request.
				return err
			}

			driveItem := libregraph.NewDriveItem()

			permissions := make([]libregraph.Permission, 0, len(receivedShares))

			// Track the share with the smallest creation time; its opaque id
			// becomes part of the DriveItem id (see comment below).
			var oldestReceivedShare *collaboration.ReceivedShare
			for _, receivedShare := range receivedShares {
				switch {
				case oldestReceivedShare == nil:
					// fallthrough runs the next case's body without
					// evaluating its condition, so this is nil-safe.
					fallthrough
				case utils.TSToTime(receivedShare.GetShare().GetCtime()).Before(utils.TSToTime(oldestReceivedShare.GetShare().GetCtime())):
					oldestReceivedShare = receivedShare
				}

				permission, err := cs3ReceivedShareToLibreGraphPermissions(ctx, logger, identityCache, resharing, receivedShare)
				if err != nil {
					return err
				}

				// If at least one of the shares was accepted, we consider the driveItem's synchronized
				// flag enabled.
				// Also we use the Mountpoint name of the first accepted mountpoint
				// as the name of the driveItem.
				if receivedShare.GetState() == collaboration.ShareState_SHARE_STATE_ACCEPTED {
					driveItem.SetClientSynchronize(true)
					if name := receivedShare.GetMountPoint().GetPath(); name != "" && driveItem.GetName() == "" {
						driveItem.SetName(receivedShare.GetMountPoint().GetPath())
					}
				}

				// if at least one share is marked as hidden, consider the whole driveItem to be hidden
				if receivedShare.GetHidden() {
					driveItem.SetUIHidden(true)
				}

				if userID := receivedShare.GetShare().GetCreator(); userID != nil {
					identity, err := cs3UserIdToIdentity(ctx, identityCache, userID)
					if err != nil {
						// Best effort: on lookup failure the identity still
						// carries the raw user id, so keep going.
						logger.Warn().Err(err).Str("userid", userID.String()).Msg("could not get creator of the share")
					}

					permission.SetInvitation(
						libregraph.SharingInvitation{
							InvitedBy: &libregraph.IdentitySet{
								User: &identity,
							},
						},
					)
				}
				permissions = append(permissions, *permission)

			}

			// To stay compatible with the usershareprovider and the webdav
			// service the id of the driveItem is composed of the StorageID and
			// SpaceID of the sharestorage appended with the opaque ID of
			// the oldest share for the resource:
			// '<sharestorageid>$<sharespaceid>!<share-opaque-id>
			// Note: This means that the driveitem ID will change when the oldest
			// share is removed. It would be good to have a more stable ID here (e.g.
			// derived from the shared resource's ID). But as we need to use the same
			// ID across all services this means we needed to make similar adjustments
			// to the sharejail (usershareprovider, webdav). Which we can't currently do
			// as some clients rely on the IDs used there having a special format.
			driveItem.SetId(storagespace.FormatResourceID(storageprovider.ResourceId{
				StorageId: utils.ShareStorageProviderID,
				OpaqueId:  oldestReceivedShare.GetShare().GetId().GetOpaqueId(),
				SpaceId:   utils.ShareStorageSpaceID,
			}))

			// Make the optional flags explicit in the response even when no
			// share set them above.
			if !driveItem.HasUIHidden() {
				driveItem.SetUIHidden(false)
			}
			if !driveItem.HasClientSynchronize() {
				driveItem.SetClientSynchronize(false)
				// Fall back to the resource name when no accepted mountpoint
				// provided a name.
				if name := shareStat.GetInfo().GetName(); name != "" {
					driveItem.SetName(name)
				}
			}

			// Populate the remoteItem (the view of the shared resource itself)
			// from the stat result.
			remoteItem := libregraph.NewRemoteItem()
			{
				if id := shareStat.GetInfo().GetId(); id != nil {
					remoteItem.SetId(storagespace.FormatResourceID(*id))
				}

				if name := shareStat.GetInfo().GetName(); name != "" {
					remoteItem.SetName(name)
				}

				if etag := shareStat.GetInfo().GetEtag(); etag != "" {
					remoteItem.SetETag(etag)
				}

				if mTime := shareStat.GetInfo().GetMtime(); mTime != nil {
					remoteItem.SetLastModifiedDateTime(cs3TimestampToTime(mTime))
				}

				if size := shareStat.GetInfo().GetSize(); size != 0 {
					remoteItem.SetSize(int64(size))
				}

				parentReference := libregraph.NewItemReference()
				if spaceType := shareStat.GetInfo().GetSpace().GetSpaceType(); spaceType != "" {
					parentReference.SetDriveType(spaceType)
				}

				if root := shareStat.GetInfo().GetSpace().GetRoot(); root != nil {
					parentReference.SetDriveId(storagespace.FormatResourceID(*root))
				}
				// Only attach the parent reference when at least one field was set.
				if !reflect.ValueOf(*parentReference).IsZero() {
					remoteItem.ParentReference = parentReference
				}

			}

			// the parentReference of the outer driveItem should be the drive
			// containing the mountpoint i.e. the share jail
			driveItem.ParentReference = libregraph.NewItemReference()
			driveItem.ParentReference.SetDriveType("virtual")
			driveItem.ParentReference.SetDriveId(storagespace.FormatStorageID(utils.ShareStorageProviderID, utils.ShareStorageSpaceID))
			driveItem.ParentReference.SetId(storagespace.FormatResourceID(storageprovider.ResourceId{
				StorageId: utils.ShareStorageProviderID,
				OpaqueId:  utils.ShareStorageSpaceID,
				SpaceId:   utils.ShareStorageSpaceID,
			}))
			if etag := shareStat.GetInfo().GetEtag(); etag != "" {
				driveItem.SetETag(etag)
			}

			// connect the dots: mirror shared attributes onto both the outer
			// driveItem and the nested remoteItem.
			{
				if mTime := shareStat.GetInfo().GetMtime(); mTime != nil {
					t := cs3TimestampToTime(mTime)

					driveItem.SetLastModifiedDateTime(t)
					remoteItem.SetLastModifiedDateTime(t)
				}

				if size := shareStat.GetInfo().GetSize(); size != 0 {
					s := int64(size)

					driveItem.SetSize(s)
					remoteItem.SetSize(s)
				}

				if userID := shareStat.GetInfo().GetOwner(); userID != nil && userID.Type != cs3User.UserType_USER_TYPE_SPACE_OWNER {
					identity, err := cs3UserIdToIdentity(ctx, identityCache, userID)
					if err != nil {
						// TODO: define a proper error behavior here. We don't
						// want the whole request to fail just because a single
						// resource owner couldn't be resolved. But should we
						// really return the affected share in the response?
						// For now we just log a warning. The returned
						// identitySet will just contain the userid.
						logger.Warn().Err(err).Str("userid", userID.String()).Msg("could not get owner of shared resource")
					}

					remoteItem.SetCreatedBy(libregraph.IdentitySet{User: &identity})
					driveItem.SetCreatedBy(libregraph.IdentitySet{User: &identity})
				}
				// Mark both items as folder or file depending on the resource type.
				switch info := shareStat.GetInfo(); {
				case info.GetType() == storageprovider.ResourceType_RESOURCE_TYPE_CONTAINER:
					folder := libregraph.NewFolder()

					remoteItem.Folder = folder
					driveItem.Folder = folder
				case info.GetType() == storageprovider.ResourceType_RESOURCE_TYPE_FILE:
					file := libregraph.NewOpenGraphFile()

					if mimeType := info.GetMimeType(); mimeType != "" {
						file.MimeType = &mimeType
					}

					remoteItem.File = file
					driveItem.File = file
				}

				if len(permissions) > 0 {
					remoteItem.Permissions = permissions
				}

				// Only attach the remoteItem when it carries any data at all.
				if !reflect.ValueOf(*remoteItem).IsZero() {
					driveItem.RemoteItem = remoteItem
				}
			}

			ch <- *driveItem

			return nil
		})
	}

	var err error
	// wait for concurrent requests to finish
	// NOTE: reading err after the range loop below is race-free — the write
	// happens before close(ch), and the loop only ends once ch is closed.
	go func() {
		err = group.Wait()
		close(ch)
	}()

	driveItems := make([]libregraph.DriveItem, 0, len(receivedSharesByResourceID))
	for di := range ch {
		driveItems = append(driveItems, di)
	}

	return driveItems, err
}
|
||||
|
||||
func cs3ReceivedShareToLibreGraphPermissions(ctx context.Context, logger *log.Logger,
|
||||
identityCache identity.IdentityCache, resharing bool, receivedShare *collaboration.ReceivedShare) (*libregraph.Permission, error) {
|
||||
permission := libregraph.NewPermission()
|
||||
if id := receivedShare.GetShare().GetId().GetOpaqueId(); id != "" {
|
||||
permission.SetId(id)
|
||||
}
|
||||
|
||||
if expiration := receivedShare.GetShare().GetExpiration(); expiration != nil {
|
||||
permission.SetExpirationDateTime(cs3TimestampToTime(expiration))
|
||||
}
|
||||
|
||||
if permissionSet := receivedShare.GetShare().GetPermissions().GetPermissions(); permissionSet != nil {
|
||||
role := unifiedrole.CS3ResourcePermissionsToUnifiedRole(
|
||||
*permissionSet,
|
||||
unifiedrole.UnifiedRoleConditionGrantee,
|
||||
resharing,
|
||||
)
|
||||
|
||||
if role != nil {
|
||||
permission.SetRoles([]string{role.GetId()})
|
||||
}
|
||||
|
||||
actions := unifiedrole.CS3ResourcePermissionsToLibregraphActions(*permissionSet)
|
||||
|
||||
// actions only make sense if no role is set
|
||||
if role == nil && len(actions) > 0 {
|
||||
permission.SetLibreGraphPermissionsActions(actions)
|
||||
}
|
||||
}
|
||||
switch grantee := receivedShare.GetShare().GetGrantee(); {
|
||||
case grantee.GetType() == storageprovider.GranteeType_GRANTEE_TYPE_USER:
|
||||
user, err := cs3UserIdToIdentity(ctx, identityCache, grantee.GetUserId())
|
||||
if err != nil {
|
||||
logger.Error().Err(err).Msg("could not get user")
|
||||
return nil, err
|
||||
}
|
||||
permission.SetGrantedToV2(libregraph.SharePointIdentitySet{User: &user})
|
||||
case grantee.GetType() == storageprovider.GranteeType_GRANTEE_TYPE_GROUP:
|
||||
group, err := groupIdToIdentity(ctx, identityCache, grantee.GetGroupId().GetOpaqueId())
|
||||
if err != nil {
|
||||
logger.Error().Err(err).Msg("could not get group")
|
||||
return nil, err
|
||||
}
|
||||
permission.SetGrantedToV2(libregraph.SharePointIdentitySet{Group: &group})
|
||||
}
|
||||
|
||||
return permission, nil
|
||||
}
|
||||
|
||||
@@ -5,46 +5,32 @@ import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
|
||||
"github.com/cs3org/reva/v2/pkg/utils"
|
||||
"github.com/go-chi/chi/v5"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
|
||||
"github.com/cs3org/reva/v2/pkg/storagespace"
|
||||
"github.com/owncloud/ocis/v2/services/graph/pkg/config/defaults"
|
||||
identitymocks "github.com/owncloud/ocis/v2/services/graph/pkg/identity/mocks"
|
||||
|
||||
"github.com/owncloud/ocis/v2/ocis-pkg/shared"
|
||||
"github.com/owncloud/ocis/v2/ocis-pkg/conversions"
|
||||
"github.com/owncloud/ocis/v2/ocis-pkg/log"
|
||||
service "github.com/owncloud/ocis/v2/services/graph/pkg/service/v0"
|
||||
)
|
||||
|
||||
var _ = Describe("Utils", func() {
|
||||
var (
|
||||
svc service.Graph
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
cfg := defaults.FullDefaultConfig()
|
||||
cfg.GRPCClientTLS = &shared.GRPCClientTLS{}
|
||||
|
||||
identityBackend := &identitymocks.Backend{}
|
||||
svc, _ = service.NewService(
|
||||
service.Config(cfg),
|
||||
service.WithIdentityBackend(identityBackend),
|
||||
)
|
||||
})
|
||||
|
||||
DescribeTable("GetDriveAndItemIDParam",
|
||||
func(driveID, itemID string, shouldPass bool) {
|
||||
rctx := chi.NewRouteContext()
|
||||
rctx.URLParams.Add("driveID", driveID)
|
||||
rctx.URLParams.Add("itemID", itemID)
|
||||
|
||||
extractedDriveID, extractedItemID, err := svc.GetDriveAndItemIDParam(
|
||||
extractedDriveID, extractedItemID, err := service.GetDriveAndItemIDParam(
|
||||
httptest.NewRequest(http.MethodGet, "/", nil).
|
||||
WithContext(
|
||||
context.WithValue(context.Background(), chi.RouteCtxKey, rctx),
|
||||
),
|
||||
conversions.ToPointer(log.NopLogger()),
|
||||
)
|
||||
|
||||
switch shouldPass {
|
||||
@@ -89,4 +75,33 @@ var _ = Describe("Utils", func() {
|
||||
SpaceId: "3",
|
||||
}, false),
|
||||
)
|
||||
|
||||
DescribeTable("IsShareJail",
|
||||
func(resourceID provider.ResourceId, isShareJail bool) {
|
||||
Expect(service.IsShareJail(resourceID)).To(Equal(isShareJail))
|
||||
},
|
||||
Entry("valid: share jail", provider.ResourceId{
|
||||
StorageId: utils.ShareStorageProviderID,
|
||||
SpaceId: utils.ShareStorageSpaceID,
|
||||
}, true),
|
||||
Entry("invalid: empty storageId", provider.ResourceId{
|
||||
SpaceId: utils.ShareStorageSpaceID,
|
||||
}, false),
|
||||
Entry("invalid: empty spaceId", provider.ResourceId{
|
||||
StorageId: utils.ShareStorageProviderID,
|
||||
}, false),
|
||||
Entry("invalid: empty storageId and spaceId", provider.ResourceId{}, false),
|
||||
Entry("invalid: non share jail storageId", provider.ResourceId{
|
||||
StorageId: "123",
|
||||
SpaceId: utils.ShareStorageSpaceID,
|
||||
}, false),
|
||||
Entry("invalid: non share jail spaceId", provider.ResourceId{
|
||||
StorageId: utils.ShareStorageProviderID,
|
||||
SpaceId: "123",
|
||||
}, false),
|
||||
Entry("invalid: non share jail storageID and spaceId", provider.ResourceId{
|
||||
StorageId: "123",
|
||||
SpaceId: "123",
|
||||
}, false),
|
||||
)
|
||||
})
|
||||
|
||||
Reference in New Issue
Block a user