Merge pull request #130 from opencloud-eu/mod-vendor-and-tidy

Fix vendor tree and 'go mod tidy'
This commit is contained in:
Ralf Haferkamp
2025-01-28 09:26:14 +01:00
committed by GitHub
62 changed files with 13459 additions and 32 deletions

2
go.sum
View File

@@ -871,8 +871,6 @@ github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/open-policy-agent/opa v0.70.0 h1:B3cqCN2iQAyKxK6+GI+N40uqkin+wzIrM7YA60t9x1U=
github.com/open-policy-agent/opa v0.70.0/go.mod h1:Y/nm5NY0BX0BqjBriKUiV81sCl8XOjjvqQG7dXrggtI=
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250121094357-24f23b6a27ed h1:0rVMOlcGXgFRNrNgjeCGTyuGKXwD+Y+wQGzm/uVe3CU=
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250121094357-24f23b6a27ed/go.mod h1:lk0GfBt0cLaOcc1nWJikinTK5ibFtKRxp10ATxtCalU=
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250127153848-a84e6c39c206 h1:sTbtA2hU40r6eh24aswG0oP7NiJrVyEiqM1nn72TrHA=
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250127153848-a84e6c39c206/go.mod h1:lk0GfBt0cLaOcc1nWJikinTK5ibFtKRxp10ATxtCalU=
github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=

View File

@@ -0,0 +1,144 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package blobstore
import (
"bufio"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/utils"
"github.com/pkg/errors"
)
// ErrBlobIDEmpty is returned when the BlobID is empty
var ErrBlobIDEmpty = fmt.Errorf("blobstore: BlobID is empty")

// Blobstore provides an interface to a filesystem based blobstore
type Blobstore struct {
	root string // base directory below which all blobs are stored
}
// New creates the blobstore root directory if necessary and returns a
// Blobstore operating on it.
func New(root string) (*Blobstore, error) {
	if err := os.MkdirAll(root, 0700); err != nil {
		return nil, err
	}
	return &Blobstore{root: root}, nil
}
// Upload stores the content of source in the blobstore under the node's
// blob path. It first attempts a cheap rename; when source and destination
// live on different filesystems the rename fails and the data is copied
// instead. Returns ErrBlobIDEmpty when the node has no BlobID set.
func (bs *Blobstore) Upload(node *node.Node, source string) error {
	if node.BlobID == "" {
		return ErrBlobIDEmpty
	}
	dest := bs.Path(node)
	// ensure parent path exists
	if err := os.MkdirAll(filepath.Dir(dest), 0700); err != nil {
		return errors.Wrap(err, "Decomposed blobstore: error creating parent folders for blob")
	}

	if err := os.Rename(source, dest); err == nil {
		return nil
	}

	// Rename failed, file needs to be copied.
	file, err := os.Open(source)
	if err != nil {
		return errors.Wrap(err, "Decomposed blobstore: Can not open source file to upload")
	}
	defer file.Close()

	f, err := os.OpenFile(dest, os.O_CREATE|os.O_WRONLY, 0700)
	if err != nil {
		return errors.Wrapf(err, "could not open blob '%s' for writing", dest)
	}

	w := bufio.NewWriter(f)
	if _, err = w.ReadFrom(file); err != nil {
		f.Close() // best effort; the write error is the primary failure
		return errors.Wrapf(err, "could not write blob '%s'", dest)
	}
	if err = w.Flush(); err != nil {
		f.Close()
		return errors.Wrapf(err, "could not flush blob '%s'", dest)
	}
	// Close the destination file and surface any error: the original code
	// leaked the file descriptor on every copy path.
	return f.Close()
}
// Download opens the blob belonging to the given node for reading.
// The caller is responsible for closing the returned ReadCloser.
// Returns ErrBlobIDEmpty when the node has no BlobID set.
func (bs *Blobstore) Download(node *node.Node) (io.ReadCloser, error) {
	if node.BlobID == "" {
		return nil, ErrBlobIDEmpty
	}
	blobPath := bs.Path(node)
	f, err := os.Open(blobPath)
	if err != nil {
		return nil, errors.Wrapf(err, "could not read blob '%s'", blobPath)
	}
	return f, nil
}
// Delete removes the blob of the given node from the blobstore.
// Returns ErrBlobIDEmpty when the node has no BlobID set.
func (bs *Blobstore) Delete(node *node.Node) error {
	if node.BlobID == "" {
		return ErrBlobIDEmpty
	}
	blobPath := bs.Path(node)
	err := utils.RemoveItem(blobPath)
	if err != nil {
		return errors.Wrapf(err, "could not delete blob '%s'", blobPath)
	}
	return nil
}
// List returns a node stub (SpaceID and BlobID only) for every blob stored
// below <root>/spaces/.../blobs/... in the blobstore.
func (bs *Blobstore) List() ([]*node.Node, error) {
	// The glob mirrors the layout produced by Path: two pathified segments
	// for the space id and four plus the remainder for the blob id.
	pattern := filepath.Join(bs.root, "spaces", "*", "*", "blobs", "*", "*", "*", "*", "*")
	matches, err := filepath.Glob(pattern)
	if err != nil {
		return nil, err
	}
	nodes := make([]*node.Node, 0, len(matches))
	for _, match := range matches {
		_, tail, _ := strings.Cut(match, "spaces")
		spacePart, blobPart, _ := strings.Cut(tail, "blobs")
		nodes = append(nodes, &node.Node{
			// drop the path separators introduced by Pathify
			SpaceID: strings.ReplaceAll(spacePart, "/", ""),
			BlobID:  strings.ReplaceAll(blobPart, "/", ""),
		})
	}
	return nodes, nil
}
// Path returns the filesystem location of the node's blob inside the
// blobstore root. Space and blob ids are pathified to spread blobs across
// multiple directories.
func (bs *Blobstore) Path(node *node.Node) string {
	inner := filepath.Join("/", "spaces", lookup.Pathify(node.SpaceID, 1, 2), "blobs", lookup.Pathify(node.BlobID, 4, 2))
	return filepath.Join(bs.root, filepath.Clean(inner))
}

View File

@@ -0,0 +1,51 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package decomposed
import (
"path"
"github.com/opencloud-eu/reva/v2/pkg/events"
"github.com/opencloud-eu/reva/v2/pkg/storage"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/decomposed/blobstore"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/registry"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options"
"github.com/rs/zerolog"
)
// init registers the "decomposed" filesystem backend with the storage
// filesystem registry so it can be selected by name in the configuration.
func init() {
	registry.Register("decomposed", New)
}
// New returns an implementation of the storage.FS interface that talks
// to a local filesystem using the decomposedfs layout.
func New(m map[string]interface{}, stream events.Stream, log *zerolog.Logger) (storage.FS, error) {
	o, err := options.New(m)
	if err != nil {
		return nil, err
	}
	// path.Join with a single argument only cleans the path; presumably kept
	// for symmetry with other backends — TODO confirm.
	bs, err := blobstore.New(path.Join(o.Root))
	if err != nil {
		return nil, err
	}
	return decomposedfs.NewDefault(m, bs, stream, log)
}

View File

@@ -0,0 +1,161 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package blobstore
import (
"context"
"fmt"
"io"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/pkg/errors"
)
// Blobstore provides an interface to an s3 compatible blobstore
type Blobstore struct {
	client            *minio.Client // minio client connected to the endpoint
	defaultPutOptions Options       // put options applied to every upload
	bucket            string        // bucket all blobs are stored in
}

// Options bundles the tunables that are forwarded to minio's
// PutObjectOptions on every upload.
type Options struct {
	DisableContentSha256  bool   // skip computing the content sha256
	DisableMultipart      bool   // force single-part uploads
	SendContentMd5        bool   // send a content md5 checksum
	ConcurrentStreamParts bool   // upload stream parts concurrently
	NumThreads            uint   // number of concurrent upload workers
	PartSize              uint64 // multipart upload part size in bytes
}
// New connects to the s3 endpoint and returns a Blobstore for the given
// bucket. TLS is used unless the endpoint URL uses the plain "http" scheme.
func New(endpoint, region, bucket, accessKey, secretKey string, defaultPutOptions Options) (*Blobstore, error) {
	u, err := url.Parse(endpoint)
	if err != nil {
		return nil, errors.Wrap(err, "failed to parse s3 endpoint")
	}
	client, err := minio.New(u.Host, &minio.Options{
		Region: region,
		Creds:  credentials.NewStaticV4(accessKey, secretKey, ""),
		Secure: u.Scheme != "http",
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to setup s3 client")
	}
	return &Blobstore{
		client:            client,
		bucket:            bucket,
		defaultPutOptions: defaultPutOptions,
	}, nil
}
// Upload streams the content of source into the bucket under the node's
// blob path, applying the blobstore's default put options.
func (bs *Blobstore) Upload(node *node.Node, source string) error {
	f, err := os.Open(source)
	if err != nil {
		return errors.Wrap(err, "can not open source file to upload")
	}
	defer f.Close()

	key := bs.Path(node)
	opts := minio.PutObjectOptions{
		ContentType:           "application/octet-stream",
		SendContentMd5:        bs.defaultPutOptions.SendContentMd5,
		ConcurrentStreamParts: bs.defaultPutOptions.ConcurrentStreamParts,
		NumThreads:            bs.defaultPutOptions.NumThreads,
		PartSize:              bs.defaultPutOptions.PartSize,
		DisableMultipart:      bs.defaultPutOptions.DisableMultipart,
		DisableContentSha256:  bs.defaultPutOptions.DisableContentSha256,
	}
	if _, err = bs.client.PutObject(context.Background(), bs.bucket, key, f, node.Blobsize, opts); err != nil {
		return errors.Wrapf(err, "could not store object '%s' into bucket '%s'", key, bs.bucket)
	}
	return nil
}
// Download opens the node's blob in the bucket for reading and verifies
// that the stored object size matches the node's Blobsize.
// The caller is responsible for closing the returned ReadCloser.
func (bs *Blobstore) Download(node *node.Node) (io.ReadCloser, error) {
	key := bs.Path(node)
	obj, err := bs.client.GetObject(context.Background(), bs.bucket, key, minio.GetObjectOptions{})
	if err != nil {
		return nil, errors.Wrapf(err, "could not download object '%s' from bucket '%s'", key, bs.bucket)
	}
	info, err := obj.Stat()
	if err != nil {
		return nil, errors.Wrapf(err, "blob path: %s", key)
	}
	if info.Size != node.Blobsize {
		return nil, fmt.Errorf("blob has unexpected size. %d bytes expected, got %d bytes", node.Blobsize, info.Size)
	}
	return obj, nil
}
// Delete removes the node's blob from the bucket.
func (bs *Blobstore) Delete(node *node.Node) error {
	key := bs.Path(node)
	if err := bs.client.RemoveObject(context.Background(), bs.bucket, key, minio.RemoveObjectOptions{}); err != nil {
		return errors.Wrapf(err, "could not delete object '%s' from bucket '%s'", key, bs.bucket)
	}
	return nil
}
// List walks all objects in the bucket and returns a node stub (SpaceID and
// BlobID only) per object. Listing errors are collected and the last one is
// returned alongside the successfully parsed nodes.
func (bs *Blobstore) List() ([]*node.Node, error) {
	objects := bs.client.ListObjects(context.Background(), bs.bucket, minio.ListObjectsOptions{Recursive: true})
	var lastErr error
	nodes := make([]*node.Node, 0)
	for obj := range objects {
		if obj.Err != nil {
			lastErr = obj.Err
			continue
		}
		// keys look like <spaceid>/<pathified blobid>
		space, blob, _ := strings.Cut(obj.Key, "/")
		nodes = append(nodes, &node.Node{
			SpaceID: strings.ReplaceAll(space, "/", ""),
			BlobID:  strings.ReplaceAll(blob, "/", ""),
		})
	}
	return nodes, lastErr
}
// Path returns the object key of the node's blob inside the bucket.
//
// https://aws.amazon.com/de/premiumsupport/knowledge-center/s3-prefix-nested-folders-difference/
// Prefixes are used to partition a bucket. A prefix is everything except the filename.
// For a file `BucketName/foo/bar/lorem.ipsum`, `BucketName/foo/bar/` is the prefix.
// There are request limits per prefix, therefore you should have many prefixes.
// There are no limits on prefixes per bucket, so in general it's better to have more rather than fewer.
//
// Since the spaceID is always the same for a space, we don't need to pathify that, because it would
// not yield any performance gains
func (bs *Blobstore) Path(node *node.Node) string {
	return filepath.Clean(filepath.Join(node.SpaceID, lookup.Pathify(node.BlobID, 4, 2)))
}

View File

@@ -0,0 +1,63 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package decomposed_s3
import (
"fmt"
"github.com/opencloud-eu/reva/v2/pkg/events"
"github.com/opencloud-eu/reva/v2/pkg/storage"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/decomposed_s3/blobstore"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/registry"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs"
"github.com/rs/zerolog"
)
// init registers the "decomposed-s3" filesystem backend with the storage
// filesystem registry so it can be selected by name in the configuration.
func init() {
	registry.Register("decomposed-s3", New)
}
// New returns an implementation of the storage.FS interface that stores
// blobs in an s3 compatible blobstore while keeping metadata in the
// decomposedfs layout. It fails when the s3 configuration is incomplete.
func New(m map[string]interface{}, stream events.Stream, log *zerolog.Logger) (storage.FS, error) {
	o, err := parseConfig(m)
	if err != nil {
		return nil, err
	}
	if !o.S3ConfigComplete() {
		return nil, fmt.Errorf("S3 configuration incomplete")
	}
	// forward the tuning knobs from the config to every PutObject call
	defaultPutOptions := blobstore.Options{
		DisableContentSha256:  o.DisableContentSha256,
		DisableMultipart:      o.DisableMultipart,
		SendContentMd5:        o.SendContentMd5,
		ConcurrentStreamParts: o.ConcurrentStreamParts,
		NumThreads:            o.NumThreads,
		PartSize:              o.PartSize,
	}
	bs, err := blobstore.New(o.S3Endpoint, o.S3Region, o.S3Bucket, o.S3AccessKey, o.S3SecretKey, defaultPutOptions)
	if err != nil {
		return nil, err
	}
	return decomposedfs.NewDefault(m, bs, stream, log)
}

View File

@@ -0,0 +1,93 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package decomposed_s3
import (
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
)
// Option defines a single option function.
type Option func(o *Options)

// Options defines the available options for this package.
type Options struct {
	// Endpoint of the s3 blobstore
	S3Endpoint string `mapstructure:"s3.endpoint"`
	// Region of the s3 blobstore
	S3Region string `mapstructure:"s3.region"`
	// Bucket of the s3 blobstore
	S3Bucket string `mapstructure:"s3.bucket"`
	// Access key for the s3 blobstore
	S3AccessKey string `mapstructure:"s3.access_key"`
	// Secret key for the s3 blobstore
	S3SecretKey string `mapstructure:"s3.secret_key"`
	// disable sending content sha256
	// NOTE(review): the mapstructure key reads "sha254" — almost certainly a
	// typo for "sha256". Fixing it changes the public config key, so confirm
	// with existing deployments before renaming.
	DisableContentSha256 bool `mapstructure:"s3.disable_content_sha254"`
	// disable multipart uploads
	DisableMultipart bool `mapstructure:"s3.disable_multipart"`
	// enable sending content md5, defaults to true if unset
	SendContentMd5 bool `mapstructure:"s3.send_content_md5"`
	// use concurrent stream parts, defaults to true if unset
	ConcurrentStreamParts bool `mapstructure:"s3.concurrent_stream_parts"`
	// number of concurrent uploads, defaults to 4 if unset
	NumThreads uint `mapstructure:"s3.num_threads"`
	// part size for concurrent uploads
	PartSize uint64 `mapstructure:"s3.part_size"`
}
// S3ConfigComplete returns true if all required s3 fields are set.
func (o *Options) S3ConfigComplete() bool {
	for _, v := range []string{o.S3Endpoint, o.S3Region, o.S3Bucket, o.S3AccessKey, o.S3SecretKey} {
		if v == "" {
			return false
		}
	}
	return true
}
// parseConfig decodes the raw config map into Options and applies defaults
// for keys that are absent from (or nil in) the map.
func parseConfig(m map[string]interface{}) (*Options, error) {
	opts := &Options{}
	if err := mapstructure.Decode(m, opts); err != nil {
		return nil, errors.Wrap(err, "error decoding conf")
	}
	// Defaults only kick in when the key carries no value; an explicit
	// false/0 in the config is respected.
	if m["s3.send_content_md5"] == nil {
		opts.SendContentMd5 = true
	}
	if m["s3.concurrent_stream_parts"] == nil {
		opts.ConcurrentStreamParts = true
	}
	if m["s3.num_threads"] == nil {
		opts.NumThreads = 4
	}
	return opts, nil
}

View File

@@ -22,6 +22,8 @@ import (
// Load core storage filesystem backends.
_ "github.com/opencloud-eu/reva/v2/pkg/ocm/storage/received"
_ "github.com/opencloud-eu/reva/v2/pkg/storage/fs/cephfs"
_ "github.com/opencloud-eu/reva/v2/pkg/storage/fs/decomposed"
_ "github.com/opencloud-eu/reva/v2/pkg/storage/fs/decomposed_s3"
_ "github.com/opencloud-eu/reva/v2/pkg/storage/fs/eos"
_ "github.com/opencloud-eu/reva/v2/pkg/storage/fs/eosgrpc"
_ "github.com/opencloud-eu/reva/v2/pkg/storage/fs/eosgrpchome"

View File

@@ -23,7 +23,7 @@ import (
"io"
"os"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/pkg/errors"
)

View File

@@ -31,10 +31,10 @@ import (
"github.com/opencloud-eu/reva/v2/pkg/appctx"
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/metadata"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/usermapper"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/usermapper"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/templates"
"github.com/opencloud-eu/reva/v2/pkg/storagespace"
"github.com/pkg/errors"
@@ -49,7 +49,7 @@ var _spaceTypePersonal = "personal"
var _spaceTypeProject = "project"
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup")
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/pkg/decomposedfs/lookup")
}
// IDCache is a cache for node ids

View File

@@ -25,7 +25,7 @@ import (
microstore "go-micro.dev/v4/store"
"github.com/opencloud-eu/reva/v2/pkg/appctx"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/options"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options"
"github.com/opencloud-eu/reva/v2/pkg/store"
)

View File

@@ -22,7 +22,7 @@ import (
"time"
"github.com/mitchellh/mapstructure"
decomposedoptions "github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/options"
decomposedoptions "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options"
"github.com/pkg/errors"
)

View File

@@ -41,13 +41,13 @@ import (
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/trashbin"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/registry"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/aspects"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/metadata"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/permissions"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/upload"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/usermapper"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/aspects"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/permissions"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/usermapper"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/middleware"
"github.com/opencloud-eu/reva/v2/pkg/store"
"github.com/pkg/errors"

View File

@@ -24,8 +24,8 @@ import (
"syscall"
"time"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
)
// Manager is responsible for managing time-related operations on files and directories.

View File

@@ -34,8 +34,8 @@ import (
"github.com/opencloud-eu/reva/v2/pkg/storage"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/utils"
)

View File

@@ -37,9 +37,9 @@ import (
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
"github.com/opencloud-eu/reva/v2/pkg/events"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/metadata"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/utils"
)

View File

@@ -44,19 +44,19 @@ import (
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/trashbin"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/metadata"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/tree/propagator"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/decomposedfs/usermapper"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/propagator"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/usermapper"
"github.com/opencloud-eu/reva/v2/pkg/utils"
)
var tracer trace.Tracer
func init() {
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree")
tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/pkg/decomposedfs/tree")
}
// Blobstore defines an interface for storing blobs in a blobstore

View File

@@ -0,0 +1,38 @@
// Copyright 2018-2024 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package aspects
import (
"github.com/opencloud-eu/reva/v2/pkg/events"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/permissions"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/trashbin"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/usermapper"
)
// Aspects holds dependencies for handling aspects of the decomposedfs
type Aspects struct {
	Lookup            node.PathLookup         // resolves references and paths to nodes
	Tree              node.Tree               // tree implementation for node hierarchy operations
	Trashbin          trashbin.Trashbin       // trashbin backend for deleted items
	Permissions       permissions.Permissions // permission checks for nodes and spaces
	EventStream       events.Stream           // stream events are published on; may be nil
	DisableVersioning bool                    // when true no file versions are kept
	UserMapper        usermapper.Mapper       // maps between reva and local users
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,83 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package decomposedfs
import (
"context"
user "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
"github.com/opencloud-eu/reva/v2/pkg/appctx"
revactx "github.com/opencloud-eu/reva/v2/pkg/ctx"
"github.com/opencloud-eu/reva/v2/pkg/events"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/utils"
)
// publishEvent builds an event via evf and publishes it on the filesystem's
// event stream. Failures are logged, never returned, so event emission
// cannot break the calling operation.
func (fs *Decomposedfs) publishEvent(ctx context.Context, evf func() (any, error)) {
	log := appctx.GetLogger(ctx)
	if fs.stream == nil {
		log.Error().Msg("Failed to publish event, stream is undefined")
		return
	}
	ev, err := evf()
	if err != nil || ev == nil {
		// fixed typo in the log message: "crete" -> "create"
		log.Error().Err(err).Msg("Failed to create the event")
		return
	}

	if err := events.Publish(ctx, fs.stream, ev); err != nil {
		log.Error().Err(err).Msg("Failed to publish event")
	}
}
// moveEvent returns an event factory producing an ItemMoved event that
// describes the move of oldNode to newNode. The factory is evaluated lazily
// by publishEvent.
func (fs *Decomposedfs) moveEvent(ctx context.Context, oldRef, newRef *provider.Reference, oldNode, newNode *node.Node, orp, nrp *provider.ResourcePermissions) func() (any, error) {
	return func() (any, error) {
		executant, _ := revactx.ContextGetUser(ctx)
		ev := events.ItemMoved{
			SpaceOwner:        newNode.Owner(),
			Executant:         executant.GetId(),
			Ref:               newRef,
			OldReference:      oldRef,
			Timestamp:         utils.TSNow(),
			ImpersonatingUser: extractImpersonator(executant),
		}
		log := appctx.GetLogger(ctx)
		// Prefer node-based references; fall back to the raw refs set above
		// when resolution fails.
		if nref, err := fs.refFromNode(ctx, newNode, newRef.GetResourceId().GetStorageId(), nrp); err == nil {
			ev.Ref = nref
		} else {
			log.Error().Err(err).Msg("Failed to get destination reference")
		}
		if oref, err := fs.refFromNode(ctx, oldNode, oldRef.GetResourceId().GetStorageId(), orp); err == nil {
			ev.OldReference = oref
		} else {
			log.Error().Err(err).Msg("Failed to get source reference")
		}
		return ev, nil
	}
}
// extractImpersonator returns the impersonating user recorded in the
// executant's opaque data, or nil when none is present.
func extractImpersonator(u *user.User) *user.User {
	imp := user.User{}
	if err := utils.ReadJSONFromOpaque(u.Opaque, "impersonating-user", &imp); err != nil {
		return nil
	}
	return &imp
}

View File

@@ -0,0 +1,351 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package decomposedfs
import (
"context"
"path/filepath"
"strings"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
"github.com/opencloud-eu/reva/v2/internal/grpc/services/storageprovider"
"github.com/opencloud-eu/reva/v2/pkg/appctx"
ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx"
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/ace"
"github.com/opencloud-eu/reva/v2/pkg/storagespace"
"github.com/opencloud-eu/reva/v2/pkg/utils"
)
// DenyGrant denies access to a resource.
// It stores a grant with all permissions set to false for the given grantee,
// provided the acting user holds the DenyGrant permission on the node.
func (fs *Decomposedfs) DenyGrant(ctx context.Context, ref *provider.Reference, grantee *provider.Grantee) error {
	_, span := tracer.Start(ctx, "DenyGrant")
	defer span.End()
	log := appctx.GetLogger(ctx)
	log.Debug().Interface("ref", ref).Interface("grantee", grantee).Msg("DenyGrant()")
	grantNode, err := fs.lu.NodeFromResource(ctx, ref)
	if err != nil {
		return err
	}
	if !grantNode.Exists {
		return errtypes.NotFound(filepath.Join(grantNode.ParentID, grantNode.Name))
	}
	// set all permissions to false
	grant := &provider.Grant{
		Grantee:     grantee,
		Permissions: &provider.ResourcePermissions{},
	}
	// add acting user
	u := ctxpkg.ContextMustGetUser(ctx)
	grant.Creator = u.GetId()
	rp, err := fs.p.AssemblePermissions(ctx, grantNode)
	switch {
	case err != nil:
		return err
	case !rp.DenyGrant:
		// the user may see the node but is not allowed to deny access to it
		return errtypes.PermissionDenied(filepath.Join(grantNode.ParentID, grantNode.Name))
	}
	return fs.storeGrant(ctx, grantNode, grant)
}
// AddGrant adds a grant to a resource.
// It fails with AlreadyExists when a grant for the same grantee is already
// present.
func (fs *Decomposedfs) AddGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (err error) {
	_, span := tracer.Start(ctx, "AddGrant")
	defer span.End()
	log := appctx.GetLogger(ctx)
	log.Debug().Interface("ref", ref).Interface("grant", g).Msg("AddGrant()")
	// loadGrant locks the node; the lock is held until unlockFunc runs
	grantNode, unlockFunc, grant, err := fs.loadGrant(ctx, ref, g)
	if err != nil {
		return err
	}
	defer func() {
		_ = unlockFunc()
	}()
	if grant != nil {
		// grant already exists -> error out
		return errtypes.AlreadyExists(filepath.Join(grantNode.ParentID, grantNode.Name))
	}
	owner := grantNode.Owner()
	grants, err := grantNode.ListGrants(ctx)
	if err != nil {
		return err
	}
	// If the owner is empty and there are no grantees then we are dealing with a just created project space.
	// In this case we don't need to check for permissions and just add the grant since this will be the project
	// manager.
	// When the owner is empty but grants are set then we do want to check the grants.
	// However, if we are trying to edit an existing grant we do not have to check for permission if the user owns the grant
	// TODO: find a better way to check this
	if !(len(grants) == 0 && (owner == nil || owner.OpaqueId == "" || (owner.OpaqueId == grantNode.SpaceID && owner.Type == 8))) {
		rp, err := fs.p.AssemblePermissions(ctx, grantNode)
		switch {
		case err != nil:
			return err
		case !rp.AddGrant:
			f, _ := storagespace.FormatReference(ref)
			if rp.Stat {
				// the user may see the resource but not share it
				return errtypes.PermissionDenied(f)
			}
			// hide the resource's existence entirely
			return errtypes.NotFound(f)
		}
	}
	return fs.storeGrant(ctx, grantNode, g)
}
// ListGrants lists the grants on the specified resource.
// Users without the ListGrants permission only see grants they created
// themselves or that were granted to them.
func (fs *Decomposedfs) ListGrants(ctx context.Context, ref *provider.Reference) (grants []*provider.Grant, err error) {
	_, span := tracer.Start(ctx, "ListGrants")
	defer span.End()
	var grantNode *node.Node
	if grantNode, err = fs.lu.NodeFromResource(ctx, ref); err != nil {
		return
	}
	if !grantNode.Exists {
		err = errtypes.NotFound(filepath.Join(grantNode.ParentID, grantNode.Name))
		return
	}
	rp, err := fs.p.AssemblePermissions(ctx, grantNode)
	switch {
	case err != nil:
		return nil, err
	case !rp.ListGrants && !rp.Stat:
		// without Stat the resource's existence is hidden entirely
		f, _ := storagespace.FormatReference(ref)
		return nil, errtypes.NotFound(f)
	}
	log := appctx.GetLogger(ctx)
	var attrs node.Attributes
	if attrs, err = grantNode.Xattrs(ctx); err != nil {
		log.Error().Err(err).Msg("error listing attributes")
		return nil, err
	}
	// grants are persisted as extended attributes, one ACE per grantee
	aces := []*ace.ACE{}
	for k, v := range attrs {
		if strings.HasPrefix(k, prefixes.GrantPrefix) {
			var err error
			var e *ace.ACE
			principal := k[len(prefixes.GrantPrefix):]
			if e, err = ace.Unmarshal(principal, v); err != nil {
				// skip unparsable entries instead of failing the whole listing
				log.Error().Err(err).Str("principal", principal).Str("attr", k).Msg("could not unmarshal ace")
				continue
			}
			aces = append(aces, e)
		}
	}
	uid := ctxpkg.ContextMustGetUser(ctx).GetId()
	grants = make([]*provider.Grant, 0, len(aces))
	for i := range aces {
		g := aces[i].Grant()
		// you may list your own grants even without listgrants permission
		if !rp.ListGrants && !utils.UserIDEqual(g.Creator, uid) && !utils.UserIDEqual(g.Grantee.GetUserId(), uid) {
			continue
		}
		grants = append(grants, g)
	}
	return grants, nil
}
// RemoveGrant removes a grant from a resource.
//
// The node metadata is locked while the grant is looked up and deleted.
// A user may remove a grant they created themselves; otherwise the
// RemoveGrant permission is required. For space grants (not shares) the
// user/group space indexes are invalidated after deletion.
func (fs *Decomposedfs) RemoveGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (err error) {
	_, span := tracer.Start(ctx, "RemoveGrant")
	defer span.End()
	grantNode, unlockFunc, grant, err := fs.loadGrant(ctx, ref, g)
	if err != nil {
		return err
	}
	defer func() {
		_ = unlockFunc()
	}()
	if grant == nil {
		return errtypes.NotFound("grant not found")
	}
	// you are allowed to remove grants if you created them yourself or have the proper permission
	if !utils.UserIDEqual(grant.Creator, ctxpkg.ContextMustGetUser(ctx).GetId()) {
		rp, err := fs.p.AssemblePermissions(ctx, grantNode)
		switch {
		case err != nil:
			return err
		case !rp.RemoveGrant:
			// hide the resource from users without Stat permission
			f, _ := storagespace.FormatReference(ref)
			if rp.Stat {
				return errtypes.PermissionDenied(f)
			}
			return errtypes.NotFound(f)
		}
	}
	if err := grantNode.DeleteGrant(ctx, g, false); err != nil {
		return err
	}
	if isShareGrant(ctx) {
		// do not invalidate by user or group indexes
		// FIXME we should invalidate the by-type index, but that requires reference counting
	} else {
		// invalidate space grant
		switch {
		case g.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_USER:
			// remove from user index
			if err := fs.userSpaceIndex.Remove(g.Grantee.GetUserId().GetOpaqueId(), grantNode.SpaceID); err != nil {
				return err
			}
		case g.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_GROUP:
			// remove from group index
			if err := fs.groupSpaceIndex.Remove(g.Grantee.GetGroupId().GetOpaqueId(), grantNode.SpaceID); err != nil {
				return err
			}
		}
	}
	// propagate etag/tree-time changes up the tree
	return fs.tp.Propagate(ctx, grantNode, 0)
}
// isShareGrant reports whether the current operation targets a plain share.
// A space type in the context marks the operation as a space grant instead.
func isShareGrant(ctx context.Context) bool {
	if _, ok := storageprovider.SpaceTypeFromContext(ctx); ok {
		return false
	}
	return true
}
// UpdateGrant updates a grant on a resource
// TODO remove AddGrant or UpdateGrant grant from CS3 api, redundant? tracked in https://github.com/cs3org/cs3apis/issues/92
func (fs *Decomposedfs) UpdateGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error {
	_, span := tracer.Start(ctx, "UpdateGrant")
	defer span.End()
	log := appctx.GetLogger(ctx)
	log.Debug().Interface("ref", ref).Interface("grant", g).Msg("UpdateGrant()")
	// resolve the node, lock its metadata and look up the existing grant
	grantNode, unlockFunc, existing, err := fs.loadGrant(ctx, ref, g)
	if err != nil {
		return err
	}
	defer func() {
		_ = unlockFunc()
	}()
	if existing == nil {
		// grant not found
		// TODO: fallback to AddGrant?
		return errtypes.NotFound(g.Grantee.GetUserId().GetOpaqueId())
	}
	// The creator of a grant may always update it; everybody else needs the
	// UpdateGrant permission.
	if !utils.UserIDEqual(existing.Creator, ctxpkg.ContextMustGetUser(ctx).GetId()) {
		rp, perr := fs.p.AssemblePermissions(ctx, grantNode)
		if perr != nil {
			return perr
		}
		if !rp.UpdateGrant {
			f, _ := storagespace.FormatReference(ref)
			if rp.Stat {
				return errtypes.PermissionDenied(f)
			}
			return errtypes.NotFound(f)
		}
	}
	return fs.storeGrant(ctx, grantNode, g)
}
// loadGrant resolves the node for ref, locks its metadata file and looks up
// the grant matching g's grantee. A nil grant in the result means no matching
// grant exists. On success the caller is responsible for calling the returned
// UnlockFunc; on error no lock is held.
func (fs *Decomposedfs) loadGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (*node.Node, metadata.UnlockFunc, *provider.Grant, error) {
	_, span := tracer.Start(ctx, "loadGrant")
	defer span.End()
	n, err := fs.lu.NodeFromResource(ctx, ref)
	if err != nil {
		return nil, nil, nil, err
	}
	if !n.Exists {
		return nil, nil, nil, errtypes.NotFound(filepath.Join(n.ParentID, n.Name))
	}
	// lock the metadata file
	unlockFunc, err := fs.lu.MetadataBackend().Lock(n.InternalPath())
	if err != nil {
		return nil, nil, nil, err
	}
	grants, err := n.ListGrants(ctx)
	if err != nil {
		return nil, nil, nil, err
	}
	// match on the grantee's opaque id, user and group grants separately
	for _, grant := range grants {
		switch grant.Grantee.GetType() {
		case provider.GranteeType_GRANTEE_TYPE_USER:
			if g.Grantee.GetUserId().GetOpaqueId() == grant.Grantee.GetUserId().GetOpaqueId() {
				return n, unlockFunc, grant, nil
			}
		case provider.GranteeType_GRANTEE_TYPE_GROUP:
			if g.Grantee.GetGroupId().GetOpaqueId() == grant.Grantee.GetGroupId().GetOpaqueId() {
				return n, unlockFunc, grant, nil
			}
		}
	}
	// no matching grant — still return the node and the held lock
	return n, unlockFunc, nil, nil
}
// storeGrant persists g as an extended attribute on n, updates the space
// indexes and propagates the change up the tree. The caller must already
// hold the node's metadata lock (see loadGrant).
func (fs *Decomposedfs) storeGrant(ctx context.Context, n *node.Node, g *provider.Grant) error {
	_, span := tracer.Start(ctx, "storeGrant")
	defer span.End()
	// if is a grant to a space root, the receiver needs the space type to update the indexes
	spaceType, ok := storageprovider.SpaceTypeFromContext(ctx)
	if !ok {
		// this is not a grant on a space root we are just adding a share
		spaceType = spaceTypeShare
	}
	// set the grant: encode it as an ACE and store it under the grant prefix
	e := ace.FromGrant(g)
	principal, value := e.Marshal()
	attribs := node.Attributes{
		prefixes.GrantPrefix + principal: value,
	}
	// false: do not acquire the lock again, the caller already holds it
	if err := n.SetXattrsWithContext(ctx, attribs, false); err != nil {
		appctx.GetLogger(ctx).Error().Err(err).
			Str("principal", principal).Msg("Could not set grant for principal")
		return err
	}
	// update the indexes only after successfully setting the grant
	err := fs.updateIndexes(ctx, g.GetGrantee(), spaceType, n.SpaceID, n.ID)
	if err != nil {
		return err
	}
	return fs.tp.Propagate(ctx, n, 0)
}

View File

@@ -0,0 +1,412 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package lookup
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
user "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
"github.com/google/uuid"
"github.com/opencloud-eu/reva/v2/pkg/appctx"
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options"
"github.com/pkg/errors"
"github.com/rogpeppe/go-internal/lockedfile"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
)
var tracer trace.Tracer
const (
_spaceTypePersonal = "personal"
)
// init registers the package-level tracer used by all lookup operations.
func init() {
	// NOTE(review): the tracer name still references the upstream cs3org import
	// path although this fork lives under opencloud-eu — confirm whether it
	// should be updated to the fork's module path.
	tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup")
}
// Lookup implements transformations from filepath to node and back.
type Lookup struct {
	// Options holds the decomposedfs configuration (root path etc.)
	Options *options.Options
	// metadataBackend persists node metadata (xattrs, ini or messagepack)
	metadataBackend metadata.Backend
	// tm provides mtime/tmtime handling for nodes
	tm node.TimeManager
}
// New returns a new Lookup instance
func New(b metadata.Backend, o *options.Options, tm node.TimeManager) *Lookup {
	lu := &Lookup{}
	lu.metadataBackend = b
	lu.Options = o
	lu.tm = tm
	return lu
}
// MetadataBackend returns the metadata backend used by this Lookup.
func (lu *Lookup) MetadataBackend() metadata.Backend {
	return lu.metadataBackend
}
// ReadBlobIDAndSizeAttr returns the blob id and blob size of the node at path.
// If attrs is non-nil it is used as the source; otherwise the attributes are
// read from the metadata backend.
//
// NOTE(review): the two branches behave differently — with attrs given the
// blob size is only read when a blob id is present, while the backend branch
// always reads the size attribute. Confirm whether this asymmetry is intended.
func (lu *Lookup) ReadBlobIDAndSizeAttr(ctx context.Context, path string, attrs node.Attributes) (string, int64, error) {
	blobID := ""
	blobSize := int64(0)
	var err error
	if attrs != nil {
		blobID = attrs.String(prefixes.BlobIDAttr)
		if blobID != "" {
			blobSize, err = attrs.Int64(prefixes.BlobsizeAttr)
			if err != nil {
				return "", 0, err
			}
		}
	} else {
		// no attributes supplied: read them from the backend
		attrs, err := lu.metadataBackend.All(ctx, path)
		if err != nil {
			return "", 0, errors.Wrapf(err, "error reading blobid xattr")
		}
		nodeAttrs := node.Attributes(attrs)
		blobID = nodeAttrs.String(prefixes.BlobIDAttr)
		blobSize, err = nodeAttrs.Int64(prefixes.BlobsizeAttr)
		if err != nil {
			return "", 0, errors.Wrapf(err, "error reading blobsize xattr")
		}
	}
	return blobID, blobSize, nil
}
func readChildNodeFromLink(path string) (string, error) {
link, err := os.Readlink(path)
if err != nil {
return "", err
}
nodeID := strings.TrimLeft(link, "/.")
nodeID = strings.ReplaceAll(nodeID, "/", "")
return nodeID, nil
}
// NodeIDFromParentAndName resolves the id of the child node called name by
// reading the corresponding symlink inside the parent's internal directory.
func (lu *Lookup) NodeIDFromParentAndName(ctx context.Context, parent *node.Node, name string) (string, error) {
	childLink := filepath.Join(parent.InternalPath(), name)
	nodeID, err := readChildNodeFromLink(childLink)
	if err != nil {
		return "", errors.Wrap(err, "decomposedfs: Wrap: readlink error")
	}
	return nodeID, nil
}
// TypeFromPath returns the type of the node at the given path.
// It first consults the type attribute in the metadata backend and only
// falls back to inspecting the file mode on disk if that fails.
func (lu *Lookup) TypeFromPath(ctx context.Context, path string) provider.ResourceType {
	// Try to read from xattrs
	typeAttr, err := lu.metadataBackend.GetInt64(ctx, path, prefixes.TypeAttr)
	if err == nil {
		return provider.ResourceType(int32(typeAttr))
	}
	t := provider.ResourceType_RESOURCE_TYPE_INVALID
	// Fall back to checking on disk
	fi, err := os.Lstat(path)
	if err != nil {
		// neither metadata nor a stat-able file: report INVALID
		return t
	}
	switch {
	case fi.IsDir():
		// a directory carrying a reference attribute is a reference,
		// otherwise it is a plain container
		if _, err = lu.metadataBackend.Get(ctx, path, prefixes.ReferenceAttr); err == nil {
			t = provider.ResourceType_RESOURCE_TYPE_REFERENCE
		} else {
			t = provider.ResourceType_RESOURCE_TYPE_CONTAINER
		}
	case fi.Mode().IsRegular():
		t = provider.ResourceType_RESOURCE_TYPE_FILE
	case fi.Mode()&os.ModeSymlink != 0:
		t = provider.ResourceType_RESOURCE_TYPE_SYMLINK
		// TODO reference using ext attr on a symlink
		// nodeType = provider.ResourceType_RESOURCE_TYPE_REFERENCE
	}
	return t
}
// NodeFromResource takes in a request path or request id and converts it to a Node.
// The reference must carry a ResourceId; an optional relative Path is walked
// starting from that node.
func (lu *Lookup) NodeFromResource(ctx context.Context, ref *provider.Reference) (*node.Node, error) {
	ctx, span := tracer.Start(ctx, "NodeFromResource")
	defer span.End()
	if ref.ResourceId != nil {
		// check if a storage space reference is used
		// currently, the decomposed fs uses the root node id as the space id
		n, err := lu.NodeFromID(ctx, ref.ResourceId)
		if err != nil {
			return nil, err
		}
		// is this a relative reference?
		if ref.Path != "" {
			p := filepath.Clean(ref.Path)
			if p != "." && p != "/" {
				// walk the relative path
				n, err = lu.WalkPath(ctx, n, p, false, func(ctx context.Context, n *node.Node) error { return nil })
				if err != nil {
					return nil, err
				}
				// keep the space id from the reference on the resolved node
				n.SpaceID = ref.ResourceId.SpaceId
			}
		}
		return n, nil
	}
	// reference is invalid
	return nil, fmt.Errorf("invalid reference %+v. resource_id must be set", ref)
}
// NodeFromID returns the node for the given resource id.
// An empty OpaqueId addresses the root of the space itself.
func (lu *Lookup) NodeFromID(ctx context.Context, id *provider.ResourceId) (n *node.Node, err error) {
	ctx, span := tracer.Start(ctx, "NodeFromID")
	defer span.End()
	switch {
	case id == nil:
		return nil, fmt.Errorf("invalid resource id %+v", id)
	case id.OpaqueId == "":
		// The Resource references the root of a space
		return lu.NodeFromSpaceID(ctx, id.SpaceId)
	default:
		return node.ReadNode(ctx, lu, id.SpaceId, id.OpaqueId, false, nil, false)
	}
}
// Pathify segments the beginning of a string into depth segments of width length
// Pathify("aabbccdd", 3, 1) will return "a/a/b/bccdd"
func Pathify(id string, depth, width int) string {
	var sb strings.Builder
	consumed := 0
	for level := 0; level < depth; level++ {
		// stop segmenting when the remainder would not extend past this segment
		if len(id) <= consumed+width {
			break
		}
		sb.WriteString(id[consumed : consumed+width])
		sb.WriteRune(filepath.Separator)
		consumed += width
	}
	sb.WriteString(id[consumed:])
	return sb.String()
}
// NodeFromSpaceID returns the root node of the space with the given id.
// In the decomposed fs a space root is the node whose id equals its space id.
func (lu *Lookup) NodeFromSpaceID(ctx context.Context, spaceID string) (n *node.Node, err error) {
	root, err := node.ReadNode(ctx, lu, spaceID, spaceID, false, nil, false)
	if err != nil {
		return nil, err
	}
	root.SpaceRoot = root
	return root, nil
}
// GenerateSpaceID generates a new space id for the given space type.
// Personal spaces reuse the owner's id so they stay stable per user; all
// other space types get a fresh random UUID.
func (lu *Lookup) GenerateSpaceID(spaceType string, owner *user.User) (string, error) {
	if spaceType == _spaceTypePersonal {
		return owner.Id.OpaqueId, nil
	}
	return uuid.New().String(), nil
}
// Path returns the path for node n relative to its space root by walking up
// the parent chain. The walk stops early at the first ancestor the user has
// no permission on (hasPermission), so the returned path may be truncated.
func (lu *Lookup) Path(ctx context.Context, n *node.Node, hasPermission node.PermissionFunc) (p string, err error) {
	root := n.SpaceRoot
	var child *node.Node
	for n.ID != root.ID {
		// prepend the current segment and move up one level
		p = filepath.Join(n.Name, p)
		child = n
		if n, err = n.Parent(ctx); err != nil {
			appctx.GetLogger(ctx).
				Error().Err(err).
				Str("path", p).
				Str("spaceid", child.SpaceID).
				Str("nodeid", child.ID).
				Str("parentid", child.ParentID).
				Msg("Path()")
			return
		}
		if !hasPermission(n) {
			break
		}
	}
	p = filepath.Join("/", p)
	return
}
// WalkPath calls n.Child(segment) on every path segment in p starting at the node r.
// If a function f is given it will be executed for every segment node, but not the root node r.
// If followReferences is given the current visited reference node is replaced by the referenced node.
func (lu *Lookup) WalkPath(ctx context.Context, r *node.Node, p string, followReferences bool, f func(ctx context.Context, n *node.Node) error) (*node.Node, error) {
	segments := strings.Split(strings.Trim(p, "/"), "/")
	var err error
	for i := range segments {
		if r, err = r.Child(ctx, segments[i]); err != nil {
			return r, err
		}
		if followReferences {
			// a reference attribute replaces the current node with its target
			if attrBytes, err := r.Xattr(ctx, prefixes.ReferenceAttr); err == nil {
				realNodeID := attrBytes
				ref, err := refFromCS3(realNodeID)
				if err != nil {
					return nil, err
				}
				r, err = lu.NodeFromID(ctx, ref.ResourceId)
				if err != nil {
					return nil, err
				}
			}
		}
		if r.IsSpaceRoot(ctx) {
			r.SpaceRoot = r
		}
		// only the last segment may be missing (e.g. for lookups before create)
		if !r.Exists && i < len(segments)-1 {
			return r, errtypes.NotFound(segments[i])
		}
		if f != nil {
			if err = f(ctx, r); err != nil {
				return r, err
			}
		}
	}
	return r, nil
}
// InternalRoot returns the internal storage root directory as configured.
func (lu *Lookup) InternalRoot() string {
	return lu.Options.Root
}
// InternalPath returns the internal path for a given ID.
// Layout: <root>/spaces/<sp/aceid>/nodes/<no/de/id/...>.
func (lu *Lookup) InternalPath(spaceID, nodeID string) string {
	spaceSegment := Pathify(spaceID, 1, 2)
	nodeSegment := Pathify(nodeID, 4, 2)
	return filepath.Join(lu.Options.Root, "spaces", spaceSegment, "nodes", nodeSegment)
}
// // ReferenceFromAttr returns a CS3 reference from xattr of a node.
// // Supported formats are: "cs3:storageid/nodeid"
// func ReferenceFromAttr(b []byte) (*provider.Reference, error) {
// return refFromCS3(b)
// }
// refFromCS3 creates a CS3 reference from a set of bytes. This method should remain private
// and only be called after validation because it can potentially panic.
// Expected input shape: "cs3:<storageid>/<nodeid>".
func refFromCS3(b []byte) (*provider.Reference, error) {
	// drop the 4-byte "cs3:" prefix, then split into storage and opaque id
	segments := strings.Split(string(b[4:]), "/")
	resourceID := &provider.ResourceId{
		StorageId: segments[0],
		OpaqueId:  segments[1],
	}
	return &provider.Reference{ResourceId: resourceID}, nil
}
// CopyMetadata copies all extended attributes from source to target.
// The optional filter function can be used to filter by attribute name, e.g. by checking a prefix
// For the source file, a shared lock is acquired.
// NOTE: target resource will be write locked!
func (lu *Lookup) CopyMetadata(ctx context.Context, src, target string, filter func(attributeName string, value []byte) (newValue []byte, copy bool), acquireTargetLock bool) (err error) {
	// Acquire a read lock on the source node so its attributes cannot change
	// while we copy them.
	lock, err := lockedfile.OpenFile(lu.MetadataBackend().LockfilePath(src), os.O_RDONLY|os.O_CREATE, 0600)
	if err != nil {
		// the original code checked err twice here; the second check (which did
		// the wrapping) was dead code — fold the wrap into the single check
		return errors.Wrap(err, "xattrs: Unable to lock source to read")
	}
	defer func() {
		rerr := lock.Close()
		// if err is non nil we do not overwrite that
		if err == nil {
			err = rerr
		}
	}()
	return lu.CopyMetadataWithSourceLock(ctx, src, target, filter, lock, acquireTargetLock)
}
// CopyMetadataWithSourceLock copies all extended attributes from source to target.
// The optional filter function can be used to filter by attribute name, e.g. by checking a prefix
// For the source file, a matching lockedfile is required.
// NOTE: target resource will be write locked!
func (lu *Lookup) CopyMetadataWithSourceLock(ctx context.Context, sourcePath, targetPath string, filter func(attributeName string, value []byte) (newValue []byte, copy bool), lockedSource *lockedfile.File, acquireTargetLock bool) (err error) {
	// sanity-check that the provided lock actually belongs to the source path
	switch {
	case lockedSource == nil:
		return errors.New("no lock provided")
	case lockedSource.File.Name() != lu.MetadataBackend().LockfilePath(sourcePath):
		return errors.New("lockpath does not match filepath")
	}
	attrs, err := lu.metadataBackend.All(ctx, sourcePath)
	if err != nil {
		return err
	}
	// pre-size: in the common case (no filter) every attribute is kept
	newAttrs := make(map[string][]byte, len(attrs))
	for attrName, val := range attrs {
		if filter != nil {
			var ok bool
			// the filter may rewrite the value or drop the attribute entirely
			if val, ok = filter(attrName, val); !ok {
				continue
			}
		}
		newAttrs[attrName] = val
	}
	return lu.MetadataBackend().SetMultiple(ctx, targetPath, newAttrs, acquireTargetLock)
}
// TimeManager returns the time manager used for node mtime/tmtime handling.
func (lu *Lookup) TimeManager() node.TimeManager {
	return lu.tm
}
// DetectBackendOnDisk returns the name of the metadata backend being used on
// disk by probing one existing space for a .mpk or .ini metadata file.
// Falls back to "xattrs" when nothing matches.
func DetectBackendOnDisk(root string) string {
	matches, _ := filepath.Glob(filepath.Join(root, "spaces", "*", "*"))
	if len(matches) == 0 {
		return "xattrs"
	}
	// probe the last matching space directory
	base := matches[len(matches)-1]
	spaceid := strings.ReplaceAll(
		strings.TrimPrefix(base, filepath.Join(root, "spaces")),
		"/", "")
	spaceRoot := Pathify(spaceid, 4, 2)
	exists := func(p string) bool {
		_, err := os.Stat(p)
		return err == nil
	}
	nodeBase := filepath.Join(base, "nodes", spaceRoot)
	switch {
	case exists(nodeBase + ".mpk"):
		return "mpk"
	case exists(nodeBase + ".ini"):
		return "ini"
	}
	return "xattrs"
}

View File

@@ -0,0 +1,224 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package decomposedfs
import (
"context"
"fmt"
"path/filepath"
userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
"github.com/opencloud-eu/reva/v2/pkg/appctx"
ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx"
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storagespace"
"github.com/opencloud-eu/reva/v2/pkg/utils"
"github.com/pkg/errors"
)
// SetArbitraryMetadata sets the metadata on a resource.
//
// The well-known keys "mtime", "etag" and the favorite flag are handled
// specially; all remaining keys are stored as extended attributes under the
// metadata prefix. Errors on individual keys are collected and the update
// continues; tree propagation only happens when no error occurred.
func (fs *Decomposedfs) SetArbitraryMetadata(ctx context.Context, ref *provider.Reference, md *provider.ArbitraryMetadata) (err error) {
	_, span := tracer.Start(ctx, "SetArbitraryMetadata")
	defer span.End()
	n, err := fs.lu.NodeFromResource(ctx, ref)
	if err != nil {
		return errors.Wrap(err, "Decomposedfs: error resolving ref")
	}
	sublog := appctx.GetLogger(ctx).With().Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Logger()
	if !n.Exists {
		err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name))
		return err
	}
	rp, err := fs.p.AssemblePermissions(ctx, n)
	switch {
	case err != nil:
		return err
	case !rp.InitiateFileUpload: // TODO add explicit SetArbitraryMetadata grant to CS3 api, tracked in https://github.com/cs3org/cs3apis/issues/91
		// hide the resource from users without Stat permission
		f, _ := storagespace.FormatReference(ref)
		if rp.Stat {
			return errtypes.PermissionDenied(f)
		}
		return errtypes.NotFound(f)
	}
	// Set space owner in context
	storagespace.ContextSendSpaceOwnerID(ctx, n.SpaceOwnerOrManager(ctx))
	// check lock
	if err := n.CheckLock(ctx); err != nil {
		return err
	}
	errs := []error{}
	// TODO should we really continue updating when an error occurs?
	if md.Metadata != nil {
		// handled keys are removed from md.Metadata so the generic loop
		// below only sees the remaining arbitrary keys
		if val, ok := md.Metadata["mtime"]; ok {
			delete(md.Metadata, "mtime")
			if err := n.SetMtimeString(ctx, val); err != nil {
				errs = append(errs, errors.Wrap(err, "could not set mtime"))
			}
		}
		// TODO(jfd) special handling for atime?
		// TODO(jfd) allow setting birth time (btime)?
		// TODO(jfd) any other metadata that is interesting? fileid?
		// TODO unset when file is updated
		// TODO unset when folder is updated or add timestamp to etag?
		if val, ok := md.Metadata["etag"]; ok {
			delete(md.Metadata, "etag")
			if err := n.SetEtag(ctx, val); err != nil {
				errs = append(errs, errors.Wrap(err, "could not set etag"))
			}
		}
		if val, ok := md.Metadata[node.FavoriteKey]; ok {
			delete(md.Metadata, node.FavoriteKey)
			// the favorite flag is per user and therefore needs a user in context
			if u, ok := ctxpkg.ContextGetUser(ctx); ok {
				if uid := u.GetId(); uid != nil {
					if err := n.SetFavorite(ctx, uid, val); err != nil {
						sublog.Error().Err(err).
							Interface("user", u).
							Msg("could not set favorite flag")
						errs = append(errs, errors.Wrap(err, "could not set favorite flag"))
					}
				} else {
					sublog.Error().Interface("user", u).Msg("user has no id")
					errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "user has no id"))
				}
			} else {
				sublog.Error().Interface("user", u).Msg("error getting user from ctx")
				errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx"))
			}
		}
	}
	// all remaining keys are stored verbatim under the metadata prefix
	for k, v := range md.Metadata {
		attrName := prefixes.MetadataPrefix + k
		if err = n.SetXattrString(ctx, attrName, v); err != nil {
			errs = append(errs, errors.Wrap(err, "Decomposedfs: could not set metadata attribute "+attrName+" to "+k))
		}
	}
	switch len(errs) {
	case 0:
		return fs.tp.Propagate(ctx, n, 0)
	case 1:
		// TODO Propagate if anything changed
		return errs[0]
	default:
		// TODO Propagate if anything changed
		// TODO how to return multiple errors?
		return errors.New("multiple errors occurred, see log for details")
	}
}
// UnsetArbitraryMetadata unsets the metadata on the given resource.
//
// The favorite flag is user-specific and removed under the per-user favorite
// attribute; all other keys are removed under the metadata prefix. Attributes
// that are already unset are ignored. Errors are collected per key; tree
// propagation only happens when no error occurred.
func (fs *Decomposedfs) UnsetArbitraryMetadata(ctx context.Context, ref *provider.Reference, keys []string) (err error) {
	_, span := tracer.Start(ctx, "UnsetArbitraryMetadata")
	defer span.End()
	n, err := fs.lu.NodeFromResource(ctx, ref)
	if err != nil {
		return errors.Wrap(err, "Decomposedfs: error resolving ref")
	}
	sublog := appctx.GetLogger(ctx).With().Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Logger()
	if !n.Exists {
		err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name))
		return err
	}
	rp, err := fs.p.AssemblePermissions(ctx, n)
	switch {
	case err != nil:
		return err
	case !rp.InitiateFileUpload: // TODO use SetArbitraryMetadata grant to CS3 api, tracked in https://github.com/cs3org/cs3apis/issues/91
		// hide the resource from users without Stat permission
		f, _ := storagespace.FormatReference(ref)
		if rp.Stat {
			return errtypes.PermissionDenied(f)
		}
		return errtypes.NotFound(f)
	}
	// Set space owner in context
	storagespace.ContextSendSpaceOwnerID(ctx, n.SpaceOwnerOrManager(ctx))
	// check lock
	if err := n.CheckLock(ctx); err != nil {
		return err
	}
	errs := []error{}
	for _, k := range keys {
		switch k {
		case node.FavoriteKey:
			// the favorite flag is specific to the user, so we need to incorporate the userid
			u, ok := ctxpkg.ContextGetUser(ctx)
			if !ok {
				sublog.Error().
					Interface("user", u).
					Msg("error getting user from ctx")
				errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx"))
				continue
			}
			var uid *userpb.UserId
			if uid = u.GetId(); uid == nil || uid.OpaqueId == "" {
				sublog.Error().
					Interface("user", u).
					Msg("user has no id")
				errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "user has no id"))
				continue
			}
			// per-user favorite attribute: <prefix>:<usertype>:<opaqueid>@<idp>
			fa := fmt.Sprintf("%s:%s:%s@%s", prefixes.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp())
			if err := n.RemoveXattr(ctx, fa, true); err != nil {
				if metadata.IsAttrUnset(err) {
					continue // already gone, ignore
				}
				sublog.Error().Err(err).
					Interface("user", u).
					Str("key", fa).
					Msg("could not unset favorite flag")
				errs = append(errs, errors.Wrap(err, "could not unset favorite flag"))
			}
		default:
			if err = n.RemoveXattr(ctx, prefixes.MetadataPrefix+k, true); err != nil {
				if metadata.IsAttrUnset(err) {
					continue // already gone, ignore
				}
				sublog.Error().Err(err).
					Str("key", k).
					Msg("could not unset metadata")
				errs = append(errs, errors.Wrap(err, "could not unset metadata"))
			}
		}
	}
	switch len(errs) {
	case 0:
		return fs.tp.Propagate(ctx, n, 0)
	case 1:
		// TODO Propagate if anything changed
		return errs[0]
	default:
		// TODO Propagate if anything changed
		// TODO how to return multiple errors?
		return errors.New("multiple errors occurred, see log for details")
	}
}

View File

@@ -0,0 +1,72 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package metadata
import (
"io/fs"
"os"
"syscall"
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/pkg/errors"
"github.com/pkg/xattr"
)
// IsNotExist checks if there is a os not exists error buried inside the xattr error,
// as we cannot just use os.IsNotExist().
func IsNotExist(err error) bool {
	if _, ok := err.(errtypes.IsNotFound); ok {
		return true
	}
	cause := errors.Cause(err)
	if os.IsNotExist(cause) {
		return true
	}
	// the xattr package wraps the underlying errno in its own error type
	xerr, ok := cause.(*xattr.Error)
	if !ok {
		return false
	}
	errno, ok := xerr.Err.(syscall.Errno)
	return ok && errno == syscall.ENOENT
}
// IsAttrUnset checks the xattr.ENOATTR from the xattr package which redifines it as ENODATA on platforms that do not natively support it (eg. linux)
// see https://github.com/pkg/xattr/blob/8725d4ccc0fcef59c8d9f0eaf606b3c6f962467a/xattr_linux.go#L19-L22
func IsAttrUnset(err error) bool {
	xerr, ok := errors.Cause(err).(*xattr.Error)
	if !ok {
		return false
	}
	errno, ok := xerr.Err.(syscall.Errno)
	return ok && errno == xattr.ENOATTR
}
// IsNotDir reports whether err carries an ENOTDIR errno, which may be buried
// inside either a fs.PathError or an xattr.Error.
func IsNotDir(err error) bool {
	switch e := errors.Cause(err).(type) {
	case *fs.PathError:
		errno, ok := e.Err.(syscall.Errno)
		return ok && errno == syscall.ENOTDIR
	case *xattr.Error:
		errno, ok := e.Err.(syscall.Errno)
		return ok && errno == syscall.ENOTDIR
	default:
		return false
	}
}

View File

@@ -0,0 +1,328 @@
// Copyright 2018-2023 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package metadata
import (
"context"
"errors"
"io"
"io/fs"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/google/renameio/v2"
"github.com/opencloud-eu/reva/v2/pkg/storage/cache"
"github.com/pkg/xattr"
"github.com/rogpeppe/go-internal/lockedfile"
"github.com/shamaton/msgpack/v2"
"go.opentelemetry.io/otel/codes"
)
// MessagePackBackend persists the attributes in messagepack format inside the file
type MessagePackBackend struct {
	// rootPath is the cleaned storage root directory
	rootPath string
	// metaCache caches decoded attribute maps keyed by node path
	metaCache cache.FileMetadataCache
}
// readWriteCloseSeekTruncater is the minimal file abstraction saveAttributes
// needs; satisfied by *lockedfile.File.
type readWriteCloseSeekTruncater interface {
	io.ReadWriteCloser
	io.Seeker
	Truncate(int64) error
}
// NewMessagePackBackend returns a new MessagePackBackend instance
func NewMessagePackBackend(rootPath string, o cache.Config) MessagePackBackend {
	backend := MessagePackBackend{}
	backend.rootPath = filepath.Clean(rootPath)
	backend.metaCache = cache.GetFileMetadataCache(o)
	return backend
}
// Name returns the name of the backend ("messagepack").
func (MessagePackBackend) Name() string { return "messagepack" }
// All reads all extended attributes for a node (from cache or disk).
func (b MessagePackBackend) All(ctx context.Context, path string) (map[string][]byte, error) {
	return b.loadAttributes(ctx, path, nil)
}
// Get an extended attribute value for the given key.
// A missing key is reported as an xattr.Error carrying ENOATTR so callers
// can treat it like a native unset attribute.
func (b MessagePackBackend) Get(ctx context.Context, path, key string) ([]byte, error) {
	attribs, err := b.loadAttributes(ctx, path, nil)
	if err != nil {
		return []byte{}, err
	}
	if val, ok := attribs[key]; ok {
		return val, nil
	}
	return []byte{}, &xattr.Error{Op: "mpk.get", Path: path, Name: key, Err: xattr.ENOATTR}
}
// GetInt64 reads a string as int64 from the xattrs.
// A missing key is reported as an xattr.Error carrying ENOATTR.
func (b MessagePackBackend) GetInt64(ctx context.Context, path, key string) (int64, error) {
	attribs, err := b.loadAttributes(ctx, path, nil)
	if err != nil {
		return 0, err
	}
	val, ok := attribs[key]
	if !ok {
		return 0, &xattr.Error{Op: "mpk.get", Path: path, Name: key, Err: xattr.ENOATTR}
	}
	// values are stored as decimal strings
	return strconv.ParseInt(string(val), 10, 64)
}
// List retrieves a list of names of extended attributes associated with the
// given path in the file system. The order of the names is undefined.
func (b MessagePackBackend) List(ctx context.Context, path string) ([]string, error) {
	attribs, err := b.loadAttributes(ctx, path, nil)
	if err != nil {
		return nil, err
	}
	// pre-size: one key per attribute (avoids repeated slice growth)
	keys := make([]string, 0, len(attribs))
	for k := range attribs {
		keys = append(keys, k)
	}
	return keys, nil
}
// Set sets one attribute for the given path (always acquires the lock).
func (b MessagePackBackend) Set(ctx context.Context, path, key string, val []byte) error {
	return b.SetMultiple(ctx, path, map[string][]byte{key: val}, true)
}
// SetMultiple sets a set of attributes for the given path.
// acquireLock controls whether the metadata lock file is taken; pass false
// only when the caller already holds it.
func (b MessagePackBackend) SetMultiple(ctx context.Context, path string, attribs map[string][]byte, acquireLock bool) error {
	return b.saveAttributes(ctx, path, attribs, nil, acquireLock)
}
// Remove deletes an extended attribute key.
// acquireLock controls whether the metadata lock file is taken; pass false
// only when the caller already holds it.
func (b MessagePackBackend) Remove(ctx context.Context, path, key string, acquireLock bool) error {
	return b.saveAttributes(ctx, path, nil, []string{key}, acquireLock)
}
// AllWithLockedSource reads all extended attributes from the given reader (if possible).
// The path argument is used for storing the data in the cache
func (b MessagePackBackend) AllWithLockedSource(ctx context.Context, path string, source io.Reader) (map[string][]byte, error) {
	return b.loadAttributes(ctx, path, source)
}
// saveAttributes atomically merges setAttribs into, and removes deleteAttribs
// from, the messagepack metadata file for path, then refreshes the metadata
// cache. When acquireLock is true the per-node lock file is held for the
// whole read-modify-write cycle.
func (b MessagePackBackend) saveAttributes(ctx context.Context, path string, setAttribs map[string][]byte, deleteAttribs []string, acquireLock bool) error {
	var (
		err error
		f   readWriteCloseSeekTruncater
	)
	ctx, span := tracer.Start(ctx, "saveAttributes")
	defer func() {
		if err != nil {
			span.SetStatus(codes.Error, err.Error())
		} else {
			span.SetStatus(codes.Ok, "")
		}
		span.End()
	}()
	lockPath := b.LockfilePath(path)
	metaPath := b.MetadataPath(path)
	if acquireLock {
		_, subspan := tracer.Start(ctx, "lockedfile.OpenFile")
		f, err = lockedfile.OpenFile(lockPath, os.O_RDWR|os.O_CREATE, 0600)
		subspan.End()
		if err != nil {
			return err
		}
		defer f.Close()
	}
	// Read current state
	_, subspan := tracer.Start(ctx, "os.ReadFile")
	var msgBytes []byte
	msgBytes, err = os.ReadFile(metaPath)
	subspan.End()
	attribs := map[string][]byte{}
	switch {
	case err != nil:
		// a missing metadata file simply means "no attributes yet"
		if !errors.Is(err, fs.ErrNotExist) {
			return err
		}
	case len(msgBytes) == 0:
		// ugh. an empty file? bail out instead of silently overwriting it
		return errors.New("encountered empty metadata file")
	default:
		// only unmarshal if we read data
		err = msgpack.Unmarshal(msgBytes, &attribs)
		if err != nil {
			return err
		}
	}
	// prepare metadata: apply sets first, then deletes
	for key, val := range setAttribs {
		attribs[key] = val
	}
	for _, key := range deleteAttribs {
		delete(attribs, key)
	}
	var d []byte
	d, err = msgpack.Marshal(attribs)
	if err != nil {
		return err
	}
	// overwrite file atomically
	_, subspan = tracer.Start(ctx, "renameio.Writefile")
	err = renameio.WriteFile(metaPath, d, 0600)
	// FIX: end the subspan before the error check; previously a failing
	// WriteFile returned early and leaked the span (it was never ended)
	subspan.End()
	if err != nil {
		return err
	}
	_, subspan = tracer.Start(ctx, "metaCache.PushToCache")
	err = b.metaCache.PushToCache(b.cacheKey(path), attribs)
	subspan.End()
	return err
}
// loadAttributes returns all extended attributes for path. It first consults
// the metadata cache; on a miss it reads either from the provided source
// reader or, when source is nil, from the metadata file on disk, and then
// repopulates the cache.
func (b MessagePackBackend) loadAttributes(ctx context.Context, path string, source io.Reader) (map[string][]byte, error) {
	ctx, span := tracer.Start(ctx, "loadAttributes")
	defer span.End()
	attribs := map[string][]byte{}
	err := b.metaCache.PullFromCache(b.cacheKey(path), &attribs)
	if err == nil {
		return attribs, err
	}
	metaPath := b.MetadataPath(path)
	var msgBytes []byte
	if source == nil {
		// No cached entry found. Read from storage and store in cache
		_, subspan := tracer.Start(ctx, "os.OpenFile")
		source, err = os.Open(metaPath)
		subspan.End()
		if err != nil {
			if !os.IsNotExist(err) {
				// FIX: previously any non-NotExist open error fell through to
				// io.ReadAll on a nil *os.File, masking the real error with
				// "invalid argument"
				return nil, err
			}
			// some of the callers rely on ENOTEXISTS to be returned when the
			// actual file (not the metafile) does not exist in order to
			// determine whether a node exists or not -> stat the actual node
			_, subspan := tracer.Start(ctx, "os.Stat")
			_, err := os.Stat(path)
			subspan.End()
			if err != nil {
				return nil, err
			}
			return attribs, nil // no attributes set yet
		}
		_, subspan = tracer.Start(ctx, "io.ReadAll")
		msgBytes, err = io.ReadAll(source)
		source.(*os.File).Close()
		subspan.End()
	} else {
		_, subspan := tracer.Start(ctx, "io.ReadAll")
		msgBytes, err = io.ReadAll(source)
		subspan.End()
	}
	if err != nil {
		return nil, err
	}
	if len(msgBytes) > 0 {
		err = msgpack.Unmarshal(msgBytes, &attribs)
		if err != nil {
			return nil, err
		}
	}
	_, subspan := tracer.Start(ctx, "metaCache.PushToCache")
	err = b.metaCache.PushToCache(b.cacheKey(path), attribs)
	subspan.End()
	if err != nil {
		return nil, err
	}
	return attribs, nil
}
// IsMetaFile reports whether path is one of this backend's auxiliary files:
// the messagepack metadata file (".mpk") or its lock file (".mlock").
func (MessagePackBackend) IsMetaFile(path string) bool {
	for _, suffix := range []string{".mpk", ".mlock"} {
		if strings.HasSuffix(path, suffix) {
			return true
		}
	}
	return false
}
// Purge drops the cached metadata for path and removes its messagepack
// metadata file from disk.
func (b MessagePackBackend) Purge(_ context.Context, path string) error {
	err := b.metaCache.RemoveMetadata(b.cacheKey(path))
	if err != nil {
		return err
	}
	return os.Remove(b.MetadataPath(path))
}
// Rename moves the metadata for oldPath to newPath: the cache entry is
// migrated (when present) and the on-disk messagepack file is renamed.
func (b MessagePackBackend) Rename(oldPath, newPath string) error {
	cached := map[string][]byte{}
	if err := b.metaCache.PullFromCache(b.cacheKey(oldPath), &cached); err == nil {
		// carry the cached attributes over to the new key
		if err := b.metaCache.PushToCache(b.cacheKey(newPath), cached); err != nil {
			return err
		}
	}
	if err := b.metaCache.RemoveMetadata(b.cacheKey(oldPath)); err != nil {
		return err
	}
	return os.Rename(b.MetadataPath(oldPath), b.MetadataPath(newPath))
}
// MetadataPath returns the path of the messagepack file holding the metadata
// for the given node path (the node path suffixed with ".mpk").
func (MessagePackBackend) MetadataPath(path string) string { return path + ".mpk" }
// LockfilePath returns the path of the lock file guarding the metadata of
// the given node path (the node path suffixed with ".mlock").
func (MessagePackBackend) LockfilePath(path string) string { return path + ".mlock" }
// Lock takes the per-node metadata lock for path and returns an UnlockFunc
// that closes and removes the lock file again.
func (b MessagePackBackend) Lock(path string) (UnlockFunc, error) {
	lockPath := b.LockfilePath(path)
	mlock, err := lockedfile.OpenFile(lockPath, os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		return nil, err
	}
	unlock := func() error {
		if err := mlock.Close(); err != nil {
			return err
		}
		return os.Remove(lockPath)
	}
	return unlock, nil
}
// cacheKey derives the metadata cache key for path by stripping the storage
// root. rootPath is guaranteed to have no trailing slash; the separator is
// stripped as well because some stores drop a leading slash, which would
// cause key confusion.
func (b MessagePackBackend) cacheKey(path string) string {
	prefix := b.rootPath + "/"
	return strings.TrimPrefix(path, prefix)
}

View File

@@ -0,0 +1,129 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package metadata
import (
"context"
"errors"
"io"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
)
// tracer is the package-level tracer used by all metadata backends.
var tracer trace.Tracer
func init() {
	// NOTE(review): the tracer name still references the upstream
	// github.com/cs3org/reva module path — confirm whether it should follow
	// the fork's module path instead.
	tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/metadata")
}
// errUnconfiguredError is returned by the NullBackend stub for every
// operation to signal that no real metadata backend has been configured.
var errUnconfiguredError = errors.New("no metadata backend configured. Bailing out")
// UnlockFunc releases a metadata lock previously acquired via Backend.Lock.
type UnlockFunc func() error
// Backend defines the interface for file attribute backends
type Backend interface {
	// Name returns the name of the backend implementation.
	Name() string
	// All returns all extended attributes of a node.
	All(ctx context.Context, path string) (map[string][]byte, error)
	// Get returns the value of a single attribute.
	Get(ctx context.Context, path, key string) ([]byte, error)
	// GetInt64 returns the value of a single attribute parsed as int64.
	GetInt64(ctx context.Context, path, key string) (int64, error)
	// List returns the names of all attributes of a node.
	List(ctx context.Context, path string) (attribs []string, err error)
	// Set stores a single attribute.
	Set(ctx context.Context, path, key string, val []byte) error
	// SetMultiple stores several attributes in one operation.
	SetMultiple(ctx context.Context, path string, attribs map[string][]byte, acquireLock bool) error
	// Remove deletes a single attribute.
	Remove(ctx context.Context, path, key string, acquireLock bool) error
	// Lock takes the per-node metadata lock and returns the unlock function.
	Lock(path string) (UnlockFunc, error)
	// Purge removes all metadata of a node (including cached entries).
	Purge(ctx context.Context, path string) error
	// Rename moves the metadata from oldPath to newPath.
	Rename(oldPath, newPath string) error
	// IsMetaFile reports whether path is an auxiliary file of the backend.
	IsMetaFile(path string) bool
	// MetadataPath returns the path of the file holding the metadata.
	MetadataPath(path string) string
	// LockfilePath returns the path of the lock file.
	LockfilePath(path string) string
	// AllWithLockedSource reads all attributes from an already-locked source.
	AllWithLockedSource(ctx context.Context, path string, source io.Reader) (map[string][]byte, error)
}
// NullBackend is the default stub backend, used to enforce the configuration
// of a proper backend. Every operation fails with errUnconfiguredError.
type NullBackend struct{}
// Name returns the name of the backend
func (NullBackend) Name() string { return "null" }
// All reads all extended attributes for a node; always fails on the stub.
func (NullBackend) All(ctx context.Context, path string) (map[string][]byte, error) {
	return nil, errUnconfiguredError
}
// Get an extended attribute value for the given key; always fails on the stub.
func (NullBackend) Get(ctx context.Context, path, key string) ([]byte, error) {
	return []byte{}, errUnconfiguredError
}
// GetInt64 reads a string as int64 from the xattrs; always fails on the stub.
func (NullBackend) GetInt64(ctx context.Context, path, key string) (int64, error) {
	return 0, errUnconfiguredError
}
// List retrieves a list of names of extended attributes associated with the
// given path in the file system; always fails on the stub.
func (NullBackend) List(ctx context.Context, path string) ([]string, error) {
	return nil, errUnconfiguredError
}
// Set sets one attribute for the given path; always fails on the stub.
func (NullBackend) Set(ctx context.Context, path string, key string, val []byte) error {
	return errUnconfiguredError
}
// SetMultiple sets a set of attribute for the given path; always fails on the stub.
func (NullBackend) SetMultiple(ctx context.Context, path string, attribs map[string][]byte, acquireLock bool) error {
	return errUnconfiguredError
}
// Remove removes an extended attribute key; always fails on the stub.
func (NullBackend) Remove(ctx context.Context, path string, key string, acquireLock bool) error {
	return errUnconfiguredError
}
// Lock locks the metadata for the given path. The stub returns a nil
// UnlockFunc and no error, i.e. locking is a silent no-op here.
func (NullBackend) Lock(path string) (UnlockFunc, error) {
	return nil, nil
}
// IsMetaFile returns whether the given path represents a meta file
func (NullBackend) IsMetaFile(path string) bool { return false }
// Purge purges the data of a given path from any cache that might hold it
func (NullBackend) Purge(_ context.Context, purges string) error { return errUnconfiguredError }
// Rename moves the data for a given path to a new path
func (NullBackend) Rename(oldPath, newPath string) error { return errUnconfiguredError }
// MetadataPath returns the path of the file holding the metadata for the given path
func (NullBackend) MetadataPath(path string) string { return "" }
// LockfilePath returns the path of the lock file
func (NullBackend) LockfilePath(path string) string { return "" }
// AllWithLockedSource reads all extended attributes from the given reader
// The path argument is used for storing the data in the cache
func (NullBackend) AllWithLockedSource(ctx context.Context, path string, source io.Reader) (map[string][]byte, error) {
	return nil, errUnconfiguredError
}

View File

@@ -0,0 +1,29 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
//go:build !freebsd
package prefixes
// The default namespace for ocis. As non root users can only manipulate
// the user. namespace, which is what is used to store ownCloud specific
// metadata. To prevent name collisions with other apps, we are going to
// introduce a sub namespace "user.ocis."
const (
	// OcPrefix is the sub namespace prefix used for all OpenCloud specific
	// extended attributes (non-FreeBSD builds include the "user." namespace).
	OcPrefix string = "user.oc."
)

View File

@@ -0,0 +1,28 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
//go:build freebsd
package prefixes
// On FreeBSD the `user` namespace is implied through a separate syscall argument
// and will fail with invalid argument when you try to start an xattr name with user. or system.
// For that reason we drop the superfluous user. prefix for FreeBSD specifically.
const (
	// OcPrefix is the sub namespace prefix used for all OpenCloud specific
	// extended attributes.
	// FIX: this constant was still named OcisPrefix (value "ocis.") while the
	// rest of the package references prefixes.OcPrefix, so the freebsd build
	// tag could not compile; renamed and aligned the value with the
	// non-FreeBSD "user.oc." minus the implied "user." namespace.
	OcPrefix string = "oc."
)

View File

@@ -0,0 +1,103 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package prefixes
// Declare a list of xattr keys
// TODO the below comment is currently copied from the owncloud driver, revisit
// Currently,extended file attributes have four separated
// namespaces (user, trusted, security and system) followed by a dot.
// A non root user can only manipulate the user. namespace, which is what
// we will use to store ownCloud specific metadata. To prevent name
// collisions with other apps We are going to introduce a sub namespace
// "user.oc." in the xattrs_prefix*.go files.
const (
	TypeAttr      string = OcPrefix + "type"
	IDAttr        string = OcPrefix + "id"
	ParentidAttr  string = OcPrefix + "parentid"
	OwnerIDAttr   string = OcPrefix + "owner.id"
	OwnerIDPAttr  string = OcPrefix + "owner.idp"
	OwnerTypeAttr string = OcPrefix + "owner.type"
	// the base name of the node
	// updated when the file is renamed or moved
	NameAttr     string = OcPrefix + "name"
	BlobIDAttr   string = OcPrefix + "blobid"
	BlobsizeAttr string = OcPrefix + "blobsize"
	// statusPrefix is the prefix for the node status
	StatusPrefix string = OcPrefix + "nodestatus"
	// scanPrefix is the prefix for the virus scan status and date
	ScanStatusPrefix string = OcPrefix + "scanstatus"
	ScanDatePrefix   string = OcPrefix + "scandate"
	// grantPrefix is the prefix for sharing related extended attributes
	GrantPrefix         string = OcPrefix + "grant."
	GrantUserAcePrefix  string = OcPrefix + "grant." + UserAcePrefix
	GrantGroupAcePrefix string = OcPrefix + "grant." + GroupAcePrefix
	MetadataPrefix      string = OcPrefix + "md."
	// favorite flag, per user
	FavPrefix string = OcPrefix + "fav."
	// a temporary etag for a folder that is removed when the mtime propagation happens
	TmpEtagAttr     string = OcPrefix + "tmp.etag"
	ReferenceAttr   string = OcPrefix + "cs3.ref"      // arbitrary metadata
	ChecksumPrefix  string = OcPrefix + "cs."          // followed by the algorithm, eg. user.oc.cs.sha1
	TrashOriginAttr string = OcPrefix + "trash.origin" // trash origin
	// we use a single attribute to enable or disable propagation of both: synctime and treesize
	// The propagation attribute is set to '1' at the top of the (sub)tree. Propagation will stop at
	// that node.
	PropagationAttr string = OcPrefix + "propagation"
	// we need mtime to keep mtime in sync with the metadata
	MTimeAttr string = OcPrefix + "mtime"
	// the tree modification time of the tree below this node,
	// propagated when synctime_accounting is true and
	// the propagation attribute is set to "1"
	// stored as a readable time.RFC3339Nano
	TreeMTimeAttr string = OcPrefix + "tmtime"
	// the deletion/disabled time of a space or node
	// used to mark space roots as disabled
	// stored as a readable time.RFC3339Nano
	DTimeAttr string = OcPrefix + "dtime"
	// the size of the tree below this node,
	// propagated when treesize_accounting is true and
	// the propagation attribute is set to "1"
	// stored as uint64, little endian
	TreesizeAttr string = OcPrefix + "treesize"
	// the quota for the storage space / tree, regardless who accesses it
	QuotaAttr string = OcPrefix + "quota"
	// the name given to a storage space. It should not contain any semantics as its only purpose is to be read.
	SpaceIDAttr          string = OcPrefix + "space.id"
	SpaceNameAttr        string = OcPrefix + "space.name"
	SpaceTypeAttr        string = OcPrefix + "space.type"
	SpaceDescriptionAttr string = OcPrefix + "space.description"
	SpaceReadmeAttr      string = OcPrefix + "space.readme"
	SpaceImageAttr       string = OcPrefix + "space.image"
	SpaceAliasAttr       string = OcPrefix + "space.alias"
	// ACE principal prefixes used in grant attribute names
	UserAcePrefix  string = "u:"
	GroupAcePrefix string = "g:"
)

View File

@@ -0,0 +1,295 @@
// Copyright 2018-2023 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package metadata
import (
"context"
"io"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/opencloud-eu/reva/v2/pkg/storage/cache"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/filelocks"
"github.com/pkg/errors"
"github.com/pkg/xattr"
"github.com/rogpeppe/go-internal/lockedfile"
)
// XattrsBackend stores the file attributes in extended attributes
type XattrsBackend struct {
	rootPath  string                  // storage root; stripped from paths to build cache keys
	metaCache cache.FileMetadataCache // write-through cache for node metadata
}
// NewXattrsBackend returns a new XattrsBackend instance rooted at rootPath.
// FIX: the rootPath parameter was previously dropped, leaving b.rootPath
// empty so cacheKey never stripped the storage root from cache keys
// (inconsistent with MessagePackBackend). The comment also named the wrong
// constructor ("NewMessageBackend").
func NewXattrsBackend(rootPath string, o cache.Config) XattrsBackend {
	return XattrsBackend{
		rootPath:  rootPath,
		metaCache: cache.GetFileMetadataCache(o),
	}
}
// Name returns the name of the backend ("xattrs").
func (XattrsBackend) Name() string { return "xattrs" }
// Get an extended attribute value for the given key.
// No file locking is involved here as reading a single xattr is
// considered to be atomic. The metadata cache is consulted first; on a
// miss (or an empty cached value) the attribute is read from disk.
func (b XattrsBackend) Get(ctx context.Context, path, key string) ([]byte, error) {
	cached := map[string][]byte{}
	if err := b.metaCache.PullFromCache(b.cacheKey(path), &cached); err == nil {
		if val := cached[key]; len(val) > 0 {
			return val, nil
		}
	}
	return xattr.Get(path, key)
}
// GetInt64 reads the attribute stored under key and parses it as a
// base-10 int64.
func (b XattrsBackend) GetInt64(ctx context.Context, filePath, key string) (int64, error) {
	raw, err := b.Get(ctx, filePath, key)
	if err != nil {
		return 0, err
	}
	parsed, err := strconv.ParseInt(string(raw), 10, 64)
	if err != nil {
		return 0, err
	}
	return parsed, nil
}
// List retrieves a list of names of extended attributes associated with the
// given path in the file system, retrying under the node's lock file if the
// first attempt fails.
func (b XattrsBackend) List(ctx context.Context, filePath string) (attribs []string, err error) {
	return b.list(ctx, filePath, true)
}
// list returns the names of all extended attributes of filePath. A failed
// first attempt is retried once, optionally while holding the node's lock
// file, to avoid racing with concurrent writers.
// NOTE(review): the lock file here is filePath+filelocks.LockFileSuffix while
// SetMultiple locks via b.LockfilePath(path) (".mlock") — confirm whether
// these are meant to be the same lock.
func (b XattrsBackend) list(ctx context.Context, filePath string, acquireLock bool) (attribs []string, err error) {
	attrs, err := xattr.List(filePath)
	if err == nil {
		return attrs, nil
	}
	// listing xattrs failed, try again, either with lock or without
	if acquireLock {
		f, err := lockedfile.OpenFile(filePath+filelocks.LockFileSuffix, os.O_CREATE|os.O_WRONLY, 0600)
		if err != nil {
			return nil, err
		}
		// the lock file is closed and removed once the retry is done
		defer cleanupLockfile(ctx, f)
	}
	return xattr.List(filePath)
}
// All reads all extended attributes for a node, protected by a
// shared file lock (the cache is consulted first; the lock is only taken on
// the retry path inside list).
func (b XattrsBackend) All(ctx context.Context, path string) (map[string][]byte, error) {
	return b.getAll(ctx, path, false, true)
}
// getAll collects all extended attributes of path into a map. When skipCache
// is false a cached copy is returned if available; otherwise the attributes
// are read from disk (locking during the listing retry when acquireLock is
// true) and the cache is refreshed with the result.
func (b XattrsBackend) getAll(ctx context.Context, path string, skipCache, acquireLock bool) (map[string][]byte, error) {
	attribs := map[string][]byte{}
	if !skipCache {
		err := b.metaCache.PullFromCache(b.cacheKey(path), &attribs)
		if err == nil {
			return attribs, err
		}
	}
	attrNames, err := b.list(ctx, path, acquireLock)
	if err != nil {
		return nil, err
	}
	if len(attrNames) == 0 {
		return attribs, nil
	}
	var (
		xerrs = 0
		xerr  error
	)
	// error handling: Count if there are errors while reading all attribs.
	// if there were any, return an error.
	attribs = make(map[string][]byte, len(attrNames))
	for _, name := range attrNames {
		var val []byte
		// an attribute disappearing between List and Get (IsAttrUnset) is
		// tolerated; any other read error is counted and reported below
		if val, xerr = xattr.Get(path, name); xerr != nil && !IsAttrUnset(xerr) {
			xerrs++
		} else {
			attribs[name] = val
		}
	}
	if xerrs > 0 {
		return nil, errors.Wrap(xerr, "Failed to read all xattrs")
	}
	err = b.metaCache.PushToCache(b.cacheKey(path), attribs)
	if err != nil {
		return nil, err
	}
	return attribs, nil
}
// Set stores a single attribute for the given path, taking the per-node
// lock for the duration of the write.
func (b XattrsBackend) Set(ctx context.Context, path string, key string, val []byte) (err error) {
	attribs := map[string][]byte{key: val}
	return b.SetMultiple(ctx, path, attribs, true)
}
// SetMultiple sets a set of attribute for the given path
func (b XattrsBackend) SetMultiple(ctx context.Context, path string, attribs map[string][]byte, acquireLock bool) (err error) {
if acquireLock {
err := os.MkdirAll(filepath.Dir(path), 0600)
if err != nil {
return err
}
lockedFile, err := lockedfile.OpenFile(b.LockfilePath(path), os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
return err
}
defer cleanupLockfile(ctx, lockedFile)
}
// error handling: Count if there are errors while setting the attribs.
// if there were any, return an error.
var (
xerrs = 0
xerr error
)
for key, val := range attribs {
if xerr = xattr.Set(path, key, val); xerr != nil {
// log
xerrs++
}
}
if xerrs > 0 {
return errors.Wrap(xerr, "Failed to set all xattrs")
}
attribs, err = b.getAll(ctx, path, true, false)
if err != nil {
return err
}
return b.metaCache.PushToCache(b.cacheKey(path), attribs)
}
// Remove an extended attribute key from path, optionally while holding the
// node's lock file, then refresh the cache from disk.
// NOTE(review): the lock file here is path+filelocks.LockFileSuffix while
// SetMultiple locks b.LockfilePath(path) (".mlock") — confirm whether these
// should use the same lock file.
func (b XattrsBackend) Remove(ctx context.Context, path string, key string, acquireLock bool) error {
	if acquireLock {
		lockedFile, err := lockedfile.OpenFile(path+filelocks.LockFileSuffix, os.O_CREATE|os.O_WRONLY, 0600)
		if err != nil {
			return err
		}
		defer cleanupLockfile(ctx, lockedFile)
	}
	err := xattr.Remove(path, key)
	if err != nil {
		return err
	}
	// re-read the remaining attributes and refresh the cache
	attribs, err := b.getAll(ctx, path, true, false)
	if err != nil {
		return err
	}
	return b.metaCache.PushToCache(b.cacheKey(path), attribs)
}
// IsMetaFile returns whether the given path represents a meta file (for the
// xattrs backend only the ".meta.lock" lock files are auxiliary files).
func (XattrsBackend) IsMetaFile(path string) bool { return strings.HasSuffix(path, ".meta.lock") }
// Purge purges the data of a given path: all OpenCloud-prefixed extended
// attributes are removed from the node (if it still exists) and the cached
// metadata entry is dropped.
func (b XattrsBackend) Purge(ctx context.Context, path string) error {
	_, err := os.Stat(path)
	if err == nil {
		// node still exists: strip our attributes from it
		attribs, err := b.getAll(ctx, path, true, true)
		if err != nil {
			return err
		}
		for attr := range attribs {
			if strings.HasPrefix(attr, prefixes.OcPrefix) {
				err := xattr.Remove(path, attr)
				if err != nil {
					return err
				}
			}
		}
	}
	// drop the cache entry even if the node itself is already gone
	return b.metaCache.RemoveMetadata(b.cacheKey(path))
}
// Rename migrates the cached metadata from oldPath to newPath. The extended
// attributes themselves travel with the file, so only the cache needs
// updating here.
func (b XattrsBackend) Rename(oldPath, newPath string) error {
	cached := map[string][]byte{}
	if err := b.metaCache.PullFromCache(b.cacheKey(oldPath), &cached); err == nil {
		if err := b.metaCache.PushToCache(b.cacheKey(newPath), cached); err != nil {
			return err
		}
	}
	return b.metaCache.RemoveMetadata(b.cacheKey(oldPath))
}
// MetadataPath returns the path of the file holding the metadata for the
// given path; for the xattrs backend that is the node itself.
func (XattrsBackend) MetadataPath(path string) string { return path }
// LockfilePath returns the path of the lock file guarding the metadata of
// the given node path (the node path suffixed with ".mlock").
func (XattrsBackend) LockfilePath(path string) string { return path + ".mlock" }
// Lock takes the per-node metadata lock for path and returns an UnlockFunc
// that closes and removes the lock file again.
func (b XattrsBackend) Lock(path string) (UnlockFunc, error) {
	lockPath := b.LockfilePath(path)
	mlock, err := lockedfile.OpenFile(lockPath, os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		return nil, err
	}
	unlock := func() error {
		if err := mlock.Close(); err != nil {
			return err
		}
		return os.Remove(lockPath)
	}
	return unlock, nil
}
// cleanupLockfile closes the given lock file and removes it from disk.
// Errors are deliberately ignored: this is best-effort cleanup on defer.
func cleanupLockfile(_ context.Context, f *lockedfile.File) {
	name := f.Name()
	_ = f.Close()
	_ = os.Remove(name)
}
// AllWithLockedSource reads all extended attributes from the given reader.
// The path argument is used for storing the data in the cache. The xattrs
// backend ignores the reader and reads the attributes from the node itself.
func (b XattrsBackend) AllWithLockedSource(ctx context.Context, path string, _ io.Reader) (map[string][]byte, error) {
	return b.All(ctx, path)
}
// cacheKey derives the metadata cache key for path by stripping the storage
// root. rootPath is guaranteed to have no trailing slash; the separator is
// stripped as well because some stores drop a leading slash, which would
// cause key confusion.
func (b XattrsBackend) cacheKey(path string) string {
	prefix := b.rootPath + "/"
	return strings.TrimPrefix(path, prefix)
}

View File

@@ -0,0 +1,138 @@
// Copyright 2018-2023 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package migrator
import (
"context"
"errors"
"os"
"path/filepath"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
)
func init() {
	// register this migration under its ordinal so the migrator runs it in sequence
	registerMigration("0001", Migration0001{})
}
// Migration0001 moves the flat "nodes" directory into the per-space
// "spaces/<id>/nodes" layout.
type Migration0001 struct{}
// Migrate creates the spaces directory structure: every node directly below
// the storage root (parent == node.RootID) is treated as a personal space
// root, its tree is moved into the spaces layout and a spacetype symlink is
// created. Failures on individual spaces are logged and skipped.
func (m Migration0001) Migrate(migrator *Migrator) (Result, error) {
	migrator.log.Info().Msg("Migrating spaces directory structure...")
	// create spaces folder and iterate over existing nodes to populate it
	nodesPath := filepath.Join(migrator.lu.InternalRoot(), "nodes")
	fi, err := os.Stat(nodesPath)
	if err == nil && fi.IsDir() {
		f, err := os.Open(nodesPath)
		if err != nil {
			return stateFailed, err
		}
		nodes, err := f.Readdir(0)
		// FIX: the directory handle was never closed (fd leak)
		f.Close()
		if err != nil {
			return stateFailed, err
		}
		for _, n := range nodes {
			nodePath := filepath.Join(nodesPath, n.Name())
			attr, err := migrator.lu.MetadataBackend().Get(context.Background(), nodePath, prefixes.ParentidAttr)
			if err == nil && string(attr) == node.RootID {
				// a node parented at the root is a (personal) space root
				if err := m.moveNode(migrator, n.Name(), n.Name()); err != nil {
					migrator.log.Error().Err(err).
						Str("space", n.Name()).
						Msg("could not move space")
					continue
				}
				m.linkSpaceNode(migrator, "personal", n.Name())
			}
		}
		// TODO delete nodesPath if empty
	}
	return stateSucceeded, nil
}
// Rollback is not implemented; migration 0001 cannot be reverted.
func (Migration0001) Rollback(_ *Migrator) (Result, error) {
	return stateFailed, errors.New("rollback not implemented")
}
// moveNode moves all children of the node nodeID from the flat "nodes"
// directory into the pathified spaces layout of spaceID, recursing into
// child directories. Rename failures are logged but do not abort the walk.
func (m Migration0001) moveNode(migrator *Migrator, spaceID, nodeID string) error {
	dirPath := filepath.Join(migrator.lu.InternalRoot(), "nodes", nodeID)
	f, err := os.Open(dirPath)
	if err != nil {
		return err
	}
	children, err := f.Readdir(0)
	// FIX: the directory handle was never closed; this leaked one fd per
	// directory on every recursion level
	f.Close()
	if err != nil {
		return err
	}
	for _, child := range children {
		// renamed from old/new: "new" shadowed the builtin
		oldPath := filepath.Join(migrator.lu.InternalRoot(), "nodes", child.Name())
		newPath := filepath.Join(migrator.lu.InternalRoot(), "spaces", lookup.Pathify(spaceID, 1, 2), "nodes", lookup.Pathify(child.Name(), 4, 2))
		if err := os.Rename(oldPath, newPath); err != nil {
			migrator.log.Error().Err(err).
				Str("space", spaceID).
				Str("nodes", child.Name()).
				Str("oldpath", oldPath).
				Str("newpath", newPath).
				Msg("could not rename node")
		}
		if child.IsDir() {
			if err := m.moveNode(migrator, spaceID, child.Name()); err != nil {
				return err
			}
		}
	}
	return nil
}
// linkSpaceNode creates a new symbolic link for a space with the given
// space type and node id, pointing from the spacetypes index into the spaces
// tree. An existing link with a different target is only warned about, not
// replaced. Errors are logged, not returned (best-effort).
func (m Migration0001) linkSpaceNode(migrator *Migrator, spaceType, spaceID string) {
	spaceTypesPath := filepath.Join(migrator.lu.InternalRoot(), "spacetypes", spaceType, spaceID)
	expectedTarget := "../../spaces/" + lookup.Pathify(spaceID, 1, 2) + "/nodes/" + lookup.Pathify(spaceID, 4, 2)
	linkTarget, err := os.Readlink(spaceTypesPath)
	if errors.Is(err, os.ErrNotExist) {
		// no link yet: create it
		err = os.Symlink(expectedTarget, spaceTypesPath)
		if err != nil {
			migrator.log.Error().Err(err).
				Str("space_type", spaceType).
				Str("space", spaceID).
				Msg("could not create symlink")
		}
	} else {
		if err != nil {
			migrator.log.Error().Err(err).
				Str("space_type", spaceType).
				Str("space", spaceID).
				Msg("could not read symlink")
		}
		if linkTarget != expectedTarget {
			migrator.log.Warn().
				Str("space_type", spaceType).
				Str("space", spaceID).
				Str("expected", expectedTarget).
				Str("actual", linkTarget).
				Msg("expected a different link target")
		}
	}
}

View File

@@ -0,0 +1,150 @@
// Copyright 2018-2023 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package migrator
import (
"errors"
"io"
"os"
"path/filepath"
"github.com/opencloud-eu/reva/v2/pkg/logger"
)
func init() {
	// register this migration under its ordinal so the migrator runs it in sequence
	registerMigration("0002", Migration0002{})
}
// Migration0002 moves the "spacetypes" symlink tree into "indexes/by-type".
type Migration0002 struct{}
// Migrate migrates spacetypes to indexes: each spacetype's symlinks are moved
// to indexes/by-type and the now-empty spacetypes directory is removed.
// Per-spacetype failures are logged and skipped.
func (m Migration0002) Migrate(migrator *Migrator) (Result, error) {
	migrator.log.Info().Msg("Migrating space types indexes...")
	spaceTypesPath := filepath.Join(migrator.lu.InternalRoot(), "spacetypes")
	fi, err := os.Stat(spaceTypesPath)
	if err == nil && fi.IsDir() {
		f, err := os.Open(spaceTypesPath)
		if err != nil {
			return stateFailed, err
		}
		spaceTypes, err := f.Readdir(0)
		// FIX: the directory handle was never closed (fd leak)
		f.Close()
		if err != nil {
			return stateFailed, err
		}
		for _, st := range spaceTypes {
			err := m.moveSpaceType(migrator, st.Name())
			if err != nil {
				logger.New().Error().Err(err).
					Str("space", st.Name()).
					Msg("could not move space")
				continue
			}
		}
		// delete spacetypespath
		d, err := os.Open(spaceTypesPath)
		if err != nil {
			logger.New().Error().Err(err).
				Str("spacetypesdir", spaceTypesPath).
				Msg("could not open spacetypesdir")
			return stateFailed, nil
		}
		defer d.Close()
		_, err = d.Readdirnames(1) // Or f.Readdir(1)
		// FIX: use errors.Is instead of == so a wrapped io.EOF is recognized
		if errors.Is(err, io.EOF) {
			// directory is empty we can delete
			err := os.Remove(spaceTypesPath)
			if err != nil {
				logger.New().Error().Err(err).
					Str("spacetypesdir", d.Name()).
					Msg("could not delete")
			}
		} else {
			logger.New().Error().Err(err).
				Str("spacetypesdir", d.Name()).
				Msg("could not delete, not empty")
		}
	}
	return stateSucceeded, nil
}
// Rollback is not implemented; migration 0002 cannot be reverted.
func (Migration0002) Rollback(_ *Migrator) (Result, error) {
	return stateFailed, errors.New("rollback not implemented")
}
// moveSpaceType relinks all symlinks of a single space type from
// "spacetypes/<type>/" to "indexes/by-type/<type>/" and removes the old
// directory afterwards. Broken entries are logged and skipped; the old
// directory is only removed if it ended up empty.
// NOTE(review): the *os.File returned by os.Open is never closed here.
func (m Migration0002) moveSpaceType(migrator *Migrator, spaceType string) error {
	dirPath := filepath.Join(migrator.lu.InternalRoot(), "spacetypes", spaceType)
	f, err := os.Open(dirPath)
	if err != nil {
		return err
	}
	children, err := f.Readdir(0)
	if err != nil {
		return err
	}
	for _, child := range children {
		old := filepath.Join(migrator.lu.InternalRoot(), "spacetypes", spaceType, child.Name())
		target, err := os.Readlink(old)
		if err != nil {
			logger.New().Error().Err(err).
				Str("space", spaceType).
				Str("nodes", child.Name()).
				Str("oldLink", old).
				Msg("could not read old symlink")
			continue
		}
		newDir := filepath.Join(migrator.lu.InternalRoot(), "indexes", "by-type", spaceType)
		if err := os.MkdirAll(newDir, 0700); err != nil {
			// NOTE(review): message looks copy-pasted; this is a MkdirAll
			// failure, not a symlink read failure
			logger.New().Error().Err(err).
				Str("space", spaceType).
				Str("nodes", child.Name()).
				Str("targetDir", newDir).
				Msg("could not read old symlink")
		}
		newLink := filepath.Join(newDir, child.Name())
		// the new link lives one level deeper, hence the ".." prefix on the target
		if err := os.Symlink(filepath.Join("..", target), newLink); err != nil {
			logger.New().Error().Err(err).
				Str("space", spaceType).
				Str("nodes", child.Name()).
				Str("oldpath", old).
				Str("newpath", newLink).
				Msg("could not rename node")
			continue
		}
		if err := os.Remove(old); err != nil {
			logger.New().Error().Err(err).
				Str("space", spaceType).
				Str("nodes", child.Name()).
				Str("oldLink", old).
				Msg("could not remove old symlink")
			continue
		}
	}
	// only succeeds when every child was moved away
	if err := os.Remove(dirPath); err != nil {
		logger.New().Error().Err(err).
			Str("space", spaceType).
			Str("dir", dirPath).
			Msg("could not remove spaces folder, folder probably not empty")
	}
	return nil
}

View File

@@ -0,0 +1,120 @@
// Copyright 2018-2023 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package migrator
import (
"context"
"errors"
"io/fs"
"os"
"path/filepath"
"strings"
"github.com/opencloud-eu/reva/v2/pkg/storage/cache"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata"
)
// init registers this migration under its sequence ID.
func init() {
	registerMigration("0003", Migration0003{})
}

// Migration0003 converts node metadata from the xattrs backend to the
// messagepack backend.
type Migration0003 struct{}
// Migrate migrates the file metadata to the current backend.
// Only the xattrs -> messagepack path is supported; any other combination
// returns stateSucceededRunAgain so the migration is retried on the next run.
func (m Migration0003) Migrate(migrator *Migrator) (Result, error) {
	bod := lookup.DetectBackendOnDisk(migrator.lu.InternalRoot())
	if bod == "" {
		return stateFailed, errors.New("could not detect metadata backend on disk")
	}
	if bod != "xattrs" || migrator.lu.MetadataBackend().Name() != "messagepack" {
		return stateSucceededRunAgain, nil
	}
	migrator.log.Info().Str("root", migrator.lu.InternalRoot()).Msg("Migrating to messagepack metadata backend...")
	xattrs := metadata.NewXattrsBackend(migrator.lu.InternalRoot(), cache.Config{})
	mpk := metadata.NewMessagePackBackend(migrator.lu.InternalRoot(), cache.Config{})
	// glob errors are ignored; a nil slice simply skips the loop
	spaces, _ := filepath.Glob(filepath.Join(migrator.lu.InternalRoot(), "spaces", "*", "*"))
	for _, space := range spaces {
		err := filepath.WalkDir(filepath.Join(space, "nodes"), func(path string, _ fs.DirEntry, err error) error {
			// Do not continue on error
			if err != nil {
				return err
			}
			if strings.HasSuffix(path, ".mpk") || strings.HasSuffix(path, ".flock") {
				// None of our business
				return nil
			}
			fi, err := os.Lstat(path)
			if err != nil {
				return err
			}
			// only migrate directories and regular files (skips symlinks etc.)
			if !fi.IsDir() && !fi.Mode().IsRegular() {
				return nil
			}
			mpkPath := mpk.MetadataPath(path)
			_, err = os.Stat(mpkPath)
			if err == nil {
				// messagepack file already exists -> assume already migrated
				return nil
			}
			attribs, err := xattrs.All(context.Background(), path)
			if err != nil {
				migrator.log.Error().Err(err).Str("path", path).Msg("error converting file")
				return err
			}
			if len(attribs) == 0 {
				return nil
			}
			err = mpk.SetMultiple(context.Background(), path, attribs, false)
			if err != nil {
				migrator.log.Error().Err(err).Str("path", path).Msg("error setting attributes")
				return err
			}
			// best effort: the old xattrs are removed, failures are only
			// logged at debug level
			for k := range attribs {
				err = xattrs.Remove(context.Background(), path, k, false)
				if err != nil {
					migrator.log.Debug().Err(err).Str("path", path).Msg("error removing xattr")
				}
			}
			return nil
		})
		if err != nil {
			// a failed space does not abort the migration of the other spaces
			migrator.log.Error().Err(err).Msg("error migrating nodes to messagepack metadata backend")
		}
	}
	migrator.log.Info().Msg("done.")
	return stateSucceeded, nil
}
// Rollback is not implemented: the messagepack -> xattrs direction is not
// supported.
func (Migration0003) Rollback(_ *Migrator) (Result, error) {
	return stateFailed, errors.New("rollback not implemented")
}

View File

@@ -0,0 +1,203 @@
// Copyright 2018-2023 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package migrator
import (
"os"
"path/filepath"
"strings"
"github.com/shamaton/msgpack/v2"
)
// init registers this migration under its sequence ID.
func init() {
	registerMigration("0004", Migration0004{})
}

// Migration0004 converts the directory-of-symlinks space indexes into single
// messagepack files.
type Migration0004 struct{}
// Migrate migrates the directory tree based space indexes to messagepack.
// It handles the by-user-id, by-group-id and by-type index trees. Listing
// errors are only warnings (a nil slice skips the loop), and per-index
// failures are logged but do not fail the migration.
func (Migration0004) Migrate(migrator *Migrator) (Result, error) {
	root := migrator.lu.InternalRoot()
	// migrate user indexes
	users, err := os.ReadDir(filepath.Join(root, "indexes", "by-user-id"))
	if err != nil {
		migrator.log.Warn().Err(err).Msg("error listing user indexes")
	}
	for _, user := range users {
		if !user.IsDir() {
			// already a .mpk file (or something else) -> nothing to do
			continue
		}
		id := user.Name()
		indexPath := filepath.Join(root, "indexes", "by-user-id", id+".mpk")
		dirIndexPath := filepath.Join(root, "indexes", "by-user-id", id)
		migrator.log.Info().Str("root", migrator.lu.InternalRoot()).Msg("Migrating " + indexPath + " to messagepack index format...")
		err := migrateSpaceIndex(indexPath, dirIndexPath)
		if err != nil {
			migrator.log.Error().Err(err).Str("path", dirIndexPath).Msg("error migrating index")
		}
	}
	// migrate group indexes
	groups, err := os.ReadDir(filepath.Join(root, "indexes", "by-group-id"))
	if err != nil {
		migrator.log.Warn().Err(err).Msg("error listing group indexes")
	}
	for _, group := range groups {
		if !group.IsDir() {
			continue
		}
		id := group.Name()
		indexPath := filepath.Join(root, "indexes", "by-group-id", id+".mpk")
		dirIndexPath := filepath.Join(root, "indexes", "by-group-id", id)
		migrator.log.Info().Str("root", migrator.lu.InternalRoot()).Msg("Migrating " + indexPath + " to messagepack index format...")
		err := migrateSpaceIndex(indexPath, dirIndexPath)
		if err != nil {
			migrator.log.Error().Err(err).Str("path", dirIndexPath).Msg("error migrating index")
		}
	}
	// migrate the by-type indexes for the known space types
	for _, spaceType := range []string{"personal", "project", "share"} {
		indexPath := filepath.Join(root, "indexes", "by-type", spaceType+".mpk")
		dirIndexPath := filepath.Join(root, "indexes", "by-type", spaceType)
		// skip space types that have no directory index (nothing to migrate)
		_, err := os.Stat(dirIndexPath)
		if err != nil {
			continue
		}
		migrator.log.Info().Str("root", migrator.lu.InternalRoot()).Msg("Migrating " + indexPath + " to messagepack index format...")
		err = migrateSpaceIndex(indexPath, dirIndexPath)
		if err != nil {
			migrator.log.Error().Err(err).Str("path", dirIndexPath).Msg("error migrating index")
		}
	}
	migrator.log.Info().Msg("done.")
	return stateSucceeded, nil
}
// migrateSpaceIndex collects the symlinks of a directory based space index
// into a map, persists that map as a messagepack file at indexPath and
// removes the symlink directory afterwards. Entries that cannot be read as
// symlinks are silently skipped.
func migrateSpaceIndex(indexPath, dirIndexPath string) error {
	matches, err := filepath.Glob(dirIndexPath + "/*")
	if err != nil {
		return err
	}
	links := make(map[string][]byte, len(matches))
	for _, entry := range matches {
		target, readErr := os.Readlink(entry)
		if readErr != nil {
			// not a readable symlink -> skip
			continue
		}
		links[filepath.Base(entry)] = []byte(target)
	}
	// rewrite the collected links as a single messagepack file
	data, err := msgpack.Marshal(links)
	if err != nil {
		return err
	}
	if err := os.WriteFile(indexPath, data, 0600); err != nil {
		return err
	}
	return os.RemoveAll(dirIndexPath)
}
// Rollback migrates the messagepack space indexes back to directory trees of
// symlinks. Listing errors are only warnings, and per-index failures are
// logged but do not fail the rollback.
func (Migration0004) Rollback(m *Migrator) (Result, error) {
	root := m.lu.InternalRoot()
	// roll back user indexes
	users, err := filepath.Glob(filepath.Join(root, "indexes", "by-user-id", "*.mpk"))
	if err != nil {
		m.log.Warn().Err(err).Msg("error listing user indexes")
	}
	for _, indexPath := range users {
		dirIndexPath := strings.TrimSuffix(indexPath, ".mpk")
		m.log.Info().Str("root", m.lu.InternalRoot()).Msg("Migrating " + indexPath + " to symlinks index format...")
		err := downSpaceIndex(indexPath, dirIndexPath)
		if err != nil {
			m.log.Error().Err(err).Str("path", dirIndexPath).Msg("error migrating index")
		}
	}
	// roll back group indexes
	groups, err := filepath.Glob(filepath.Join(root, "indexes", "by-group-id", "*.mpk"))
	if err != nil {
		m.log.Warn().Err(err).Msg("error listing group indexes")
	}
	for _, indexPath := range groups {
		dirIndexPath := strings.TrimSuffix(indexPath, ".mpk")
		m.log.Info().Str("root", m.lu.InternalRoot()).Msg("Migrating " + indexPath + " to symlinks index format...")
		err := downSpaceIndex(indexPath, dirIndexPath)
		if err != nil {
			m.log.Error().Err(err).Str("path", dirIndexPath).Msg("error migrating index")
		}
	}
	// roll back the by-type indexes for the known space types
	for _, spaceType := range []string{"personal", "project", "share"} {
		indexPath := filepath.Join(root, "indexes", "by-type", spaceType+".mpk")
		dirIndexPath := filepath.Join(root, "indexes", "by-type", spaceType)
		// skip space types without a messagepack index. The previous revision
		// additionally tested `os.IsNotExist(err)`, which was dead code: it can
		// only be true when err is already non-nil.
		if _, err := os.Stat(indexPath); err != nil {
			continue
		}
		m.log.Info().Str("root", m.lu.InternalRoot()).Msg("Migrating " + indexPath + " to symlinks index format...")
		err := downSpaceIndex(indexPath, dirIndexPath)
		if err != nil {
			m.log.Error().Err(err).Str("path", dirIndexPath).Msg("error migrating index")
		}
	}
	return stateDown, nil
}
// downSpaceIndex reads a messagepack space index file and recreates the
// directory-of-symlinks representation in dirIndexPath, removing the
// messagepack file on success.
func downSpaceIndex(indexPath, dirIndexPath string) error {
	d, err := os.ReadFile(indexPath)
	if err != nil {
		return err
	}
	links := map[string][]byte{}
	err = msgpack.Unmarshal(d, &links)
	if err != nil {
		return err
	}
	err = os.MkdirAll(dirIndexPath, 0700)
	if err != nil {
		return err
	}
	// one symlink per index entry, named after the key, pointing at the target
	for link, target := range links {
		err = os.Symlink(string(target), filepath.Join(dirIndexPath, link))
		if err != nil {
			return err
		}
	}
	return os.Remove(indexPath)
}

View File

@@ -0,0 +1,114 @@
// Copyright 2018-2023 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package migrator
import (
"os"
"path/filepath"
"github.com/shamaton/msgpack/v2"
)
// init registers this migration under its sequence ID.
func init() {
	registerMigration("0005", Migration0005{})
}

// Migration0005 rewrites the messagepack space indexes from
// map[string][]byte to map[string]string values.
type Migration0005 struct{}
// Migrate fixes the messagepack space index data structure by re-encoding
// every index file from map[string][]byte to map[string]string values.
// Files that do not unmarshal as the old format are assumed to be migrated
// already and are skipped.
func (Migration0005) Migrate(migrator *Migrator) (Result, error) {
	root := migrator.lu.InternalRoot()
	// NOTE(review): filepath.Glob has no recursive "**"; here it matches
	// exactly one directory level (indexes/<dir>/*.mpk), which appears to be
	// the intended layout — confirm.
	indexes, err := filepath.Glob(filepath.Join(root, "indexes", "**", "*.mpk"))
	if err != nil {
		return stateFailed, err
	}
	for _, i := range indexes {
		migrator.log.Info().Str("root", migrator.lu.InternalRoot()).Msg("Fixing index format of " + i)
		// Read old-format index
		oldData, err := os.ReadFile(i)
		if err != nil {
			return stateFailed, err
		}
		oldIndex := map[string][]byte{}
		err = msgpack.Unmarshal(oldData, &oldIndex)
		if err != nil {
			// likely already migrated -> skip
			migrator.log.Warn().Str("root", migrator.lu.InternalRoot()).Msg("Invalid index format found in " + i)
			continue
		}
		// Write new-format index
		newIndex := map[string]string{}
		for k, v := range oldIndex {
			newIndex[k] = string(v)
		}
		newData, err := msgpack.Marshal(newIndex)
		if err != nil {
			return stateFailed, err
		}
		err = os.WriteFile(i, newData, 0600)
		if err != nil {
			return stateFailed, err
		}
	}
	migrator.log.Info().Msg("done.")
	return stateSucceeded, nil
}
// Rollback rolls back the migration by re-encoding every index file from
// map[string]string back to map[string][]byte values. Files that do not
// unmarshal as the new format are skipped.
func (Migration0005) Rollback(migrator *Migrator) (Result, error) {
	root := migrator.lu.InternalRoot()
	indexes, err := filepath.Glob(filepath.Join(root, "indexes", "**", "*.mpk"))
	if err != nil {
		return stateFailed, err
	}
	for _, i := range indexes {
		migrator.log.Info().Str("root", migrator.lu.InternalRoot()).Msg("Fixing index format of " + i)
		oldData, err := os.ReadFile(i)
		if err != nil {
			return stateFailed, err
		}
		oldIndex := map[string]string{}
		err = msgpack.Unmarshal(oldData, &oldIndex)
		if err != nil {
			// not in the new format -> nothing to roll back
			migrator.log.Warn().Str("root", migrator.lu.InternalRoot()).Msg("Invalid index format found in " + i)
			continue
		}
		// Write new-format index
		newIndex := map[string][]byte{}
		for k, v := range oldIndex {
			newIndex[k] = []byte(v)
		}
		newData, err := msgpack.Marshal(newIndex)
		if err != nil {
			return stateFailed, err
		}
		err = os.WriteFile(i, newData, 0600)
		if err != nil {
			return stateFailed, err
		}
	}
	migrator.log.Info().Msg("done.")
	return stateDown, nil
}

View File

@@ -0,0 +1,217 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package migrator
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"sort"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/rogpeppe/go-internal/lockedfile"
"github.com/rs/zerolog"
)
// Migration result states as persisted in the .migrations file.
const (
	statePending = "pending"
	stateFailed = "failed"
	stateSucceeded = "succeeded"
	stateDown = "down"
	stateSucceededRunAgain = "runagain"
)

// migration is implemented by every registered migration step.
type migration interface {
	Migrate(*Migrator) (Result, error)
	Rollback(*Migrator) (Result, error)
}

// migrations holds all registered migrations, keyed by their sequence ID.
var migrations = map[string]migration{}

// migrationStates maps migration IDs to their persisted state.
type migrationStates map[string]MigrationState

// registerMigration adds a migration under the given ID; called from the
// init() functions of the individual migration files.
func registerMigration(name string, migration migration) {
	migrations[name] = migration
}
// allMigrations returns the IDs of all registered migrations in ascending
// (lexicographic) order.
func allMigrations() []string {
	ids := make([]string, 0, len(migrations))
	for id := range migrations {
		ids = append(ids, id)
	}
	sort.Strings(ids)
	return ids
}
// MigrationState holds the state of a migration
type MigrationState struct {
	State string // one of the state* constants
	Message string // last error message, if the migration failed
}

// Result represents the result of a migration run
type Result string

// Migrator runs migrations on an existing decomposedfs
type Migrator struct {
	lu node.PathLookup // resolves the storage root
	states migrationStates // persisted per-migration state
	log *zerolog.Logger
}
// New returns a Migrator operating on the storage resolved by lu, logging to
// the given logger.
func New(lu node.PathLookup, log *zerolog.Logger) Migrator {
	return Migrator{lu: lu, log: log}
}
// Migrations returns the list of migrations and their states. Migrations
// without a persisted state are reported as pending.
func (m *Migrator) Migrations() (map[string]MigrationState, error) {
	err := m.readStates()
	if err != nil {
		return nil, err
	}
	states := map[string]MigrationState{}
	for _, migration := range allMigrations() {
		if s, ok := m.states[migration]; ok {
			states[migration] = s
		} else {
			states[migration] = MigrationState{
				State: statePending,
			}
		}
	}
	return states, nil
}
// RunMigration runs or rolls back a single migration identified by id. A
// lock file in the storage root serializes concurrent runs. The resulting
// state (including a failure message) is persisted either way; a failed
// migration therefore returns nil — only infrastructure errors (locking,
// state I/O) are returned.
func (m *Migrator) RunMigration(id string, rollback bool) error {
	if _, ok := migrations[id]; !ok {
		return fmt.Errorf("invalid migration '%s'", id)
	}
	// guard against concurrent migration runs on the same storage root
	lock, err := lockedfile.OpenFile(filepath.Join(m.lu.InternalRoot(), ".migrations.lock"), os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return err
	}
	defer lock.Close()
	err = m.readStates()
	if err != nil {
		return err
	}
	var res Result
	if !rollback {
		m.log.Info().Msg("Running migration " + id + "...")
		res, err = migrations[id].Migrate(m)
	} else {
		m.log.Info().Msg("Rolling back migration " + id + "...")
		res, err = migrations[id].Rollback(m)
	}
	// write back state
	s := m.states[id]
	s.State = string(res)
	if err != nil {
		m.log.Error().Err(err).Msg("migration " + id + " failed")
		s.Message = err.Error()
	}
	m.states[id] = s
	err = m.writeStates()
	if err != nil {
		return err
	}
	m.log.Info().Msg("done")
	return nil
}
// RunMigrations runs all migrations in sequence. Note this sequence must not be changed or it might
// damage existing decomposed fs. Migrations already in state "succeeded" or
// "down" are skipped; the state of each run is persisted after every
// migration. A failed migration does not abort the sequence.
func (m *Migrator) RunMigrations() error {
	// guard against concurrent migration runs on the same storage root
	lock, err := lockedfile.OpenFile(filepath.Join(m.lu.InternalRoot(), ".migrations.lock"), os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return err
	}
	defer lock.Close()
	err = m.readStates()
	if err != nil {
		return err
	}
	for _, migration := range allMigrations() {
		s := m.states[migration]
		if s.State == stateSucceeded || s.State == stateDown {
			continue
		}
		res, err := migrations[migration].Migrate(m)
		s.State = string(res)
		if err != nil {
			m.log.Error().Err(err).Msg("migration " + migration + " failed")
			s.Message = err.Error()
		}
		m.states[migration] = s
		err = m.writeStates()
		if err != nil {
			return err
		}
	}
	return nil
}
// readStates loads the persisted migration states from the ".migrations"
// file in the storage root. A missing file is not an error and leaves the
// states empty.
func (m *Migrator) readStates() error {
	m.states = migrationStates{}
	d, err := os.ReadFile(filepath.Join(m.lu.InternalRoot(), ".migrations"))
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
	}
	// d is empty (nil) when the file did not exist
	if len(d) > 0 {
		err = json.Unmarshal(d, &m.states)
		if err != nil {
			return err
		}
	}
	return nil
}
// writeStates persists the migration states as JSON in the ".migrations"
// file in the storage root.
func (m *Migrator) writeStates() error {
	d, err := json.Marshal(m.states)
	if err != nil {
		m.log.Error().Err(err).Msg("could not marshal migration states")
		// propagate the error: the previous revision returned nil here,
		// making callers believe the state had been persisted
		return err
	}
	return os.WriteFile(filepath.Join(m.lu.InternalRoot(), ".migrations"), d, 0600)
}

View File

@@ -0,0 +1,45 @@
package mtimesyncedcache
import "sync"
// Map is a typed, generic wrapper around sync.Map. The zero value is empty
// and ready to use.
type Map[K comparable, V any] struct {
	m sync.Map
}

// Delete removes the entry for key, if present.
func (m *Map[K, V]) Delete(key K) {
	m.m.Delete(key)
}

// Load returns the value stored for key and whether it was present.
func (m *Map[K, V]) Load(key K) (value V, ok bool) {
	raw, found := m.m.Load(key)
	if found {
		return raw.(V), true
	}
	return value, false
}

// LoadAndDelete removes the entry for key, returning the previous value if
// one was stored.
func (m *Map[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
	raw, found := m.m.LoadAndDelete(key)
	if found {
		return raw.(V), true
	}
	return value, false
}

// LoadOrStore returns the existing value for key if present; otherwise it
// stores and returns the given value. loaded reports whether the value was
// already present.
func (m *Map[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
	raw, found := m.m.LoadOrStore(key, value)
	return raw.(V), found
}

// Range calls f for each entry until f returns false.
func (m *Map[K, V]) Range(f func(key K, value V) bool) {
	m.m.Range(func(key, value any) bool {
		return f(key.(K), value.(V))
	})
}

// Store sets the value for key.
func (m *Map[K, V]) Store(key K, value V) {
	m.m.Store(key, value)
}

// Count returns the number of entries by iterating over the map.
func (m *Map[K, V]) Count() int {
	n := 0
	m.Range(func(K, V) bool {
		n++
		return true
	})
	return n
}

View File

@@ -0,0 +1,59 @@
package mtimesyncedcache
import (
"sync"
"time"
)
// Cache is an mtime-aware cache: a stored value is only recomputed when a
// strictly newer modification time is presented to LoadOrStore.
type Cache[K comparable, T any] struct {
	entries Map[K, *entry[T]]
}

// entry couples a cached value with the mtime it was stored at. The mutex
// serializes concurrent LoadOrStore calls for the same key.
type entry[T any] struct {
	mtime time.Time
	value T
	mu    sync.Mutex
}

// New returns an empty Cache.
func New[K comparable, T any]() Cache[K, T] {
	var c Cache[K, T]
	return c
}

// Store unconditionally records value for key with the given mtime.
func (c *Cache[K, T]) Store(key K, mtime time.Time, value T) error {
	c.entries.Store(key, &entry[T]{mtime: mtime, value: value})
	return nil
}

// Load returns the cached value for key, if present.
func (c *Cache[K, T]) Load(key K) (T, bool) {
	if e, ok := c.entries.Load(key); ok {
		return e.value, true
	}
	var zero T
	return zero, false
}

// LoadOrStore returns the cached value for key. When mtime is newer than the
// cached entry's mtime, f is invoked first to recompute the value. An error
// from f is returned as-is.
func (c *Cache[K, T]) LoadOrStore(key K, mtime time.Time, f func() (T, error)) (T, error) {
	e, _ := c.entries.LoadOrStore(key, &entry[T]{})
	e.mu.Lock()
	defer e.mu.Unlock()
	if !mtime.After(e.mtime) {
		// cached value is at least as fresh, no recompute needed
		return e.value, nil
	}
	// NOTE: the mtime is advanced even when f fails, matching previous behavior
	e.mtime = mtime
	v, err := f()
	if err != nil {
		var zero T
		return zero, err
	}
	e.value = v
	c.entries.Store(key, e) // e is already in the map; kept for parity
	return e.value, nil
}

View File

@@ -0,0 +1,360 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package node
import (
"context"
"encoding/json"
"io/fs"
"os"
"path/filepath"
"time"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1"
"github.com/opencloud-eu/reva/v2/pkg/appctx"
ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx"
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/filelocks"
"github.com/opencloud-eu/reva/v2/pkg/utils"
"github.com/pkg/errors"
)
// SetLock sets a lock on the node by writing the JSON encoded provider.Lock
// to the node's lock file. It returns errtypes.PreconditionFailed when the
// node is already locked.
//
// The result is named so that the deferred release of the file lock can
// propagate its error to the caller; with the previous unnamed result the
// deferred `err = rerr` assignment had no effect on the returned value.
func (n *Node) SetLock(ctx context.Context, lock *provider.Lock) (err error) {
	ctx, span := tracer.Start(ctx, "SetLock")
	defer span.End()
	lockFilePath := n.LockFilePath()
	// ensure parent path exists
	if err := os.MkdirAll(filepath.Dir(lockFilePath), 0700); err != nil {
		return errors.Wrap(err, "Decomposedfs: error creating parent folder for lock")
	}
	// get file lock, so that nobody can create the lock in the meantime
	fileLock, err := filelocks.AcquireWriteLock(n.InternalPath())
	if err != nil {
		return err
	}
	defer func() {
		rerr := filelocks.ReleaseLock(fileLock)
		// if err is non nil we do not overwrite that
		if err == nil {
			err = rerr
		}
	}()
	// check if already locked
	l, err := n.ReadLock(ctx, true) // we already have a write file lock, so ReadLock() would fail to acquire a read file lock -> skip it
	switch err.(type) {
	case errtypes.NotFound:
		// file not locked, continue
	case nil:
		if l != nil {
			return errtypes.PreconditionFailed("already locked")
		}
	default:
		return errors.Wrap(err, "Decomposedfs: could check if file already is locked")
	}
	// O_EXCL to make open fail when the file already exists
	f, err := os.OpenFile(lockFilePath, os.O_EXCL|os.O_CREATE|os.O_WRONLY, 0600)
	if err != nil {
		return errors.Wrap(err, "Decomposedfs: could not create lock file")
	}
	defer f.Close()
	if err := json.NewEncoder(f).Encode(lock); err != nil {
		return errors.Wrap(err, "Decomposedfs: could not write lock file")
	}
	return err
}
// ReadLock reads the lock for a node from its lock file. It returns
// errtypes.NotFound when the node is not locked; expired locks are removed
// on the fly. Set skipFileLock when the caller already holds a file lock on
// the node, otherwise a read file lock is acquired for the duration of the
// call.
//
// The results are named so that the deferred release of the file lock can
// propagate its error; previously the deferred assignment targeted a
// shadowed local `err` which never reached the caller.
func (n Node) ReadLock(ctx context.Context, skipFileLock bool) (l *provider.Lock, err error) {
	ctx, span := tracer.Start(ctx, "ReadLock")
	defer span.End()
	// ensure parent path exists
	_, subspan := tracer.Start(ctx, "os.MkdirAll")
	err = os.MkdirAll(filepath.Dir(n.InternalPath()), 0700)
	subspan.End()
	if err != nil {
		return nil, errors.Wrap(err, "Decomposedfs: error creating parent folder for lock")
	}
	// the caller of ReadLock already may hold a file lock
	if !skipFileLock {
		_, subspan := tracer.Start(ctx, "filelocks.AcquireReadLock")
		fileLock, lockErr := filelocks.AcquireReadLock(n.InternalPath())
		subspan.End()
		if lockErr != nil {
			return nil, lockErr
		}
		defer func() {
			_, subspan := tracer.Start(ctx, "filelocks.ReleaseLock")
			rerr := filelocks.ReleaseLock(fileLock)
			subspan.End()
			// if err is non nil we do not overwrite that
			if err == nil {
				err = rerr
			}
		}()
	}
	_, subspan = tracer.Start(ctx, "os.Open")
	f, err := os.Open(n.LockFilePath())
	subspan.End()
	if err != nil {
		if errors.Is(err, fs.ErrNotExist) {
			return nil, errtypes.NotFound("no lock found")
		}
		return nil, errors.Wrap(err, "Decomposedfs: could not open lock file")
	}
	defer f.Close()
	lock := &provider.Lock{}
	if err := json.NewDecoder(f).Decode(lock); err != nil {
		appctx.GetLogger(ctx).Error().Err(err).Msg("Decomposedfs: could not decode lock file, ignoring")
		return nil, errors.Wrap(err, "Decomposedfs: could not read lock file")
	}
	// lock already expired
	if lock.Expiration != nil && time.Now().After(time.Unix(int64(lock.Expiration.Seconds), int64(lock.Expiration.Nanos))) {
		_, subspan = tracer.Start(ctx, "os.Remove")
		err = os.Remove(f.Name())
		subspan.End()
		if err != nil {
			return nil, errors.Wrap(err, "Decomposedfs: could not remove expired lock file")
		}
		// we successfully deleted the expired lock
		return nil, errtypes.NotFound("no lock found")
	}
	return lock, nil
}
// RefreshLock refreshes the node's lock by rewriting the lock file with the
// given lock, after verifying that the lock ID on disk matches lock.LockId
// (or existingLockID when an unlock-and-relock is performed).
//
// The result is named so that the deferred release of the file lock can
// propagate its error to the caller; with the previous unnamed result the
// deferred `err = rerr` assignment had no effect on the returned value.
func (n *Node) RefreshLock(ctx context.Context, lock *provider.Lock, existingLockID string) (err error) {
	ctx, span := tracer.Start(ctx, "RefreshLock")
	defer span.End()
	// ensure parent path exists
	if err := os.MkdirAll(filepath.Dir(n.InternalPath()), 0700); err != nil {
		return errors.Wrap(err, "Decomposedfs: error creating parent folder for lock")
	}
	fileLock, err := filelocks.AcquireWriteLock(n.InternalPath())
	if err != nil {
		return err
	}
	defer func() {
		rerr := filelocks.ReleaseLock(fileLock)
		// if err is non nil we do not overwrite that
		if err == nil {
			err = rerr
		}
	}()
	f, err := os.OpenFile(n.LockFilePath(), os.O_RDWR, os.ModeExclusive)
	switch {
	case errors.Is(err, fs.ErrNotExist):
		return errtypes.PreconditionFailed("lock does not exist")
	case err != nil:
		return errors.Wrap(err, "Decomposedfs: could not open lock file")
	}
	defer f.Close()
	readLock := &provider.Lock{}
	if err := json.NewDecoder(f).Decode(readLock); err != nil {
		return errors.Wrap(err, "Decomposedfs: could not read lock")
	}
	// check refresh lockID match
	if existingLockID == "" && readLock.LockId != lock.LockId {
		return errtypes.Aborted("mismatching lock ID")
	}
	// check if UnlockAndRelock sends the correct lockID
	if existingLockID != "" && readLock.LockId != existingLockID {
		return errtypes.Aborted("mismatching existing lock ID")
	}
	if ok, err := isLockModificationAllowed(ctx, readLock, lock); !ok {
		return err
	}
	// Rewind to the beginning of the file before writing a refreshed lock
	_, err = f.Seek(0, 0)
	if err != nil {
		return errors.Wrap(err, "could not seek to the beginning of the lock file")
	}
	if err := json.NewEncoder(f).Encode(lock); err != nil {
		return errors.Wrap(err, "Decomposedfs: could not write lock file")
	}
	return err
}
// Unlock unlocks the node by removing its lock file, after verifying that
// the given lock matches the lock on disk and that the caller is allowed to
// modify it.
//
// The result is named so that the deferred release of the file lock can
// propagate its error to the caller; with the previous unnamed result the
// deferred `err = rerr` assignment had no effect on the returned value.
func (n *Node) Unlock(ctx context.Context, lock *provider.Lock) (err error) {
	ctx, span := tracer.Start(ctx, "Unlock")
	defer span.End()
	// ensure parent path exists
	if err := os.MkdirAll(filepath.Dir(n.InternalPath()), 0700); err != nil {
		return errors.Wrap(err, "Decomposedfs: error creating parent folder for lock")
	}
	fileLock, err := filelocks.AcquireWriteLock(n.InternalPath())
	if err != nil {
		return err
	}
	defer func() {
		rerr := filelocks.ReleaseLock(fileLock)
		// if err is non nil we do not overwrite that
		if err == nil {
			err = rerr
		}
	}()
	f, err := os.OpenFile(n.LockFilePath(), os.O_RDONLY, os.ModeExclusive)
	switch {
	case errors.Is(err, fs.ErrNotExist):
		return errtypes.Aborted("lock does not exist")
	case err != nil:
		return errors.Wrap(err, "Decomposedfs: could not open lock file")
	}
	defer f.Close()
	oldLock := &provider.Lock{}
	if err := json.NewDecoder(f).Decode(oldLock); err != nil {
		return errors.Wrap(err, "Decomposedfs: could not read lock")
	}
	// check lock
	if lock == nil || (oldLock.LockId != lock.LockId) {
		return errtypes.Locked(oldLock.LockId)
	}
	if ok, err := isLockModificationAllowed(ctx, oldLock, lock); !ok {
		return err
	}
	if err = os.Remove(f.Name()); err != nil {
		return errors.Wrap(err, "Decomposedfs: could not remove lock file")
	}
	return err
}
// CheckLock compares the lock ID from the context with the node's lock on
// disk. It returns nil when they match or when the node is unlocked and no
// lock ID was sent, errtypes.Locked when the node is locked but no lock ID
// was sent, and errtypes.Aborted on a mismatch (in either direction).
func (n *Node) CheckLock(ctx context.Context) error {
	ctx, span := tracer.Start(ctx, "CheckLock")
	defer span.End()
	contextLock, _ := ctxpkg.ContextGetLockID(ctx)
	diskLock, _ := n.ReadLock(ctx, false)
	if diskLock != nil {
		switch contextLock {
		case "":
			return errtypes.Locked(diskLock.LockId) // no lockid in request
		case diskLock.LockId:
			return nil // ok
		default:
			return errtypes.Aborted("mismatching lock")
		}
	}
	if contextLock != "" {
		return errtypes.Aborted("not locked") // no lock on disk. why is there a lockid in the context
	}
	return nil // ok
}
// readLocksIntoOpaque reads the node's lock and attaches it to the resource
// info, both as ri.Lock and as a JSON entry in ri.Opaque under the "lock"
// key.
func readLocksIntoOpaque(ctx context.Context, n *Node, ri *provider.ResourceInfo) error {
	lock, err := n.ReadLock(ctx, false)
	if err != nil {
		appctx.GetLogger(ctx).Error().Err(err).Msg("Decomposedfs: could not read lock")
		return err
	}
	// reencode to ensure valid json
	var b []byte
	if b, err = json.Marshal(lock); err != nil {
		// NOTE(review): on marshal failure b stays nil but the opaque entry is
		// still written below, and the marshal error is returned at the end
		appctx.GetLogger(ctx).Error().Err(err).Msg("Decomposedfs: could not marshal locks")
	}
	if ri.Opaque == nil {
		ri.Opaque = &types.Opaque{
			Map: map[string]*types.OpaqueEntry{},
		}
	}
	ri.Opaque.Map["lock"] = &types.OpaqueEntry{
		Decoder: "json",
		Value: b,
	}
	ri.Lock = lock
	return err
}
// hasLocks reports whether a lock file exists for the node. Any Stat error
// (not only "not exist") is treated as "no lock". The context is currently
// unused.
func (n *Node) hasLocks(ctx context.Context) bool {
	_, err := os.Stat(n.LockFilePath()) // FIXME better error checking
	return err == nil
}
// isLockModificationAllowed checks whether newLock may modify (refresh or
// remove) oldLock. Shared locks are always modifiable. For exclusive locks
// the app name and the lock holder must match, and when a lock user is set
// it must also equal the user in the context. On a mismatch a
// PermissionDenied error describing the failing check is returned.
func isLockModificationAllowed(ctx context.Context, oldLock *provider.Lock, newLock *provider.Lock) (bool, error) {
	if oldLock.Type == provider.LockType_LOCK_TYPE_SHARED {
		return true, nil
	}
	appNameEquals := oldLock.AppName == newLock.AppName
	if !appNameEquals {
		return false, errtypes.PermissionDenied("app names of the locks are mismatching")
	}
	var lockUserEquals, contextUserEquals bool
	if oldLock.User == nil && newLock.GetUser() == nil {
		// no user lock set
		lockUserEquals = true
		contextUserEquals = true
	} else {
		lockUserEquals = utils.UserIDEqual(oldLock.User, newLock.GetUser())
		if !lockUserEquals {
			return false, errtypes.PermissionDenied("users of the locks are mismatching")
		}
		u := ctxpkg.ContextMustGetUser(ctx)
		contextUserEquals = utils.UserIDEqual(oldLock.User, u.Id)
		if !contextUserEquals {
			return false, errtypes.PermissionDenied("lock holder and current user are mismatching")
		}
	}
	// all three flags are true on every path that reaches this point
	return appNameEquals && lockUserEquals && contextUserEquals, nil
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,38 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
//go:build !windows
// +build !windows
package node
import (
"syscall"
)
// GetAvailableSize stats the filesystem containing path and returns the
// number of bytes available to unprivileged users.
func GetAvailableSize(path string) (uint64, error) {
	var stat syscall.Statfs_t
	if err := syscall.Statfs(path, &stat); err != nil {
		return 0, err
	}
	// stat.Bavail is int64 on some platforms (e.g. freebsd), so convert explicitly
	return uint64(stat.Bavail) * uint64(stat.Bsize), nil //nolint:unconvert
}

View File

@@ -0,0 +1,38 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
//go:build windows
// +build windows
package node
import "golang.org/x/sys/windows"
// GetAvailableSize stats the filesystem containing path and returns the
// number of bytes available to the calling user.
func GetAvailableSize(path string) (uint64, error) {
	pathPtr, err := windows.UTF16PtrFromString(path)
	if err != nil {
		return 0, err
	}
	var avail, total, free uint64
	if err := windows.GetDiskFreeSpaceEx(pathPtr, &avail, &total, &free); err != nil {
		return 0, err
	}
	return avail, nil
}

View File

@@ -0,0 +1,232 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package node
import (
"context"
"strings"
userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
"github.com/opencloud-eu/reva/v2/pkg/appctx"
ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx"
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/opencloud-eu/reva/v2/pkg/utils"
"github.com/pkg/errors"
)
// PermissionFunc should return true when the user has permission to access the node
type PermissionFunc func(*Node) bool

// NoCheck is a PermissionFunc that grants access unconditionally.
var NoCheck PermissionFunc = func(*Node) bool {
	return true
}
// NoPermissions represents an empty set of permissions
func NoPermissions() *provider.ResourcePermissions {
	return &provider.ResourcePermissions{}
}

// ShareFolderPermissions defines permissions for the shared jail:
// read-only access plus Stat/GetPath/GetQuota.
func ShareFolderPermissions() *provider.ResourcePermissions {
	return &provider.ResourcePermissions{
		// read permissions
		ListContainer:        true,
		Stat:                 true,
		InitiateFileDownload: true,
		GetPath:              true,
		GetQuota:             true,
		ListFileVersions:     true,
	}
}

// OwnerPermissions defines permissions for nodes owned by the user:
// every permission flag is granted.
func OwnerPermissions() *provider.ResourcePermissions {
	return &provider.ResourcePermissions{
		// all permissions
		AddGrant:             true,
		CreateContainer:      true,
		Delete:               true,
		GetPath:              true,
		GetQuota:             true,
		InitiateFileDownload: true,
		InitiateFileUpload:   true,
		ListContainer:        true,
		ListFileVersions:     true,
		ListGrants:           true,
		ListRecycle:          true,
		Move:                 true,
		PurgeRecycle:         true,
		RemoveGrant:          true,
		RestoreFileVersion:   true,
		RestoreRecycleItem:   true,
		Stat:                 true,
		UpdateGrant:          true,
		DenyGrant:            true,
	}
}

// ServiceAccountPermissions defines the permissions for nodes when requested by a service account
func ServiceAccountPermissions() *provider.ResourcePermissions {
	// TODO: Different permissions for different service accounts
	return &provider.ResourcePermissions{
		Stat:                 true,
		ListContainer:        true,
		GetPath:              true, // for search index
		InitiateFileUpload:   true, // for personal data export
		InitiateFileDownload: true, // for full-text-search
		RemoveGrant:          true, // for share expiry
		ListRecycle:          true, // for purge-trash-bin command
		PurgeRecycle:         true, // for purge-trash-bin command
		RestoreRecycleItem:   true, // for cli restore command
		Delete:               true, // for cli restore command with replace option
		CreateContainer:      true, // for space provisioning
		AddGrant:             true, // for initial project space member assignment
	}
}
// Permissions implements permission checks
type Permissions struct {
	// lu resolves nodes and internal paths while assembling permissions
	lu PathLookup
}

// NewPermissions returns a new Permissions instance
func NewPermissions(lu PathLookup) *Permissions {
	return &Permissions{
		lu: lu,
	}
}
// AssemblePermissions will assemble the permissions for the current user on the given node, taking into account all parent nodes.
// It fails with NotFound when any ancestor of the node is trashed.
func (p *Permissions) AssemblePermissions(ctx context.Context, n *Node) (ap *provider.ResourcePermissions, err error) {
	return p.assemblePermissions(ctx, n, true)
}

// AssembleTrashPermissions will assemble the permissions for the current user on the given node, taking into account all parent nodes.
// Unlike AssemblePermissions it tolerates trashed ancestors, which is needed for trash operations.
func (p *Permissions) AssembleTrashPermissions(ctx context.Context, n *Node) (ap *provider.ResourcePermissions, err error) {
	return p.assemblePermissions(ctx, n, false)
}
// assemblePermissions aggregates the permissions of the current user on the
// given node by walking from the node up to its space root and merging the
// grants found on every segment.
//
// Special cases:
//   - no user in the context  -> NoPermissions
//   - service accounts        -> static ServiceAccountPermissions
//   - the node's owner        -> full OwnerPermissions
//   - a denial on any segment -> that segment's permissions are returned immediately
//
// When failOnTrashedSubtree is true the walk aborts with NotFound as soon as
// a non-existing (trashed) ancestor is encountered.
func (p *Permissions) assemblePermissions(ctx context.Context, n *Node, failOnTrashedSubtree bool) (ap *provider.ResourcePermissions, err error) {
	u, ok := ctxpkg.ContextGetUser(ctx)
	if !ok {
		return NoPermissions(), nil
	}
	if u.GetId().GetType() == userpb.UserType_USER_TYPE_SERVICE {
		return ServiceAccountPermissions(), nil
	}
	// are we reading a revision?
	if strings.Contains(n.ID, RevisionIDDelimiter) {
		// verify revision key format
		kp := strings.SplitN(n.ID, RevisionIDDelimiter, 2)
		if len(kp) != 2 {
			return NoPermissions(), errtypes.NotFound(n.ID)
		}
		// use the actual node for the permission assembly
		n.ID = kp[0]
	}

	// determine root
	rn := n.SpaceRoot
	cn := n

	ap = &provider.ResourcePermissions{}

	// for all segments, starting at the leaf
	for cn.ID != rn.ID {
		if np, accessDenied, err := cn.ReadUserPermissions(ctx, u); err == nil {
			// check if we have a denial on this node
			if accessDenied {
				return np, nil
			}
			AddPermissions(ap, np)
		} else {
			appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", cn.SpaceID).Str("nodeid", cn.ID).Msg("error reading permissions")
			// continue with next segment
		}

		if cn, err = cn.Parent(ctx); err != nil {
			// We got a parent node but cannot read it from disk (e.g. it has been deleted already)
			if cn != nil {
				return ap, errors.Wrap(err, "Decomposedfs: error getting parent for node "+cn.ID)
			}
			// We do not have a parent, so we assume the next valid parent is the spaceRoot (which must always exist)
			cn = n.SpaceRoot
		}
		if failOnTrashedSubtree && !cn.Exists {
			return NoPermissions(), errtypes.NotFound(n.ID)
		}
	}

	// for the root node
	if np, accessDenied, err := cn.ReadUserPermissions(ctx, u); err == nil {
		// check if we have a denial on this node
		if accessDenied {
			return np, nil
		}
		AddPermissions(ap, np)
	} else {
		appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", cn.SpaceID).Str("nodeid", cn.ID).Msg("error reading root node permissions")
	}

	// check if the current user is the owner
	if utils.UserIDEqual(u.Id, n.Owner()) {
		return OwnerPermissions(), nil
	}
	appctx.GetLogger(ctx).Debug().Interface("permissions", ap).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Interface("user", u).Msg("returning aggregated permissions")
	return ap, nil
}
// AddPermissions merges a set of permissions into another: every flag that is
// set in r is set in l as well (l is modified in place, r is left untouched).
// TODO we should use a bitfield for this ...
func AddPermissions(l *provider.ResourcePermissions, r *provider.ResourcePermissions) {
	l.AddGrant = l.AddGrant || r.AddGrant
	l.CreateContainer = l.CreateContainer || r.CreateContainer
	l.Delete = l.Delete || r.Delete
	l.GetPath = l.GetPath || r.GetPath
	l.GetQuota = l.GetQuota || r.GetQuota
	l.InitiateFileDownload = l.InitiateFileDownload || r.InitiateFileDownload
	l.InitiateFileUpload = l.InitiateFileUpload || r.InitiateFileUpload
	l.ListContainer = l.ListContainer || r.ListContainer
	l.ListFileVersions = l.ListFileVersions || r.ListFileVersions
	l.ListGrants = l.ListGrants || r.ListGrants
	l.ListRecycle = l.ListRecycle || r.ListRecycle
	l.Move = l.Move || r.Move
	l.PurgeRecycle = l.PurgeRecycle || r.PurgeRecycle
	l.RemoveGrant = l.RemoveGrant || r.RemoveGrant
	l.RestoreFileVersion = l.RestoreFileVersion || r.RestoreFileVersion
	l.RestoreRecycleItem = l.RestoreRecycleItem || r.RestoreRecycleItem
	l.Stat = l.Stat || r.Stat
	l.UpdateGrant = l.UpdateGrant || r.UpdateGrant
	l.DenyGrant = l.DenyGrant || r.DenyGrant
}

View File

@@ -0,0 +1,217 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package node
import (
"context"
"io"
"io/fs"
"strconv"
"time"
"github.com/pkg/xattr"
)
// Attributes is a map of string keys and byte array values
type Attributes map[string][]byte

// String returns the value stored under key interpreted as a string.
// A missing key yields the empty string.
func (md Attributes) String(key string) string {
	return string(md[key])
}

// SetString stores val under key.
func (md Attributes) SetString(key, val string) {
	md[key] = []byte(val)
}

// Int64 parses the value stored under key as a base-10 int64.
func (md Attributes) Int64(key string) (int64, error) {
	return strconv.ParseInt(md.String(key), 10, 64)
}

// SetInt64 stores val under key in decimal notation.
func (md Attributes) SetInt64(key string, val int64) {
	md[key] = strconv.AppendInt(nil, val, 10)
}

// UInt64 parses the value stored under key as a base-10 uint64.
func (md Attributes) UInt64(key string) (uint64, error) {
	return strconv.ParseUint(md.String(key), 10, 64)
}

// SetUInt64 stores val under key in decimal notation.
func (md Attributes) SetUInt64(key string, val uint64) {
	md[key] = strconv.AppendUint(nil, val, 10)
}

// Time parses the value stored under key as an RFC3339Nano timestamp.
func (md Attributes) Time(key string) (time.Time, error) {
	return time.Parse(time.RFC3339Nano, md.String(key))
}
// SetXattrsWithContext sets multiple extended attributes on the write-through cache/node
// and persists them through the metadata backend. When acquireLock is true the
// backend acquires its own lock for the write.
func (n *Node) SetXattrsWithContext(ctx context.Context, attribs map[string][]byte, acquireLock bool) (err error) {
	_, span := tracer.Start(ctx, "SetXattrsWithContext")
	defer span.End()

	// update the in-memory cache first so subsequent reads see the new values
	if n.xattrsCache != nil {
		for k, v := range attribs {
			n.xattrsCache[k] = v
		}
	}

	return n.lu.MetadataBackend().SetMultiple(ctx, n.InternalPath(), attribs, acquireLock)
}
// SetXattrs sets multiple extended attributes on the write-through cache/node
// using a background context.
func (n *Node) SetXattrs(attribs map[string][]byte, acquireLock bool) (err error) {
	return n.SetXattrsWithContext(context.Background(), attribs, acquireLock)
}

// SetXattr sets an extended attribute on the write-through cache/node
func (n *Node) SetXattr(ctx context.Context, key string, val []byte) (err error) {
	// keep the cache in sync before persisting
	if n.xattrsCache != nil {
		n.xattrsCache[key] = val
	}

	return n.lu.MetadataBackend().Set(ctx, n.InternalPath(), key, val)
}

// SetXattrString sets a string extended attribute on the write-through cache/node
func (n *Node) SetXattrString(ctx context.Context, key, val string) (err error) {
	if n.xattrsCache != nil {
		n.xattrsCache[key] = []byte(val)
	}

	return n.lu.MetadataBackend().Set(ctx, n.InternalPath(), key, []byte(val))
}

// RemoveXattr removes an extended attribute from the write-through cache/node
func (n *Node) RemoveXattr(ctx context.Context, key string, acquireLock bool) error {
	if n.xattrsCache != nil {
		delete(n.xattrsCache, key)
	}
	return n.lu.MetadataBackend().Remove(ctx, n.InternalPath(), key, acquireLock)
}
// XattrsWithReader returns the extended attributes of the node. If the attributes have already
// been cached they are not read from disk again.
// When r is non-nil it is passed to the backend as an already-locked metadata
// source so the attributes can be read without re-acquiring the lock.
func (n *Node) XattrsWithReader(ctx context.Context, r io.Reader) (Attributes, error) {
	if n.ID == "" {
		// Do not try to read the attribute of an empty node. The InternalPath points to the
		// base nodes directory in this case.
		return Attributes{}, &xattr.Error{Op: "node.XattrsWithReader", Path: n.InternalPath(), Err: xattr.ENOATTR}
	}

	if n.xattrsCache != nil {
		return n.xattrsCache, nil
	}

	var attrs Attributes
	var err error
	if r != nil {
		attrs, err = n.lu.MetadataBackend().AllWithLockedSource(ctx, n.InternalPath(), r)
	} else {
		attrs, err = n.lu.MetadataBackend().All(ctx, n.InternalPath())
	}
	if err != nil {
		return nil, err
	}

	// populate the write-through cache for subsequent reads
	n.xattrsCache = attrs
	return n.xattrsCache, nil
}

// Xattrs returns the extended attributes of the node. If the attributes have already
// been cached they are not read from disk again.
func (n *Node) Xattrs(ctx context.Context) (Attributes, error) {
	return n.XattrsWithReader(ctx, nil)
}

// Xattr returns an extended attribute of the node. If the attributes have already
// been cached it is not read from disk again.
func (n *Node) Xattr(ctx context.Context, key string) ([]byte, error) {
	path := n.InternalPath()
	if path == "" {
		// Do not try to read the attribute of a non-existing node
		return []byte{}, fs.ErrNotExist
	}
	if n.ID == "" {
		// Do not try to read the attribute of an empty node. The InternalPath points to the
		// base nodes directory in this case.
		return []byte{}, &xattr.Error{Op: "node.Xattr", Path: path, Name: key, Err: xattr.ENOATTR}
	}

	// fill the cache on the first read
	if n.xattrsCache == nil {
		attrs, err := n.lu.MetadataBackend().All(ctx, path)
		if err != nil {
			return []byte{}, err
		}
		n.xattrsCache = attrs
	}

	if val, ok := n.xattrsCache[key]; ok {
		return val, nil
	}
	// wrap the error as xattr does
	return []byte{}, &xattr.Error{Op: "node.Xattr", Path: path, Name: key, Err: xattr.ENOATTR}
}
// XattrString returns the string representation of an attribute
func (n *Node) XattrString(ctx context.Context, key string) (string, error) {
	b, err := n.Xattr(ctx, key)
	if err != nil {
		return "", err
	}
	return string(b), nil
}

// XattrInt32 returns the int32 representation of an attribute
// (parsed as base-10).
func (n *Node) XattrInt32(ctx context.Context, key string) (int32, error) {
	b, err := n.XattrString(ctx, key)
	if err != nil {
		return 0, err
	}
	typeInt, err := strconv.ParseInt(b, 10, 32)
	if err != nil {
		return 0, err
	}
	return int32(typeInt), nil
}

// XattrInt64 returns the int64 representation of an attribute
// (parsed as base-10).
func (n *Node) XattrInt64(ctx context.Context, key string) (int64, error) {
	b, err := n.XattrString(ctx, key)
	if err != nil {
		return 0, err
	}
	return strconv.ParseInt(b, 10, 64)
}

// XattrUint64 returns the uint64 representation of an attribute
// (parsed as base-10).
func (n *Node) XattrUint64(ctx context.Context, key string) (uint64, error) {
	b, err := n.XattrString(ctx, key)
	if err != nil {
		return 0, err
	}
	return strconv.ParseUint(b, 10, 64)
}

View File

@@ -0,0 +1,176 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package options
import (
"path/filepath"
"strings"
"time"
"github.com/mitchellh/mapstructure"
"github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool"
"github.com/opencloud-eu/reva/v2/pkg/sharedconf"
"github.com/opencloud-eu/reva/v2/pkg/storage/cache"
"github.com/pkg/errors"
)
// Option defines a single option function.
type Option func(o *Options)

// Options defines the available options for this package.
type Options struct {
	// the gateway address
	GatewayAddr string `mapstructure:"gateway_addr"`
	// the metadata backend to use, currently supports `xattr` or `ini`
	MetadataBackend string `mapstructure:"metadata_backend"`
	// the propagator to use for this fs. currently only `sync` is fully supported, `async` is available as an experimental feature
	Propagator string `mapstructure:"propagator"`
	// Options specific to the async propagator
	AsyncPropagatorOptions AsyncPropagatorOptions `mapstructure:"async_propagator_options"`
	// ocis fs works on top of a dir of uuid nodes
	Root string `mapstructure:"root"`
	// the upload directory where uploads in progress are stored
	UploadDirectory string `mapstructure:"upload_directory"`
	// UserLayout describes the relative path from the storage's root node to the users home node.
	UserLayout string `mapstructure:"user_layout"`
	// ProjectLayout describes the relative path from the storage's root node to the project spaces root directory.
	ProjectLayout string `mapstructure:"project_layout"`
	// propagate mtime changes as tmtime (tree modification time) to the parent directory when user.ocis.propagation=1 is set on a node
	TreeTimeAccounting bool `mapstructure:"treetime_accounting"`
	// propagate size changes as treesize
	TreeSizeAccounting bool `mapstructure:"treesize_accounting"`
	// permissions service to use when checking permissions
	PermissionsSVC           string `mapstructure:"permissionssvc"`
	PermissionsClientTLSMode string `mapstructure:"permissionssvc_tls_mode"`
	// PermTLSMode is derived from PermissionsClientTLSMode (or the shared gRPC client options) in New
	PermTLSMode pool.TLSMode

	// templates for personal and general space aliases/paths
	PersonalSpaceAliasTemplate string `mapstructure:"personalspacealias_template"`
	PersonalSpacePathTemplate  string `mapstructure:"personalspacepath_template"`
	GeneralSpaceAliasTemplate  string `mapstructure:"generalspacealias_template"`
	GeneralSpacePathTemplate   string `mapstructure:"generalspacepath_template"`

	AsyncFileUploads bool `mapstructure:"asyncfileuploads"`

	Events EventOptions `mapstructure:"events"`

	Tokens TokenOptions `mapstructure:"tokens"`

	// cache configurations
	StatCache         cache.Config `mapstructure:"statcache"`
	FileMetadataCache cache.Config `mapstructure:"filemetadatacache"`
	IDCache           cache.Config `mapstructure:"idcache"`

	MaxAcquireLockCycles    int `mapstructure:"max_acquire_lock_cycles"`
	LockCycleDurationFactor int `mapstructure:"lock_cycle_duration_factor"`
	MaxConcurrency          int `mapstructure:"max_concurrency"`

	MaxQuota uint64 `mapstructure:"max_quota"`

	DisableVersioning bool `mapstructure:"disable_versioning"`

	MountID string `mapstructure:"mount_id"`
}

// AsyncPropagatorOptions holds the configuration for the async propagator
type AsyncPropagatorOptions struct {
	// PropagationDelay is how long changes are accumulated before they are propagated
	PropagationDelay time.Duration `mapstructure:"propagation_delay"`
}

// EventOptions are the configurable options for events
type EventOptions struct {
	NumConsumers int `mapstructure:"numconsumers"`
}

// TokenOptions are the configurable option for tokens
type TokenOptions struct {
	DownloadEndpoint     string `mapstructure:"download_endpoint"`
	DataGatewayEndpoint  string `mapstructure:"datagateway_endpoint"`
	TransferSharedSecret string `mapstructure:"transfer_shared_secret"`
	TransferExpires      int64  `mapstructure:"transfer_expires"`
}
// New builds an Options instance from the raw configuration map and fills in
// defaults for every value that was not configured explicitly.
func New(m map[string]interface{}) (*Options, error) {
	o := &Options{}
	if err := mapstructure.Decode(m, o); err != nil {
		return nil, errors.Wrap(err, "error decoding conf")
	}

	o.GatewayAddr = sharedconf.GetGatewaySVC(o.GatewayAddr)

	// resolve the TLS mode for the permissions client, falling back to the
	// shared gRPC client options when it is not configured explicitly
	tlsMode := o.PermissionsClientTLSMode
	if tlsMode == "" {
		tlsMode = sharedconf.GRPCClientOptions().TLSMode
	}
	mode, err := pool.StringToTLSMode(tlsMode)
	if err != nil {
		return nil, err
	}
	o.PermTLSMode = mode

	// normalize paths: the user layout must carry no leading or trailing "/"
	// and the root should never end in "/" unless it is the root itself
	o.UserLayout = strings.Trim(o.UserLayout, "/")
	o.Root = filepath.Clean(o.Root)

	// apply defaults
	if o.MetadataBackend == "" {
		o.MetadataBackend = "xattrs"
	}
	if o.PersonalSpaceAliasTemplate == "" {
		o.PersonalSpaceAliasTemplate = "{{.SpaceType}}/{{.User.Username}}"
	}
	if o.GeneralSpaceAliasTemplate == "" {
		o.GeneralSpaceAliasTemplate = "{{.SpaceType}}/{{.SpaceName | replace \" \" \"-\" | lower}}"
	}
	if o.MaxConcurrency <= 0 {
		o.MaxConcurrency = 5
	}
	if o.Propagator == "" {
		o.Propagator = "sync"
	}
	if o.AsyncPropagatorOptions.PropagationDelay == 0 {
		o.AsyncPropagatorOptions.PropagationDelay = 5 * time.Second
	}
	if o.UploadDirectory == "" {
		o.UploadDirectory = filepath.Join(o.Root, "uploads")
	}

	return o, nil
}

View File

@@ -0,0 +1,169 @@
package permissions
import (
"context"
userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
cs3permissions "github.com/cs3org/go-cs3apis/cs3/permissions/v1beta1"
v1beta11 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx"
"github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/utils"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
"google.golang.org/grpc"
)
var (
	// tracer is the package-level tracer used for permission spans
	tracer trace.Tracer
)

func init() {
	// NOTE(review): the tracer name still references the upstream cs3org
	// module path while this module lives under opencloud-eu — confirm
	// whether this is intentional.
	tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/permissions")
}

const (
	// space types recognized by the quota permission checks
	_spaceTypePersonal = "personal"
	_spaceTypeProject  = "project"
)
// PermissionsChecker defines an interface for checking permissions on a Node
type PermissionsChecker interface {
	AssemblePermissions(ctx context.Context, n *node.Node) (ap *provider.ResourcePermissions, err error)
	AssembleTrashPermissions(ctx context.Context, n *node.Node) (ap *provider.ResourcePermissions, err error)
}

// CS3PermissionsClient defines an interface for checking permissions against the CS3 permissions service
type CS3PermissionsClient interface {
	CheckPermission(ctx context.Context, in *cs3permissions.CheckPermissionRequest, opts ...grpc.CallOption) (*cs3permissions.CheckPermissionResponse, error)
}
// Permissions manages permissions
type Permissions struct {
	item                PermissionsChecker                                   // handles item permissions
	permissionsSelector pool.Selectable[cs3permissions.PermissionsAPIClient] // handles space permissions
}

// NewPermissions returns a new Permissions instance
func NewPermissions(item PermissionsChecker, permissionsSelector pool.Selectable[cs3permissions.PermissionsAPIClient]) Permissions {
	return Permissions{item: item, permissionsSelector: permissionsSelector}
}
// AssemblePermissions is used to assemble file permissions.
// It delegates to the item permission checker inside a tracing span.
func (p Permissions) AssemblePermissions(ctx context.Context, n *node.Node) (*provider.ResourcePermissions, error) {
	ctx, span := tracer.Start(ctx, "AssemblePermissions")
	defer span.End()
	return p.item.AssemblePermissions(ctx, n)
}
// AssembleTrashPermissions is used to assemble file permissions for trash
// operations. It delegates to the item permission checker inside a tracing
// span.
func (p Permissions) AssembleTrashPermissions(ctx context.Context, n *node.Node) (*provider.ResourcePermissions, error) {
	// propagate the span context (like AssemblePermissions does) so that
	// spans created further down the call chain are parented correctly
	ctx, span := tracer.Start(ctx, "AssembleTrashPermissions")
	defer span.End()
	return p.item.AssembleTrashPermissions(ctx, n)
}
// CreateSpace returns true when the user is allowed to create the space,
// i.e. holds the global "Drives.Create" permission for the space reference.
func (p Permissions) CreateSpace(ctx context.Context, spaceid string) bool {
	return p.checkPermission(ctx, "Drives.Create", spaceRef(spaceid))
}
// SetSpaceQuota returns true when the user is allowed to change the spaces quota
func (p Permissions) SetSpaceQuota(ctx context.Context, spaceid string, spaceType string) bool {
	switch spaceType {
	case _spaceTypePersonal:
		return p.checkPermission(ctx, "Drives.ReadWritePersonalQuota", spaceRef(spaceid))
	case _spaceTypeProject:
		return p.checkPermission(ctx, "Drives.ReadWriteProjectQuota", spaceRef(spaceid))
	default:
		// only quotas of personal and project space may be changed
		return false
	}
}
// ManageSpaceProperties returns true when the user is allowed to change space properties (name/subtitle)
func (p Permissions) ManageSpaceProperties(ctx context.Context, spaceid string) bool {
	return p.checkPermission(ctx, "Drives.ReadWrite", spaceRef(spaceid))
}

// SpaceAbility returns true when the user is allowed to enable/disable the space
func (p Permissions) SpaceAbility(ctx context.Context, spaceid string) bool {
	return p.checkPermission(ctx, "Drives.ReadWriteEnabled", spaceRef(spaceid))
}

// ListAllSpaces returns true when the user is allowed to list all spaces.
// This check is global, i.e. not scoped to a specific space.
func (p Permissions) ListAllSpaces(ctx context.Context) bool {
	return p.checkPermission(ctx, "Drives.List", nil)
}
// ListSpacesOfUser returns true when the user is allowed to list the spaces of the given user
func (p Permissions) ListSpacesOfUser(ctx context.Context, userid *userv1beta1.UserId) bool {
	if userid == nil {
		// there is no filter
		return true // TODO: is `true` actually correct here? Shouldn't we check for ListAllSpaces too?
	}
	if utils.UserIDEqual(ctxpkg.ContextMustGetUser(ctx).GetId(), userid) {
		// users may always list their own spaces
		return true
	}
	return p.ListAllSpaces(ctx)
}
// DeleteAllSpaces returns true when the user is allowed to delete all (project) spaces
func (p Permissions) DeleteAllSpaces(ctx context.Context) bool {
	return p.checkPermission(ctx, "Drives.DeleteProject", nil)
}

// DeleteAllHomeSpaces returns true when the user is allowed to delete all home (personal) spaces
func (p Permissions) DeleteAllHomeSpaces(ctx context.Context) bool {
	return p.checkPermission(ctx, "Drives.DeletePersonal", nil)
}
// checkPermission checks whether the user in the context holds the named
// global permission (optionally scoped to a space reference) by asking the
// CS3 permissions service. Any error — selecting a client or performing the
// check — is treated as "permission denied".
func (p Permissions) checkPermission(ctx context.Context, perm string, ref *provider.Reference) bool {
	permissionsClient, err := p.permissionsSelector.Next()
	if err != nil {
		return false
	}

	user := ctxpkg.ContextMustGetUser(ctx)
	checkRes, err := permissionsClient.CheckPermission(ctx, &cs3permissions.CheckPermissionRequest{
		Permission: perm,
		SubjectRef: &cs3permissions.SubjectReference{
			Spec: &cs3permissions.SubjectReference_UserId{
				UserId: user.Id,
			},
		},
		Ref: ref,
	})
	if err != nil {
		return false
	}

	// use the generated nil-safe getters: a response without a Status would
	// otherwise panic here
	return checkRes.GetStatus().GetCode() == v1beta11.Code_CODE_OK
}
// IsManager returns true if the given resource permissions evaluate the user as "manager".
// The generated nil-safe getter makes a nil permission set evaluate to false
// instead of panicking.
func IsManager(rp *provider.ResourcePermissions) bool {
	return rp.GetRemoveGrant()
}

// IsEditor returns true if the given resource permissions evaluate the user as "editor"
func IsEditor(rp *provider.ResourcePermissions) bool {
	return rp.GetInitiateFileUpload()
}

// IsViewer returns true if the given resource permissions evaluate the user as "viewer"
func IsViewer(rp *provider.ResourcePermissions) bool {
	return rp.GetStat()
}
// spaceRef builds a Reference that addresses the root of the given space.
func spaceRef(spaceid string) *provider.Reference {
	rid := &provider.ResourceId{
		StorageId: spaceid,
		// OpaqueId is the same, no need to transfer it
	}
	return &provider.Reference{ResourceId: rid}
}

View File

@@ -0,0 +1,460 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package decomposedfs
import (
"context"
iofs "io/fs"
"os"
"path/filepath"
"strings"
"time"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1"
"github.com/opencloud-eu/reva/v2/pkg/appctx"
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/opencloud-eu/reva/v2/pkg/storage"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storagespace"
)
// DecomposedfsTrashbin implements trashbin functionality on top of a Decomposedfs.
type DecomposedfsTrashbin struct {
	// fs is set by Setup and must be a *Decomposedfs
	fs *Decomposedfs
}
// Setup connects the trashbin to the given filesystem.
// It returns an error if fs is not a *Decomposedfs.
func (tb *DecomposedfsTrashbin) Setup(fs storage.FS) error {
	// single comma-ok type assertion instead of a check followed by a
	// second, redundant assertion
	dfs, ok := fs.(*Decomposedfs)
	if !ok {
		return errors.New("invalid filesystem")
	}
	tb.fs = dfs
	return nil
}
// Recycle items are stored inside the node folder and start with the uuid of the deleted node.
// The `.T.` indicates it is a trash item and what follows is the timestamp of the deletion.
// The deleted file is kept in the same location/dir as the original node. This prevents deletes
// from triggering cross storage moves when the trash is accidentally stored on another partition,
// because the admin mounted a different partition there.
// For an efficient listing of deleted nodes the ocis storage driver maintains a 'trash' folder
// with symlinks to trash files for every storagespace.
// ListRecycle returns the list of available recycle items
// ref -> the space (= resourceid), key -> deleted node id, relativePath = relative to key
// ListRecycle returns the trash items of the space referenced by ref.
// With empty key and relativePath the trash root of the space is listed.
// A key without a relativePath yields the single deleted item itself; with a
// relativePath the children of the deleted folder are listed by following the
// trash symlink.
func (tb *DecomposedfsTrashbin) ListRecycle(ctx context.Context, ref *provider.Reference, key, relativePath string) ([]*provider.RecycleItem, error) {
	_, span := tracer.Start(ctx, "ListRecycle")
	defer span.End()

	if ref == nil || ref.ResourceId == nil || ref.ResourceId.OpaqueId == "" {
		return nil, errtypes.BadRequest("spaceid required")
	}
	if key == "" && relativePath != "" {
		return nil, errtypes.BadRequest("key is required when navigating with a path")
	}
	spaceID := ref.ResourceId.OpaqueId

	sublog := appctx.GetLogger(ctx).With().Str("spaceid", spaceID).Str("key", key).Str("relative_path", relativePath).Logger()

	// check permissions
	trashnode, err := tb.fs.lu.NodeFromSpaceID(ctx, spaceID)
	if err != nil {
		return nil, err
	}
	rp, err := tb.fs.p.AssembleTrashPermissions(ctx, trashnode)
	switch {
	case err != nil:
		return nil, err
	case !rp.ListRecycle:
		// hide the existence of the trash from users without Stat permission
		if rp.Stat {
			return nil, errtypes.PermissionDenied(key)
		}
		return nil, errtypes.NotFound(key)
	}

	if key == "" && relativePath == "" {
		return tb.listTrashRoot(ctx, spaceID)
	}

	// build a list of trash items relative to the given trash root and path
	items := make([]*provider.RecycleItem, 0)

	trashRootPath := filepath.Join(tb.getRecycleRoot(spaceID), lookup.Pathify(key, 4, 2))
	originalPath, _, timeSuffix, err := readTrashLink(trashRootPath)
	if err != nil {
		sublog.Error().Err(err).Str("trashRoot", trashRootPath).Msg("error reading trash link")
		return nil, err
	}

	origin := ""
	attrs, err := tb.fs.lu.MetadataBackend().All(ctx, originalPath)
	if err != nil {
		return items, err
	}
	// lookup origin path in extended attributes
	if attrBytes, ok := attrs[prefixes.TrashOriginAttr]; ok {
		origin = string(attrBytes)
	} else {
		// FIX: this previously returned `nil, err` with err guaranteed nil
		// (the All call above succeeded), silently yielding an empty result.
		// Return an explicit error instead.
		sublog.Error().Str("spaceid", spaceID).Msg("could not read origin path")
		return nil, errtypes.InternalError("trash item has no origin path attribute")
	}

	// all deleted items have the same deletion time
	var deletionTime *types.Timestamp
	if parsed, err := time.Parse(time.RFC3339Nano, timeSuffix); err == nil {
		deletionTime = &types.Timestamp{
			Seconds: uint64(parsed.Unix()),
			// TODO nanos
		}
	} else {
		sublog.Error().Err(err).Msg("could not parse time format, ignoring")
	}

	var size int64
	if relativePath == "" {
		// this is the case when we want to directly list a file in the trashbin
		nodeType := tb.fs.lu.TypeFromPath(ctx, originalPath)
		switch nodeType {
		case provider.ResourceType_RESOURCE_TYPE_FILE:
			_, size, err = tb.fs.lu.ReadBlobIDAndSizeAttr(ctx, originalPath, nil)
			if err != nil {
				return items, err
			}
		case provider.ResourceType_RESOURCE_TYPE_CONTAINER:
			size, err = tb.fs.lu.MetadataBackend().GetInt64(ctx, originalPath, prefixes.TreesizeAttr)
			if err != nil {
				return items, err
			}
		}

		item := &provider.RecycleItem{
			Type:         tb.fs.lu.TypeFromPath(ctx, originalPath),
			Size:         uint64(size),
			Key:          filepath.Join(key, relativePath),
			DeletionTime: deletionTime,
			Ref: &provider.Reference{
				Path: filepath.Join(origin, relativePath),
			},
		}
		items = append(items, item)
		return items, nil
	}

	// we have to read the names and stat the path to follow the symlinks
	childrenPath := filepath.Join(originalPath, relativePath)
	childrenDir, err := os.Open(childrenPath)
	if err != nil {
		return nil, err
	}
	// FIX: the directory handle was previously leaked
	defer childrenDir.Close()
	names, err := childrenDir.Readdirnames(0)
	if err != nil {
		return nil, err
	}
	for _, name := range names {
		resolvedChildPath, err := filepath.EvalSymlinks(filepath.Join(childrenPath, name))
		if err != nil {
			sublog.Error().Err(err).Str("name", name).Msg("could not resolve symlink, skipping")
			continue
		}

		// reset size for every child
		size = 0
		nodeType := tb.fs.lu.TypeFromPath(ctx, resolvedChildPath)
		switch nodeType {
		case provider.ResourceType_RESOURCE_TYPE_FILE:
			_, size, err = tb.fs.lu.ReadBlobIDAndSizeAttr(ctx, resolvedChildPath, nil)
			if err != nil {
				sublog.Error().Err(err).Str("name", name).Msg("invalid blob size, skipping")
				continue
			}
		case provider.ResourceType_RESOURCE_TYPE_CONTAINER:
			size, err = tb.fs.lu.MetadataBackend().GetInt64(ctx, resolvedChildPath, prefixes.TreesizeAttr)
			if err != nil {
				sublog.Error().Err(err).Str("name", name).Msg("invalid tree size, skipping")
				continue
			}
		case provider.ResourceType_RESOURCE_TYPE_INVALID:
			// err is nil here, do not log it
			sublog.Error().Str("name", name).Str("resolvedChildPath", resolvedChildPath).Msg("invalid node type, skipping")
			continue
		}

		item := &provider.RecycleItem{
			Type:         nodeType,
			Size:         uint64(size),
			Key:          filepath.Join(key, relativePath, name),
			DeletionTime: deletionTime,
			Ref: &provider.Reference{
				Path: filepath.Join(origin, relativePath, name),
			},
		}
		items = append(items, item)
	}
	return items, nil
}
// readTrashLink resolves a trash symlink and returns the resolved node path,
// the node id and the deletion timestamp suffix encoded in the link target.
//
// A link target looks like
// ../../../../../nodes/e5/6c/75/a8/-d235-4cbb-8b4e-48b6fd0f2094.T.2022-02-16T14:38:11.769917408Z
func readTrashLink(path string) (string, string, string, error) {
	link, err := os.Readlink(path)
	if err != nil {
		return "", "", "", err
	}
	resolved, err := filepath.EvalSymlinks(path)
	if err != nil {
		return "", "", "", err
	}
	// TODO use filepath.Separator to support windows
	link = strings.ReplaceAll(link, "/", "")
	// ..........nodese56c75a8-d235-4cbb-8b4e-48b6fd0f2094.T.2022-02-16T14:38:11.769917408Z
	// FIX: guard the length before slicing; a short link target previously
	// caused an index-out-of-range panic instead of an error.
	if len(link) < 54 || link[0:15] != "..........nodes" || link[51:54] != node.TrashIDDelimiter {
		return "", "", "", errtypes.InternalError("malformed trash link")
	}
	return resolved, link[15:51], link[54:], nil
}
// listTrashRoot lists all top-level trash items of a space. The pathified
// trash links are globbed and read concurrently by a bounded worker pool
// (tb.fs.o.MaxConcurrency workers at most).
func (tb *DecomposedfsTrashbin) listTrashRoot(ctx context.Context, spaceID string) ([]*provider.RecycleItem, error) {
	log := appctx.GetLogger(ctx)
	trashRoot := tb.getRecycleRoot(spaceID)
	items := []*provider.RecycleItem{}
	subTrees, err := filepath.Glob(trashRoot + "/*")
	if err != nil {
		return nil, err
	}

	numWorkers := tb.fs.o.MaxConcurrency
	if len(subTrees) < numWorkers {
		numWorkers = len(subTrees)
	}

	work := make(chan string, len(subTrees))
	results := make(chan *provider.RecycleItem, len(subTrees))

	g, ctx := errgroup.WithContext(ctx)

	// Distribute work
	g.Go(func() error {
		defer close(work)
		for _, itemPath := range subTrees {
			select {
			case work <- itemPath:
			case <-ctx.Done():
				return ctx.Err()
			}
		}
		return nil
	})

	// Spawn workers that'll concurrently work the queue
	for i := 0; i < numWorkers; i++ {
		g.Go(func() error {
			for subTree := range work {
				matches, err := filepath.Glob(subTree + "/*/*/*/*")
				if err != nil {
					return err
				}
				for _, itemPath := range matches {
					// TODO can we encode this in the path instead of reading the link?
					nodePath, nodeID, timeSuffix, err := readTrashLink(itemPath)
					if err != nil {
						log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Msg("error reading trash link, skipping")
						continue
					}

					md, err := os.Stat(nodePath)
					if err != nil {
						log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("node_path", nodePath).Msg("could not stat trash item, skipping")
						continue
					}

					attrs, err := tb.fs.lu.MetadataBackend().All(ctx, nodePath)
					if err != nil {
						log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("node_path", nodePath).Msg("could not get extended attributes, skipping")
						continue
					}

					nodeType := tb.fs.lu.TypeFromPath(ctx, nodePath)
					if nodeType == provider.ResourceType_RESOURCE_TYPE_INVALID {
						// err is nil here, do not log it
						log.Error().Str("trashRoot", trashRoot).Str("item", itemPath).Str("node_path", nodePath).Msg("invalid node type, skipping")
						continue
					}

					item := &provider.RecycleItem{
						Type: nodeType,
						Size: uint64(md.Size()),
						Key:  nodeID,
					}
					if deletionTime, err := time.Parse(time.RFC3339Nano, timeSuffix); err == nil {
						item.DeletionTime = &types.Timestamp{
							Seconds: uint64(deletionTime.Unix()),
							// TODO nanos
						}
					} else {
						log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("spaceid", spaceID).Str("nodeid", nodeID).Str("dtime", timeSuffix).Msg("could not parse time format, ignoring")
					}

					// lookup origin path in extended attributes
					if attr, ok := attrs[prefixes.TrashOriginAttr]; ok {
						item.Ref = &provider.Reference{Path: string(attr)}
					} else {
						log.Error().Str("trashRoot", trashRoot).Str("item", itemPath).Str("spaceid", spaceID).Str("nodeid", nodeID).Str("dtime", timeSuffix).Msg("could not read origin path")
					}

					select {
					case results <- item:
					case <-ctx.Done():
						return ctx.Err()
					}
				}
			}
			return nil
		})
	}

	// Wait for things to settle down, then close results chan.
	// waitErr is written before close(results) and read only after the range
	// over results terminates, so the channel close synchronizes the access.
	var waitErr error
	go func() {
		waitErr = g.Wait()
		close(results)
	}()

	// Collect results
	for ri := range results {
		items = append(items, ri)
	}

	// FIX: the group error used to be discarded ("checked later" but never
	// was), silently returning partial results on worker failure/cancellation.
	if waitErr != nil {
		return nil, waitErr
	}
	return items, nil
}
// RestoreRecycleItem restores the specified item. When restoreRef is set the
// item is restored to that location, otherwise to its original path.
func (tb *DecomposedfsTrashbin) RestoreRecycleItem(ctx context.Context, ref *provider.Reference, key, relativePath string, restoreRef *provider.Reference) error {
	_, span := tracer.Start(ctx, "RestoreRecycleItem")
	defer span.End()
	// FIX: also guard against a nil ResourceId - the space id is dereferenced
	// below and a bare nil check did not prevent the panic.
	if ref == nil || ref.ResourceId == nil || ref.ResourceId.SpaceId == "" {
		return errtypes.BadRequest("missing reference, needs a space id")
	}

	var targetNode *node.Node
	if restoreRef != nil {
		tn, err := tb.fs.lu.NodeFromResource(ctx, restoreRef)
		if err != nil {
			return err
		}
		targetNode = tn
	}

	rn, parent, restoreFunc, err := tb.fs.tp.RestoreRecycleItemFunc(ctx, ref.ResourceId.SpaceId, key, relativePath, targetNode)
	if err != nil {
		return err
	}

	// check permissions of deleted node
	rp, err := tb.fs.p.AssembleTrashPermissions(ctx, rn)
	switch {
	case err != nil:
		return err
	case !rp.RestoreRecycleItem:
		// hide the existence of the item from users without Stat permission
		if rp.Stat {
			return errtypes.PermissionDenied(key)
		}
		return errtypes.NotFound(key)
	}

	// Set space owner in context
	storagespace.ContextSendSpaceOwnerID(ctx, rn.SpaceOwnerOrManager(ctx))

	// check we can write to the parent of the restore reference
	pp, err := tb.fs.p.AssemblePermissions(ctx, parent)
	switch {
	case err != nil:
		return err
	case !pp.InitiateFileUpload:
		// share receiver cannot restore to a shared resource to which she does not have write permissions.
		if rp.Stat {
			return errtypes.PermissionDenied(key)
		}
		return errtypes.NotFound(key)
	}

	// Run the restore func
	return restoreFunc()
}
// PurgeRecycleItem purges the specified item, all its children and all their revisions
func (tb *DecomposedfsTrashbin) PurgeRecycleItem(ctx context.Context, ref *provider.Reference, key, relativePath string) error {
	_, span := tracer.Start(ctx, "PurgeRecycleItem")
	defer span.End()
	// FIX: also guard against a nil ResourceId - the opaque id is dereferenced
	// below and a bare nil check did not prevent the panic.
	if ref == nil || ref.ResourceId == nil || ref.ResourceId.OpaqueId == "" {
		return errtypes.BadRequest("missing reference, needs a space id")
	}

	rn, purgeFunc, err := tb.fs.tp.PurgeRecycleItemFunc(ctx, ref.ResourceId.OpaqueId, key, relativePath)
	if err != nil {
		if errors.Is(err, iofs.ErrNotExist) {
			return errtypes.NotFound(key)
		}
		return err
	}

	// check permissions of deleted node
	rp, err := tb.fs.p.AssembleTrashPermissions(ctx, rn)
	switch {
	case err != nil:
		return err
	case !rp.PurgeRecycle:
		// hide the existence of the item from users without Stat permission
		if rp.Stat {
			return errtypes.PermissionDenied(key)
		}
		return errtypes.NotFound(key)
	}

	// Run the purge func
	return purgeFunc()
}
// EmptyRecycle removes every item from the trash of the given space and then
// deletes the trash directory itself.
func (tb *DecomposedfsTrashbin) EmptyRecycle(ctx context.Context, ref *provider.Reference) error {
	_, span := tracer.Start(ctx, "EmptyRecycle")
	defer span.End()
	if ref == nil || ref.ResourceId == nil || ref.ResourceId.OpaqueId == "" {
		return errtypes.BadRequest("spaceid must be set")
	}

	trashItems, err := tb.ListRecycle(ctx, ref, "", "")
	if err != nil {
		return err
	}
	for _, trashItem := range trashItems {
		if purgeErr := tb.PurgeRecycleItem(ctx, ref, trashItem.Key, ""); purgeErr != nil {
			return purgeErr
		}
	}

	// TODO what permission should we check? we could check the root node of the user? or the owner permissions on his home root node?
	// The current impl will wipe your own trash. or when no user provided the trash of 'root'
	return os.RemoveAll(tb.getRecycleRoot(ref.ResourceId.SpaceId))
}
// getRecycleRoot returns the filesystem path of the trash directory of a space.
func (tb *DecomposedfsTrashbin) getRecycleRoot(spaceID string) string {
	spaceRoot := tb.fs.getSpaceRoot(spaceID)
	return filepath.Join(spaceRoot, "trash")
}

View File

@@ -0,0 +1,391 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package decomposedfs
import (
"context"
"io"
"os"
"path/filepath"
"strings"
"time"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
"github.com/pkg/errors"
"github.com/rogpeppe/go-internal/lockedfile"
"github.com/opencloud-eu/reva/v2/pkg/appctx"
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storagespace"
"github.com/opencloud-eu/reva/v2/pkg/utils"
)
// Revision entries are stored inside the node folder and start with the same uuid as the current version.
// The `.REV.` indicates it is a revision and what follows is a timestamp, so multiple versions
// can be kept in the same location as the current file content. This prevents new fileuploads
// to trigger cross storage moves when revisions accidentally are stored on another partition,
// because the admin mounted a different partition there.
// We can add a background process to move old revisions to a slower storage
// and replace the revision file with a symbolic link in the future, if necessary.
// ListRevisions lists the revisions of the given resource.
// Revision entries live next to the current node content and are discovered by
// globbing for the node path plus the revision delimiter; metadata files and
// .mlock files matching the glob are skipped.
func (fs *Decomposedfs) ListRevisions(ctx context.Context, ref *provider.Reference) (revisions []*provider.FileVersion, err error) {
	_, span := tracer.Start(ctx, "ListRevisions")
	defer span.End()
	var n *node.Node
	if n, err = fs.lu.NodeFromResource(ctx, ref); err != nil {
		return
	}
	if !n.Exists {
		err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name))
		return
	}

	rp, err := fs.p.AssemblePermissions(ctx, n)
	switch {
	case err != nil:
		return nil, err
	case !rp.ListFileVersions:
		// hide the existence of the resource from users without Stat permission
		f, _ := storagespace.FormatReference(ref)
		if rp.Stat {
			return nil, errtypes.PermissionDenied(f)
		}
		return nil, errtypes.NotFound(f)
	}

	revisions = []*provider.FileVersion{}
	np := n.InternalPath()
	if items, err := filepath.Glob(np + node.RevisionIDDelimiter + "*"); err == nil {
		for i := range items {
			if fs.lu.MetadataBackend().IsMetaFile(items[i]) || strings.HasSuffix(items[i], ".mlock") {
				continue
			}

			if fi, err := os.Stat(items[i]); err == nil {
				parts := strings.SplitN(fi.Name(), node.RevisionIDDelimiter, 2)
				if len(parts) != 2 {
					// FIX: err is nil in this branch (Stat succeeded), do not
					// log it - the malformed name is the actual problem.
					appctx.GetLogger(ctx).Error().Str("name", fi.Name()).Msg("invalid revision name, skipping")
					continue
				}
				mtime := fi.ModTime()
				rev := &provider.FileVersion{
					Key:   n.ID + node.RevisionIDDelimiter + parts[1],
					Mtime: uint64(mtime.Unix()),
				}
				_, blobSize, err := fs.lu.ReadBlobIDAndSizeAttr(ctx, items[i], nil)
				if err != nil {
					appctx.GetLogger(ctx).Error().Err(err).Str("name", fi.Name()).Msg("error reading blobsize xattr, using 0")
				}
				rev.Size = uint64(blobSize)
				etag, err := node.CalculateEtag(n.ID, mtime)
				if err != nil {
					return nil, errors.Wrapf(err, "error calculating etag")
				}
				rev.Etag = etag
				revisions = append(revisions, rev)
			}
		}
	}
	// maybe we need to sort the list by key
	/*
		sort.Slice(revisions, func(i, j int) bool {
			return revisions[i].Key > revisions[j].Key
		})
	*/
	return
}
// DownloadRevision returns the resource info of the specified revision and,
// when openReaderFunc approves, a reader for its blob.
// FIXME the CS3 api should explicitly allow initiating revision and trash download, a related issue is https://github.com/cs3org/reva/issues/1813
func (fs *Decomposedfs) DownloadRevision(ctx context.Context, ref *provider.Reference, revisionKey string, openReaderFunc func(md *provider.ResourceInfo) bool) (*provider.ResourceInfo, io.ReadCloser, error) {
	_, span := tracer.Start(ctx, "DownloadRevision")
	defer span.End()
	log := appctx.GetLogger(ctx)

	// FIX: guard against a nil ResourceId before dereferencing the space id
	if ref == nil || ref.ResourceId == nil {
		return nil, nil, errtypes.BadRequest("missing reference, needs a space id")
	}

	// verify revision key format: "<nodeid>.REV.<timestamp>"
	kp := strings.SplitN(revisionKey, node.RevisionIDDelimiter, 2)
	if len(kp) != 2 {
		log.Error().Str("revisionKey", revisionKey).Msg("malformed revisionKey")
		return nil, nil, errtypes.NotFound(revisionKey)
	}
	log.Debug().Str("revisionKey", revisionKey).Msg("DownloadRevision")

	spaceID := ref.ResourceId.SpaceId
	// check if the node is available and has not been deleted
	n, err := node.ReadNode(ctx, fs.lu, spaceID, kp[0], false, nil, false)
	if err != nil {
		return nil, nil, err
	}
	if !n.Exists {
		err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name))
		return nil, nil, err
	}

	rp, err := fs.p.AssemblePermissions(ctx, n)
	switch {
	case err != nil:
		return nil, nil, err
	case !rp.ListFileVersions || !rp.InitiateFileDownload: // TODO add explicit permission in the CS3 api?
		f, _ := storagespace.FormatReference(ref)
		if rp.Stat {
			return nil, nil, errtypes.PermissionDenied(f)
		}
		return nil, nil, errtypes.NotFound(f)
	}

	contentPath := fs.lu.InternalPath(spaceID, revisionKey)

	blobid, blobsize, err := fs.lu.ReadBlobIDAndSizeAttr(ctx, contentPath, nil)
	if err != nil {
		return nil, nil, errors.Wrapf(err, "Decomposedfs: could not read blob id and size for revision '%s' of node '%s'", kp[1], n.ID)
	}

	revisionNode := node.Node{SpaceID: spaceID, BlobID: blobid, Blobsize: blobsize} // blobsize is needed for the s3ng blobstore

	ri, err := n.AsResourceInfo(ctx, rp, nil, []string{"size", "mimetype", "etag"}, true)
	if err != nil {
		return nil, nil, err
	}
	// update resource info with revision data
	mtime, err := time.Parse(time.RFC3339Nano, kp[1])
	if err != nil {
		return nil, nil, errors.Wrapf(err, "Decomposedfs: could not parse mtime for revision '%s' of node '%s'", kp[1], n.ID)
	}
	ri.Size = uint64(blobsize)
	ri.Mtime = utils.TimeToTS(mtime)
	ri.Etag, err = node.CalculateEtag(n.ID, mtime)
	if err != nil {
		return nil, nil, errors.Wrapf(err, "error calculating etag for revision '%s' of node '%s'", kp[1], n.ID)
	}

	var reader io.ReadCloser
	if openReaderFunc(ri) {
		reader, err = fs.tp.ReadBlob(&revisionNode)
		if err != nil {
			// FIX: the arguments were swapped (node id reported as revision
			// and vice versa); pass them in the order the format expects.
			return nil, nil, errors.Wrapf(err, "Decomposedfs: could not download blob of revision '%s' for node '%s'", revisionKey, n.ID)
		}
	}
	return ri, reader, nil
}
// RestoreRevision restores the specified revision of the resource
//
// The restore is done by first saving the current content as a new revision
// (named after the node's current mtime), then copying the blob metadata of
// the requested revision onto the node, and finally removing the old revision
// entry. The size difference is propagated up the tree.
// returnErr is named so the deferred cleanup below can inspect it.
func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Reference, revisionKey string) (returnErr error) {
	_, span := tracer.Start(ctx, "RestoreRevision")
	defer span.End()
	log := appctx.GetLogger(ctx)

	// verify revision key format: "<nodeid>.REV.<timestamp>"
	kp := strings.SplitN(revisionKey, node.RevisionIDDelimiter, 2)
	if len(kp) != 2 {
		log.Error().Str("revisionKey", revisionKey).Msg("malformed revisionKey")
		return errtypes.NotFound(revisionKey)
	}
	spaceID := ref.ResourceId.SpaceId
	// check if the node is available and has not been deleted
	n, err := node.ReadNode(ctx, fs.lu, spaceID, kp[0], false, nil, false)
	if err != nil {
		return err
	}
	if !n.Exists {
		err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name))
		return err
	}

	rp, err := fs.p.AssemblePermissions(ctx, n)
	switch {
	case err != nil:
		return err
	case !rp.RestoreFileVersion:
		// hide the existence of the resource from users without Stat permission
		f, _ := storagespace.FormatReference(ref)
		if rp.Stat {
			return errtypes.PermissionDenied(f)
		}
		return errtypes.NotFound(f)
	}

	// Set space owner in context
	storagespace.ContextSendSpaceOwnerID(ctx, n.SpaceOwnerOrManager(ctx))

	// check lock
	if err := n.CheckLock(ctx); err != nil {
		return err
	}

	// write lock node before copying metadata
	f, err := lockedfile.OpenFile(fs.lu.MetadataBackend().LockfilePath(n.InternalPath()), os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		return err
	}
	defer func() {
		_ = f.Close()
		_ = os.Remove(fs.lu.MetadataBackend().LockfilePath(n.InternalPath()))
	}()

	// move current version to new revision
	nodePath := fs.lu.InternalPath(spaceID, kp[0])
	mtime, err := n.GetMTime(ctx)
	if err != nil {
		log.Error().Err(err).Interface("ref", ref).Str("originalnode", kp[0]).Str("revisionKey", revisionKey).Msg("cannot read mtime")
		return err
	}

	// revisions are stored alongside the actual file, so a rename can be efficient and does not cross storage / partition boundaries
	newRevisionPath := fs.lu.InternalPath(spaceID, kp[0]+node.RevisionIDDelimiter+mtime.UTC().Format(time.RFC3339Nano))

	// touch new revision
	if _, err := os.Create(newRevisionPath); err != nil {
		return err
	}
	// roll back the new revision file and its metadata if anything below fails
	defer func() {
		if returnErr != nil {
			if err := os.Remove(newRevisionPath); err != nil {
				log.Error().Err(err).Str("revision", filepath.Base(newRevisionPath)).Msg("could not clean up revision node")
			}
			if err := fs.lu.MetadataBackend().Purge(ctx, newRevisionPath); err != nil {
				log.Error().Err(err).Str("revision", filepath.Base(newRevisionPath)).Msg("could not clean up revision node")
			}
		}
	}()

	// copy blob metadata from node to new revision node
	err = fs.lu.CopyMetadataWithSourceLock(ctx, nodePath, newRevisionPath, func(attributeName string, value []byte) (newValue []byte, copy bool) {
		return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || // for checksums
			attributeName == prefixes.TypeAttr ||
			attributeName == prefixes.BlobIDAttr ||
			attributeName == prefixes.BlobsizeAttr ||
			attributeName == prefixes.MTimeAttr // FIXME somewhere I mix up the revision time and the mtime, causing the restore to overwrite the other existing revisien
	}, f, true)
	if err != nil {
		return errtypes.InternalError("failed to copy blob xattrs to version node: " + err.Error())
	}

	// remember mtime from node as new revision mtime
	if err = os.Chtimes(newRevisionPath, mtime, mtime); err != nil {
		return errtypes.InternalError("failed to change mtime of version node")
	}

	// update blob id in node

	// copy blob metadata from restored revision to node
	restoredRevisionPath := fs.lu.InternalPath(spaceID, revisionKey)
	err = fs.lu.CopyMetadata(ctx, restoredRevisionPath, nodePath, func(attributeName string, value []byte) (newValue []byte, copy bool) {
		return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) ||
			attributeName == prefixes.TypeAttr ||
			attributeName == prefixes.BlobIDAttr ||
			attributeName == prefixes.BlobsizeAttr
	}, false)
	if err != nil {
		return errtypes.InternalError("failed to copy blob xattrs to old revision to node: " + err.Error())
	}

	// always set the node mtime to the current time
	err = fs.lu.MetadataBackend().SetMultiple(ctx, nodePath,
		map[string][]byte{
			prefixes.MTimeAttr: []byte(time.Now().UTC().Format(time.RFC3339Nano)),
		},
		false)
	if err != nil {
		return errtypes.InternalError("failed to set mtime attribute on node: " + err.Error())
	}

	// read the size before the revision entry is removed below
	revisionSize, err := fs.lu.MetadataBackend().GetInt64(ctx, restoredRevisionPath, prefixes.BlobsizeAttr)
	if err != nil {
		return errtypes.InternalError("failed to read blob size xattr from old revision")
	}

	// drop old revision; failures here are logged but not fatal, the restore
	// itself has already succeeded
	if err := os.Remove(restoredRevisionPath); err != nil {
		log.Warn().Err(err).Interface("ref", ref).Str("originalnode", kp[0]).Str("revisionKey", revisionKey).Msg("could not delete old revision, continuing")
	}
	if err := os.Remove(fs.lu.MetadataBackend().MetadataPath(restoredRevisionPath)); err != nil {
		log.Warn().Err(err).Interface("ref", ref).Str("originalnode", kp[0]).Str("revisionKey", revisionKey).Msg("could not delete old revision metadata, continuing")
	}
	if err := os.Remove(fs.lu.MetadataBackend().LockfilePath(restoredRevisionPath)); err != nil {
		log.Warn().Err(err).Interface("ref", ref).Str("originalnode", kp[0]).Str("revisionKey", revisionKey).Msg("could not delete old revision metadata lockfile, continuing")
	}
	if err := fs.lu.MetadataBackend().Purge(ctx, restoredRevisionPath); err != nil {
		log.Warn().Err(err).Interface("ref", ref).Str("originalnode", kp[0]).Str("revisionKey", revisionKey).Msg("could not purge old revision from cache, continuing")
	}

	// revision 5, current 10 (restore a smaller blob) -> 5-10 = -5
	// revision 10, current 5 (restore a bigger blob) -> 10-5 = +5
	sizeDiff := revisionSize - n.Blobsize

	return fs.tp.Propagate(ctx, n, sizeDiff)
}
// DeleteRevision removes the given revision of the resource, including its blob.
// NOTE(review): deletion is guarded by the RestoreFileVersion permission here;
// confirm this is intended, as the CS3 api exposes no dedicated permission.
func (fs *Decomposedfs) DeleteRevision(ctx context.Context, ref *provider.Reference, revisionKey string) error {
	_, span := tracer.Start(ctx, "DeleteRevision")
	defer span.End()

	n, err := fs.getRevisionNode(ctx, ref, revisionKey, func(rp *provider.ResourcePermissions) bool {
		return rp.RestoreFileVersion
	})
	if err != nil {
		return err
	}

	revisionPath := fs.lu.InternalPath(n.SpaceID, revisionKey)
	if err := os.RemoveAll(revisionPath); err != nil {
		return err
	}

	return fs.tp.DeleteBlob(n)
}
// getRevisionNode parses the revision key, reads the corresponding node,
// checks hasPermission against the assembled permissions and sets the space
// owner in the context. It is the shared precondition check for revision
// operations.
func (fs *Decomposedfs) getRevisionNode(ctx context.Context, ref *provider.Reference, revisionKey string, hasPermission func(*provider.ResourcePermissions) bool) (*node.Node, error) {
	_, span := tracer.Start(ctx, "getRevisionNode")
	defer span.End()
	log := appctx.GetLogger(ctx)

	// verify revision key format: "<nodeid>.REV.<timestamp>"
	kp := strings.SplitN(revisionKey, node.RevisionIDDelimiter, 2)
	if len(kp) != 2 {
		log.Error().Str("revisionKey", revisionKey).Msg("malformed revisionKey")
		return nil, errtypes.NotFound(revisionKey)
	}
	// FIX: the debug message said "DownloadRevision" - a copy-paste leftover
	// that made logs of other revision operations misleading.
	log.Debug().Str("revisionKey", revisionKey).Msg("getRevisionNode")

	spaceID := ref.ResourceId.SpaceId
	// check if the node is available and has not been deleted
	n, err := node.ReadNode(ctx, fs.lu, spaceID, kp[0], false, nil, false)
	if err != nil {
		return nil, err
	}
	if !n.Exists {
		err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name))
		return nil, err
	}

	p, err := fs.p.AssemblePermissions(ctx, n)
	switch {
	case err != nil:
		return nil, err
	case !hasPermission(p):
		return nil, errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name))
	}

	// Set space owner in context
	storagespace.ContextSendSpaceOwnerID(ctx, n.SpaceOwnerOrManager(ctx))

	return n, nil
}

View File

@@ -0,0 +1,159 @@
package spaceidindex
import (
"io"
"os"
"path/filepath"
"time"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/mtimesyncedcache"
"github.com/pkg/errors"
"github.com/rogpeppe/go-internal/lockedfile"
"github.com/shamaton/msgpack/v2"
)
// Index holds space id indexes. Each index is a msgpack file below
// <root>/<name>/; parsed contents are cached and invalidated by file mtime.
type Index struct {
	root string // base directory of all indexes
	name string // name of this index; its files live in <root>/<name>/
	cache mtimesyncedcache.Cache[string, map[string]string] // caches parsed index files keyed by "<name>:<index>"
}

// readWriteCloseSeekTruncater is the subset of file operations updateIndex
// needs to rewrite an index file in place while holding a lock.
type readWriteCloseSeekTruncater interface {
	io.ReadWriteCloser
	io.Seeker
	Truncate(int64) error
}
// New returns a new index instance rooted at root with the given name.
func New(root, name string) *Index {
	idx := &Index{
		name: name,
		root: root,
	}
	return idx
}
// Init makes sure the index directory exists so the index can be used.
func (i *Index) Init() error {
	dir := filepath.Join(i.root, i.name)
	return os.MkdirAll(dir, 0700)
}
// Load returns the content of the index with the given name,
// served from the cache when the index file's mtime is unchanged.
func (i *Index) Load(index string) (map[string]string, error) {
	indexPath := filepath.Join(i.root, i.name, index+".mpk")
	info, err := os.Stat(indexPath)
	if err != nil {
		return nil, err
	}
	cacheKey := i.name + ":" + index
	return i.readSpaceIndex(indexPath, cacheKey, info.ModTime())
}
// Add adds a single entry to an index.
// Consider calling AddAll() when adding multiple entries, as every Add call has to lock the index.
func (i *Index) Add(index, key string, value string) error {
	links := map[string]string{key: value}
	return i.updateIndex(index, links, []string{})
}
// AddAll adds all entries of m to the index in a single locked update.
func (i *Index) AddAll(index string, m map[string]string) error {
	return i.updateIndex(index, m, []string{})
}
// Remove deletes the entry with the given key from the index.
func (i *Index) Remove(index, key string) error {
	return i.updateIndex(index, map[string]string{}, []string{key})
}
// updateIndex applies addLinks and removeLinks to the given index file while
// holding an exclusive file lock for the duration of the update.
//
// FIX: the result is now a named return. The deferred Close below assigns to
// err, which previously was a plain local variable and therefore never reached
// the (unnamed) return value - close errors were silently lost.
func (i *Index) updateIndex(index string, addLinks map[string]string, removeLinks []string) (err error) {
	indexPath := filepath.Join(i.root, i.name, index+".mpk")

	// acquire writelock
	var f readWriteCloseSeekTruncater
	f, err = lockedfile.OpenFile(indexPath, os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		return errors.Wrap(err, "unable to lock index to write")
	}
	defer func() {
		rerr := f.Close()

		// if err is non nil we do not overwrite that
		if err == nil {
			err = rerr
		}
	}()

	// Read current state
	msgBytes, err := io.ReadAll(f)
	if err != nil {
		return err
	}
	links := map[string]string{}
	if len(msgBytes) > 0 {
		err = msgpack.Unmarshal(msgBytes, &links)
		if err != nil {
			return err
		}
	}

	// set new metadata
	for key, val := range addLinks {
		links[key] = val
	}
	for _, key := range removeLinks {
		delete(links, key)
	}

	// Truncate file
	_, err = f.Seek(0, io.SeekStart)
	if err != nil {
		return err
	}
	err = f.Truncate(0)
	if err != nil {
		return err
	}

	// Write new metadata to file
	d, err := msgpack.Marshal(links)
	if err != nil {
		return errors.Wrap(err, "unable to marshal index")
	}
	_, err = f.Write(d)
	if err != nil {
		return errors.Wrap(err, "unable to write index")
	}
	return nil
}
// readSpaceIndex reads and parses the index file at indexPath under a shared
// lock, caching the result under cacheKey until mtime changes.
//
// FIX: the closure results are now named. Its deferred Close assigns to err,
// which previously was a plain local and never reached the (unnamed) return
// values - close errors were silently lost.
func (i *Index) readSpaceIndex(indexPath, cacheKey string, mtime time.Time) (map[string]string, error) {
	return i.cache.LoadOrStore(cacheKey, mtime, func() (links map[string]string, err error) {
		// Acquire a read lock on the index file
		f, err := lockedfile.Open(indexPath)
		if err != nil {
			return nil, errors.Wrap(err, "unable to lock index to read")
		}
		defer func() {
			rerr := f.Close()

			// if err is non nil we do not overwrite that
			if err == nil {
				err = rerr
			}
		}()

		// Read current state
		msgBytes, err := io.ReadAll(f)
		if err != nil {
			return nil, errors.Wrap(err, "unable to read index")
		}
		links = map[string]string{}
		if len(msgBytes) > 0 {
			err = msgpack.Unmarshal(msgBytes, &links)
			if err != nil {
				return nil, errors.Wrap(err, "unable to parse index")
			}
		}
		return links, nil
	})
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,127 @@
// Copyright 2018-2024 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package timemanager
import (
"context"
"os"
"time"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
)
// Manager is responsible for managing time-related attributes of nodes in a decomposed file system.
// It is stateless; the zero value is ready to use.
type Manager struct {
}
// OverrideMtime overrides the modification time (mtime) attribute of a node with the given time
// by setting it on the given attribute set. The node argument is unused.
// FIX: receiver renamed from "m" to "dtm" for consistency with every other
// method of Manager (staticcheck ST1016).
func (dtm *Manager) OverrideMtime(ctx context.Context, _ *node.Node, attrs *node.Attributes, mtime time.Time) error {
	attrs.SetString(prefixes.MTimeAttr, mtime.UTC().Format(time.RFC3339Nano))
	return nil
}
// MTime returns the modification time (mtime) attribute of a node.
// When the attribute cannot be read it falls back to the mtime of the
// node's file on disk.
func (dtm *Manager) MTime(ctx context.Context, n *node.Node) (time.Time, error) {
	if val, err := n.XattrString(ctx, prefixes.MTimeAttr); err == nil {
		return time.Parse(time.RFC3339Nano, val)
	}
	fi, err := os.Lstat(n.InternalPath())
	if err != nil {
		return time.Time{}, err
	}
	return fi.ModTime(), nil
}
// SetMTime sets the modification time (mtime) attribute of a node,
// or removes the attribute when mtime is nil.
func (dtm *Manager) SetMTime(ctx context.Context, n *node.Node, mtime *time.Time) error {
	if mtime != nil {
		return n.SetXattrString(ctx, prefixes.MTimeAttr, mtime.UTC().Format(time.RFC3339Nano))
	}
	return n.RemoveXattr(ctx, prefixes.MTimeAttr, true)
}
// TMTime returns the tree modification time (tmtime) attribute of a node,
// falling back to the plain mtime when no tmtime attribute is set.
func (dtm *Manager) TMTime(ctx context.Context, n *node.Node) (time.Time, error) {
	val, err := n.XattrString(ctx, prefixes.TreeMTimeAttr)
	if err != nil {
		// no tmtime, use mtime
		return dtm.MTime(ctx, n)
	}
	return time.Parse(time.RFC3339Nano, val)
}
// SetTMTime sets the tree modification time (tmtime) attribute of a node,
// or removes the attribute when tmtime is nil.
func (dtm *Manager) SetTMTime(ctx context.Context, n *node.Node, tmtime *time.Time) error {
	if tmtime != nil {
		return n.SetXattrString(ctx, prefixes.TreeMTimeAttr, tmtime.UTC().Format(time.RFC3339Nano))
	}
	return n.RemoveXattr(ctx, prefixes.TreeMTimeAttr, true)
}
// CTime returns the change time (ctime) of a node. Decomposedfs does not
// track a separate ctime, so this delegates to MTime.
func (dtm *Manager) CTime(ctx context.Context, n *node.Node) (time.Time, error) {
	return dtm.MTime(ctx, n)
}
// SetCTime sets the change time (ctime) of a node. Decomposedfs does not
// track a separate ctime, so this delegates to SetMTime.
func (dtm *Manager) SetCTime(ctx context.Context, n *node.Node, mtime *time.Time) error {
	return dtm.SetMTime(ctx, n, mtime)
}
// TCTime returns the tree change time (tctime) of a node. Decomposedfs does
// not track a separate tctime, so this delegates to TMTime.
func (dtm *Manager) TCTime(ctx context.Context, n *node.Node) (time.Time, error) {
	return dtm.TMTime(ctx, n)
}
// SetTCTime sets the tree change time (tctime) of a node. Decomposedfs does
// not track a separate tctime, so this delegates to SetTMTime.
func (dtm *Manager) SetTCTime(ctx context.Context, n *node.Node, tmtime *time.Time) error {
	return dtm.SetTMTime(ctx, n, tmtime)
}
// DTime returns the deletion time (dtime) attribute of a node.
func (dtm *Manager) DTime(ctx context.Context, n *node.Node) (time.Time, error) {
	val, err := n.XattrString(ctx, prefixes.DTimeAttr)
	if err != nil {
		return time.Time{}, err
	}
	return time.Parse(time.RFC3339Nano, val)
}
// SetDTime sets the deletion time (dtime) attribute of a node,
// or removes the attribute when t is nil.
func (dtm *Manager) SetDTime(ctx context.Context, n *node.Node, t *time.Time) (err error) {
	if t != nil {
		return n.SetXattrString(ctx, prefixes.DTimeAttr, t.UTC().Format(time.RFC3339Nano))
	}
	return n.RemoveXattr(ctx, prefixes.DTimeAttr, true)
}

View File

@@ -0,0 +1,35 @@
// Copyright 2018-2024 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package trashbin
import (
"context"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
"github.com/opencloud-eu/reva/v2/pkg/storage"
)
// Trashbin is the interface a trashbin implementation for decomposedfs has to fulfill.
// The key/relativePath pair addresses a trashed item: key identifies the deleted
// root entry and relativePath a child below it — presumably; confirm against callers.
type Trashbin interface {
	// Setup initializes the trashbin for the given storage filesystem.
	Setup(storage.FS) error
	// ListRecycle lists the trashed items of the space addressed by ref.
	ListRecycle(ctx context.Context, ref *provider.Reference, key, relativePath string) ([]*provider.RecycleItem, error)
	// RestoreRecycleItem moves a trashed item back into the tree at restoreRef.
	RestoreRecycleItem(ctx context.Context, ref *provider.Reference, key, relativePath string, restoreRef *provider.Reference) error
	// PurgeRecycleItem permanently removes a single trashed item.
	PurgeRecycleItem(ctx context.Context, ref *provider.Reference, key, relativePath string) error
	// EmptyRecycle permanently removes all trashed items of the space addressed by ref.
	EmptyRecycle(ctx context.Context, ref *provider.Reference) error
}

View File

@@ -0,0 +1,439 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package propagator
import (
"context"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/google/renameio/v2"
"github.com/google/uuid"
"github.com/opencloud-eu/reva/v2/pkg/appctx"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options"
"github.com/pkg/errors"
"github.com/rogpeppe/go-internal/lockedfile"
"github.com/rs/zerolog"
"github.com/shamaton/msgpack/v2"
)
// _propagationGracePeriod is how long a changes/.processing dir may remain
// untouched (by mtime) before the background scanner considers it stale and
// re-triggers its propagation.
var _propagationGracePeriod = 3 * time.Minute

// AsyncPropagator implements asynchronous treetime & treesize propagation
type AsyncPropagator struct {
	treeSizeAccounting bool            // propagate tree size changes when true
	treeTimeAccounting bool            // propagate tree mtime changes when true
	propagationDelay   time.Duration   // delay between recording a change and propagating it
	lookup             node.PathLookup // resolves space/node ids to internal paths
	log                *zerolog.Logger
}

// Change represents a change to the tree
type Change struct {
	SyncTime time.Time // when the change was recorded (not the node mtime)
	SizeDiff int64     // size delta caused by the change, in bytes
}
// NewAsyncPropagator returns a new AsyncPropagator instance.
// It also starts a background goroutine that periodically (every 5 minutes)
// scans the "changes" directory for directories that have not been touched
// for longer than _propagationGracePeriod and re-triggers their propagation.
// NOTE(review): the goroutine has no shutdown mechanism; it runs for the
// lifetime of the process.
func NewAsyncPropagator(treeSizeAccounting, treeTimeAccounting bool, o options.AsyncPropagatorOptions, lookup node.PathLookup, log *zerolog.Logger) AsyncPropagator {
	p := AsyncPropagator{
		treeSizeAccounting: treeSizeAccounting,
		treeTimeAccounting: treeTimeAccounting,
		propagationDelay:   o.PropagationDelay,
		lookup:             lookup,
		log:                log,
	}
	log.Info().Msg("async propagator starting up...")

	// spawn a goroutine that watches for stale .processing dirs and fixes them
	go func() {
		if !p.treeTimeAccounting && !p.treeSizeAccounting {
			// no propagation enabled
			log.Debug().Msg("propagation disabled or nothing to propagate")
			return
		}

		changesDirPath := filepath.Join(p.lookup.InternalRoot(), "changes")
		doSleep := false // switch to not sleep on the first iteration
		for {
			if doSleep {
				time.Sleep(5 * time.Minute)
			}
			doSleep = true

			log.Debug().Msg("scanning for stale .processing dirs")
			// matches changes/<spaceid-prefix>/<spaceid>:<nodeid>[.processing]
			// (filepath.Glob has no recursive '**'; here it behaves like '*')
			entries, err := filepath.Glob(changesDirPath + "/**/*")
			if err != nil {
				log.Error().Err(err).Msg("failed to list changes")
				continue
			}
			for _, e := range entries {
				changesDirPath := e // per-iteration copy, captured by the goroutine below
				entry, err := os.Stat(changesDirPath)
				if err != nil {
					continue
				}
				// recover all dirs that seem to have been stuck
				if !entry.IsDir() || time.Now().Before(entry.ModTime().Add(_propagationGracePeriod)) {
					continue
				}
				go func() {
					if !strings.HasSuffix(changesDirPath, ".processing") {
						// first rename the existing node dir
						err = os.Rename(changesDirPath, changesDirPath+".processing")
						if err != nil {
							return
						}
						changesDirPath += ".processing"
					}
					log.Debug().Str("dir", changesDirPath).Msg("propagating stale .processing dir")
					// the dir name encodes "<spaceid>:<nodeid>" (see changesPath)
					parts := strings.SplitN(entry.Name(), ":", 2)
					if len(parts) != 2 {
						log.Error().Str("file", entry.Name()).Msg("encountered invalid .processing dir")
						return
					}
					// bump the mtime so the next scan does not consider it stale again
					now := time.Now()
					_ = os.Chtimes(changesDirPath, now, now)
					p.propagate(context.Background(), parts[0], strings.TrimSuffix(parts[1], ".processing"), true, *log)
				}()
			}
		}
	}()
	return p
}
// Propagate triggers a propagation
// The change itself is recorded asynchronously: a Change carrying the current
// wall-clock sync time and the size delta is queued for the parent node in a
// separate goroutine, and this call returns immediately.
func (p AsyncPropagator) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) error {
	ctx, span := tracer.Start(ctx, "Propagate")
	defer span.End()

	log := appctx.GetLogger(ctx).With().
		Str("method", "async.Propagate").
		Str("spaceid", n.SpaceID).
		Str("nodeid", n.ID).
		Str("parentid", n.ParentID).
		Int64("sizeDiff", sizeDiff).
		Logger()

	if !p.treeTimeAccounting && (!p.treeSizeAccounting || sizeDiff == 0) {
		// neither accounting mode needs to run for this change
		log.Debug().Msg("propagation disabled or nothing to propagate")
		return nil
	}

	// use a sync time and don't rely on the mtime of the current node, as the
	// stat might not change when a rename happened too quickly
	change := Change{
		SyncTime: time.Now().UTC(),
		SizeDiff: sizeDiff,
	}
	go p.queuePropagation(ctx, n.SpaceID, n.ParentID, change, log)
	return nil
}
// queuePropagation records the given Change for the node in its changes
// directory and, if this goroutine is responsible for the directory, waits the
// configured propagation delay and then starts the actual propagation.
func (p AsyncPropagator) queuePropagation(ctx context.Context, spaceID, nodeID string, change Change, log zerolog.Logger) {
	// add a change to the parent node
	changePath := p.changesPath(spaceID, nodeID, uuid.New().String()+".mpk")
	data, err := msgpack.Marshal(change)
	if err != nil {
		log.Error().Err(err).Msg("failed to marshal Change")
		return
	}

	_, subspan := tracer.Start(ctx, "write changes file")
	ready := false
	_ = os.MkdirAll(filepath.Dir(filepath.Dir(changePath)), 0700)
	err = os.Mkdir(filepath.Dir(changePath), 0700)
	// only the first goroutine, which succeeds to create the directory, is supposed to actually trigger the propagation
	triggerPropagation := err == nil || os.IsExist(err)
	// keep the write error in its own variable so a later Mkdir result cannot
	// shadow it (previously the final error log showed the Mkdir error, which
	// was usually nil, instead of the actual write failure)
	var writeErr error
	for retries := 0; retries <= 500; retries++ {
		writeErr = renameio.WriteFile(changePath, data, 0644)
		if writeErr == nil {
			ready = true
			break
		}
		log.Debug().Err(writeErr).Msg("failed to write Change to disk (retrying)")
		// the changes dir might have been renamed away by a concurrent
		// propagation; recreate it before retrying
		err = os.Mkdir(filepath.Dir(changePath), 0700)
		// once this goroutine is responsible for triggering, it stays responsible
		triggerPropagation = triggerPropagation || err == nil || os.IsExist(err)
	}
	if !ready {
		log.Error().Err(writeErr).Msg("failed to write Change to disk")
		return
	}
	subspan.End()

	if !triggerPropagation {
		return
	}

	_, subspan = tracer.Start(ctx, "delay propagation")
	time.Sleep(p.propagationDelay) // wait a moment before propagating
	subspan.End()

	log.Debug().Msg("propagating")
	// add a change to the parent node
	changeDirPath := p.changesPath(spaceID, nodeID, "")
	// first rename the existing node dir
	err = os.Rename(changeDirPath, changeDirPath+".processing")
	if err != nil {
		// This can fail in 2 ways
		// 1. source does not exist anymore as it has already been propagated by another goroutine
		//    -> ignore, as the change is already being processed
		// 2. target already exists because a previous propagation is still running
		//    -> ignore, the previous propagation will pick the new changes up
		return
	}
	p.propagate(ctx, spaceID, nodeID, false, log)
}
// propagate aggregates all pending Change files in the node's ".processing"
// directory into one combined change (latest sync time, summed size diff),
// applies it to the node's metadata under a file lock, removes the
// .processing dir and then queues a propagation for the parent — recursing
// towards the space root. When recalculateTreeSize is true the tree size is
// recomputed from the children instead of applying the aggregated diff
// (used when recovering stale dirs whose diffs may be unreliable).
func (p AsyncPropagator) propagate(ctx context.Context, spaceID, nodeID string, recalculateTreeSize bool, log zerolog.Logger) {
	changeDirPath := p.changesPath(spaceID, nodeID, "")
	processingPath := changeDirPath + ".processing"
	// cleanup removes the .processing dir; invoked on every exit path once the
	// aggregated changes have been consumed (or are unrecoverable)
	cleanup := func() {
		err := os.RemoveAll(processingPath)
		if err != nil {
			log.Error().Err(err).Msg("Could not remove .processing dir")
		}
	}

	_, subspan := tracer.Start(ctx, "list changes files")
	d, err := os.Open(processingPath)
	if err != nil {
		log.Error().Err(err).Msg("Could not open change .processing dir")
		cleanup()
		return
	}
	defer d.Close()
	names, err := d.Readdirnames(0)
	if err != nil {
		log.Error().Err(err).Msg("Could not read dirnames")
		cleanup()
		return
	}
	subspan.End()

	_, subspan = tracer.Start(ctx, "read changes files")
	// pc accumulates the combined change: newest SyncTime wins, SizeDiffs add up
	pc := Change{}
	for _, name := range names {
		if !strings.HasSuffix(name, ".mpk") {
			continue
		}
		b, err := os.ReadFile(filepath.Join(processingPath, name))
		if err != nil {
			log.Error().Err(err).Msg("Could not read change")
			cleanup()
			return
		}
		c := Change{}
		err = msgpack.Unmarshal(b, &c)
		if err != nil {
			log.Error().Err(err).Msg("Could not unmarshal change")
			cleanup()
			return
		}
		if c.SyncTime.After(pc.SyncTime) {
			pc.SyncTime = c.SyncTime
		}
		pc.SizeDiff += c.SizeDiff
	}
	subspan.End()

	// TODO do we need to write an aggregated parentchange file?

	attrs := node.Attributes{}

	var f *lockedfile.File
	// lock parent before reading treesize or tree time
	nodePath := filepath.Join(p.lookup.InternalRoot(), "spaces", lookup.Pathify(spaceID, 1, 2), "nodes", lookup.Pathify(nodeID, 4, 2))
	_, subspan = tracer.Start(ctx, "lockedfile.OpenFile")
	lockFilepath := p.lookup.MetadataBackend().LockfilePath(nodePath)
	f, err = lockedfile.OpenFile(lockFilepath, os.O_RDWR|os.O_CREATE, 0600)
	subspan.End()
	if err != nil {
		log.Error().Err(err).
			Str("lock filepath", lockFilepath).
			Msg("Propagation failed. Could not open metadata for node with lock.")
		cleanup()
		return
	}
	// always log error if closing node fails
	defer func() {
		// ignore already closed error
		cerr := f.Close()
		if err == nil && cerr != nil && !errors.Is(cerr, os.ErrClosed) {
			err = cerr // only overwrite err with en error from close if the former was nil
		}
	}()

	_, subspan = tracer.Start(ctx, "node.ReadNode")
	var n *node.Node
	if n, err = node.ReadNode(ctx, p.lookup, spaceID, nodeID, false, nil, false); err != nil {
		log.Error().Err(err).
			Msg("Propagation failed. Could not read node.")
		cleanup()
		return
	}
	subspan.End()

	if !n.Exists {
		log.Debug().Str("attr", prefixes.PropagationAttr).Msg("node does not exist anymore, not propagating")
		cleanup()
		return
	}
	if !n.HasPropagation(ctx) {
		log.Debug().Str("attr", prefixes.PropagationAttr).Msg("propagation attribute not set or unreadable, not propagating")
		cleanup()
		return
	}

	if p.treeTimeAccounting {
		// update the parent tree time if it is older than the nodes mtime
		updateSyncTime := false

		var tmTime time.Time
		tmTime, err = n.GetTMTime(ctx)
		switch {
		case err != nil:
			// missing attribute, or invalid format, overwrite
			log.Debug().Err(err).
				Msg("could not read tmtime attribute, overwriting")
			updateSyncTime = true
		case tmTime.Before(pc.SyncTime):
			log.Debug().
				Time("tmtime", tmTime).
				Time("stime", pc.SyncTime).
				Msg("parent tmtime is older than node mtime, updating")
			updateSyncTime = true
		default:
			log.Debug().
				Time("tmtime", tmTime).
				Time("stime", pc.SyncTime).
				Dur("delta", pc.SyncTime.Sub(tmTime)).
				Msg("node tmtime is younger than stime, not updating")
		}

		if updateSyncTime {
			// update the tree time of the parent node
			attrs.SetString(prefixes.TreeMTimeAttr, pc.SyncTime.UTC().Format(time.RFC3339Nano))
		}
		// invalidate the cached etag so it gets recalculated
		attrs.SetString(prefixes.TmpEtagAttr, "")
	}

	// size accounting
	if p.treeSizeAccounting && pc.SizeDiff != 0 {
		var newSize uint64

		// read treesize
		treeSize, err := n.GetTreeSize(ctx)
		switch {
		case recalculateTreeSize || metadata.IsAttrUnset(err):
			// fallback to calculating the treesize
			log.Warn().Msg("treesize attribute unset, falling back to calculating the treesize")
			newSize, err = calculateTreeSize(ctx, p.lookup, n.InternalPath())
			if err != nil {
				log.Error().Err(err).
					Msg("Error when calculating treesize of node.") // FIXME wat?
				cleanup()
				return
			}
		case err != nil:
			log.Error().Err(err).
				Msg("Failed to propagate treesize change. Error when reading the treesize attribute from node")
			cleanup()
			return
		case pc.SizeDiff > 0:
			newSize = treeSize + uint64(pc.SizeDiff)
		case uint64(-pc.SizeDiff) > treeSize:
			// The sizeDiff is larger than the current treesize. Which would result in
			// a negative new treesize. Something must have gone wrong with the accounting.
			// Reset the current treesize to 0.
			log.Error().Uint64("treeSize", treeSize).Int64("sizeDiff", pc.SizeDiff).
				Msg("Error when updating treesize of node. Updated treesize < 0. Resetting to 0")
			newSize = 0
		default:
			newSize = treeSize - uint64(-pc.SizeDiff)
		}

		// update the tree size of the node
		attrs.SetString(prefixes.TreesizeAttr, strconv.FormatUint(newSize, 10))
		log.Debug().Uint64("newSize", newSize).Msg("updated treesize of node")
	}

	if err = n.SetXattrsWithContext(ctx, attrs, false); err != nil {
		log.Error().Err(err).Msg("Failed to update extend attributes of node")
		cleanup()
		return
	}

	// Release node lock early, ignore already closed error
	_, subspan = tracer.Start(ctx, "f.Close")
	cerr := f.Close()
	subspan.End()
	if cerr != nil && !errors.Is(cerr, os.ErrClosed) {
		log.Error().Err(cerr).Msg("Failed to close node and release lock")
	}

	log.Info().Msg("Propagation done. cleaning up")
	cleanup()

	// recurse towards the space root with the aggregated change
	if !n.IsSpaceRoot(ctx) {
		p.queuePropagation(ctx, n.SpaceID, n.ParentID, pc, log)
	}

	// Check for a changes dir that might have been added meanwhile and pick it up
	if _, err = os.Open(changeDirPath); err == nil {
		log.Info().Msg("Found a new changes dir. starting next propagation")
		time.Sleep(p.propagationDelay) // wait a moment before propagating
		err = os.Rename(changeDirPath, processingPath)
		if err != nil {
			// This can fail in 2 ways
			// 1. source does not exist anymore as it has already been propagated by another goroutine
			//    -> ignore, as the change is already being processed
			// 2. target already exists because a previous propagation is still running
			//    -> ignore, the previous propagation will pick the new changes up
			return
		}
		p.propagate(ctx, spaceID, nodeID, false, log)
	}
}
// changesPath builds the on-disk path of a change entry for the given
// space/node pair below the internal "changes" directory. The layout is
// changes/<first two chars of spaceID>/<spaceID>:<nodeID>/<filename>;
// an empty filename yields the per-node changes directory itself.
func (p AsyncPropagator) changesPath(spaceID, nodeID, filename string) string {
	shard := spaceID[0:2]
	dirName := spaceID + ":" + nodeID
	return filepath.Join(p.lookup.InternalRoot(), "changes", shard, dirName, filename)
}

View File

@@ -0,0 +1,101 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package propagator
import (
"context"
"os"
"path/filepath"
"strconv"
sprovider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
"github.com/opencloud-eu/reva/v2/pkg/appctx"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options"
"github.com/pkg/errors"
"github.com/rs/zerolog"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
)
// tracer is the package-level OpenTelemetry tracer for all propagator spans.
var tracer trace.Tracer

func init() {
	tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree/propagator")
}

// Propagator propagates tree metadata changes (tree size / tree mtime)
// from a node towards the space root.
type Propagator interface {
	// Propagate applies the given size difference for the node and updates
	// tree metadata along the path to the root.
	Propagate(ctx context.Context, node *node.Node, sizediff int64) error
}
// New returns the Propagator selected via the options. "async" yields the
// asynchronous implementation; any other value falls back to the
// synchronous one.
func New(lookup node.PathLookup, o *options.Options, log *zerolog.Logger) Propagator {
	if o.Propagator == "async" {
		return NewAsyncPropagator(o.TreeSizeAccounting, o.TreeTimeAccounting, o.AsyncPropagatorOptions, lookup, log)
	}
	return NewSyncPropagator(o.TreeSizeAccounting, o.TreeTimeAccounting, lookup)
}
// calculateTreeSize sums up the sizes of the direct children of childrenPath:
// the blob size for files, the tree size for anything else. Children whose
// symlink or metadata cannot be read are logged and skipped so a single
// broken entry does not abort the calculation; an unparsable size attribute,
// however, aborts with an error.
func calculateTreeSize(ctx context.Context, lookup node.PathLookup, childrenPath string) (uint64, error) {
	ctx, span := tracer.Start(ctx, "calculateTreeSize")
	defer span.End()

	var size uint64

	f, err := os.Open(childrenPath)
	if err != nil {
		appctx.GetLogger(ctx).Error().Err(err).Str("childrenPath", childrenPath).Msg("could not open dir")
		return 0, err
	}
	defer f.Close()

	names, err := f.Readdirnames(0)
	if err != nil {
		appctx.GetLogger(ctx).Error().Err(err).Str("childrenPath", childrenPath).Msg("could not read dirnames")
		return 0, err
	}
	for i := range names {
		cPath := filepath.Join(childrenPath, names[i])
		resolvedPath, err := filepath.EvalSymlinks(cPath)
		if err != nil {
			appctx.GetLogger(ctx).Error().Err(err).Str("childpath", cPath).Msg("could not resolve child entry symlink")
			continue // continue after an error
		}

		// raw read of the attributes for performance reasons
		attribs, err := lookup.MetadataBackend().All(ctx, resolvedPath)
		if err != nil {
			appctx.GetLogger(ctx).Error().Err(err).Str("childpath", cPath).Msg("could not read attributes of child entry")
			continue // continue after an error
		}
		sizeAttr := ""
		if string(attribs[prefixes.TypeAttr]) == strconv.FormatUint(uint64(sprovider.ResourceType_RESOURCE_TYPE_FILE), 10) {
			sizeAttr = string(attribs[prefixes.BlobsizeAttr])
		} else {
			sizeAttr = string(attribs[prefixes.TreesizeAttr])
		}
		csize, err := strconv.ParseInt(sizeAttr, 10, 64)
		if err != nil {
			// the attribute can come from either blobsize or treesize, so the
			// message must not claim it was the blobsize (previous wording)
			return 0, errors.Wrapf(err, "invalid size xattr format")
		}
		size += uint64(csize)
	}
	// all error paths returned above; make the success case explicit instead
	// of returning a stale (checked-nil) err variable
	return size, nil
}

View File

@@ -0,0 +1,209 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package propagator
import (
"context"
"errors"
"os"
"strconv"
"time"
"github.com/opencloud-eu/reva/v2/pkg/appctx"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/rogpeppe/go-internal/lockedfile"
"github.com/rs/zerolog"
)
// SyncPropagator implements synchronous treetime & treesize propagation:
// changes are applied to every ancestor in the calling goroutine before
// Propagate returns.
type SyncPropagator struct {
	treeSizeAccounting bool            // propagate tree size changes when true
	treeTimeAccounting bool            // propagate tree mtime changes when true
	lookup             node.PathLookup // resolves nodes to internal paths
}
// NewSyncPropagator returns a new SyncPropagator instance.
func NewSyncPropagator(treeSizeAccounting, treeTimeAccounting bool, lookup node.PathLookup) SyncPropagator {
	return SyncPropagator{
		treeSizeAccounting: treeSizeAccounting,
		treeTimeAccounting: treeTimeAccounting,
		lookup:             lookup,
	}
}
// Propagate triggers a propagation
// It walks from the given node up to (but not including) the space root,
// applying the size difference and sync time to each ancestor synchronously.
func (p SyncPropagator) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) error {
	ctx, span := tracer.Start(ctx, "Propagate")
	defer span.End()

	sublog := appctx.GetLogger(ctx).With().
		Str("method", "sync.Propagate").
		Str("spaceid", n.SpaceID).
		Str("nodeid", n.ID).
		Int64("sizeDiff", sizeDiff).
		Logger()

	if !p.treeTimeAccounting && (!p.treeSizeAccounting || sizeDiff == 0) {
		// neither accounting mode needs to run for this change
		sublog.Debug().Msg("propagation disabled or nothing to propagate")
		return nil
	}

	root := n.SpaceRoot
	// use a sync time and don't rely on the mtime of the current node, as the
	// stat might not change when a rename happened too quickly
	syncTime := time.Now().UTC()

	// walk towards the root, one ancestor per iteration
	for n.ID != root.ID {
		parent, stop, err := p.propagateItem(ctx, n, syncTime, sizeDiff, sublog)
		if err != nil {
			sublog.Error().Err(err).Msg("error propagating")
			return err
		}
		if stop {
			return nil
		}
		n = parent
	}
	return nil
}
// propagateItem applies the sync time and size difference to the parent of n.
// It locks the parent's metadata file, updates the tree mtime (if the parent's
// is older), invalidates the cached etag and adjusts the tree size, then
// returns the parent node so the caller can continue towards the root.
// The boolean result is true when propagation should stop (error, or the
// parent has no propagation flag set).
func (p SyncPropagator) propagateItem(ctx context.Context, n *node.Node, sTime time.Time, sizeDiff int64, log zerolog.Logger) (*node.Node, bool, error) {
	log.Debug().Msg("propagating")

	attrs := node.Attributes{}

	var f *lockedfile.File
	// lock parent before reading treesize or tree time
	_, subspan := tracer.Start(ctx, "lockedfile.OpenFile")
	parentFilename := p.lookup.MetadataBackend().LockfilePath(n.ParentPath())
	f, err := lockedfile.OpenFile(parentFilename, os.O_RDWR|os.O_CREATE, 0600)
	subspan.End()
	if err != nil {
		log.Error().Err(err).
			Str("parent filename", parentFilename).
			Msg("Propagation failed. Could not open metadata for parent with lock.")
		return nil, true, err
	}
	// always log error if closing node fails
	defer func() {
		// ignore already closed error
		cerr := f.Close()
		if err == nil && cerr != nil && !errors.Is(cerr, os.ErrClosed) {
			err = cerr // only overwrite err with en error from close if the former was nil
		}
	}()

	if n, err = n.Parent(ctx); err != nil {
		log.Error().Err(err).
			Msg("Propagation failed. Could not read parent node.")
		return n, true, err
	}

	if !n.HasPropagation(ctx) {
		log.Debug().Str("attr", prefixes.PropagationAttr).Msg("propagation attribute not set or unreadable, not propagating")
		// if the attribute is not set treat it as false / none / no propagation
		return n, true, nil
	}
	log = log.With().Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Logger()

	if p.treeTimeAccounting {
		// update the parent tree time if it is older than the nodes mtime
		updateSyncTime := false

		var tmTime time.Time
		tmTime, err = n.GetTMTime(ctx)
		switch {
		case err != nil:
			// missing attribute, or invalid format, overwrite
			log.Debug().Err(err).
				Msg("could not read tmtime attribute, overwriting")
			updateSyncTime = true
		case tmTime.Before(sTime):
			log.Debug().
				Time("tmtime", tmTime).
				Time("stime", sTime).
				Msg("parent tmtime is older than node mtime, updating")
			updateSyncTime = true
		default:
			log.Debug().
				Time("tmtime", tmTime).
				Time("stime", sTime).
				Dur("delta", sTime.Sub(tmTime)).
				Msg("parent tmtime is younger than node mtime, not updating")
		}

		if updateSyncTime {
			// update the tree time of the parent node
			attrs.SetString(prefixes.TreeMTimeAttr, sTime.UTC().Format(time.RFC3339Nano))
		}
		// invalidate the cached etag so it gets recalculated
		attrs.SetString(prefixes.TmpEtagAttr, "")
	}

	// size accounting
	if p.treeSizeAccounting && sizeDiff != 0 {
		var newSize uint64

		// read treesize
		treeSize, err := n.GetTreeSize(ctx)
		switch {
		case metadata.IsAttrUnset(err):
			// fallback to calculating the treesize
			log.Warn().Msg("treesize attribute unset, falling back to calculating the treesize")
			newSize, err = calculateTreeSize(ctx, p.lookup, n.InternalPath())
			if err != nil {
				return n, true, err
			}
		case err != nil:
			// typo fix: "Faild" -> "Failed"
			log.Error().Err(err).
				Msg("Failed to propagate treesize change. Error when reading the treesize attribute from parent")
			return n, true, err
		case sizeDiff > 0:
			newSize = treeSize + uint64(sizeDiff)
		case uint64(-sizeDiff) > treeSize:
			// The sizeDiff is larger than the current treesize. Which would result in
			// a negative new treesize. Something must have gone wrong with the accounting.
			// Reset the current treesize to 0.
			// typo fix: "Reestting" -> "Resetting"
			log.Error().Uint64("treeSize", treeSize).Int64("sizeDiff", sizeDiff).
				Msg("Error when updating treesize of parent node. Updated treesize < 0. Resetting to 0")
			newSize = 0
		default:
			newSize = treeSize - uint64(-sizeDiff)
		}

		// update the tree size of the node
		attrs.SetString(prefixes.TreesizeAttr, strconv.FormatUint(newSize, 10))
		log.Debug().Uint64("newSize", newSize).Msg("updated treesize of parent node")
	}

	if err = n.SetXattrsWithContext(ctx, attrs, false); err != nil {
		log.Error().Err(err).Msg("Failed to update extend attributes of parent node")
		return n, true, err
	}
	return n, false, nil
}

View File

@@ -0,0 +1,975 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package tree
import (
"bytes"
"context"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"regexp"
"strings"
"time"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
"github.com/google/uuid"
"github.com/opencloud-eu/reva/v2/pkg/appctx"
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/propagator"
"github.com/opencloud-eu/reva/v2/pkg/utils"
"github.com/pkg/errors"
"github.com/rs/zerolog"
"go-micro.dev/v4/store"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
"golang.org/x/sync/errgroup"
)
// tracer is the package-level OpenTelemetry tracer for all tree operations.
var tracer trace.Tracer

func init() {
	tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree")
}

// Blobstore defines an interface for storing blobs in a blobstore
type Blobstore interface {
	// Upload stores the content of source as the blob of the given node.
	Upload(node *node.Node, source string) error
	// Download returns a reader for the blob of the given node.
	Download(node *node.Node) (io.ReadCloser, error)
	// Delete removes the blob of the given node.
	Delete(node *node.Node) error
}

// Tree manages a hierarchical tree
type Tree struct {
	lookup     node.PathLookup       // resolves ids/references to internal paths
	blobstore  Blobstore             // stores file contents
	propagator propagator.Propagator // propagates treesize/treetime changes towards the root
	options    *options.Options
	idCache    store.Store // cache for id lookups
}

// PermissionCheckFunc defined a function used to check resource permissions
type PermissionCheckFunc func(rp *provider.ResourcePermissions) bool
// New returns a new instance of Tree wired up with the given lookup,
// blobstore, options and id cache; the propagator is built from the options.
func New(lu node.PathLookup, bs Blobstore, o *options.Options, cache store.Store, log *zerolog.Logger) *Tree {
	t := &Tree{
		lookup:    lu,
		blobstore: bs,
		options:   o,
		idCache:   cache,
	}
	t.propagator = propagator.New(lu, o, log)
	return t
}
// Setup prepares the tree structure by creating the data directories of the
// internal layout below the configured root.
func (t *Tree) Setup() error {
	// "spaces" holds the node trees; "uploads" holds in-flight uploads
	// (nodes link into it via symlinks) and is better kept on fast / volatile
	// storage until a workflow finally moves the data to the nodes dir.
	for _, dir := range []string{
		filepath.Join(t.options.Root, "spaces"),
		filepath.Join(t.options.Root, "uploads"),
	} {
		if err := os.MkdirAll(dir, 0700); err != nil {
			return err
		}
	}
	return nil
}
// GetMD returns the file metadata of a node by stating its internal path.
// A missing node is reported as errtypes.NotFound.
func (t *Tree) GetMD(ctx context.Context, n *node.Node) (os.FileInfo, error) {
	_, span := tracer.Start(ctx, "GetMD")
	defer span.End()
	fi, err := os.Stat(n.InternalPath())
	if err == nil {
		return fi, nil
	}
	if errors.Is(err, fs.ErrNotExist) {
		return nil, errtypes.NotFound(n.ID)
	}
	return nil, errors.Wrap(err, "tree: error stating "+n.ID)
}
// TouchFile creates a new empty file node. If the node already exists it is
// either marked as processing (when markprocessing is set) or an
// AlreadyExists error is returned. A non-empty mtime string overrides the
// current time as the node's mtime. Finally the child is linked into its
// parent directory and a propagation is triggered.
func (t *Tree) TouchFile(ctx context.Context, n *node.Node, markprocessing bool, mtime string) error {
	_, span := tracer.Start(ctx, "TouchFile")
	defer span.End()
	if n.Exists {
		if markprocessing {
			return n.SetXattr(ctx, prefixes.StatusPrefix, []byte(node.ProcessingStatus))
		}
		return errtypes.AlreadyExists(n.ID)
	}

	if n.ID == "" {
		n.ID = uuid.New().String()
	}
	n.SetType(provider.ResourceType_RESOURCE_TYPE_FILE)

	nodePath := n.InternalPath()
	if err := os.MkdirAll(filepath.Dir(nodePath), 0700); err != nil {
		return errors.Wrap(err, "Decomposedfs: error creating node")
	}
	f, err := os.Create(nodePath)
	if err != nil {
		return errors.Wrap(err, "Decomposedfs: error creating node")
	}
	// the file is only a zero-byte placeholder; close it immediately
	// (previously the handle returned by os.Create was leaked)
	if err := f.Close(); err != nil {
		return errors.Wrap(err, "Decomposedfs: error creating node")
	}

	attributes := n.NodeMetadata(ctx)
	if markprocessing {
		attributes[prefixes.StatusPrefix] = []byte(node.ProcessingStatus)
	}
	if mtime != "" {
		if err := n.SetMtimeString(ctx, mtime); err != nil {
			return errors.Wrap(err, "Decomposedfs: could not set mtime")
		}
	} else {
		now := time.Now()
		if err := n.SetMtime(ctx, &now); err != nil {
			return errors.Wrap(err, "Decomposedfs: could not set mtime")
		}
	}
	err = n.SetXattrsWithContext(ctx, attributes, true)
	if err != nil {
		return err
	}

	// link child name to parent if it is new
	childNameLink := filepath.Join(n.ParentPath(), n.Name)
	var link string
	link, err = os.Readlink(childNameLink)
	if err == nil && link != "../"+n.ID {
		// an entry with the same name points elsewhere; replace it
		if err = os.Remove(childNameLink); err != nil {
			return errors.Wrap(err, "Decomposedfs: could not remove symlink child entry")
		}
	}
	if errors.Is(err, fs.ErrNotExist) || link != "../"+n.ID {
		relativeNodePath := filepath.Join("../../../../../", lookup.Pathify(n.ID, 4, 2))
		if err = os.Symlink(relativeNodePath, childNameLink); err != nil {
			return errors.Wrap(err, "Decomposedfs: could not symlink child entry")
		}
	}
	return t.Propagate(ctx, n, 0)
}
// CreateDir creates a new directory entry in the tree: it materializes the
// directory node, links it into its parent by name and triggers a
// propagation. If the name already exists in the parent, the freshly created
// node is rolled back and AlreadyExists is returned.
func (t *Tree) CreateDir(ctx context.Context, n *node.Node) (err error) {
	ctx, span := tracer.Start(ctx, "CreateDir")
	defer span.End()
	if n.Exists {
		return errtypes.AlreadyExists(n.ID) // path?
	}

	// create a directory node
	n.SetType(provider.ResourceType_RESOURCE_TYPE_CONTAINER)
	if n.ID == "" {
		n.ID = uuid.New().String()
	}

	err = t.createDirNode(ctx, n)
	if err != nil {
		return
	}

	// make child appear in listings
	relativeNodePath := filepath.Join("../../../../../", lookup.Pathify(n.ID, 4, 2))
	ctx, subspan := tracer.Start(ctx, "os.Symlink")
	err = os.Symlink(relativeNodePath, filepath.Join(n.ParentPath(), n.Name))
	subspan.End()
	if err != nil {
		// no better way to check unfortunately
		if !strings.Contains(err.Error(), "file exists") {
			return
		}

		// the name is already taken: try to remove the node we just created
		ctx, subspan = tracer.Start(ctx, "os.RemoveAll")
		e := os.RemoveAll(n.InternalPath())
		subspan.End()
		if e != nil {
			appctx.GetLogger(ctx).Debug().Err(e).Msg("cannot delete node")
		}
		return errtypes.AlreadyExists(err.Error())
	}
	return t.Propagate(ctx, n, 0)
}
// Move replaces the target with the source
//
// Moves are only supported within a single space. An existing target is
// removed without trashing it first. A move within the same parent is
// handled as a pure rename plus a name-attribute update; moving to another
// parent additionally rewrites the parent id and propagates the size
// difference out of the old subtree and into the new one.
func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) (err error) {
	_, span := tracer.Start(ctx, "Move")
	defer span.End()
	if oldNode.SpaceID != newNode.SpaceID {
		// WebDAV RFC https://www.rfc-editor.org/rfc/rfc4918#section-9.9.4 says to use
		// > 502 (Bad Gateway) - This may occur when the destination is on another
		// > server and the destination server refuses to accept the resource.
		// > This could also occur when the destination is on another sub-section
		// > of the same server namespace.
		// but we only have a not supported error
		return errtypes.NotSupported("cannot move across spaces")
	}
	// if target exists delete it without trashing it
	if newNode.Exists {
		// TODO make sure all children are deleted
		if err := os.RemoveAll(newNode.InternalPath()); err != nil {
			return errors.Wrap(err, "Decomposedfs: Move: error deleting target node "+newNode.ID)
		}
	}
	// remove cache entry in any case to avoid inconsistencies
	defer func() { _ = t.idCache.Delete(filepath.Join(oldNode.ParentPath(), oldNode.Name)) }()
	// Always target the old node ID for xattr updates.
	// The new node id is empty if the target does not exist
	// and we need to overwrite the new one when overwriting an existing path.
	// are we just renaming (parent stays the same)?
	if oldNode.ParentID == newNode.ParentID {
		// parentPath := t.lookup.InternalPath(oldNode.SpaceID, oldNode.ParentID)
		parentPath := oldNode.ParentPath()
		// rename child
		err = os.Rename(
			filepath.Join(parentPath, oldNode.Name),
			filepath.Join(parentPath, newNode.Name),
		)
		if err != nil {
			return errors.Wrap(err, "Decomposedfs: could not rename child")
		}
		// update name attribute
		if err := oldNode.SetXattrString(ctx, prefixes.NameAttr, newNode.Name); err != nil {
			return errors.Wrap(err, "Decomposedfs: could not set name attribute")
		}
		// a rename does not change any sizes, only etag/mtime need to propagate
		return t.Propagate(ctx, newNode, 0)
	}
	// we are moving the node to a new parent, any target has been removed
	// bring old node to the new parent
	// rename child
	err = os.Rename(
		filepath.Join(oldNode.ParentPath(), oldNode.Name),
		filepath.Join(newNode.ParentPath(), newNode.Name),
	)
	if err != nil {
		return errors.Wrap(err, "Decomposedfs: could not move child")
	}
	// update target parentid and name
	attribs := node.Attributes{}
	attribs.SetString(prefixes.ParentidAttr, newNode.ParentID)
	attribs.SetString(prefixes.NameAttr, newNode.Name)
	if err := oldNode.SetXattrsWithContext(ctx, attribs, true); err != nil {
		return errors.Wrap(err, "Decomposedfs: could not update old node attributes")
	}
	// the size diff is the current treesize or blobsize of the old/source node
	var sizeDiff int64
	if oldNode.IsDir(ctx) {
		treeSize, err := oldNode.GetTreeSize(ctx)
		if err != nil {
			return err
		}
		sizeDiff = int64(treeSize)
	} else {
		sizeDiff = oldNode.Blobsize
	}
	// TODO inefficient because we might update several nodes twice, only propagate unchanged nodes?
	// collect in a list, then only stat each node once
	// also do this in a go routine ... webdav should check the etag async
	err = t.Propagate(ctx, oldNode, -sizeDiff)
	if err != nil {
		return errors.Wrap(err, "Decomposedfs: Move: could not propagate old node")
	}
	err = t.Propagate(ctx, newNode, sizeDiff)
	if err != nil {
		return errors.Wrap(err, "Decomposedfs: Move: could not propagate new node")
	}
	return nil
}
// readChildNodeFromLink resolves the child symlink at path and derives the
// node id from the relative link target: the leading "../" segments are
// stripped and the remaining path separators removed, leaving the plain id.
func readChildNodeFromLink(ctx context.Context, path string) (string, error) {
	_, span := tracer.Start(ctx, "readChildNodeFromLink")
	defer span.End()
	target, err := os.Readlink(path)
	if err != nil {
		return "", err
	}
	trimmed := strings.TrimLeft(target, "/.")
	return strings.ReplaceAll(trimmed, "/", ""), nil
}
// ListFolder lists the content of a folder node
//
// The children are the symlinks inside the node's directory. Their target
// node ids are resolved (via the id cache when possible, otherwise by
// reading the link) and the child nodes are read concurrently with up to
// MaxConcurrency workers. Children the user is denied access to are
// filtered out of the result.
func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, error) {
	ctx, span := tracer.Start(ctx, "ListFolder")
	defer span.End()
	dir := n.InternalPath()
	_, subspan := tracer.Start(ctx, "os.Open")
	f, err := os.Open(dir)
	subspan.End()
	if err != nil {
		if errors.Is(err, fs.ErrNotExist) {
			return nil, errtypes.NotFound(dir)
		}
		return nil, errors.Wrap(err, "tree: error listing "+dir)
	}
	defer f.Close()
	_, subspan = tracer.Start(ctx, "f.Readdirnames")
	names, err := f.Readdirnames(0)
	subspan.End()
	if err != nil {
		return nil, err
	}
	// never spawn more workers than there are entries to process
	numWorkers := t.options.MaxConcurrency
	if len(names) < numWorkers {
		numWorkers = len(names)
	}
	work := make(chan string)
	results := make(chan *node.Node)
	g, ctx := errgroup.WithContext(ctx)
	// Distribute work
	g.Go(func() error {
		defer close(work)
		for _, name := range names {
			select {
			case work <- name:
			case <-ctx.Done():
				return ctx.Err()
			}
		}
		return nil
	})
	// Spawn workers that'll concurrently work the queue
	for i := 0; i < numWorkers; i++ {
		g.Go(func() error {
			var err error
			for name := range work {
				path := filepath.Join(dir, name)
				nodeID := getNodeIDFromCache(ctx, path, t.idCache)
				if nodeID == "" {
					// cache miss: fall back to reading the symlink, then warm the cache
					nodeID, err = readChildNodeFromLink(ctx, path)
					if err != nil {
						return err
					}
					err = storeNodeIDInCache(ctx, path, nodeID, t.idCache)
					if err != nil {
						return err
					}
				}
				child, err := node.ReadNode(ctx, t.lookup, n.SpaceID, nodeID, false, n.SpaceRoot, true)
				if err != nil {
					return err
				}
				// prevent listing denied resources
				if !child.IsDenied(ctx) {
					if child.SpaceRoot == nil {
						child.SpaceRoot = n.SpaceRoot
					}
					select {
					case results <- child:
					case <-ctx.Done():
						return ctx.Err()
					}
				}
			}
			return nil
		})
	}
	// Wait for things to settle down, then close results chan
	go func() {
		_ = g.Wait() // error is checked later
		close(results)
	}()
	retNodes := []*node.Node{}
	for n := range results {
		retNodes = append(retNodes, n)
	}
	// now that results is drained and closed, surface any worker error
	if err := g.Wait(); err != nil {
		return nil, err
	}
	return retNodes, nil
}
// Delete deletes a node in the tree by moving it to the trash
//
// The node is renamed on disk to <id><TrashIDDelimiter><deletion time>, a
// symlink to that name is placed in the space's trash directory and the
// entry is unlinked from the parent. Each step attempts to roll back the
// previous ones on failure. Finally the freed size is propagated up the
// tree as a negative size diff.
func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) {
	_, span := tracer.Start(ctx, "Delete")
	defer span.End()
	path := filepath.Join(n.ParentPath(), n.Name)
	// remove entry from cache immediately to avoid inconsistencies
	defer func() { _ = t.idCache.Delete(path) }()
	if appctx.DeletingSharedResourceFromContext(ctx) {
		// shared resources are only unlinked from the parent, not trashed
		src := filepath.Join(n.ParentPath(), n.Name)
		return os.Remove(src)
	}
	// get the original path
	origin, err := t.lookup.Path(ctx, n, node.NoCheck)
	if err != nil {
		return
	}
	// set origin location in metadata
	nodePath := n.InternalPath()
	if err := n.SetXattrString(ctx, prefixes.TrashOriginAttr, origin); err != nil {
		return err
	}
	// the deleted subtree's size is later subtracted from the ancestors
	var sizeDiff int64
	if n.IsDir(ctx) {
		treesize, err := n.GetTreeSize(ctx)
		if err != nil {
			return err // TODO calculate treesize if it is not set
		}
		sizeDiff = -int64(treesize)
	} else {
		sizeDiff = -n.Blobsize
	}
	deletionTime := time.Now().UTC().Format(time.RFC3339Nano)
	// Prepare the trash
	trashLink := filepath.Join(t.options.Root, "spaces", lookup.Pathify(n.SpaceRoot.ID, 1, 2), "trash", lookup.Pathify(n.ID, 4, 2))
	if err := os.MkdirAll(filepath.Dir(trashLink), 0700); err != nil {
		// Roll back changes
		_ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr, true)
		return err
	}
	// FIXME can we just move the node into the trash dir? instead of adding another symlink and appending a trash timestamp?
	// can we just use the mtime as the trash time?
	// TODO store a trashed by userid
	// first make node appear in the space trash
	// parent id and name are stored as extended attributes in the node itself
	err = os.Symlink("../../../../../nodes/"+lookup.Pathify(n.ID, 4, 2)+node.TrashIDDelimiter+deletionTime, trashLink)
	if err != nil {
		// Roll back changes
		_ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr, true)
		return
	}
	// at this point we have a symlink pointing to a non existing destination, which is fine
	// rename the trashed node so it is not picked up when traversing up the tree and matches the symlink
	trashPath := nodePath + node.TrashIDDelimiter + deletionTime
	err = os.Rename(nodePath, trashPath)
	if err != nil {
		// To roll back changes
		// TODO remove symlink
		// Roll back changes
		_ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr, true)
		return
	}
	// keep the metadata backend's view consistent with the on-disk rename
	err = t.lookup.MetadataBackend().Rename(nodePath, trashPath)
	if err != nil {
		_ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr, true)
		_ = os.Rename(trashPath, nodePath)
		return
	}
	// Remove lock file if it exists
	_ = os.Remove(n.LockFilePath())
	// finally remove the entry from the parent dir
	if err = os.Remove(path); err != nil {
		// To roll back changes
		// TODO revert the rename
		// TODO remove symlink
		// Roll back changes
		_ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr, true)
		return
	}
	return t.Propagate(ctx, n, sizeDiff)
}
// RestoreRecycleItemFunc returns a node and a function to restore it from the trash.
//
// It resolves the trashed node and the restore target (the supplied
// targetNode, or the original location when targetNode is nil) and returns
// the recycle node, the target's parent and a closure that performs the
// actual restore: it links the node back into the parent, renames the
// trashed node back to its plain id, rewrites name and parent id, removes
// the trash entry and propagates the restored size.
func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPath string, targetNode *node.Node) (*node.Node, *node.Node, func() error, error) {
	_, span := tracer.Start(ctx, "RestoreRecycleItemFunc")
	defer span.End()
	logger := appctx.GetLogger(ctx)
	recycleNode, trashItem, deletedNodePath, origin, err := t.readRecycleItem(ctx, spaceid, key, trashPath)
	if err != nil {
		return nil, nil, nil, err
	}
	// the origin is relative to the space root
	targetRef := &provider.Reference{
		ResourceId: &provider.ResourceId{SpaceId: spaceid, OpaqueId: spaceid},
		Path:       utils.MakeRelativePath(origin),
	}
	if targetNode == nil {
		targetNode, err = t.lookup.NodeFromResource(ctx, targetRef)
		if err != nil {
			return nil, nil, nil, err
		}
	}
	if err := targetNode.CheckLock(ctx); err != nil {
		return nil, nil, nil, err
	}
	parent, err := targetNode.Parent(ctx)
	if err != nil {
		return nil, nil, nil, err
	}
	fn := func() error {
		if targetNode.Exists {
			return errtypes.AlreadyExists("origin already exists")
		}
		// add the entry for the parent dir
		err = os.Symlink("../../../../../"+lookup.Pathify(recycleNode.ID, 4, 2), filepath.Join(targetNode.ParentPath(), targetNode.Name))
		if err != nil {
			return err
		}
		// rename to node only name, so it is picked up by id
		nodePath := recycleNode.InternalPath()
		// attempt to rename only if we're not in a subfolder
		if deletedNodePath != nodePath {
			err = os.Rename(deletedNodePath, nodePath)
			if err != nil {
				return err
			}
			// keep the metadata backend in sync with the on-disk rename
			err = t.lookup.MetadataBackend().Rename(deletedNodePath, nodePath)
			if err != nil {
				return err
			}
		}
		targetNode.Exists = true
		attrs := node.Attributes{}
		attrs.SetString(prefixes.NameAttr, targetNode.Name)
		// set ParentidAttr to restorePath's node parent id
		attrs.SetString(prefixes.ParentidAttr, targetNode.ParentID)
		if err = recycleNode.SetXattrsWithContext(ctx, attrs, true); err != nil {
			return errors.Wrap(err, "Decomposedfs: could not update recycle node")
		}
		// delete item link in trash
		deletePath := trashItem
		if trashPath != "" && trashPath != "/" {
			// restoring an item from inside a trashed folder: only remove its link
			resolvedTrashRoot, err := filepath.EvalSymlinks(trashItem)
			if err != nil {
				return errors.Wrap(err, "Decomposedfs: could not resolve trash root")
			}
			deletePath = filepath.Join(resolvedTrashRoot, trashPath)
			if err = os.Remove(deletePath); err != nil {
				logger.Error().Err(err).Str("trashItem", trashItem).Str("deletePath", deletePath).Str("trashPath", trashPath).Msg("error deleting trash item")
			}
		} else {
			if err = utils.RemoveItem(deletePath); err != nil {
				logger.Error().Err(err).Str("trashItem", trashItem).Str("deletePath", deletePath).Str("trashPath", trashPath).Msg("error recursively deleting trash item")
			}
		}
		// propagate the restored tree size / blob size up from the target
		var sizeDiff int64
		if recycleNode.IsDir(ctx) {
			treeSize, err := recycleNode.GetTreeSize(ctx)
			if err != nil {
				return err
			}
			sizeDiff = int64(treeSize)
		} else {
			sizeDiff = recycleNode.Blobsize
		}
		return t.Propagate(ctx, targetNode, sizeDiff)
	}
	return recycleNode, parent, fn, nil
}
// PurgeRecycleItemFunc returns a node and a function to purge it from the trash
//
// The returned closure removes the trashed node itself (including children,
// blobs and revisions via removeNode) and afterwards deletes the trash
// entry that pointed at it.
func (t *Tree) PurgeRecycleItemFunc(ctx context.Context, spaceid, key string, path string) (*node.Node, func() error, error) {
	_, span := tracer.Start(ctx, "PurgeRecycleItemFunc")
	defer span.End()
	logger := appctx.GetLogger(ctx)
	rn, trashItem, deletedNodePath, _, err := t.readRecycleItem(ctx, spaceid, key, path)
	if err != nil {
		return nil, nil, err
	}
	// the deletion timestamp is encoded in the on-disk name after the trash delimiter
	ts := ""
	timeSuffix := strings.SplitN(filepath.Base(deletedNodePath), node.TrashIDDelimiter, 2)
	if len(timeSuffix) == 2 {
		ts = timeSuffix[1]
	}
	fn := func() error {
		if err := t.removeNode(ctx, deletedNodePath, ts, rn); err != nil {
			return err
		}
		// delete item link in trash
		deletePath := trashItem
		if path != "" && path != "/" {
			// purging an item inside a trashed folder: resolve the trash root first
			resolvedTrashRoot, err := filepath.EvalSymlinks(trashItem)
			if err != nil {
				return errors.Wrap(err, "Decomposedfs: could not resolve trash root")
			}
			deletePath = filepath.Join(resolvedTrashRoot, path)
		}
		// NOTE: this assigns to the err captured from the enclosing scope
		if err = utils.RemoveItem(deletePath); err != nil {
			logger.Error().Err(err).Str("deletePath", deletePath).Msg("error deleting trash item")
			return err
		}
		return nil
	}
	return rn, fn, nil
}
// InitNewNode initializes a new node
//
// It creates the node's parent directories, takes the metadata lock for the
// node, touches the node file (which carries the resource mtime), checks the
// quota and links the node into its parent directory. Note that once the
// metadata lock was acquired it is returned to the caller even alongside an
// error, so the caller can (and must) release it.
func (t *Tree) InitNewNode(ctx context.Context, n *node.Node, fsize uint64) (metadata.UnlockFunc, error) {
	_, span := tracer.Start(ctx, "InitNewNode")
	defer span.End()
	// create folder structure (if needed)
	_, subspan := tracer.Start(ctx, "os.MkdirAll")
	err := os.MkdirAll(filepath.Dir(n.InternalPath()), 0700)
	subspan.End()
	if err != nil {
		return nil, err
	}
	// create and write lock new node metadata
	_, subspan = tracer.Start(ctx, "metadata.Lock")
	unlock, err := t.lookup.MetadataBackend().Lock(n.InternalPath())
	subspan.End()
	if err != nil {
		return nil, err
	}
	// we also need to touch the actual node file here it stores the mtime of the resource
	// O_EXCL makes this fail if the node file already exists
	_, subspan = tracer.Start(ctx, "os.OpenFile")
	h, err := os.OpenFile(n.InternalPath(), os.O_CREATE|os.O_EXCL, 0600)
	subspan.End()
	if err != nil {
		return unlock, err
	}
	h.Close()
	_, subspan = tracer.Start(ctx, "node.CheckQuota")
	_, err = node.CheckQuota(ctx, n.SpaceRoot, false, 0, fsize)
	subspan.End()
	if err != nil {
		return unlock, err
	}
	// link child name to parent if it is new
	childNameLink := filepath.Join(n.ParentPath(), n.Name)
	relativeNodePath := filepath.Join("../../../../../", lookup.Pathify(n.ID, 4, 2))
	log := appctx.GetLogger(ctx).With().Str("childNameLink", childNameLink).Str("relativeNodePath", relativeNodePath).Logger()
	log.Info().Msg("initNewNode: creating symlink")
	_, subspan = tracer.Start(ctx, "os.Symlink")
	err = os.Symlink(relativeNodePath, childNameLink)
	subspan.End()
	if err != nil {
		log.Info().Err(err).Msg("initNewNode: symlink failed")
		if errors.Is(err, fs.ErrExist) {
			log.Info().Err(err).Msg("initNewNode: symlink already exists")
			return unlock, errtypes.AlreadyExists(n.Name)
		}
		return unlock, errors.Wrap(err, "Decomposedfs: could not symlink child entry")
	}
	log.Info().Msg("initNewNode: symlink created")
	return unlock, nil
}
// removeNode permanently removes a (trashed) node: its children (recursively),
// the node itself, its metadata, its blob and the blobs of all its revisions.
// A non-empty timeSuffix means the node was renamed into the trash and the
// node id is extended accordingly before listing children.
func (t *Tree) removeNode(ctx context.Context, path, timeSuffix string, n *node.Node) error {
	logger := appctx.GetLogger(ctx)
	if timeSuffix != "" {
		// match the on-disk trash name <id><TrashIDDelimiter><timestamp>
		n.ID = n.ID + node.TrashIDDelimiter + timeSuffix
	}
	if n.IsDir(ctx) {
		// depth-first: purge all children before the node itself
		item, err := t.ListFolder(ctx, n)
		if err != nil {
			logger.Error().Err(err).Str("path", path).Msg("error listing folder")
		} else {
			for _, child := range item {
				if err := t.removeNode(ctx, child.InternalPath(), "", child); err != nil {
					return err
				}
			}
		}
	}
	// delete the actual node
	if err := utils.RemoveItem(path); err != nil {
		logger.Error().Err(err).Str("path", path).Msg("error purging node")
		return err
	}
	if err := t.lookup.MetadataBackend().Purge(ctx, path); err != nil {
		logger.Error().Err(err).Str("path", t.lookup.MetadataBackend().MetadataPath(path)).Msg("error purging node metadata")
		return err
	}
	// delete blob from blobstore
	if n.BlobID != "" {
		if err := t.DeleteBlob(n); err != nil {
			logger.Error().Err(err).Str("blobID", n.BlobID).Msg("error purging nodes blob")
			return err
		}
	}
	// delete revisions
	revs, err := filepath.Glob(n.InternalPath() + node.RevisionIDDelimiter + "*")
	if err != nil {
		logger.Error().Err(err).Str("path", n.InternalPath()+node.RevisionIDDelimiter+"*").Msg("glob failed badly")
		return err
	}
	for _, rev := range revs {
		// skip sidecar metadata files, they are removed with their revision
		if t.lookup.MetadataBackend().IsMetaFile(rev) {
			continue
		}
		// each revision may reference its own blob that must be purged as well
		bID, _, err := t.lookup.ReadBlobIDAndSizeAttr(ctx, rev, nil)
		if err != nil {
			logger.Error().Err(err).Str("revision", rev).Msg("error reading blobid attribute")
			return err
		}
		if err := utils.RemoveItem(rev); err != nil {
			logger.Error().Err(err).Str("revision", rev).Msg("error removing revision node")
			return err
		}
		if bID != "" {
			if err := t.DeleteBlob(&node.Node{SpaceID: n.SpaceID, BlobID: bID}); err != nil {
				logger.Error().Err(err).Str("revision", rev).Str("blobID", bID).Msg("error removing revision node blob")
				return err
			}
		}
	}
	return nil
}
// Propagate propagates changes to the root of the tree by delegating to the
// configured propagator, which updates the ancestors with the given size diff.
func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) error {
	return t.propagator.Propagate(ctx, n, sizeDiff)
}
// WriteBlob writes a blob to the blobstore by uploading the content of the
// source file for the given node.
func (t *Tree) WriteBlob(node *node.Node, source string) error {
	return t.blobstore.Upload(node, source)
}
// ReadBlob reads a blob from the blobstore. A node without a blob id
// represents an empty file, for which an empty reader is handed out.
func (t *Tree) ReadBlob(node *node.Node) (io.ReadCloser, error) {
	if node.BlobID != "" {
		return t.blobstore.Download(node)
	}
	// no blob was ever written - this is a 0 byte file
	return io.NopCloser(bytes.NewReader(nil)), nil
}
// DeleteBlob deletes a blob from the blobstore. It rejects nil nodes and
// nodes without a blob id.
func (t *Tree) DeleteBlob(node *node.Node) error {
	switch {
	case node == nil:
		return fmt.Errorf("could not delete blob, nil node was given")
	case node.BlobID == "":
		return fmt.Errorf("could not delete blob, node with empty blob id was given")
	}
	return t.blobstore.Delete(node)
}
// BuildSpaceIDIndexEntry returns the entry for the space id index: a
// relative symlink target pointing at the root node of the given space.
// NOTE(review): the nodeID parameter is unused and the space id is pathified
// for the nodes segment as well - presumably the space root node id equals
// the space id; confirm this is intentional for all callers.
func (t *Tree) BuildSpaceIDIndexEntry(spaceID, nodeID string) string {
	spaceSegment := lookup.Pathify(spaceID, 1, 2)
	nodeSegment := lookup.Pathify(spaceID, 4, 2)
	return "../../../spaces/" + spaceSegment + "/nodes/" + nodeSegment
}
// ResolveSpaceIDIndexEntry parses a space id index entry back into the space
// id and node id it points at. The first argument is unused.
func (t *Tree) ResolveSpaceIDIndexEntry(_, entry string) (string, string, error) {
	spaceID, nodeID, err := ReadSpaceAndNodeFromIndexLink(entry)
	return spaceID, nodeID, err
}
// ReadSpaceAndNodeFromIndexLink reads a symlink and parses space and node id if the link has the correct format, eg:
// ../../spaces/4c/510ada-c86b-4815-8820-42cdf82c3d51/nodes/4c/51/0a/da/-c86b-4815-8820-42cdf82c3d51
// ../../spaces/4c/510ada-c86b-4815-8820-42cdf82c3d51/nodes/4c/51/0a/da/-c86b-4815-8820-42cdf82c3d51.T.2022-02-24T12:35:18.196484592Z
func ReadSpaceAndNodeFromIndexLink(link string) (string, string, error) {
	// expected layout: ../../../spaces/sp/ace-id/nodes/sh/or/tn/od/eid
	// index:           0  1  2  3      4  5      6     7  8  9  10 11
	parts := strings.Split(link, string(filepath.Separator))
	wellFormed := len(parts) == 12 &&
		parts[0] == ".." && parts[1] == ".." && parts[2] == ".." &&
		parts[3] == "spaces" && parts[6] == "nodes"
	if !wellFormed {
		return "", "", errtypes.InternalError("malformed link")
	}
	spaceID := strings.Join(parts[4:6], "")
	nodeID := strings.Join(parts[7:12], "")
	return spaceID, nodeID, nil
}
// createDirNode materializes a directory node on disk and initializes its
// metadata: the tree size starts at zero and, when tree time or tree size
// accounting is enabled, the node is flagged for propagation.
// TODO check if node exists?
func (t *Tree) createDirNode(ctx context.Context, n *node.Node) error {
	ctx, span := tracer.Start(ctx, "createDirNode")
	defer span.End()
	// create a directory node
	if err := os.MkdirAll(n.InternalPath(), 0700); err != nil {
		return errors.Wrap(err, "Decomposedfs: error creating node")
	}
	attrs := n.NodeMetadata(ctx)
	// initialize as empty, TODO why bother? if it is not set we could treat it as 0?
	attrs[prefixes.TreesizeAttr] = []byte("0")
	if t.options.TreeTimeAccounting || t.options.TreeSizeAccounting {
		// mark the node for propagation
		attrs[prefixes.PropagationAttr] = []byte("1")
	}
	return n.SetXattrsWithContext(ctx, attrs, true)
}
// nodeIDRegep extracts the node id (everything after "/nodes/" up to the
// first dot, i.e. without the trash timestamp suffix) from a node path.
var nodeIDRegep = regexp.MustCompile(`.*/nodes/([^.]*).*`)
// readRecycleItem resolves a trash entry identified by space id, trash key
// and optional path inside the trashed folder. It returns the reconstructed
// recycle node, the trash symlink, the resolved on-disk path of the deleted
// node and the origin path (relative to the space root) recorded at deletion
// time. Origin falls back to "/" when it cannot be read.
// TODO refactor the returned params into Node properties? would make all the path transformations go away...
func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) (recycleNode *node.Node, trashItem string, deletedNodePath string, origin string, err error) {
	_, span := tracer.Start(ctx, "readRecycleItem")
	defer span.End()
	logger := appctx.GetLogger(ctx)
	if key == "" {
		return nil, "", "", "", errtypes.InternalError("key is empty")
	}
	backend := t.lookup.MetadataBackend()
	var nodeID string
	trashItem = filepath.Join(t.lookup.InternalRoot(), "spaces", lookup.Pathify(spaceID, 1, 2), "trash", lookup.Pathify(key, 4, 2))
	resolvedTrashItem, err := filepath.EvalSymlinks(trashItem)
	if err != nil {
		return
	}
	deletedNodePath, err = filepath.EvalSymlinks(filepath.Join(resolvedTrashItem, path))
	if err != nil {
		return
	}
	// derive the plain node id from the resolved path
	nodeID = nodeIDRegep.ReplaceAllString(deletedNodePath, "$1")
	nodeID = strings.ReplaceAll(nodeID, "/", "")
	recycleNode = node.New(spaceID, nodeID, "", "", 0, "", provider.ResourceType_RESOURCE_TYPE_INVALID, nil, t.lookup)
	recycleNode.SpaceRoot, err = node.ReadNode(ctx, t.lookup, spaceID, spaceID, false, nil, false)
	if err != nil {
		return
	}
	recycleNode.SetType(t.lookup.TypeFromPath(ctx, deletedNodePath))
	var attrBytes []byte
	if recycleNode.Type(ctx) == provider.ResourceType_RESOURCE_TYPE_FILE {
		// lookup blobID in extended attributes
		if attrBytes, err = backend.Get(ctx, deletedNodePath, prefixes.BlobIDAttr); err == nil {
			recycleNode.BlobID = string(attrBytes)
		} else {
			return
		}
		// lookup blobSize in extended attributes
		if recycleNode.Blobsize, err = backend.GetInt64(ctx, deletedNodePath, prefixes.BlobsizeAttr); err != nil {
			return
		}
	}
	// lookup parent id in extended attributes
	if attrBytes, err = backend.Get(ctx, deletedNodePath, prefixes.ParentidAttr); err == nil {
		recycleNode.ParentID = string(attrBytes)
	} else {
		return
	}
	// lookup name in extended attributes
	if attrBytes, err = backend.Get(ctx, deletedNodePath, prefixes.NameAttr); err == nil {
		recycleNode.Name = string(attrBytes)
	} else {
		return
	}
	// get origin node, is relative to space root
	origin = "/"
	// lookup origin path in extended attributes
	if attrBytes, err = backend.Get(ctx, resolvedTrashItem, prefixes.TrashOriginAttr); err == nil {
		origin = filepath.Join(string(attrBytes), path)
	} else {
		// best effort: a missing origin attribute is not fatal
		logger.Error().Err(err).Str("trashItem", trashItem).Str("deletedNodePath", deletedNodePath).Msg("could not read origin path, restoring to /")
	}
	return
}
// getNodeIDFromCache looks up the node id stored for path. An empty string
// is returned on a cache miss or read error.
func getNodeIDFromCache(ctx context.Context, path string, cache store.Store) string {
	_, span := tracer.Start(ctx, "getNodeIDFromCache")
	defer span.End()
	records, err := cache.Read(path)
	if err != nil || len(records) == 0 {
		return ""
	}
	return string(records[0].Value)
}
// storeNodeIDInCache remembers the node id for the given path in the cache.
func storeNodeIDInCache(ctx context.Context, path string, nodeID string, cache store.Store) error {
	_, span := tracer.Start(ctx, "storeNodeIDInCache")
	defer span.End()
	record := &store.Record{Key: path, Value: []byte(nodeID)}
	return cache.Write(record)
}

View File

@@ -0,0 +1,436 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package decomposedfs
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"time"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
"github.com/google/uuid"
"github.com/pkg/errors"
tusd "github.com/tus/tusd/v2/pkg/handler"
"github.com/opencloud-eu/reva/v2/pkg/appctx"
ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx"
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/opencloud-eu/reva/v2/pkg/rhttp/datatx/metrics"
"github.com/opencloud-eu/reva/v2/pkg/storage"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload"
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/chunking"
"github.com/opencloud-eu/reva/v2/pkg/storagespace"
"github.com/opencloud-eu/reva/v2/pkg/utils"
)
// Upload uploads data to the given resource
// TODO Upload (and InitiateUpload) needs a way to receive the expected checksum.
// Maybe in metadata as 'checksum' => 'sha1 aeosvp45w5xaeoe' = lowercase, space separated?
//
// The upload session must have been created via InitiateUpload before. For
// chunking v1 requests the chunk is first written to the chunk handler and
// the upload only proceeds once the final chunk completes the file. After
// the bytes are written the session is finalized, the optional
// UploadFinishedFunc is invoked and a minimal ResourceInfo (id, etag, mtime)
// is returned.
func (fs *Decomposedfs) Upload(ctx context.Context, req storage.UploadRequest, uff storage.UploadFinishedFunc) (*provider.ResourceInfo, error) {
	_, span := tracer.Start(ctx, "Upload")
	defer span.End()
	up, err := fs.GetUpload(ctx, req.Ref.GetPath())
	if err != nil {
		return &provider.ResourceInfo{}, errors.Wrap(err, "Decomposedfs: error retrieving upload")
	}
	session := up.(*upload.OcisSession)
	ctx = session.Context(ctx)
	if session.Chunk() != "" { // check chunking v1
		p, assembledFile, err := fs.chunkHandler.WriteChunk(session.Chunk(), req.Body)
		if err != nil {
			return &provider.ResourceInfo{}, err
		}
		if p == "" {
			// not the last chunk yet: drop the session and report partial content
			if err = session.Terminate(ctx); err != nil {
				return &provider.ResourceInfo{}, errors.Wrap(err, "Decomposedfs: error removing auxiliary files")
			}
			return &provider.ResourceInfo{}, errtypes.PartialContent(req.Ref.String())
		}
		// all chunks arrived: stream the assembled file into the session
		fd, err := os.Open(assembledFile)
		if err != nil {
			return &provider.ResourceInfo{}, errors.Wrap(err, "Decomposedfs: error opening assembled file")
		}
		defer fd.Close()
		defer os.RemoveAll(assembledFile)
		req.Body = fd
		size, err := session.WriteChunk(ctx, 0, req.Body)
		if err != nil {
			return &provider.ResourceInfo{}, errors.Wrap(err, "Decomposedfs: error writing to binary file")
		}
		session.SetSize(size)
	} else {
		size, err := session.WriteChunk(ctx, 0, req.Body)
		if err != nil {
			return &provider.ResourceInfo{}, errors.Wrap(err, "Decomposedfs: error writing to binary file")
		}
		if size != req.Length {
			return &provider.ResourceInfo{}, errtypes.PartialContent("Decomposedfs: unexpected end of stream")
		}
	}
	if err := session.FinishUploadDecomposed(ctx); err != nil {
		return &provider.ResourceInfo{}, err
	}
	if uff != nil {
		// notify the caller that the upload finished, referencing the uploaded path
		uploadRef := &provider.Reference{
			ResourceId: &provider.ResourceId{
				StorageId: session.ProviderID(),
				SpaceId:   session.SpaceID(),
				OpaqueId:  session.SpaceID(),
			},
			Path: utils.MakeRelativePath(filepath.Join(session.Dir(), session.Filename())),
		}
		executant := session.Executant()
		uff(session.SpaceOwner(), &executant, uploadRef)
	}
	ri := &provider.ResourceInfo{
		// fill with at least fileid, mtime and etag
		Id: &provider.ResourceId{
			StorageId: session.ProviderID(),
			SpaceId:   session.SpaceID(),
			OpaqueId:  session.NodeID(),
		},
	}
	// add etag to metadata
	ri.Etag, _ = node.CalculateEtag(session.NodeID(), session.MTime())
	if !session.MTime().IsZero() {
		ri.Mtime = utils.TimeToTS(session.MTime())
	}
	return ri, nil
}
// InitiateUpload returns upload ids corresponding to different protocols it supports
// TODO read optional content for small files in this request
// TODO InitiateUpload (and Upload) needs a way to receive the expected checksum. Maybe in metadata as 'checksum' => 'sha1 aeosvp45w5xaeoe' = lowercase, space separated?
//
// It resolves the target node, creates a new upload session populated from
// the request metadata (mtime, expiry, checksum, preconditions, lock id),
// checks quota, permissions and locks, persists the session and returns its
// id for both the "simple" and "tus" protocols. Zero-length uploads are
// finalized immediately.
func (fs *Decomposedfs) InitiateUpload(ctx context.Context, ref *provider.Reference, uploadLength int64, metadata map[string]string) (map[string]string, error) {
	_, span := tracer.Start(ctx, "InitiateUpload")
	defer span.End()
	log := appctx.GetLogger(ctx)
	// remember the path from the reference
	refpath := ref.GetPath()
	var chunk *chunking.ChunkBLOBInfo
	var err error
	if chunking.IsChunked(refpath) { // check chunking v1
		chunk, err = chunking.GetChunkBLOBInfo(refpath)
		if err != nil {
			return nil, errtypes.BadRequest(err.Error())
		}
		// resolve the final file path, not the chunk path
		ref.Path = chunk.Path
	}
	n, err := fs.lu.NodeFromResource(ctx, ref)
	switch err.(type) {
	case nil:
		// ok
	case errtypes.IsNotFound:
		return nil, errtypes.PreconditionFailed(err.Error())
	default:
		return nil, err
	}
	// permissions are checked in NewUpload below
	relative, err := fs.lu.Path(ctx, n, node.NoCheck)
	// TODO why do we need the path here?
	// jfd: it is used later when emitting the UploadReady event ...
	// AAAND refPath might be . when accessing with an id / relative reference ... which causes NodeName to become . But then dir will also always be .
	// That is why we still have to read the path here: so that the event we emit contains a relative reference with a path relative to the space root. WTF
	if err != nil {
		return nil, err
	}
	lockID, _ := ctxpkg.ContextGetLockID(ctx)
	// seed the session with everything later steps (and the assembled upload) need
	session := fs.sessionStore.New(ctx)
	session.SetMetadata("filename", n.Name)
	session.SetStorageValue("NodeName", n.Name)
	if chunk != nil {
		session.SetStorageValue("Chunk", filepath.Base(refpath))
	}
	session.SetMetadata("dir", filepath.Dir(relative))
	session.SetStorageValue("Dir", filepath.Dir(relative))
	session.SetMetadata("lockid", lockID)
	session.SetSize(uploadLength)
	session.SetStorageValue("SpaceRoot", n.SpaceRoot.ID)                                     // TODO SpaceRoot -> SpaceID
	session.SetStorageValue("SpaceOwnerOrManager", n.SpaceOwnerOrManager(ctx).GetOpaqueId()) // TODO needed for what?
	spaceGID, ok := ctx.Value(CtxKeySpaceGID).(uint32)
	if ok {
		session.SetStorageValue("SpaceGid", fmt.Sprintf("%d", spaceGID))
	}
	iid, _ := ctxpkg.ContextGetInitiator(ctx)
	session.SetMetadata("initiatorid", iid)
	if metadata != nil {
		session.SetMetadata("providerID", metadata["providerID"])
		// the literal string "null" is treated as absent for mtime and expires
		if mtime, ok := metadata["mtime"]; ok {
			if mtime != "null" {
				session.SetMetadata("mtime", metadata["mtime"])
			}
		}
		if expiration, ok := metadata["expires"]; ok {
			if expiration != "null" {
				session.SetMetadata("expires", metadata["expires"])
			}
		}
		if _, ok := metadata["sizedeferred"]; ok {
			session.SetSizeIsDeferred(true)
		}
		if checksum, ok := metadata["checksum"]; ok {
			parts := strings.SplitN(checksum, " ", 2)
			if len(parts) != 2 {
				return nil, errtypes.BadRequest("invalid checksum format. must be '[algorithm] [checksum]'")
			}
			switch parts[0] {
			case "sha1", "md5", "adler32":
				session.SetMetadata("checksum", checksum)
			default:
				return nil, errtypes.BadRequest("unsupported checksum algorithm: " + parts[0])
			}
		}
		// only check preconditions if they are not empty // TODO or is this a bad request?
		if metadata["if-match"] != "" {
			session.SetMetadata("if-match", metadata["if-match"])
		}
		if metadata["if-none-match"] != "" {
			session.SetMetadata("if-none-match", metadata["if-none-match"])
		}
		if metadata["if-unmodified-since"] != "" {
			session.SetMetadata("if-unmodified-since", metadata["if-unmodified-since"])
		}
	}
	// default the mtime to now when the client did not send one
	if session.MTime().IsZero() {
		session.SetMetadata("mtime", utils.TimeToOCMtime(time.Now()))
	}
	log.Debug().Str("uploadid", session.ID()).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Interface("metadata", metadata).Msg("Decomposedfs: resolved filename")
	_, err = node.CheckQuota(ctx, n.SpaceRoot, n.Exists, uint64(n.Blobsize), uint64(session.Size()))
	if err != nil {
		return nil, err
	}
	if session.Filename() == "" {
		return nil, errors.New("Decomposedfs: missing filename in metadata")
	}
	if session.Dir() == "" {
		return nil, errors.New("Decomposedfs: missing dir in metadata")
	}
	// the parent owner will become the new owner
	parent, perr := n.Parent(ctx)
	if perr != nil {
		return nil, errors.Wrap(perr, "Decomposedfs: error getting parent "+n.ParentID)
	}
	// check permissions
	var (
		checkNode *node.Node
		path      string
	)
	if n.Exists {
		// check permissions of file to be overwritten
		checkNode = n
		path, _ = storagespace.FormatReference(&provider.Reference{ResourceId: &provider.ResourceId{
			SpaceId:  checkNode.SpaceID,
			OpaqueId: checkNode.ID,
		}})
	} else {
		// check permissions of parent
		checkNode = parent
		path, _ = storagespace.FormatReference(&provider.Reference{ResourceId: &provider.ResourceId{
			SpaceId:  checkNode.SpaceID,
			OpaqueId: checkNode.ID,
		}, Path: n.Name})
	}
	rp, err := fs.p.AssemblePermissions(ctx, checkNode)
	switch {
	case err != nil:
		return nil, err
	case !rp.InitiateFileUpload:
		return nil, errtypes.PermissionDenied(path)
	}
	// are we trying to overwriting a folder with a file?
	if n.Exists && n.IsDir(ctx) {
		return nil, errtypes.PreconditionFailed("resource is not a file")
	}
	// check lock
	if err := n.CheckLock(ctx); err != nil {
		return nil, err
	}
	usr := ctxpkg.ContextMustGetUser(ctx)
	// fill future node info
	if n.Exists {
		// If-None-Match: * requires that the target does not exist yet
		if session.HeaderIfNoneMatch() == "*" {
			return nil, errtypes.Aborted(fmt.Sprintf("parent %s already has a child %s, id %s", n.ParentID, n.Name, n.ID))
		}
		session.SetStorageValue("NodeId", n.ID)
		session.SetStorageValue("NodeExists", "true")
	} else {
		session.SetStorageValue("NodeId", uuid.New().String())
	}
	session.SetStorageValue("NodeParentId", n.ParentID)
	session.SetExecutant(usr)
	session.SetStorageValue("LogLevel", log.GetLevel().String())
	log.Debug().Interface("session", session).Msg("Decomposedfs: built session info")
	err = fs.um.RunInBaseScope(func() error {
		// Create binary file in the upload folder with no content
		// It will be used when determining the current offset of an upload
		err := session.TouchBin()
		if err != nil {
			return err
		}
		return session.Persist(ctx)
	})
	if err != nil {
		return nil, err
	}
	metrics.UploadSessionsInitiated.Inc()
	if uploadLength == 0 {
		// Directly finish this upload
		err = session.FinishUploadDecomposed(ctx)
		if err != nil {
			return nil, err
		}
	}
	return map[string]string{
		"simple": session.ID(),
		"tus":    session.ID(),
	}, nil
}
// UseIn tells the tus upload middleware which extensions it supports.
// Decomposedfs registers itself as the core storage as well as the
// terminater, concater and length-deferrer implementation.
func (fs *Decomposedfs) UseIn(composer *tusd.StoreComposer) {
	composer.UseCore(fs)
	composer.UseTerminater(fs)
	composer.UseConcater(fs)
	composer.UseLengthDeferrer(fs)
}
// To implement the core tus.io protocol as specified in https://tus.io/protocols/resumable-upload.html#core-protocol
// - the storage needs to implement NewUpload and GetUpload
// - the upload needs to implement the tusd.Upload interface: WriteChunk, GetInfo, GetReader and FinishUpload

// NewUpload returns a new tus Upload instance.
// It is deliberately not implemented: uploads must be initiated through the
// CS3 InitiateUpload API so permission checks and session setup happen first.
func (fs *Decomposedfs) NewUpload(ctx context.Context, info tusd.FileInfo) (tusd.Upload, error) {
	return nil, fmt.Errorf("not implemented, use InitiateUpload on the CS3 API to start a new upload")
}
// GetUpload returns the Upload for the given upload id
func (fs *Decomposedfs) GetUpload(ctx context.Context, id string) (tusd.Upload, error) {
	var (
		session tusd.Upload
		err     error
	)
	// Resolve the session inside the base user scope. The scope switch
	// itself cannot fail here, so only the lookup error is propagated.
	_ = fs.um.RunInBaseScope(func() error {
		session, err = fs.sessionStore.Get(ctx, id)
		return nil
	})
	return session, err
}
// ListUploadSessions returns the upload sessions for the given filter.
// An ID filter short-circuits to a single lookup; otherwise all sessions are
// listed and then narrowed by the Processing, Expired and HasVirus filters.
func (fs *Decomposedfs) ListUploadSessions(ctx context.Context, filter storage.UploadSessionFilter) ([]storage.UploadSession, error) {
	var sessions []*upload.OcisSession
	if filter.ID != nil && *filter.ID != "" {
		// fast path: a concrete session id was requested
		session, err := fs.sessionStore.Get(ctx, *filter.ID)
		if err != nil {
			return nil, err
		}
		sessions = []*upload.OcisSession{session}
	} else {
		var err error
		sessions, err = fs.sessionStore.List(ctx)
		if err != nil {
			return nil, err
		}
	}
	filteredSessions := []storage.UploadSession{}
	// a single reference time keeps the expiry comparison consistent across sessions
	now := time.Now()
	for _, session := range sessions {
		if filter.Processing != nil && *filter.Processing != session.IsProcessing() {
			continue
		}
		if filter.Expired != nil {
			if *filter.Expired {
				// keep only sessions whose expiry has already passed
				if now.Before(session.Expires()) {
					continue
				}
			} else {
				// keep only sessions that are still valid
				if now.After(session.Expires()) {
					continue
				}
			}
		}
		if filter.HasVirus != nil {
			// a non-empty scan result string means the upload was flagged as infected
			sr, _ := session.ScanData()
			infected := sr != ""
			if *filter.HasVirus != infected {
				continue
			}
		}
		filteredSessions = append(filteredSessions, session)
	}
	return filteredSessions, nil
}
// AsTerminatableUpload returns a TerminatableUpload
// To implement the termination extension as specified in https://tus.io/protocols/resumable-upload.html#termination
// the storage needs to implement AsTerminatableUpload.
// The cast is safe because GetUpload only ever hands out *upload.OcisSession.
func (fs *Decomposedfs) AsTerminatableUpload(up tusd.Upload) tusd.TerminatableUpload {
	return up.(*upload.OcisSession)
}

// AsLengthDeclarableUpload returns a LengthDeclarableUpload
// To implement the creation-defer-length extension as specified in https://tus.io/protocols/resumable-upload.html#creation
// the storage needs to implement AsLengthDeclarableUpload
func (fs *Decomposedfs) AsLengthDeclarableUpload(up tusd.Upload) tusd.LengthDeclarableUpload {
	return up.(*upload.OcisSession)
}

// AsConcatableUpload returns a ConcatableUpload
// To implement the concatenation extension as specified in https://tus.io/protocols/resumable-upload.html#concatenation
// the storage needs to implement AsConcatableUpload
func (fs *Decomposedfs) AsConcatableUpload(up tusd.Upload) tusd.ConcatableUpload {
	return up.(*upload.OcisSession)
}

View File

@@ -0,0 +1,344 @@
// Copyright 2018-2023 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package upload
import (
"context"
"encoding/json"
"os"
"path/filepath"
"strconv"
"time"
"github.com/google/renameio/v2"
tusd "github.com/tus/tusd/v2/pkg/handler"
userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
typespb "github.com/cs3org/go-cs3apis/cs3/types/v1beta1"
"github.com/opencloud-eu/reva/v2/pkg/appctx"
ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/utils"
)
// OcisSession extends tus upload lifecycle with postprocessing steps.
// It wraps a tusd.FileInfo whose Storage and MetaData maps carry all session
// state; the struct itself is persisted as a .info JSON file next to the
// binary upload data.
type OcisSession struct {
	store OcisStore
	// for now, we keep the json files in the uploads folder
	info tusd.FileInfo
}
// Context returns a context with the user, logger and lockid used when initiating the upload session.
// The executant user, lock id and initiator id are restored from the
// persisted session info so asynchronous continuations run as the original user.
func (s *OcisSession) Context(ctx context.Context) context.Context {
	// restore logger from file info; tag it with the current pid
	sub := s.store.log.With().Int("pid", os.Getpid()).Logger()
	ctx = appctx.WithLogger(ctx, &sub)
	ctx = ctxpkg.ContextSetLockID(ctx, s.lockID())
	ctx = ctxpkg.ContextSetUser(ctx, s.executantUser())
	return ctxpkg.ContextSetInitiator(ctx, s.InitiatorID())
}
// lockID returns the lock id recorded when the session was initiated (may be empty).
func (s *OcisSession) lockID() string {
	return s.info.MetaData["lockid"]
}

// executantUser reconstructs the initiating user from the session's Storage map.
func (s *OcisSession) executantUser() *userpb.User {
	var o *typespb.Opaque
	// unmarshal error deliberately ignored: a missing/invalid opaque simply yields nil
	_ = json.Unmarshal([]byte(s.info.Storage["UserOpaque"]), &o)
	return &userpb.User{
		Id: &userpb.UserId{
			Type:     userpb.UserType(userpb.UserType_value[s.info.Storage["UserType"]]),
			Idp:      s.info.Storage["Idp"],
			OpaqueId: s.info.Storage["UserId"],
		},
		Username:    s.info.Storage["UserName"],
		DisplayName: s.info.Storage["UserDisplayName"],
		Opaque:      o,
	}
}
// Purge deletes the upload session metadata and written binary data.
// Both removals are attempted even if the first one fails so a partially
// broken session cannot leak its binary data; the metadata error takes
// precedence when both fail.
func (s *OcisSession) Purge(ctx context.Context) error {
	_, span := tracer.Start(ctx, "Purge")
	defer span.End()
	infoErr := os.Remove(sessionPath(s.store.root, s.info.ID))
	binErr := os.Remove(s.binPath())
	if infoErr != nil {
		return infoErr
	}
	return binErr
}
// TouchBin creates an empty file to contain the binary data. Its size will be
// used to keep track of the tus upload offset (see OcisStore.Get).
func (s *OcisSession) TouchBin() error {
	file, err := os.OpenFile(s.binPath(), os.O_CREATE|os.O_WRONLY, defaultFilePerm)
	if err != nil {
		return err
	}
	return file.Close()
}
// Persist writes the upload session metadata to disk.
// Events can update the scan outcome and the finished event might read an
// empty file because of race conditions, so the write has to be atomic.
func (s *OcisSession) Persist(ctx context.Context) error {
	_, span := tracer.Start(ctx, "Persist")
	defer span.End()
	sessionPath := sessionPath(s.store.root, s.info.ID)
	// create folder structure (if needed)
	if err := os.MkdirAll(filepath.Dir(sessionPath), 0700); err != nil {
		return err
	}
	d, err := json.Marshal(s.info)
	if err != nil {
		return err
	}
	// renameio writes to a temp file and renames it into place, so readers
	// never observe a partially written .info file
	return renameio.WriteFile(sessionPath, d, 0600)
}
// ToFileInfo returns tus compatible FileInfo so the tus handler can access the upload offset
func (s *OcisSession) ToFileInfo() tusd.FileInfo {
	return s.info
}

// ProviderID returns the provider id
func (s *OcisSession) ProviderID() string {
	return s.info.MetaData["providerID"]
}

// SpaceID returns the space id
func (s *OcisSession) SpaceID() string {
	return s.info.Storage["SpaceRoot"]
}

// NodeID returns the node id
func (s *OcisSession) NodeID() string {
	return s.info.Storage["NodeId"]
}

// NodeParentID returns the nodes parent id
func (s *OcisSession) NodeParentID() string {
	return s.info.Storage["NodeParentId"]
}

// NodeExists returns whether or not the node existed during InitiateUpload.
// FIXME If two requests try to write the same file they both will store a new
// random node id in the session and try to initialize a new node when
// finishing the upload. The second request will fail with an already exists
// error when trying to create the symlink for the node in the parent directory.
// A node should be created as part of InitiateUpload. When listing a directory
// we can decide if we want to skip the entry, or expose upload progress
// information. But that is a bigger change and might involve client work.
func (s *OcisSession) NodeExists() bool {
	return s.info.Storage["NodeExists"] == "true"
}

// HeaderIfMatch returns the if-match header for the upload session
func (s *OcisSession) HeaderIfMatch() string {
	return s.info.MetaData["if-match"]
}

// HeaderIfNoneMatch returns the if-none-match header for the upload session
func (s *OcisSession) HeaderIfNoneMatch() string {
	return s.info.MetaData["if-none-match"]
}

// HeaderIfUnmodifiedSince returns the if-unmodified-since header for the upload session
func (s *OcisSession) HeaderIfUnmodifiedSince() string {
	return s.info.MetaData["if-unmodified-since"]
}

// Node reads the target node of the session from the metadata store.
func (s *OcisSession) Node(ctx context.Context) (*node.Node, error) {
	return node.ReadNode(ctx, s.store.lu, s.SpaceID(), s.info.Storage["NodeId"], false, nil, true)
}

// ID returns the upload session id
func (s *OcisSession) ID() string {
	return s.info.ID
}

// Filename returns the name of the node, which is not the same as the name of
// the file being uploaded for legacy chunked uploads.
func (s *OcisSession) Filename() string {
	return s.info.Storage["NodeName"]
}

// Chunk returns the chunk name when a legacy chunked upload was started
func (s *OcisSession) Chunk() string {
	return s.info.Storage["Chunk"]
}
// SetMetadata is used to fill the upload metadata that will be exposed to the end user
func (s *OcisSession) SetMetadata(key, value string) {
	s.info.MetaData[key] = value
}

// SetStorageValue is used to set metadata only relevant for the upload session implementation
func (s *OcisSession) SetStorageValue(key, value string) {
	s.info.Storage[key] = value
}

// SetSize will set the upload size of the underlying tus info.
func (s *OcisSession) SetSize(size int64) {
	s.info.Size = size
}

// SetSizeIsDeferred is used to change the SizeIsDeferred property of the underlying tus info.
func (s *OcisSession) SetSizeIsDeferred(value bool) {
	s.info.SizeIsDeferred = value
}
// Dir returns the directory to which the upload is made
// TODO get rid of Dir(), whoever consumes the reference should be able to deal
// with a relative reference.
// Dir is only used to:
// - fill the Path property when emitting the UploadReady event after
//   postprocessing finished. I wonder why the UploadReady contains a finished
//   flag ... maybe multiple distinct events would make more sense.
// - build the reference that is passed to the FileUploaded event in the
//   UploadFinishedFunc callback passed to the Upload call used for simple
//   datatx put requests
//
// AFAICT only search and audit services consume the path.
// - search needs to index from the root anyway. And it only needs the most
//   recent path to put it in the index. So it should already be able to deal
//   with an id based reference.
// - audit on the other hand needs to log events with the path at the state of
//   the event ... so it does need the full path.
//
// I think we can safely determine the path later, right before emitting the
// event. And maybe make it configurable, because only audit needs it, anyway.
func (s *OcisSession) Dir() string {
	return s.info.Storage["Dir"]
}

// Size returns the upload size
func (s *OcisSession) Size() int64 {
	return s.info.Size
}

// SizeDiff returns the size diff that was calculated after postprocessing.
// A parse failure yields 0; the value is stored as a decimal string.
func (s *OcisSession) SizeDiff() int64 {
	sizeDiff, _ := strconv.ParseInt(s.info.MetaData["sizeDiff"], 10, 64)
	return sizeDiff
}
// Reference returns an id-based reference that can be used to access the
// uploaded resource.
func (s *OcisSession) Reference() provider.Reference {
	return provider.Reference{
		ResourceId: &provider.ResourceId{
			StorageId: s.info.MetaData["providerID"],
			SpaceId:   s.info.Storage["SpaceRoot"],
			OpaqueId:  s.info.Storage["NodeId"],
		},
		// Path is not used
	}
}
// Executant returns the id of the user that initiated the upload session
func (s *OcisSession) Executant() userpb.UserId {
	return userpb.UserId{
		Type:     userpb.UserType(userpb.UserType_value[s.info.Storage["UserType"]]),
		Idp:      s.info.Storage["Idp"],
		OpaqueId: s.info.Storage["UserId"],
	}
}

// SetExecutant is used to remember the user that initiated the upload session.
// The user is flattened into the Storage map so it survives session persistence.
func (s *OcisSession) SetExecutant(u *userpb.User) {
	s.info.Storage["Idp"] = u.GetId().GetIdp()
	s.info.Storage["UserId"] = u.GetId().GetOpaqueId()
	s.info.Storage["UserType"] = utils.UserTypeToString(u.GetId().Type)
	s.info.Storage["UserName"] = u.GetUsername()
	s.info.Storage["UserDisplayName"] = u.GetDisplayName()
	// marshal error deliberately ignored; an unmarshalable opaque is stored as-is
	b, _ := json.Marshal(u.GetOpaque())
	s.info.Storage["UserOpaque"] = string(b)
}
// Offset returns the current upload offset
func (s *OcisSession) Offset() int64 {
	return s.info.Offset
}

// SpaceOwner returns the id of the space owner
func (s *OcisSession) SpaceOwner() *userpb.UserId {
	return &userpb.UserId{
		// idp and type do not seem to be consumed and the node currently only stores the user id anyway
		OpaqueId: s.info.Storage["SpaceOwnerOrManager"],
	}
}

// Expires returns the time the upload session expires.
// The zero time is returned when no expiry was recorded or it cannot be parsed.
func (s *OcisSession) Expires() time.Time {
	var t time.Time
	if value, ok := s.info.MetaData["expires"]; ok {
		t, _ = utils.MTimeToTime(value)
	}
	return t
}

// MTime returns the mtime to use for the uploaded file.
// The zero time is returned when no mtime was requested by the client.
func (s *OcisSession) MTime() time.Time {
	var t time.Time
	if value, ok := s.info.MetaData["mtime"]; ok {
		t, _ = utils.MTimeToTime(value)
	}
	return t
}

// IsProcessing returns true if all bytes have been received. The session then has entered postprocessing state.
func (s *OcisSession) IsProcessing() bool {
	// We might need a more sophisticated way to determine processing status soon
	return s.info.Size == s.info.Offset && s.info.MetaData["scanResult"] == ""
}
// binPath returns the path to the file storing the binary data.
func (s *OcisSession) binPath() string {
	return filepath.Join(s.store.root, "uploads", s.info.ID)
}

// InitiatorID returns the id of the initiating client
func (s *OcisSession) InitiatorID() string {
	return s.info.MetaData["initiatorid"]
}

// SetScanData sets virus scan data to the upload session.
// The date is stored as RFC3339 so ScanData can parse it back.
func (s *OcisSession) SetScanData(result string, date time.Time) {
	s.info.MetaData["scanResult"] = result
	s.info.MetaData["scanDate"] = date.Format(time.RFC3339)
}

// ScanData returns the virus scan data.
// An empty result and zero time mean the upload has not been scanned.
func (s *OcisSession) ScanData() (string, time.Time) {
	date := s.info.MetaData["scanDate"]
	if date == "" {
		return "", time.Time{}
	}
	d, _ := time.Parse(time.RFC3339, date)
	return s.info.MetaData["scanResult"], d
}
// sessionPath returns the path to the .info file storing the file's info.
func sessionPath(root, id string) string {
return filepath.Join(root, "uploads", id+".info")
}

View File

@@ -0,0 +1,445 @@
// Copyright 2018-2022 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package upload
import (
"context"
"encoding/json"
"fmt"
iofs "io/fs"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"syscall"
"time"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
"github.com/google/uuid"
"github.com/opencloud-eu/reva/v2/pkg/appctx"
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/opencloud-eu/reva/v2/pkg/events"
"github.com/opencloud-eu/reva/v2/pkg/storage"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/aspects"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/usermapper"
"github.com/pkg/errors"
"github.com/rogpeppe/go-internal/lockedfile"
"github.com/rs/zerolog"
tusd "github.com/tus/tusd/v2/pkg/handler"
)
// _idRegexp extracts the upload id from an .info file path (last path segment
// without the .info suffix).
var _idRegexp = regexp.MustCompile(".*/([^/]+).info")

// PermissionsChecker defines an interface for checking permissions on a Node
type PermissionsChecker interface {
	AssemblePermissions(ctx context.Context, n *node.Node) (ap provider.ResourcePermissions, err error)
}
// OcisStore manages upload sessions: it creates, lists, loads and cleans up
// the per-upload .info/binary file pairs below root/uploads.
type OcisStore struct {
	fs                storage.FS
	lu                node.PathLookup
	tp                node.Tree
	um                usermapper.Mapper
	root              string
	pub               events.Publisher
	async             bool
	tknopts           options.TokenOptions
	disableVersioning bool
	log               *zerolog.Logger
}
// NewSessionStore returns a new OcisStore wired up from the given aspects.
func NewSessionStore(fs storage.FS, aspects aspects.Aspects, root string, async bool, tknopts options.TokenOptions, log *zerolog.Logger) *OcisStore {
	store := &OcisStore{
		fs:                fs,
		lu:                aspects.Lookup,
		tp:                aspects.Tree,
		um:                aspects.UserMapper,
		root:              root,
		pub:               aspects.EventStream,
		async:             async,
		tknopts:           tknopts,
		disableVersioning: aspects.DisableVersioning,
		log:               log,
	}
	return store
}
// New returns a new upload session with a fresh random id and empty metadata.
func (store OcisStore) New(ctx context.Context) *OcisSession {
	return &OcisSession{
		store: store,
		info: tusd.FileInfo{
			ID: uuid.New().String(),
			Storage: map[string]string{
				"Type": "OCISStore",
			},
			MetaData: tusd.MetaData{},
		},
	}
}
// List lists all upload sessions found below the store's uploads directory.
// Sessions that cannot be read are logged (with the cause) and skipped so one
// corrupt .info file does not hide the remaining sessions.
func (store OcisStore) List(ctx context.Context) ([]*OcisSession, error) {
	uploads := []*OcisSession{}
	infoFiles, err := filepath.Glob(filepath.Join(store.root, "uploads", "*.info"))
	if err != nil {
		return nil, err
	}

	for _, info := range infoFiles {
		// the upload id is the .info filename without its extension
		id := strings.TrimSuffix(filepath.Base(info), filepath.Ext(info))
		progress, err := store.Get(ctx, id)
		if err != nil {
			appctx.GetLogger(ctx).Error().Err(err).Str("path", info).Msg("Decomposedfs: could not getUploadSession")
			continue
		}

		uploads = append(uploads, progress)
	}
	return uploads, nil
}
// Get returns the upload session for the given upload id.
// Missing session metadata or binary data is reported as tusd.ErrNotFound so
// the tus handler responds with 404; the upload offset is derived from the
// current size of the binary file.
func (store OcisStore) Get(ctx context.Context, id string) (*OcisSession, error) {
	sessionPath := sessionPath(store.root, id)
	// guard against ids that would escape the uploads directory
	match := _idRegexp.FindStringSubmatch(sessionPath)
	if match == nil || len(match) < 2 {
		return nil, fmt.Errorf("invalid upload path")
	}
	session := OcisSession{
		store: store,
		info:  tusd.FileInfo{},
	}
	data, err := os.ReadFile(sessionPath)
	if err != nil {
		// handle stale NFS file handles that can occur when the file is deleted between the ATTR and FOPEN call of os.ReadFile
		if pathErr, ok := err.(*os.PathError); ok && pathErr.Err == syscall.ESTALE {
			appctx.GetLogger(ctx).Info().Str("session", id).Err(err).Msg("treating stale file handle as not found")
			err = tusd.ErrNotFound
		}
		if errors.Is(err, iofs.ErrNotExist) {
			// Interpret os.ErrNotExist as 404 Not Found
			err = tusd.ErrNotFound
		}
		return nil, err
	}
	if err := json.Unmarshal(data, &session.info); err != nil {
		return nil, err
	}
	stat, err := os.Stat(session.binPath())
	if err != nil {
		if os.IsNotExist(err) {
			// Interpret os.ErrNotExist as 404 Not Found
			err = tusd.ErrNotFound
		}
		return nil, err
	}
	// the size of the binary file is the authoritative upload offset
	session.info.Offset = stat.Size()
	return &session, nil
}
// Session is the interface used by the Cleanup call.
// It is the minimal surface Cleanup needs from an OcisSession.
type Session interface {
	ID() string
	Node(ctx context.Context) (*node.Node, error)
	Context(ctx context.Context) context.Context
	Cleanup(revertNodeMetadata, cleanBin, cleanInfo bool)
}
// Cleanup cleans upload metadata, binary data and processing status as necessary.
// keepUpload=true preserves the .info and binary files (e.g. for async
// postprocessing); unmarkPostprocessing additionally clears the processing
// flag on the target node.
func (store OcisStore) Cleanup(ctx context.Context, session Session, revertNodeMetadata, keepUpload, unmarkPostprocessing bool) {
	ctx, span := tracer.Start(session.Context(ctx), "Cleanup")
	defer span.End()
	session.Cleanup(revertNodeMetadata, !keepUpload, !keepUpload)

	// unset processing status
	if unmarkPostprocessing {
		n, err := session.Node(ctx)
		if err != nil {
			appctx.GetLogger(ctx).Info().Str("session", session.ID()).Err(err).Msg("could not read node")
			return
		}
		// FIXME: after cleanup the node might already be deleted ...
		if n != nil { // node can be nil when there was an error before it was created (eg. checksum-mismatch)
			if err := n.UnmarkProcessing(ctx, session.ID()); err != nil {
				appctx.GetLogger(ctx).Info().Str("path", n.InternalPath()).Err(err).Msg("unmarking processing failed")
			}
		}
	}
}
// CreateNodeForUpload will create the target node for the Upload.
// For an existing node it creates a new revision (updateExistingNode); for a
// new node it initializes the node in the tree. In both cases it writes the
// technical xattrs, marks the node as processing, sets the mtime and persists
// the session. The returned node carries the new metadata.
// TODO move this to the node package as NodeFromUpload?
// should we in InitiateUpload create the node first? and then the upload?
func (store OcisStore) CreateNodeForUpload(ctx context.Context, session *OcisSession, initAttrs node.Attributes) (*node.Node, error) {
	ctx, span := tracer.Start(session.Context(ctx), "CreateNodeForUpload")
	defer span.End()
	n := node.New(
		session.SpaceID(),
		session.NodeID(),
		session.NodeParentID(),
		session.Filename(),
		session.Size(),
		session.ID(),
		provider.ResourceType_RESOURCE_TYPE_FILE,
		nil,
		store.lu,
	)
	var err error
	// the space root is needed for quota checks further down
	n.SpaceRoot, err = node.ReadNode(ctx, store.lu, session.SpaceID(), session.SpaceID(), false, nil, false)
	if err != nil {
		return nil, err
	}

	// check lock
	if err := n.CheckLock(ctx); err != nil {
		return nil, err
	}

	var unlock metadata.UnlockFunc
	if session.NodeExists() { // TODO this is wrong. The node should be created when the upload starts, the revisions should be created independently of the node
		// we do not need to propagate a change when a node is created, only when the upload is ready.
		// that still creates problems for desktop clients because if another change causes propagation it will detects an empty file
		// so the first upload has to point to the first revision with the expected size. The file cannot be downloaded, but it can be overwritten (which will create a new revision and make the node reflect the latest revision)
		// any finished postprocessing will not affect the node metadata.
		// *thinking* but then initializing an upload will lock the file until the upload has finished. That sucks.
		// so we have to check if the node has been created meanwhile (well, only in case the upload does not know the nodeid ... or the NodeExists array that is checked by session.NodeExists())
		// FIXME look at the disk again to see if the file has been created in between, or just try initializing a new node and do the update existing node as a fallback. <- the latter!
		unlock, err = store.updateExistingNode(ctx, session, n, session.SpaceID(), uint64(session.Size()))
		if err != nil {
			appctx.GetLogger(ctx).Error().Err(err).Msg("failed to update existing node")
		}
	} else {
		// cache the id->path mapping before the node is materialized on disk
		if c, ok := store.lu.(node.IDCacher); ok {
			err := c.CacheID(ctx, n.SpaceID, n.ID, filepath.Join(n.ParentPath(), n.Name))
			if err != nil {
				appctx.GetLogger(ctx).Error().Err(err).Msg("failed to cache id")
			}
		}
		unlock, err = store.tp.InitNewNode(ctx, n, uint64(session.Size()))
		if err != nil {
			appctx.GetLogger(ctx).Error().Str("path", n.InternalPath()).Err(err).Msg("failed to init new node")
		}
		// a brand new node grows the space by the full upload size
		session.info.MetaData["sizeDiff"] = strconv.FormatInt(session.Size(), 10)
	}
	// release the node lock regardless of how the attribute writes below turn out
	defer func() {
		if unlock == nil {
			appctx.GetLogger(ctx).Info().Msg("did not get a unlockfunc, not unlocking")
			return
		}

		if err := unlock(); err != nil {
			appctx.GetLogger(ctx).Error().Err(err).Str("nodeid", n.ID).Str("parentid", n.ParentID).Msg("could not close lock")
		}
	}()
	if err != nil {
		return nil, err
	}

	// overwrite technical information
	initAttrs.SetString(prefixes.IDAttr, n.ID)
	initAttrs.SetInt64(prefixes.TypeAttr, int64(provider.ResourceType_RESOURCE_TYPE_FILE))
	initAttrs.SetString(prefixes.ParentidAttr, n.ParentID)
	initAttrs.SetString(prefixes.NameAttr, n.Name)
	initAttrs.SetString(prefixes.BlobIDAttr, n.BlobID)
	initAttrs.SetInt64(prefixes.BlobsizeAttr, n.Blobsize)
	// mark the node as processing until postprocessing for this session finished
	initAttrs.SetString(prefixes.StatusPrefix, node.ProcessingStatus+session.ID())

	// set mtime on the new node
	mtime := time.Now()
	if !session.MTime().IsZero() {
		// overwrite mtime if requested
		mtime = session.MTime()
	}
	err = store.lu.TimeManager().OverrideMtime(ctx, n, &initAttrs, mtime)
	if err != nil {
		return nil, errors.Wrap(err, "Decomposedfs: failed to set the mtime")
	}

	// update node metadata with new blobid etc
	err = n.SetXattrsWithContext(ctx, initAttrs, false)
	if err != nil {
		return nil, errors.Wrap(err, "Decomposedfs: could not write metadata")
	}

	err = store.um.RunInBaseScope(func() error {
		return session.Persist(ctx)
	})
	if err != nil {
		return nil, err
	}

	return n, nil
}
// updateExistingNode prepares an existing node for being overwritten by the
// upload: it write-locks the node, enforces quota and the conditional request
// headers (if-match / if-none-match / if-unmodified-since) and, unless
// versioning is disabled, snapshots the current content metadata into a
// revision node. The returned unlock func must be called by the caller; it is
// non-nil even on most error paths so the lock is always releasable.
func (store OcisStore) updateExistingNode(ctx context.Context, session *OcisSession, n *node.Node, spaceID string, fsize uint64) (metadata.UnlockFunc, error) {
	_, span := tracer.Start(ctx, "updateExistingNode")
	defer span.End()
	targetPath := n.InternalPath()

	// write lock existing node before reading any metadata
	f, err := lockedfile.OpenFile(store.lu.MetadataBackend().LockfilePath(targetPath), os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		return nil, err
	}
	unlock := func() error {
		// NOTE: to prevent stale NFS file handles do not remove lock file!
		return f.Close()
	}

	// NOTE(review): the ReadNode error is discarded here; if it fails, `old`
	// is presumably nil and the Blobsize access below would panic — confirm.
	old, _ := node.ReadNode(ctx, store.lu, spaceID, n.ID, false, nil, false)
	if _, err := node.CheckQuota(ctx, n.SpaceRoot, true, uint64(old.Blobsize), fsize); err != nil {
		return unlock, err
	}

	oldNodeMtime, err := old.GetMTime(ctx)
	if err != nil {
		return unlock, err
	}
	oldNodeEtag, err := node.CalculateEtag(old.ID, oldNodeMtime)
	if err != nil {
		return unlock, err
	}

	// When the if-match header was set we need to check if the
	// etag still matches before finishing the upload.
	if session.HeaderIfMatch() != "" && session.HeaderIfMatch() != oldNodeEtag {
		return unlock, errtypes.Aborted("etag mismatch")
	}

	// When the if-none-match header was set we need to check if any of the
	// etags matches before finishing the upload.
	if session.HeaderIfNoneMatch() != "" {
		if session.HeaderIfNoneMatch() == "*" {
			return unlock, errtypes.Aborted("etag mismatch, resource exists")
		}
		for _, ifNoneMatchTag := range strings.Split(session.HeaderIfNoneMatch(), ",") {
			if ifNoneMatchTag == oldNodeEtag {
				return unlock, errtypes.Aborted("etag mismatch")
			}
		}
	}

	// When the if-unmodified-since header was set we need to check if the
	// node was not modified after the given time before finishing the upload.
	if session.HeaderIfUnmodifiedSince() != "" {
		ifUnmodifiedSince, err := time.Parse(time.RFC3339Nano, session.HeaderIfUnmodifiedSince())
		if err != nil {
			return unlock, errtypes.InternalError(fmt.Sprintf("failed to parse if-unmodified-since time: %s", err))
		}

		if oldNodeMtime.After(ifUnmodifiedSince) {
			return unlock, errtypes.Aborted("if-unmodified-since mismatch")
		}
	}

	if !store.disableVersioning {
		// revision nodes are keyed by the previous mtime
		versionPath := session.store.lu.InternalPath(spaceID, n.ID+node.RevisionIDDelimiter+oldNodeMtime.UTC().Format(time.RFC3339Nano))

		// create version node
		_, err := os.OpenFile(versionPath, os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			if !errors.Is(err, os.ErrExist) {
				return unlock, err
			}

			// a revision with this mtime does already exist.
			// If the blobs are the same we can just delete the old one
			if err := validateChecksums(ctx, old, session, versionPath); err != nil {
				return unlock, err
			}

			// delete old blob
			bID, _, err := session.store.lu.ReadBlobIDAndSizeAttr(ctx, versionPath, nil)
			if err != nil {
				return unlock, err
			}
			if err := session.store.tp.DeleteBlob(&node.Node{BlobID: bID, SpaceID: n.SpaceID}); err != nil {
				return unlock, err
			}

			// clean revision file
			span.AddEvent("os.Create")
			if _, err := os.Create(versionPath); err != nil {
				return unlock, err
			}
		}

		// copy blob metadata to version node
		if err := store.lu.CopyMetadataWithSourceLock(ctx, targetPath, versionPath, func(attributeName string, value []byte) (newValue []byte, copy bool) {
			return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) ||
				attributeName == prefixes.TypeAttr ||
				attributeName == prefixes.BlobIDAttr ||
				attributeName == prefixes.BlobsizeAttr ||
				attributeName == prefixes.MTimeAttr
		}, f, true); err != nil {
			return unlock, err
		}

		session.info.MetaData["versionsPath"] = versionPath
		// keep mtime from previous version
		span.AddEvent("os.Chtimes")
		if err := os.Chtimes(session.info.MetaData["versionsPath"], oldNodeMtime, oldNodeMtime); err != nil {
			return unlock, errtypes.InternalError(fmt.Sprintf("failed to change mtime of version node: %s", err))
		}
	}

	// overwriting changes the space usage by the difference of the blob sizes
	session.info.MetaData["sizeDiff"] = strconv.FormatInt((int64(fsize) - old.Blobsize), 10)
	return unlock, nil
}
// validateChecksums verifies that the md5, sha1 and adler32 checksums stored
// on the node match the ones recorded on the revision at versionPath. Missing
// checksums on either side are treated as an error.
func validateChecksums(ctx context.Context, n *node.Node, session *OcisSession, versionPath string) error {
	backend := session.store.lu.MetadataBackend()
	for _, algo := range []string{"md5", "sha1", "adler32"} {
		attr := prefixes.ChecksumPrefix + algo
		nodeSum, err := n.Xattr(ctx, attr)
		if err != nil {
			return err
		}
		revisionSum, err := backend.Get(ctx, versionPath, attr)
		if err != nil {
			return err
		}
		if len(nodeSum) == 0 || len(revisionSum) == 0 {
			return errors.New("checksum not found")
		}
		if string(nodeSum) != string(revisionSum) {
			return errors.New("checksum mismatch")
		}
	}
	return nil
}

View File

@@ -0,0 +1,412 @@
// Copyright 2018-2022 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package upload
import (
"context"
"encoding/hex"
"fmt"
"hash"
"io"
"io/fs"
"net/http"
"os"
"strconv"
"strings"
"time"
userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
"github.com/golang-jwt/jwt/v5"
"github.com/pkg/errors"
tusd "github.com/tus/tusd/v2/pkg/handler"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
"github.com/opencloud-eu/reva/v2/pkg/appctx"
ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx"
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/opencloud-eu/reva/v2/pkg/events"
"github.com/opencloud-eu/reva/v2/pkg/rhttp/datatx/metrics"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/utils"
)
var (
	tracer trace.Tracer
	// ErrAlreadyExists is returned (as a tus error) when the upload target already exists
	ErrAlreadyExists = tusd.NewError("ERR_ALREADY_EXISTS", "file already exists", http.StatusConflict)
	// defaultFilePerm is the mode used for newly created upload binary files
	defaultFilePerm = os.FileMode(0664)
)

func init() {
	// NOTE(review): tracer name still points at the upstream cs3org/reva
	// package path — confirm whether it should use the forked module path.
	tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/upload")
}
// WriteChunk writes the stream from the reader to the given offset of the upload.
// The offset parameter is unused: data is always appended to the binary file,
// whose size is the authoritative offset. Returns the number of bytes written.
func (session *OcisSession) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
	ctx, span := tracer.Start(session.Context(ctx), "WriteChunk")
	defer span.End()
	_, subspan := tracer.Start(ctx, "os.OpenFile")
	file, err := os.OpenFile(session.binPath(), os.O_WRONLY|os.O_APPEND, defaultFilePerm)
	subspan.End()
	if err != nil {
		return 0, err
	}
	defer file.Close()

	// calculate cheksum here? needed for the TUS checksum extension. https://tus.io/protocols/resumable-upload.html#checksum
	// TODO but how do we get the `Upload-Checksum`? WriteChunk() only has a context, offset and the reader ...
	// It is sent with the PATCH request, well or in the POST when the creation-with-upload extension is used
	// but the tus handler uses a context.Background() so we cannot really check the header and put it in the context ...
	_, subspan = tracer.Start(ctx, "io.Copy")
	n, err := io.Copy(file, src)
	subspan.End()

	// If the HTTP PATCH request gets interrupted in the middle (e.g. because
	// the user wants to pause the upload), Go's net/http returns an io.ErrUnexpectedEOF.
	// However, for the ocis driver it's not important whether the stream has ended
	// on purpose or accidentally.
	if err != nil && err != io.ErrUnexpectedEOF {
		return n, err
	}

	// update upload.Session.Offset so subsequent code flow can use it.
	// No need to persist the session as the offset is determined by stating the blob in the GetUpload / ReadSession codepath.
	// The session offset is written to disk in FinishUpload
	session.info.Offset += n
	return n, nil
}
// GetInfo returns the tusd FileInfo describing this upload session.
// The context argument is unused; all state lives on the session.
func (session *OcisSession) GetInfo(_ context.Context) (tusd.FileInfo, error) {
	info := session.ToFileInfo()
	return info, nil
}
// GetReader returns an io.ReadCloser over the upload's binary data on disk.
// The caller is responsible for closing the returned reader.
func (session *OcisSession) GetReader(ctx context.Context) (io.ReadCloser, error) {
	_, span := tracer.Start(session.Context(ctx), "GetReader")
	defer span.End()
	f, err := os.Open(session.binPath())
	if err != nil {
		// Return a literal nil instead of the failed *os.File: returning the
		// typed nil pointer directly would box it into a non-nil io.ReadCloser
		// interface value (the classic nil-interface trap).
		return nil, err
	}
	return f, nil
}
// FinishUpload finishes an upload and moves the file to the internal destination.
// It implements the tusd.DataStore interface and therefore translates the
// internal errtypes errors into tusd errors so the handler can derive the
// correct HTTP status code. Any other error (including nil) passes through.
func (session *OcisSession) FinishUpload(ctx context.Context) error {
	err := session.FinishUploadDecomposed(ctx)
	if _, ok := err.(errtypes.AlreadyExists); ok {
		return tusd.NewError("ERR_ALREADY_EXISTS", err.Error(), http.StatusConflict)
	}
	if _, ok := err.(errtypes.Aborted); ok {
		return tusd.NewError("ERR_PRECONDITION_FAILED", err.Error(), http.StatusPreconditionFailed)
	}
	return err
}
// FinishUploadDecomposed finishes an upload and moves the file to the internal destination.
// It returns errtypes errors; the translation to tusd errors happens in FinishUpload.
func (session *OcisSession) FinishUploadDecomposed(ctx context.Context) error {
	ctx, span := tracer.Start(session.Context(ctx), "FinishUpload")
	defer span.End()
	log := appctx.GetLogger(ctx)
	ctx = ctxpkg.ContextSetInitiator(ctx, session.InitiatorID())
	// all three checksums are computed in one pass over the finished binary file
	sha1h, md5h, adler32h, err := node.CalculateChecksums(ctx, session.binPath())
	if err != nil {
		return err
	}
	// compare if they match the sent checksum
	// TODO the tus checksum extension would do this on every chunk, but I currently don't see an easy way to pass in the requested checksum. for now we do it in FinishUpload which is also called for chunked uploads
	if session.info.MetaData["checksum"] != "" {
		var err error
		// expected format: "<algorithm> <hex digest>"
		parts := strings.SplitN(session.info.MetaData["checksum"], " ", 2)
		if len(parts) != 2 {
			return errtypes.BadRequest("invalid checksum format. must be '[algorithm] [checksum]'")
		}
		switch parts[0] {
		case "sha1":
			err = checkHash(parts[1], sha1h)
		case "md5":
			err = checkHash(parts[1], md5h)
		case "adler32":
			err = checkHash(parts[1], adler32h)
		default:
			err = errtypes.BadRequest("unsupported checksum algorithm: " + parts[0])
		}
		if err != nil {
			// checksum mismatch: clean up the failed upload before reporting the error
			session.store.Cleanup(ctx, session, true, false, false)
			return err
		}
	}
	// update checksums: stored as extended attributes on the node
	attrs := node.Attributes{
		prefixes.ChecksumPrefix + "sha1":    sha1h.Sum(nil),
		prefixes.ChecksumPrefix + "md5":     md5h.Sum(nil),
		prefixes.ChecksumPrefix + "adler32": adler32h.Sum(nil),
	}
	// At this point we scope by the space to create the final file in the final location
	if session.store.um != nil && session.info.Storage["SpaceGid"] != "" {
		gid, err := strconv.Atoi(session.info.Storage["SpaceGid"])
		if err != nil {
			return errors.Wrap(err, "failed to parse space gid")
		}
		// -1 leaves the fsuid untouched; only the group is switched
		unscope, err := session.store.um.ScopeUserByIds(-1, gid)
		if err != nil {
			return errors.Wrap(err, "failed to scope user")
		}
		if unscope != nil {
			// best-effort restore of the previous scope; the error is intentionally discarded
			defer func() { _ = unscope() }()
		}
	}
	n, err := session.store.CreateNodeForUpload(ctx, session, attrs)
	if err != nil {
		return err
	}
	// increase the processing counter for every started processing
	// will be decreased in Cleanup()
	metrics.UploadProcessing.Inc()
	metrics.UploadSessionsBytesReceived.Inc()
	if session.store.pub != nil && session.info.Size > 0 {
		u, _ := ctxpkg.ContextGetUser(ctx)
		s, err := session.URL(ctx)
		if err != nil {
			return err
		}
		// propagate an impersonating user (if present in the opaque data) so the
		// published event attributes the upload correctly
		var iu *userpb.User
		if utils.ExistsInOpaque(u.Opaque, "impersonating-user") {
			iu = &userpb.User{}
			if err := utils.ReadJSONFromOpaque(u.Opaque, "impersonating-user", iu); err != nil {
				return err
			}
		}
		// notify subscribers (e.g. async postprocessing) that the bytes arrived
		if err := events.Publish(ctx, session.store.pub, events.BytesReceived{
			UploadID:          session.ID(),
			URL:               s,
			SpaceOwner:        n.SpaceOwnerOrManager(session.Context(ctx)),
			ExecutingUser:     u,
			ResourceID:        &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID},
			Filename:          session.Filename(),
			Filesize:          uint64(session.Size()),
			ImpersonatingUser: iu,
		}); err != nil {
			return err
		}
	}
	// if the upload is synchronous or the upload is empty, finalize it now
	// for 0-byte uploads we take a shortcut and finalize isn't called elsewhere
	if !session.store.async || session.info.Size == 0 {
		// handle postprocessing synchronously
		err = session.Finalize(ctx)
		session.store.Cleanup(ctx, session, err != nil, false, err == nil)
		if err != nil {
			log.Error().Err(err).Msg("failed to upload")
			return err
		}
		metrics.UploadSessionsFinalized.Inc()
	}
	// propagate the size change up the tree (etag/treesize propagation)
	return session.store.tp.Propagate(ctx, n, session.SizeDiff())
}
// Terminate terminates the upload and removes every trace of it: the node
// metadata is reverted, the binary file and the session info are deleted.
// The context argument is unused; Cleanup builds its own.
func (session *OcisSession) Terminate(_ context.Context) error {
	const revertNodeMetadata, cleanBin, cleanInfo = true, true, true
	session.Cleanup(revertNodeMetadata, cleanBin, cleanInfo)
	return nil
}
// DeclareLength updates the upload length information, marks the size as no
// longer deferred, and persists the session from within the base user scope.
func (session *OcisSession) DeclareLength(ctx context.Context, length int64) error {
	session.info.SizeIsDeferred = false
	session.info.Size = length
	persist := func() error {
		return session.Persist(session.Context(ctx))
	}
	return session.store.um.RunInBaseScope(persist)
}
// ConcatUploads appends the binary data of the given partial uploads to this
// session's upload file, in order. It implements the tusd concatenation
// extension. The context argument is unused.
func (session *OcisSession) ConcatUploads(_ context.Context, uploads []tusd.Upload) (err error) {
	file, err := os.OpenFile(session.binPath(), os.O_WRONLY|os.O_APPEND, defaultFilePerm)
	if err != nil {
		return err
	}
	defer file.Close()
	// appendUpload copies one partial upload into the target file. It is a
	// separate function so each source is closed as soon as it is copied:
	// the original used defer inside the loop, which kept every source file
	// open until ConcatUploads returned.
	appendUpload := func(partial *OcisSession) error {
		src, err := os.Open(partial.binPath())
		if err != nil {
			return err
		}
		defer src.Close()
		_, err = io.Copy(file, src)
		return err
	}
	for _, partialUpload := range uploads {
		if err := appendUpload(partialUpload.(*OcisSession)); err != nil {
			return err
		}
	}
	return nil
}
// Finalize finalizes the upload (eg moves the file to the internal destination).
// It builds a revision node from the session state and writes the binary data
// to the blobstore.
func (session *OcisSession) Finalize(ctx context.Context) (err error) {
	ctx, span := tracer.Start(session.Context(ctx), "Finalize")
	defer span.End()
	// the revision node carries the ids/size the blobstore needs to place the blob
	revisionNode := node.New(session.SpaceID(), session.NodeID(), "", "", session.Size(), session.ID(),
		provider.ResourceType_RESOURCE_TYPE_FILE, session.SpaceOwner(), session.store.lu)
	// upload the data to the blobstore
	_, subspan := tracer.Start(ctx, "WriteBlob")
	err = session.store.tp.WriteBlob(revisionNode, session.binPath())
	subspan.End()
	if err != nil {
		return errors.Wrap(err, "failed to upload file to blobstore")
	}
	return nil
}
// checkHash compares the expected (hex-encoded) checksum with the final sum of
// the given hash and returns an errtypes.ChecksumMismatch if they differ.
func checkHash(expected string, h hash.Hash) error {
	// renamed from "hash" to avoid shadowing the hash package used in the signature
	actual := hex.EncodeToString(h.Sum(nil))
	if expected != actual {
		// use %s for the computed digest: it is already hex-encoded, so the
		// original %x verb hex-encoded the hex string a second time, producing
		// an unreadable error message
		return errtypes.ChecksumMismatch(fmt.Sprintf("invalid checksum: expected %s got %s", expected, actual))
	}
	return nil
}
// removeNode purges the node belonging to this session from storage.
// Errors are logged but not returned; this is a best-effort cleanup step.
func (session *OcisSession) removeNode(ctx context.Context) {
	log := appctx.GetLogger(ctx)
	n, err := session.Node(ctx)
	if err != nil {
		log.Error().Str("session", session.ID()).Err(err).Msg("getting node from session failed")
		return
	}
	if err = n.Purge(ctx); err != nil {
		log.Error().Str("nodepath", n.InternalPath()).Err(err).Msg("purging node failed")
	}
}
// Cleanup cleans up after the upload is finished.
// revertNodeMetadata restores the node's previous metadata (or deletes the
// node if this session created it), cleanBin removes the binary upload file,
// and cleanInfo removes the persisted session info.
func (session *OcisSession) Cleanup(revertNodeMetadata, cleanBin, cleanInfo bool) {
	ctx := session.Context(context.Background())
	if revertNodeMetadata {
		n, err := session.Node(ctx)
		if err != nil {
			appctx.GetLogger(ctx).Error().Err(err).Str("sessionid", session.ID()).Msg("reading node for session failed")
		} else {
			if session.NodeExists() && session.info.MetaData["versionsPath"] != "" {
				// the node pre-existed: restore its previous state from the version
				// node, copying only type/blob/mtime/checksum attributes back
				p := session.info.MetaData["versionsPath"]
				if err := session.store.lu.CopyMetadata(ctx, p, n.InternalPath(), func(attributeName string, value []byte) (newValue []byte, copy bool) {
					return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) ||
						attributeName == prefixes.TypeAttr ||
						attributeName == prefixes.BlobIDAttr ||
						attributeName == prefixes.BlobsizeAttr ||
						attributeName == prefixes.MTimeAttr
				}, true); err != nil {
					appctx.GetLogger(ctx).Info().Str("versionpath", p).Str("nodepath", n.InternalPath()).Err(err).Msg("renaming version node failed")
				}
				// the version node has served its purpose; remove it
				if err := os.RemoveAll(p); err != nil {
					appctx.GetLogger(ctx).Info().Str("versionpath", p).Str("nodepath", n.InternalPath()).Err(err).Msg("error removing version")
				}
			} else {
				// if no other upload session is in progress (processing id != session id) or has finished (processing id == "")
				latestSession, err := n.ProcessingID(ctx)
				if err != nil {
					appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("uploadid", session.ID()).Msg("reading processingid for session failed")
				}
				if latestSession == session.ID() {
					// actually delete the node
					session.removeNode(ctx)
				}
				// FIXME else if the upload has become a revision, delete the revision, or if it is the last one, delete the node
			}
		}
	}
	if cleanBin {
		// a missing binary file is fine (e.g. already cleaned); only log other errors
		if err := os.Remove(session.binPath()); err != nil && !errors.Is(err, fs.ErrNotExist) {
			appctx.GetLogger(ctx).Error().Str("path", session.binPath()).Err(err).Msg("removing upload failed")
		}
	}
	if cleanInfo {
		if err := session.Purge(ctx); err != nil && !errors.Is(err, fs.ErrNotExist) {
			appctx.GetLogger(ctx).Error().Err(err).Str("session", session.ID()).Msg("removing upload info failed")
		}
	}
}
// URL returns a signed url to download an upload. The target url points at the
// tus download endpoint; it is wrapped in a short-lived HS256-signed transfer
// token and routed through the data gateway. The context argument is unused.
func (session *OcisSession) URL(_ context.Context) (string, error) {
	// claims carry the actual download target inside the signed token
	type transferClaims struct {
		jwt.RegisteredClaims
		Target string `json:"target"`
	}
	u := joinurl(session.store.tknopts.DownloadEndpoint, "tus/", session.ID())
	// token lifetime is configured in seconds
	ttl := time.Duration(session.store.tknopts.TransferExpires) * time.Second
	claims := transferClaims{
		RegisteredClaims: jwt.RegisteredClaims{
			ExpiresAt: jwt.NewNumericDate(time.Now().Add(ttl)),
			Audience:  jwt.ClaimStrings{"reva"},
			IssuedAt:  jwt.NewNumericDate(time.Now()),
		},
		Target: u,
	}
	t := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"), claims)
	tkn, err := t.SignedString([]byte(session.store.tknopts.TransferSharedSecret))
	if err != nil {
		return "", errors.Wrapf(err, "error signing token with claims %+v", claims)
	}
	// the gateway resolves the token back to the download target
	return joinurl(session.store.tknopts.DataGatewayEndpoint, tkn), nil
}
// joinurl concatenates the given path segments, inserting a single "/" after
// every segment that does not already end with one. The final segment is
// appended verbatim (no trailing slash is added).
// NOTE(review): url.JoinPath (go1.19+) also cleans and escapes segments, so it
// is not a drop-in replacement — confirm behavior before switching.
func joinurl(paths ...string) string {
	last := len(paths) - 1
	parts := make([]string, 0, len(paths))
	for i, p := range paths {
		if i != last && !strings.HasSuffix(p, "/") {
			p += "/"
		}
		parts = append(parts, p)
	}
	return strings.Join(parts, "")
}

View File

@@ -0,0 +1,56 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package usermapper
import (
"context"
)
// Mapper is the interface that wraps the basic mapping methods used to switch
// the effective OS-level identity (uid/gid) a storage operation runs as.
// Every Scope* method returns a restore function that reverts to the
// previously active scope.
type Mapper interface {
	// RunInBaseScope runs f scoped to the base identity and restores the previous scope afterwards.
	RunInBaseScope(f func() error) error
	// ScopeBase switches to the base identity.
	ScopeBase() (func() error, error)
	// ScopeUser switches to the identity of the user carried in ctx.
	ScopeUser(ctx context.Context) (func() error, error)
	// ScopeUserByIds switches to the given numeric uid and gid.
	ScopeUserByIds(uid, gid int) (func() error, error)
}
// UnscopeFunc is a function that restores the scope that was active before the
// corresponding Scope* call.
type UnscopeFunc func() error
// NullMapper is a user mapper that does nothing: every scoping call succeeds
// without changing any OS-level identity. It is the Mapper to use on systems
// or deployments where no uid/gid switching is wanted.
type NullMapper struct{}
// RunInBaseScope runs the given function directly; the NullMapper performs no
// scoping, so there is nothing to switch to or restore.
func (nm *NullMapper) RunInBaseScope(f func() error) error {
	return f()
}
// ScopeBase is a no-op for the NullMapper. The returned restore function also
// does nothing and always succeeds.
func (nm *NullMapper) ScopeBase() (func() error, error) {
	noop := func() error { return nil }
	return noop, nil
}
// ScopeUser is a no-op for the NullMapper: the context (and the user it may
// carry) is ignored and the returned restore function does nothing.
func (nm *NullMapper) ScopeUser(ctx context.Context) (func() error, error) {
	return func() error { return nil }, nil
}
// ScopeUserByIds is a no-op for the NullMapper: the uid and gid are ignored
// and the returned restore function does nothing.
func (nm *NullMapper) ScopeUserByIds(uid, gid int) (func() error, error) {
	return func() error { return nil }, nil
}

View File

@@ -0,0 +1,131 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package usermapper
import (
"context"
"fmt"
"os/user"
"runtime"
"strconv"
"golang.org/x/sys/unix"
revactx "github.com/opencloud-eu/reva/v2/pkg/ctx"
)
// UnixMapper is a user mapper that maps users to unix uids and gids by
// switching the calling thread's filesystem uid/gid via setfsuid/setfsgid.
type UnixMapper struct {
	baseUid int // filesystem uid recorded when the mapper was created
	baseGid int // filesystem gid recorded when the mapper was created
}
// NewUnixMapper returns a new unix user mapper, recording the process's
// current filesystem uid/gid as the base identity to return to.
// Calling Setfsuid/Setfsgid with -1 only queries the current value without
// changing it. NOTE(review): the errors are discarded on the assumption that
// the query form cannot fail — confirm on the supported platforms.
func NewUnixMapper() *UnixMapper {
	baseUid, _ := unix.SetfsuidRetUid(-1)
	baseGid, _ := unix.SetfsgidRetGid(-1)
	return &UnixMapper{
		baseUid: baseUid,
		baseGid: baseGid,
	}
}
// RunInBaseScope runs the given function in the scope of the base user and
// restores the previous scope when it returns.
func (um *UnixMapper) RunInBaseScope(f func() error) error {
	unscope, err := um.ScopeBase()
	if err != nil {
		return err
	}
	// best-effort restore; the unscope error is intentionally discarded
	defer func() { _ = unscope() }()
	return f()
}
// ScopeBase returns to the base gid, returning a function that can be used to
// restore the previous scope.
// NOTE(review): only the gid is reset here — passing -1 for the uid leaves the
// current fsuid untouched. Confirm whether um.baseUid should be passed instead.
func (um *UnixMapper) ScopeBase() (func() error, error) {
	return um.ScopeUserByIds(-1, um.baseGid)
}
// ScopeUser switches the current thread to the uid/gid of the user carried in
// ctx, returning a function that restores the previous scope. It panics (via
// ContextMustGetUser) if ctx carries no user.
func (um *UnixMapper) ScopeUser(ctx context.Context) (func() error, error) {
	usr := revactx.ContextMustGetUser(ctx)
	uid, gid, err := um.mapUser(usr.Username)
	if err != nil {
		return nil, err
	}
	return um.ScopeUserByIds(uid, gid)
}
// ScopeUserByIds scopes the current thread to the given uid and gid, returning
// a function that restores the previous scope. Passing -1 for either id leaves
// that id unchanged. The goroutine stays locked to its OS thread until the
// restore function is called, because setfsuid/setfsgid are per-thread.
func (um *UnixMapper) ScopeUserByIds(uid, gid int) (func() error, error) {
	runtime.LockOSThread() // Lock this Goroutine to the current OS thread
	var err error
	var prevUid int
	var prevGid int
	if uid >= 0 {
		prevUid, err = unix.SetfsuidRetUid(uid)
		if err != nil {
			// the original leaked the thread lock on every error return,
			// pinning the goroutine to its OS thread forever
			runtime.UnlockOSThread()
			return nil, err
		}
		// setfsuid does not report failure directly; query it back to verify
		if testUid, _ := unix.SetfsuidRetUid(-1); testUid != uid {
			runtime.UnlockOSThread()
			return nil, fmt.Errorf("failed to setfsuid to %d", uid)
		}
	}
	if gid >= 0 {
		prevGid, err = unix.SetfsgidRetGid(gid)
		if err != nil {
			// roll back the fsuid change before giving up
			if uid >= 0 {
				_ = unix.Setfsuid(prevUid)
			}
			runtime.UnlockOSThread()
			return nil, err
		}
		if testGid, _ := unix.SetfsgidRetGid(-1); testGid != gid {
			if uid >= 0 {
				_ = unix.Setfsuid(prevUid)
			}
			runtime.UnlockOSThread()
			return nil, fmt.Errorf("failed to setfsgid to %d", gid)
		}
	}
	return func() error {
		if uid >= 0 {
			_ = unix.Setfsuid(prevUid)
		}
		if gid >= 0 {
			_ = unix.Setfsgid(prevGid)
		}
		runtime.UnlockOSThread()
		return nil
	}, nil
}
// mapUser resolves the given username to its numeric uid and gid via the
// system user database. Receiver renamed from "u" to "um" for consistency
// with every other UnixMapper method.
func (um *UnixMapper) mapUser(username string) (int, int, error) {
	userDetails, err := user.Lookup(username)
	if err != nil {
		return 0, 0, err
	}
	uid, err := strconv.Atoi(userDetails.Uid)
	if err != nil {
		return 0, 0, err
	}
	gid, err := strconv.Atoi(userDetails.Gid)
	if err != nil {
		return 0, 0, err
	}
	return uid, gid, nil
}

23
vendor/modules.txt vendored
View File

@@ -1202,7 +1202,7 @@ github.com/open-policy-agent/opa/types
github.com/open-policy-agent/opa/util
github.com/open-policy-agent/opa/util/decoding
github.com/open-policy-agent/opa/version
# github.com/opencloud-eu/reva/v2 v2.27.3-0.20250121094357-24f23b6a27ed
# github.com/opencloud-eu/reva/v2 v2.27.3-0.20250127153848-a84e6c39c206
## explicit; go 1.22.7
github.com/opencloud-eu/reva/v2/cmd/revad/internal/grace
github.com/opencloud-eu/reva/v2/cmd/revad/runtime
@@ -1488,6 +1488,10 @@ github.com/opencloud-eu/reva/v2/pkg/storage/favorite/loader
github.com/opencloud-eu/reva/v2/pkg/storage/favorite/memory
github.com/opencloud-eu/reva/v2/pkg/storage/favorite/registry
github.com/opencloud-eu/reva/v2/pkg/storage/fs/cephfs
github.com/opencloud-eu/reva/v2/pkg/storage/fs/decomposed
github.com/opencloud-eu/reva/v2/pkg/storage/fs/decomposed/blobstore
github.com/opencloud-eu/reva/v2/pkg/storage/fs/decomposed_s3
github.com/opencloud-eu/reva/v2/pkg/storage/fs/decomposed_s3/blobstore
github.com/opencloud-eu/reva/v2/pkg/storage/fs/eos
github.com/opencloud-eu/reva/v2/pkg/storage/fs/eosgrpc
github.com/opencloud-eu/reva/v2/pkg/storage/fs/eosgrpchome
@@ -1512,6 +1516,23 @@ github.com/opencloud-eu/reva/v2/pkg/storage/fs/registry
github.com/opencloud-eu/reva/v2/pkg/storage/fs/s3
github.com/opencloud-eu/reva/v2/pkg/storage/fs/s3ng
github.com/opencloud-eu/reva/v2/pkg/storage/fs/s3ng/blobstore
github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs
github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/aspects
github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup
github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata
github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes
github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/migrator
github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/mtimesyncedcache
github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node
github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options
github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/permissions
github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/spaceidindex
github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/timemanager
github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/trashbin
github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree
github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/propagator
github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload
github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/usermapper
github.com/opencloud-eu/reva/v2/pkg/storage/registry/loader
github.com/opencloud-eu/reva/v2/pkg/storage/registry/registry
github.com/opencloud-eu/reva/v2/pkg/storage/registry/spaces