diff --git a/go.mod b/go.mod index 5e9ba1c64..072bf70c3 100644 --- a/go.mod +++ b/go.mod @@ -63,7 +63,7 @@ require ( github.com/onsi/ginkgo/v2 v2.23.3 github.com/onsi/gomega v1.37.0 github.com/open-policy-agent/opa v1.3.0 - github.com/opencloud-eu/reva/v2 v2.29.1 + github.com/opencloud-eu/reva/v2 v2.31.0 github.com/orcaman/concurrent-map v1.0.0 github.com/owncloud/libre-graph-api-go v1.0.5-0.20240829135935-80dc00d6f5ea github.com/pkg/errors v0.9.1 @@ -82,7 +82,7 @@ require ( github.com/test-go/testify v1.1.4 github.com/thejerf/suture/v4 v4.0.6 github.com/tidwall/gjson v1.18.0 - github.com/tus/tusd/v2 v2.7.1 + github.com/tus/tusd/v2 v2.8.0 github.com/unrolled/secure v1.16.0 github.com/urfave/cli/v2 v2.27.6 github.com/xhit/go-simple-mail/v2 v2.16.0 @@ -105,7 +105,7 @@ require ( golang.org/x/term v0.30.0 golang.org/x/text v0.23.0 google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb - google.golang.org/grpc v1.71.0 + google.golang.org/grpc v1.71.1 google.golang.org/protobuf v1.36.6 gopkg.in/yaml.v2 v2.4.0 gotest.tools/v3 v3.5.2 @@ -237,7 +237,7 @@ require ( github.com/juliangruber/go-intersect v1.1.0 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect github.com/klauspost/compress v1.18.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.9 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/libregraph/oidc-go v1.1.0 // indirect github.com/longsleep/go-metrics v1.0.0 // indirect @@ -246,7 +246,7 @@ require ( github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect - github.com/mattn/go-sqlite3 v1.14.24 // indirect + github.com/mattn/go-sqlite3 v1.14.27 // indirect github.com/maxymania/go-system v0.0.0-20170110133659-647cc364bf0b // indirect github.com/mendsley/gojwk v0.0.0-20141217222730-4d5ec6e58103 // indirect github.com/miekg/dns v1.1.57 // indirect @@ -254,7 +254,7 @@ require 
( github.com/minio/crc64nvme v1.0.1 // indirect github.com/minio/highwayhash v1.0.3 // indirect github.com/minio/md5-simd v1.1.2 // indirect - github.com/minio/minio-go/v7 v7.0.88 // indirect + github.com/minio/minio-go/v7 v7.0.89 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -325,7 +325,7 @@ require ( golang.org/x/time v0.11.0 // indirect golang.org/x/tools v0.31.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect + google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect diff --git a/go.sum b/go.sum index 1d610c476..e744d7946 100644 --- a/go.sum +++ b/go.sum @@ -689,8 +689,8 @@ github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHU github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= -github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/kobergj/gowebdav v0.0.0-20250102091030-aa65266db202 h1:A1xJ2NKgiYFiaHiLl9B5yw/gUBACSs9crDykTS3GuQI= github.com/kobergj/gowebdav v0.0.0-20250102091030-aa65266db202/go.mod 
h1:bHA7t77X/QFExdeAnDzK6vKM34kEZAcE1OX4MfiwjkE= github.com/kobergj/plugins/v4/store/nats-js-kv v0.0.0-20240807130109-f62bb67e8c90 h1:pfI8Z5yavO6fU6vDGlWhZ4BgDlvj8c6xB7J57HfTPwA= @@ -768,8 +768,8 @@ github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM= -github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.27 h1:drZCnuvf37yPfs95E5jd9s3XhdVWLal+6BOK6qrv6IU= +github.com/mattn/go-sqlite3 v1.14.27/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/mattn/go-tty v0.0.0-20180219170247-931426f7535a/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/mattn/go-tty v0.0.3/go.mod h1:ihxohKRERHTVzN+aSVRwACLCeqIoZAWpoICkkvrWyR0= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -789,8 +789,8 @@ github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.88 h1:v8MoIJjwYxOkehp+eiLIuvXk87P2raUtoU5klrAAshs= -github.com/minio/minio-go/v7 v7.0.88/go.mod h1:33+O8h0tO7pCeCWwBVa07RhVVfB/3vS4kEX7rwYKmIg= +github.com/minio/minio-go/v7 v7.0.89 h1:hx4xV5wwTUfyv8LarhJAwNecnXpoTsj9v3f3q/ZkiJU= +github.com/minio/minio-go/v7 v7.0.89/go.mod h1:2rFnGAp02p7Dddo1Fq4S2wYOfpF0MUTSeLTRC90I204= github.com/mitchellh/cli v1.0.0/go.mod 
h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= @@ -865,8 +865,8 @@ github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/open-policy-agent/opa v1.3.0 h1:zVvQvQg+9+FuSRBt4LgKNzJwsWl/c85kD5jPozJTydY= github.com/open-policy-agent/opa v1.3.0/go.mod h1:t9iPNhaplD2qpiBqeudzJtEX3fKHK8zdA29oFvofAHo= -github.com/opencloud-eu/reva/v2 v2.29.1 h1:SgB2zn8d/3UWwFiJ0pUs85aDKJJ36JoKnyRM+iW+VoI= -github.com/opencloud-eu/reva/v2 v2.29.1/go.mod h1:+nkCU7w6E6cyNSsKRYj1rb0cCI7QswEQ7KOPljctebM= +github.com/opencloud-eu/reva/v2 v2.31.0 h1:UVgeb0hSPoaDdqcKSJ7XZAhXCtHaVK9qm/JtFtJM/7U= +github.com/opencloud-eu/reva/v2 v2.31.0/go.mod h1:8MT1a/WJASZZhlSMC0oeE3ECQdjqFw3BUiiAIZ/JR8I= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= @@ -1098,8 +1098,8 @@ github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208/go.mod h1:BzWtXXrXz github.com/transip/gotransip/v6 v6.2.0/go.mod h1:pQZ36hWWRahCUXkFWlx9Hs711gLd8J4qdgLdRzmtY+g= github.com/trustelem/zxcvbn v1.0.1 h1:mp4JFtzdDYGj9WYSD3KQSkwwUumWNFzXaAjckaTYpsc= github.com/trustelem/zxcvbn v1.0.1/go.mod h1:zonUyKeh7sw6psPf/e3DtRqkRyZvAbOfjNz/aO7YQ5s= -github.com/tus/tusd/v2 v2.7.1 h1:TGJjhv9RYXDmsTz8ug/qSd9vQpmD0Ik0G0IPo80Qmc0= -github.com/tus/tusd/v2 v2.7.1/go.mod h1:PLdIMQ/ge+5ADgGKcL3FgTaPs+7wB0JIiI5HQXAiJE8= +github.com/tus/tusd/v2 v2.8.0 h1:X2jGxQ05jAW4inDd2ogmOKqwnb4c/D0lw2yhgHayWyU= +github.com/tus/tusd/v2 v2.8.0/go.mod h1:3/zEOVQQIwmJhvNam8phV4x/UQt68ZmZiTzeuJUNhVo= github.com/uber-go/atomic 
v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= @@ -1599,8 +1599,8 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= -google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc= +google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE= +google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE= google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4= @@ -1620,8 +1620,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= -google.golang.org/grpc 
v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= +google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI= +google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= google.golang.org/grpc/examples v0.0.0-20211102180624-670c133e568e h1:m7aQHHqd0q89mRwhwS9Bx2rjyl/hsFAeta+uGrHsQaU= google.golang.org/grpc/examples v0.0.0-20211102180624-670c133e568e/go.mod h1:gID3PKrg7pWKntu9Ss6zTLJ0ttC0X9IHgREOCZwbCVU= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= diff --git a/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml b/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml index 944cc0007..1b695b62c 100644 --- a/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml +++ b/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml @@ -1,5 +1,4 @@ -# This is an example goreleaser.yaml file with some sane defaults. -# Make sure to check the documentation at http://goreleaser.com +version: 2 builds: - @@ -27,16 +26,7 @@ builds: archives: - id: cpuid - name_template: "cpuid-{{ .Os }}_{{ .Arch }}_{{ .Version }}" - replacements: - aix: AIX - darwin: OSX - linux: Linux - windows: Windows - 386: i386 - amd64: x86_64 - freebsd: FreeBSD - netbsd: NetBSD + name_template: "cpuid-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" format_overrides: - goos: windows format: zip @@ -44,8 +34,6 @@ archives: - LICENSE checksum: name_template: 'checksums.txt' -snapshot: - name_template: "{{ .Tag }}-next" changelog: sort: asc filters: @@ -58,7 +46,7 @@ changelog: nfpms: - - file_name_template: "cpuid_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" + file_name_template: "cpuid_package_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" vendor: Klaus Post homepage: https://github.com/klauspost/cpuid maintainer: Klaus Post @@ -67,8 +55,3 @@ nfpms: formats: - deb - rpm - replacements: - darwin: Darwin - linux: Linux - freebsd: FreeBSD - amd64: x86_64 diff --git 
a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md index f06ba51c5..e59d3d0c0 100644 --- a/vendor/github.com/klauspost/cpuid/v2/README.md +++ b/vendor/github.com/klauspost/cpuid/v2/README.md @@ -282,7 +282,9 @@ Exit Code 1 | AMXINT8 | Tile computational operations on 8-bit integers | | AMXFP16 | Tile computational operations on FP16 numbers | | AMXFP8 | Tile computational operations on FP8 numbers | +| AMXCOMPLEX | Tile computational operations on complex numbers | | AMXTILE | Tile architecture | +| AMXTF32 | Matrix Multiplication of TF32 Tiles into Packed Single Precision Tile | | APX_F | Intel APX | | AVX | AVX functions | | AVX10 | If set the Intel AVX10 Converged Vector ISA is supported | @@ -480,12 +482,16 @@ Exit Code 1 | DCPOP | Data cache clean to Point of Persistence (DC CVAP) | | EVTSTRM | Generic timer | | FCMA | Floatin point complex number addition and multiplication | +| FHM | FMLAL and FMLSL instructions | | FP | Single-precision and double-precision floating point | | FPHP | Half-precision floating point | | GPA | Generic Pointer Authentication | | JSCVT | Javascript-style double->int convert (FJCVTZS) | | LRCPC | Weaker release consistency (LDAPR, etc) | | PMULL | Polynomial Multiply instructions (PMULL/PMULL2) | +| RNDR | Random Number instructions | +| TLB | Outer Shareable and TLB range maintenance instructions | +| TS | Flag manipulation instructions | | SHA1 | SHA-1 instructions (SHA1C, etc) | | SHA2 | SHA-2 instructions (SHA256H, etc) | | SHA3 | SHA-3 instructions (EOR3, RAXI, XAR, BCAX) | diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go index db99eb62f..8103fb343 100644 --- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go @@ -83,6 +83,8 @@ const ( AMXINT8 // Tile computational operations on 8-bit integers AMXFP8 // Tile computational operations on FP8 numbers AMXTILE // Tile architecture 
+ AMXTF32 // Tile architecture + AMXCOMPLEX // Matrix Multiplication of TF32 Tiles into Packed Single Precision Tile APX_F // Intel APX AVX // AVX functions AVX10 // If set the Intel AVX10 Converged Vector ISA is supported @@ -282,12 +284,16 @@ const ( DCPOP // Data cache clean to Point of Persistence (DC CVAP) EVTSTRM // Generic timer FCMA // Floatin point complex number addition and multiplication + FHM // FMLAL and FMLSL instructions FP // Single-precision and double-precision floating point FPHP // Half-precision floating point GPA // Generic Pointer Authentication JSCVT // Javascript-style double->int convert (FJCVTZS) LRCPC // Weaker release consistency (LDAPR, etc) PMULL // Polynomial Multiply instructions (PMULL/PMULL2) + RNDR // Random Number instructions + TLB // Outer Shareable and TLB range maintenance instructions + TS // Flag manipulation instructions SHA1 // SHA-1 instructions (SHA1C, etc) SHA2 // SHA-2 instructions (SHA256H, etc) SHA3 // SHA-3 instructions (EOR3, RAXI, XAR, BCAX) @@ -532,7 +538,7 @@ func (c CPUInfo) Ia32TscAux() uint32 { return ecx } -// SveLengths returns arm SVE vector and predicate lengths. +// SveLengths returns arm SVE vector and predicate lengths in bits. // Will return 0, 0 if SVE is not enabled or otherwise unable to detect. 
func (c CPUInfo) SveLengths() (vl, pl uint64) { if !c.Has(SVE) { @@ -1284,6 +1290,8 @@ func support() flagSet { // CPUID.(EAX=7, ECX=1).EDX fs.setIf(edx1&(1<<4) != 0, AVXVNNIINT8) fs.setIf(edx1&(1<<5) != 0, AVXNECONVERT) + fs.setIf(edx1&(1<<7) != 0, AMXTF32) + fs.setIf(edx1&(1<<8) != 0, AMXCOMPLEX) fs.setIf(edx1&(1<<10) != 0, AVXVNNIINT16) fs.setIf(edx1&(1<<14) != 0, PREFETCHI) fs.setIf(edx1&(1<<19) != 0, AVX10) diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go index 566743d22..9ae32d607 100644 --- a/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go +++ b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go @@ -157,6 +157,10 @@ func addInfo(c *CPUInfo, safe bool) { // x--------------------------------------------------x // | Name | bits | visible | // |--------------------------------------------------| + // | RNDR | [63-60] | y | + // |--------------------------------------------------| + // | TLB | [59-56] | y | + // |--------------------------------------------------| // | TS | [55-52] | y | // |--------------------------------------------------| // | FHM | [51-48] | y | @@ -182,12 +186,10 @@ func addInfo(c *CPUInfo, safe bool) { // | AES | [7-4] | y | // x--------------------------------------------------x - // if instAttrReg0&(0xf<<52) != 0 { - // fmt.Println("TS") - // } - // if instAttrReg0&(0xf<<48) != 0 { - // fmt.Println("FHM") - // } + f.setIf(instAttrReg0&(0xf<<60) != 0, RNDR) + f.setIf(instAttrReg0&(0xf<<56) != 0, TLB) + f.setIf(instAttrReg0&(0xf<<52) != 0, TS) + f.setIf(instAttrReg0&(0xf<<48) != 0, FHM) f.setIf(instAttrReg0&(0xf<<44) != 0, ASIMDDP) f.setIf(instAttrReg0&(0xf<<40) != 0, SM4) f.setIf(instAttrReg0&(0xf<<36) != 0, SM3) diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go index e7f874a7e..04760c1af 100644 --- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go +++ 
b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go @@ -17,223 +17,229 @@ func _() { _ = x[AMXINT8-7] _ = x[AMXFP8-8] _ = x[AMXTILE-9] - _ = x[APX_F-10] - _ = x[AVX-11] - _ = x[AVX10-12] - _ = x[AVX10_128-13] - _ = x[AVX10_256-14] - _ = x[AVX10_512-15] - _ = x[AVX2-16] - _ = x[AVX512BF16-17] - _ = x[AVX512BITALG-18] - _ = x[AVX512BW-19] - _ = x[AVX512CD-20] - _ = x[AVX512DQ-21] - _ = x[AVX512ER-22] - _ = x[AVX512F-23] - _ = x[AVX512FP16-24] - _ = x[AVX512IFMA-25] - _ = x[AVX512PF-26] - _ = x[AVX512VBMI-27] - _ = x[AVX512VBMI2-28] - _ = x[AVX512VL-29] - _ = x[AVX512VNNI-30] - _ = x[AVX512VP2INTERSECT-31] - _ = x[AVX512VPOPCNTDQ-32] - _ = x[AVXIFMA-33] - _ = x[AVXNECONVERT-34] - _ = x[AVXSLOW-35] - _ = x[AVXVNNI-36] - _ = x[AVXVNNIINT8-37] - _ = x[AVXVNNIINT16-38] - _ = x[BHI_CTRL-39] - _ = x[BMI1-40] - _ = x[BMI2-41] - _ = x[CETIBT-42] - _ = x[CETSS-43] - _ = x[CLDEMOTE-44] - _ = x[CLMUL-45] - _ = x[CLZERO-46] - _ = x[CMOV-47] - _ = x[CMPCCXADD-48] - _ = x[CMPSB_SCADBS_SHORT-49] - _ = x[CMPXCHG8-50] - _ = x[CPBOOST-51] - _ = x[CPPC-52] - _ = x[CX16-53] - _ = x[EFER_LMSLE_UNS-54] - _ = x[ENQCMD-55] - _ = x[ERMS-56] - _ = x[F16C-57] - _ = x[FLUSH_L1D-58] - _ = x[FMA3-59] - _ = x[FMA4-60] - _ = x[FP128-61] - _ = x[FP256-62] - _ = x[FSRM-63] - _ = x[FXSR-64] - _ = x[FXSROPT-65] - _ = x[GFNI-66] - _ = x[HLE-67] - _ = x[HRESET-68] - _ = x[HTT-69] - _ = x[HWA-70] - _ = x[HYBRID_CPU-71] - _ = x[HYPERVISOR-72] - _ = x[IA32_ARCH_CAP-73] - _ = x[IA32_CORE_CAP-74] - _ = x[IBPB-75] - _ = x[IBPB_BRTYPE-76] - _ = x[IBRS-77] - _ = x[IBRS_PREFERRED-78] - _ = x[IBRS_PROVIDES_SMP-79] - _ = x[IBS-80] - _ = x[IBSBRNTRGT-81] - _ = x[IBSFETCHSAM-82] - _ = x[IBSFFV-83] - _ = x[IBSOPCNT-84] - _ = x[IBSOPCNTEXT-85] - _ = x[IBSOPSAM-86] - _ = x[IBSRDWROPCNT-87] - _ = x[IBSRIPINVALIDCHK-88] - _ = x[IBS_FETCH_CTLX-89] - _ = x[IBS_OPDATA4-90] - _ = x[IBS_OPFUSE-91] - _ = x[IBS_PREVENTHOST-92] - _ = x[IBS_ZEN4-93] - _ = x[IDPRED_CTRL-94] - _ = x[INT_WBINVD-95] - _ = x[INVLPGB-96] - _ = 
x[KEYLOCKER-97] - _ = x[KEYLOCKERW-98] - _ = x[LAHF-99] - _ = x[LAM-100] - _ = x[LBRVIRT-101] - _ = x[LZCNT-102] - _ = x[MCAOVERFLOW-103] - _ = x[MCDT_NO-104] - _ = x[MCOMMIT-105] - _ = x[MD_CLEAR-106] - _ = x[MMX-107] - _ = x[MMXEXT-108] - _ = x[MOVBE-109] - _ = x[MOVDIR64B-110] - _ = x[MOVDIRI-111] - _ = x[MOVSB_ZL-112] - _ = x[MOVU-113] - _ = x[MPX-114] - _ = x[MSRIRC-115] - _ = x[MSRLIST-116] - _ = x[MSR_PAGEFLUSH-117] - _ = x[NRIPS-118] - _ = x[NX-119] - _ = x[OSXSAVE-120] - _ = x[PCONFIG-121] - _ = x[POPCNT-122] - _ = x[PPIN-123] - _ = x[PREFETCHI-124] - _ = x[PSFD-125] - _ = x[RDPRU-126] - _ = x[RDRAND-127] - _ = x[RDSEED-128] - _ = x[RDTSCP-129] - _ = x[RRSBA_CTRL-130] - _ = x[RTM-131] - _ = x[RTM_ALWAYS_ABORT-132] - _ = x[SBPB-133] - _ = x[SERIALIZE-134] - _ = x[SEV-135] - _ = x[SEV_64BIT-136] - _ = x[SEV_ALTERNATIVE-137] - _ = x[SEV_DEBUGSWAP-138] - _ = x[SEV_ES-139] - _ = x[SEV_RESTRICTED-140] - _ = x[SEV_SNP-141] - _ = x[SGX-142] - _ = x[SGXLC-143] - _ = x[SHA-144] - _ = x[SME-145] - _ = x[SME_COHERENT-146] - _ = x[SPEC_CTRL_SSBD-147] - _ = x[SRBDS_CTRL-148] - _ = x[SRSO_MSR_FIX-149] - _ = x[SRSO_NO-150] - _ = x[SRSO_USER_KERNEL_NO-151] - _ = x[SSE-152] - _ = x[SSE2-153] - _ = x[SSE3-154] - _ = x[SSE4-155] - _ = x[SSE42-156] - _ = x[SSE4A-157] - _ = x[SSSE3-158] - _ = x[STIBP-159] - _ = x[STIBP_ALWAYSON-160] - _ = x[STOSB_SHORT-161] - _ = x[SUCCOR-162] - _ = x[SVM-163] - _ = x[SVMDA-164] - _ = x[SVMFBASID-165] - _ = x[SVML-166] - _ = x[SVMNP-167] - _ = x[SVMPF-168] - _ = x[SVMPFT-169] - _ = x[SYSCALL-170] - _ = x[SYSEE-171] - _ = x[TBM-172] - _ = x[TDX_GUEST-173] - _ = x[TLB_FLUSH_NESTED-174] - _ = x[TME-175] - _ = x[TOPEXT-176] - _ = x[TSCRATEMSR-177] - _ = x[TSXLDTRK-178] - _ = x[VAES-179] - _ = x[VMCBCLEAN-180] - _ = x[VMPL-181] - _ = x[VMSA_REGPROT-182] - _ = x[VMX-183] - _ = x[VPCLMULQDQ-184] - _ = x[VTE-185] - _ = x[WAITPKG-186] - _ = x[WBNOINVD-187] - _ = x[WRMSRNS-188] - _ = x[X87-189] - _ = x[XGETBV1-190] - _ = x[XOP-191] - _ = x[XSAVE-192] - _ 
= x[XSAVEC-193] - _ = x[XSAVEOPT-194] - _ = x[XSAVES-195] - _ = x[AESARM-196] - _ = x[ARMCPUID-197] - _ = x[ASIMD-198] - _ = x[ASIMDDP-199] - _ = x[ASIMDHP-200] - _ = x[ASIMDRDM-201] - _ = x[ATOMICS-202] - _ = x[CRC32-203] - _ = x[DCPOP-204] - _ = x[EVTSTRM-205] - _ = x[FCMA-206] - _ = x[FP-207] - _ = x[FPHP-208] - _ = x[GPA-209] - _ = x[JSCVT-210] - _ = x[LRCPC-211] - _ = x[PMULL-212] - _ = x[SHA1-213] - _ = x[SHA2-214] - _ = x[SHA3-215] - _ = x[SHA512-216] - _ = x[SM3-217] - _ = x[SM4-218] - _ = x[SVE-219] - _ = x[lastID-220] + _ = x[AMXTF32-10] + _ = x[AMXCOMPLEX-11] + _ = x[APX_F-12] + _ = x[AVX-13] + _ = x[AVX10-14] + _ = x[AVX10_128-15] + _ = x[AVX10_256-16] + _ = x[AVX10_512-17] + _ = x[AVX2-18] + _ = x[AVX512BF16-19] + _ = x[AVX512BITALG-20] + _ = x[AVX512BW-21] + _ = x[AVX512CD-22] + _ = x[AVX512DQ-23] + _ = x[AVX512ER-24] + _ = x[AVX512F-25] + _ = x[AVX512FP16-26] + _ = x[AVX512IFMA-27] + _ = x[AVX512PF-28] + _ = x[AVX512VBMI-29] + _ = x[AVX512VBMI2-30] + _ = x[AVX512VL-31] + _ = x[AVX512VNNI-32] + _ = x[AVX512VP2INTERSECT-33] + _ = x[AVX512VPOPCNTDQ-34] + _ = x[AVXIFMA-35] + _ = x[AVXNECONVERT-36] + _ = x[AVXSLOW-37] + _ = x[AVXVNNI-38] + _ = x[AVXVNNIINT8-39] + _ = x[AVXVNNIINT16-40] + _ = x[BHI_CTRL-41] + _ = x[BMI1-42] + _ = x[BMI2-43] + _ = x[CETIBT-44] + _ = x[CETSS-45] + _ = x[CLDEMOTE-46] + _ = x[CLMUL-47] + _ = x[CLZERO-48] + _ = x[CMOV-49] + _ = x[CMPCCXADD-50] + _ = x[CMPSB_SCADBS_SHORT-51] + _ = x[CMPXCHG8-52] + _ = x[CPBOOST-53] + _ = x[CPPC-54] + _ = x[CX16-55] + _ = x[EFER_LMSLE_UNS-56] + _ = x[ENQCMD-57] + _ = x[ERMS-58] + _ = x[F16C-59] + _ = x[FLUSH_L1D-60] + _ = x[FMA3-61] + _ = x[FMA4-62] + _ = x[FP128-63] + _ = x[FP256-64] + _ = x[FSRM-65] + _ = x[FXSR-66] + _ = x[FXSROPT-67] + _ = x[GFNI-68] + _ = x[HLE-69] + _ = x[HRESET-70] + _ = x[HTT-71] + _ = x[HWA-72] + _ = x[HYBRID_CPU-73] + _ = x[HYPERVISOR-74] + _ = x[IA32_ARCH_CAP-75] + _ = x[IA32_CORE_CAP-76] + _ = x[IBPB-77] + _ = x[IBPB_BRTYPE-78] + _ = x[IBRS-79] + _ = 
x[IBRS_PREFERRED-80] + _ = x[IBRS_PROVIDES_SMP-81] + _ = x[IBS-82] + _ = x[IBSBRNTRGT-83] + _ = x[IBSFETCHSAM-84] + _ = x[IBSFFV-85] + _ = x[IBSOPCNT-86] + _ = x[IBSOPCNTEXT-87] + _ = x[IBSOPSAM-88] + _ = x[IBSRDWROPCNT-89] + _ = x[IBSRIPINVALIDCHK-90] + _ = x[IBS_FETCH_CTLX-91] + _ = x[IBS_OPDATA4-92] + _ = x[IBS_OPFUSE-93] + _ = x[IBS_PREVENTHOST-94] + _ = x[IBS_ZEN4-95] + _ = x[IDPRED_CTRL-96] + _ = x[INT_WBINVD-97] + _ = x[INVLPGB-98] + _ = x[KEYLOCKER-99] + _ = x[KEYLOCKERW-100] + _ = x[LAHF-101] + _ = x[LAM-102] + _ = x[LBRVIRT-103] + _ = x[LZCNT-104] + _ = x[MCAOVERFLOW-105] + _ = x[MCDT_NO-106] + _ = x[MCOMMIT-107] + _ = x[MD_CLEAR-108] + _ = x[MMX-109] + _ = x[MMXEXT-110] + _ = x[MOVBE-111] + _ = x[MOVDIR64B-112] + _ = x[MOVDIRI-113] + _ = x[MOVSB_ZL-114] + _ = x[MOVU-115] + _ = x[MPX-116] + _ = x[MSRIRC-117] + _ = x[MSRLIST-118] + _ = x[MSR_PAGEFLUSH-119] + _ = x[NRIPS-120] + _ = x[NX-121] + _ = x[OSXSAVE-122] + _ = x[PCONFIG-123] + _ = x[POPCNT-124] + _ = x[PPIN-125] + _ = x[PREFETCHI-126] + _ = x[PSFD-127] + _ = x[RDPRU-128] + _ = x[RDRAND-129] + _ = x[RDSEED-130] + _ = x[RDTSCP-131] + _ = x[RRSBA_CTRL-132] + _ = x[RTM-133] + _ = x[RTM_ALWAYS_ABORT-134] + _ = x[SBPB-135] + _ = x[SERIALIZE-136] + _ = x[SEV-137] + _ = x[SEV_64BIT-138] + _ = x[SEV_ALTERNATIVE-139] + _ = x[SEV_DEBUGSWAP-140] + _ = x[SEV_ES-141] + _ = x[SEV_RESTRICTED-142] + _ = x[SEV_SNP-143] + _ = x[SGX-144] + _ = x[SGXLC-145] + _ = x[SHA-146] + _ = x[SME-147] + _ = x[SME_COHERENT-148] + _ = x[SPEC_CTRL_SSBD-149] + _ = x[SRBDS_CTRL-150] + _ = x[SRSO_MSR_FIX-151] + _ = x[SRSO_NO-152] + _ = x[SRSO_USER_KERNEL_NO-153] + _ = x[SSE-154] + _ = x[SSE2-155] + _ = x[SSE3-156] + _ = x[SSE4-157] + _ = x[SSE42-158] + _ = x[SSE4A-159] + _ = x[SSSE3-160] + _ = x[STIBP-161] + _ = x[STIBP_ALWAYSON-162] + _ = x[STOSB_SHORT-163] + _ = x[SUCCOR-164] + _ = x[SVM-165] + _ = x[SVMDA-166] + _ = x[SVMFBASID-167] + _ = x[SVML-168] + _ = x[SVMNP-169] + _ = x[SVMPF-170] + _ = x[SVMPFT-171] + _ = x[SYSCALL-172] + _ = 
x[SYSEE-173] + _ = x[TBM-174] + _ = x[TDX_GUEST-175] + _ = x[TLB_FLUSH_NESTED-176] + _ = x[TME-177] + _ = x[TOPEXT-178] + _ = x[TSCRATEMSR-179] + _ = x[TSXLDTRK-180] + _ = x[VAES-181] + _ = x[VMCBCLEAN-182] + _ = x[VMPL-183] + _ = x[VMSA_REGPROT-184] + _ = x[VMX-185] + _ = x[VPCLMULQDQ-186] + _ = x[VTE-187] + _ = x[WAITPKG-188] + _ = x[WBNOINVD-189] + _ = x[WRMSRNS-190] + _ = x[X87-191] + _ = x[XGETBV1-192] + _ = x[XOP-193] + _ = x[XSAVE-194] + _ = x[XSAVEC-195] + _ = x[XSAVEOPT-196] + _ = x[XSAVES-197] + _ = x[AESARM-198] + _ = x[ARMCPUID-199] + _ = x[ASIMD-200] + _ = x[ASIMDDP-201] + _ = x[ASIMDHP-202] + _ = x[ASIMDRDM-203] + _ = x[ATOMICS-204] + _ = x[CRC32-205] + _ = x[DCPOP-206] + _ = x[EVTSTRM-207] + _ = x[FCMA-208] + _ = x[FHM-209] + _ = x[FP-210] + _ = x[FPHP-211] + _ = x[GPA-212] + _ = x[JSCVT-213] + _ = x[LRCPC-214] + _ = x[PMULL-215] + _ = x[RNDR-216] + _ = x[TLB-217] + _ = x[TS-218] + _ = x[SHA1-219] + _ = x[SHA2-220] + _ = x[SHA3-221] + _ = x[SHA512-222] + _ = x[SM3-223] + _ = x[SM4-224] + _ = x[SVE-225] + _ = x[lastID-226] _ = x[firstID-0] } -const _FeatureID_name = 
"firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXFP8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" +const _FeatureID_name = 
"firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXFP8AMXTILEAMXTF32AMXCOMPLEXAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFHMFPFPHPGPAJSCVTLRCPCPMULLRNDRTLBTSSHA1SHA2SHA3SHA512SM3SM4SVElastID" -var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 61, 68, 73, 76, 81, 90, 99, 108, 112, 122, 134, 142, 150, 158, 166, 173, 183, 193, 201, 211, 222, 230, 240, 258, 273, 280, 292, 299, 306, 317, 329, 337, 341, 345, 351, 356, 364, 369, 375, 379, 388, 406, 414, 421, 425, 429, 443, 449, 453, 457, 466, 470, 474, 479, 484, 488, 492, 499, 503, 506, 512, 515, 
518, 528, 538, 551, 564, 568, 579, 583, 597, 614, 617, 627, 638, 644, 652, 663, 671, 683, 699, 713, 724, 734, 749, 757, 768, 778, 785, 794, 804, 808, 811, 818, 823, 834, 841, 848, 856, 859, 865, 870, 879, 886, 894, 898, 901, 907, 914, 927, 932, 934, 941, 948, 954, 958, 967, 971, 976, 982, 988, 994, 1004, 1007, 1023, 1027, 1036, 1039, 1048, 1063, 1076, 1082, 1096, 1103, 1106, 1111, 1114, 1117, 1129, 1143, 1153, 1165, 1172, 1191, 1194, 1198, 1202, 1206, 1211, 1216, 1221, 1226, 1240, 1251, 1257, 1260, 1265, 1274, 1278, 1283, 1288, 1294, 1301, 1306, 1309, 1318, 1334, 1337, 1343, 1353, 1361, 1365, 1374, 1378, 1390, 1393, 1403, 1406, 1413, 1421, 1428, 1431, 1438, 1441, 1446, 1452, 1460, 1466, 1472, 1480, 1485, 1492, 1499, 1507, 1514, 1519, 1524, 1531, 1535, 1537, 1541, 1544, 1549, 1554, 1559, 1563, 1567, 1571, 1577, 1580, 1583, 1586, 1592} +var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 61, 68, 75, 85, 90, 93, 98, 107, 116, 125, 129, 139, 151, 159, 167, 175, 183, 190, 200, 210, 218, 228, 239, 247, 257, 275, 290, 297, 309, 316, 323, 334, 346, 354, 358, 362, 368, 373, 381, 386, 392, 396, 405, 423, 431, 438, 442, 446, 460, 466, 470, 474, 483, 487, 491, 496, 501, 505, 509, 516, 520, 523, 529, 532, 535, 545, 555, 568, 581, 585, 596, 600, 614, 631, 634, 644, 655, 661, 669, 680, 688, 700, 716, 730, 741, 751, 766, 774, 785, 795, 802, 811, 821, 825, 828, 835, 840, 851, 858, 865, 873, 876, 882, 887, 896, 903, 911, 915, 918, 924, 931, 944, 949, 951, 958, 965, 971, 975, 984, 988, 993, 999, 1005, 1011, 1021, 1024, 1040, 1044, 1053, 1056, 1065, 1080, 1093, 1099, 1113, 1120, 1123, 1128, 1131, 1134, 1146, 1160, 1170, 1182, 1189, 1208, 1211, 1215, 1219, 1223, 1228, 1233, 1238, 1243, 1257, 1268, 1274, 1277, 1282, 1291, 1295, 1300, 1305, 1311, 1318, 1323, 1326, 1335, 1351, 1354, 1360, 1370, 1378, 1382, 1391, 1395, 1407, 1410, 1420, 1423, 1430, 1438, 1445, 1448, 1455, 1458, 1463, 1469, 1477, 1483, 1489, 1497, 1502, 1509, 1516, 1524, 1531, 1536, 1541, 1548, 1552, 1555, 
1557, 1561, 1564, 1569, 1574, 1579, 1583, 1586, 1588, 1592, 1596, 1600, 1606, 1609, 1612, 1615, 1621} func (i FeatureID) String() string { if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { diff --git a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go index 84b1acd21..6f0b33ca6 100644 --- a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go +++ b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go @@ -96,9 +96,11 @@ func tryToFillCPUInfoFomSysctl(c *CPUInfo) { setFeature(c, "hw.optional.arm.FEAT_DPB", DCPOP) // setFeature(c, "", EVTSTRM) setFeature(c, "hw.optional.arm.FEAT_FCMA", FCMA) + setFeature(c, "hw.optional.arm.FEAT_FHM", FHM) setFeature(c, "hw.optional.arm.FEAT_FP", FP) setFeature(c, "hw.optional.arm.FEAT_FP16", FPHP) setFeature(c, "hw.optional.arm.FEAT_PAuth", GPA) + setFeature(c, "hw.optional.arm.FEAT_RNG", RNDR) setFeature(c, "hw.optional.arm.FEAT_JSCVT", JSCVT) setFeature(c, "hw.optional.arm.FEAT_LRCPC", LRCPC) setFeature(c, "hw.optional.arm.FEAT_PMULL", PMULL) @@ -106,6 +108,10 @@ func tryToFillCPUInfoFomSysctl(c *CPUInfo) { setFeature(c, "hw.optional.arm.FEAT_SHA256", SHA2) setFeature(c, "hw.optional.arm.FEAT_SHA3", SHA3) setFeature(c, "hw.optional.arm.FEAT_SHA512", SHA512) + setFeature(c, "hw.optional.arm.FEAT_TLBIOS", TLB) + setFeature(c, "hw.optional.arm.FEAT_TLBIRANGE", TLB) + setFeature(c, "hw.optional.arm.FEAT_FlagM", TS) + setFeature(c, "hw.optional.arm.FEAT_FlagM2", TS) // setFeature(c, "", SM3) // setFeature(c, "", SM4) setFeature(c, "hw.optional.arm.FEAT_SVE", SVE) diff --git a/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go index ee278b9e4..d96d24438 100644 --- a/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go +++ b/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go @@ -39,6 +39,80 @@ const ( hwcap_SHA512 = 1 << 21 hwcap_SVE = 1 << 22 hwcap_ASIMDFHM = 1 << 23 + hwcap_DIT = 1 << 24 + 
hwcap_USCAT = 1 << 25 + hwcap_ILRCPC = 1 << 26 + hwcap_FLAGM = 1 << 27 + hwcap_SSBS = 1 << 28 + hwcap_SB = 1 << 29 + hwcap_PACA = 1 << 30 + hwcap_PACG = 1 << 31 + hwcap_GCS = 1 << 32 + + hwcap2_DCPODP = 1 << 0 + hwcap2_SVE2 = 1 << 1 + hwcap2_SVEAES = 1 << 2 + hwcap2_SVEPMULL = 1 << 3 + hwcap2_SVEBITPERM = 1 << 4 + hwcap2_SVESHA3 = 1 << 5 + hwcap2_SVESM4 = 1 << 6 + hwcap2_FLAGM2 = 1 << 7 + hwcap2_FRINT = 1 << 8 + hwcap2_SVEI8MM = 1 << 9 + hwcap2_SVEF32MM = 1 << 10 + hwcap2_SVEF64MM = 1 << 11 + hwcap2_SVEBF16 = 1 << 12 + hwcap2_I8MM = 1 << 13 + hwcap2_BF16 = 1 << 14 + hwcap2_DGH = 1 << 15 + hwcap2_RNG = 1 << 16 + hwcap2_BTI = 1 << 17 + hwcap2_MTE = 1 << 18 + hwcap2_ECV = 1 << 19 + hwcap2_AFP = 1 << 20 + hwcap2_RPRES = 1 << 21 + hwcap2_MTE3 = 1 << 22 + hwcap2_SME = 1 << 23 + hwcap2_SME_I16I64 = 1 << 24 + hwcap2_SME_F64F64 = 1 << 25 + hwcap2_SME_I8I32 = 1 << 26 + hwcap2_SME_F16F32 = 1 << 27 + hwcap2_SME_B16F32 = 1 << 28 + hwcap2_SME_F32F32 = 1 << 29 + hwcap2_SME_FA64 = 1 << 30 + hwcap2_WFXT = 1 << 31 + hwcap2_EBF16 = 1 << 32 + hwcap2_SVE_EBF16 = 1 << 33 + hwcap2_CSSC = 1 << 34 + hwcap2_RPRFM = 1 << 35 + hwcap2_SVE2P1 = 1 << 36 + hwcap2_SME2 = 1 << 37 + hwcap2_SME2P1 = 1 << 38 + hwcap2_SME_I16I32 = 1 << 39 + hwcap2_SME_BI32I32 = 1 << 40 + hwcap2_SME_B16B16 = 1 << 41 + hwcap2_SME_F16F16 = 1 << 42 + hwcap2_MOPS = 1 << 43 + hwcap2_HBC = 1 << 44 + hwcap2_SVE_B16B16 = 1 << 45 + hwcap2_LRCPC3 = 1 << 46 + hwcap2_LSE128 = 1 << 47 + hwcap2_FPMR = 1 << 48 + hwcap2_LUT = 1 << 49 + hwcap2_FAMINMAX = 1 << 50 + hwcap2_F8CVT = 1 << 51 + hwcap2_F8FMA = 1 << 52 + hwcap2_F8DP4 = 1 << 53 + hwcap2_F8DP2 = 1 << 54 + hwcap2_F8E4M3 = 1 << 55 + hwcap2_F8E5M2 = 1 << 56 + hwcap2_SME_LUTV2 = 1 << 57 + hwcap2_SME_F8F16 = 1 << 58 + hwcap2_SME_F8F32 = 1 << 59 + hwcap2_SME_SF8FMA = 1 << 60 + hwcap2_SME_SF8DP4 = 1 << 61 + hwcap2_SME_SF8DP2 = 1 << 62 + hwcap2_POE = 1 << 63 ) func detectOS(c *CPUInfo) bool { @@ -104,11 +178,15 @@ func detectOS(c *CPUInfo) bool { c.featureSet.setIf(isSet(hwcap, 
hwcap_DCPOP), DCPOP) c.featureSet.setIf(isSet(hwcap, hwcap_EVTSTRM), EVTSTRM) c.featureSet.setIf(isSet(hwcap, hwcap_FCMA), FCMA) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDFHM), FHM) c.featureSet.setIf(isSet(hwcap, hwcap_FP), FP) c.featureSet.setIf(isSet(hwcap, hwcap_FPHP), FPHP) c.featureSet.setIf(isSet(hwcap, hwcap_JSCVT), JSCVT) c.featureSet.setIf(isSet(hwcap, hwcap_LRCPC), LRCPC) c.featureSet.setIf(isSet(hwcap, hwcap_PMULL), PMULL) + c.featureSet.setIf(isSet(hwcap, hwcap2_RNG), RNDR) + // c.featureSet.setIf(isSet(hwcap, hwcap_), TLB) + // c.featureSet.setIf(isSet(hwcap, hwcap_), TS) c.featureSet.setIf(isSet(hwcap, hwcap_SHA1), SHA1) c.featureSet.setIf(isSet(hwcap, hwcap_SHA2), SHA2) c.featureSet.setIf(isSet(hwcap, hwcap_SHA3), SHA3) diff --git a/vendor/github.com/mattn/go-sqlite3/README.md b/vendor/github.com/mattn/go-sqlite3/README.md index 1804a89ad..3b43b033e 100644 --- a/vendor/github.com/mattn/go-sqlite3/README.md +++ b/vendor/github.com/mattn/go-sqlite3/README.md @@ -35,7 +35,7 @@ This package follows the official [Golang Release Policy](https://golang.org/doc - [Android](#android) - [ARM](#arm) - [Cross Compile](#cross-compile) -- [Google Cloud Platform](#google-cloud-platform) +- [Compiling](#compiling) - [Linux](#linux) - [Alpine](#alpine) - [Fedora](#fedora) @@ -70,7 +70,6 @@ This package can be installed with the `go get` command: _go-sqlite3_ is *cgo* package. If you want to build your app using go-sqlite3, you need gcc. -However, after you have built and installed _go-sqlite3_ with `go install github.com/mattn/go-sqlite3` (which requires gcc), you can build your app without relying on gcc in future. ***Important: because this is a `CGO` enabled package, you are required to set the environment variable `CGO_ENABLED=1` and have a `gcc` compiler present within your path.*** @@ -228,11 +227,7 @@ Steps: Please refer to the project's [README](https://github.com/FiloSottile/homebrew-musl-cross#readme) for further information. 
-# Google Cloud Platform - -Building on GCP is not possible because Google Cloud Platform does not allow `gcc` to be executed. - -Please work only with compiled final binaries. +# Compiling ## Linux diff --git a/vendor/github.com/mattn/go-sqlite3/callback.go b/vendor/github.com/mattn/go-sqlite3/callback.go index b794bcd83..0c518fa2c 100644 --- a/vendor/github.com/mattn/go-sqlite3/callback.go +++ b/vendor/github.com/mattn/go-sqlite3/callback.go @@ -345,7 +345,8 @@ func callbackRetText(ctx *C.sqlite3_context, v reflect.Value) error { if v.Type().Kind() != reflect.String { return fmt.Errorf("cannot convert %s to TEXT", v.Type()) } - C._sqlite3_result_text(ctx, C.CString(v.Interface().(string))) + cstr := C.CString(v.Interface().(string)) + C._sqlite3_result_text(ctx, cstr) return nil } diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3.go b/vendor/github.com/mattn/go-sqlite3/sqlite3.go index ed2a9e2a3..3025a5004 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3.go @@ -381,7 +381,7 @@ type SQLiteStmt struct { s *C.sqlite3_stmt t string closed bool - cls bool + cls bool // True if the statement was created by SQLiteConn.Query } // SQLiteResult implements sql.Result. @@ -393,12 +393,12 @@ type SQLiteResult struct { // SQLiteRows implements driver.Rows. 
type SQLiteRows struct { s *SQLiteStmt - nc int + nc int32 // Number of columns + cls bool // True if we need to close the parent statement in Close cols []string decltype []string - cls bool - closed bool ctx context.Context // no better alternative to pass context into Next() method + closemu sync.Mutex } type functionInfo struct { @@ -929,6 +929,7 @@ func (c *SQLiteConn) query(ctx context.Context, query string, args []driver.Name s.(*SQLiteStmt).cls = true na := s.NumInput() if len(args)-start < na { + s.Close() return nil, fmt.Errorf("not enough args to execute query: want %d got %d", na, len(args)-start) } // consume the number of arguments used in the current @@ -2007,14 +2008,12 @@ func (s *SQLiteStmt) query(ctx context.Context, args []driver.NamedValue) (drive rows := &SQLiteRows{ s: s, - nc: int(C.sqlite3_column_count(s.s)), + nc: int32(C.sqlite3_column_count(s.s)), + cls: s.cls, cols: nil, decltype: nil, - cls: s.cls, - closed: false, ctx: ctx, } - runtime.SetFinalizer(rows, (*SQLiteRows).Close) return rows, nil } @@ -2111,24 +2110,28 @@ func (s *SQLiteStmt) Readonly() bool { // Close the rows. 
func (rc *SQLiteRows) Close() error { - rc.s.mu.Lock() - if rc.s.closed || rc.closed { - rc.s.mu.Unlock() + rc.closemu.Lock() + defer rc.closemu.Unlock() + s := rc.s + if s == nil { + return nil + } + rc.s = nil // remove reference to SQLiteStmt + s.mu.Lock() + if s.closed { + s.mu.Unlock() return nil } - rc.closed = true if rc.cls { - rc.s.mu.Unlock() - return rc.s.Close() + s.mu.Unlock() + return s.Close() } - rv := C.sqlite3_reset(rc.s.s) + rv := C.sqlite3_reset(s.s) if rv != C.SQLITE_OK { - rc.s.mu.Unlock() - return rc.s.c.lastError() + s.mu.Unlock() + return s.c.lastError() } - rc.s.mu.Unlock() - rc.s = nil - runtime.SetFinalizer(rc, nil) + s.mu.Unlock() return nil } @@ -2136,9 +2139,9 @@ func (rc *SQLiteRows) Close() error { func (rc *SQLiteRows) Columns() []string { rc.s.mu.Lock() defer rc.s.mu.Unlock() - if rc.s.s != nil && rc.nc != len(rc.cols) { + if rc.s.s != nil && int(rc.nc) != len(rc.cols) { rc.cols = make([]string, rc.nc) - for i := 0; i < rc.nc; i++ { + for i := 0; i < int(rc.nc); i++ { rc.cols[i] = C.GoString(C.sqlite3_column_name(rc.s.s, C.int(i))) } } @@ -2148,7 +2151,7 @@ func (rc *SQLiteRows) Columns() []string { func (rc *SQLiteRows) declTypes() []string { if rc.s.s != nil && rc.decltype == nil { rc.decltype = make([]string, rc.nc) - for i := 0; i < rc.nc; i++ { + for i := 0; i < int(rc.nc); i++ { rc.decltype[i] = strings.ToLower(C.GoString(C.sqlite3_column_decltype(rc.s.s, C.int(i)))) } } diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c index fc37b336c..3a00f43de 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.c @@ -5,7 +5,11 @@ #ifdef SQLITE_ENABLE_UNLOCK_NOTIFY #include +#ifndef USE_LIBSQLITE3 #include "sqlite3-binding.h" +#else +#include +#endif extern int unlock_notify_wait(sqlite3 *db); diff --git 
a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go index 76f7bbfb6..3ac8050a4 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go @@ -12,7 +12,11 @@ package sqlite3 #cgo CFLAGS: -DSQLITE_ENABLE_UNLOCK_NOTIFY #include +#ifndef USE_LIBSQLITE3 #include "sqlite3-binding.h" +#else +#include +#endif extern void unlock_notify_callback(void *arg, int argc); */ diff --git a/vendor/github.com/minio/minio-go/v7/.golangci.yml b/vendor/github.com/minio/minio-go/v7/.golangci.yml index 875b949c6..88442e0cf 100644 --- a/vendor/github.com/minio/minio-go/v7/.golangci.yml +++ b/vendor/github.com/minio/minio-go/v7/.golangci.yml @@ -1,27 +1,72 @@ -linters-settings: - misspell: - locale: US - +version: "2" linters: disable-all: true enable: - - typecheck - - goimports - - misspell - - revive + - durationcheck + - gocritic + - gomodguard - govet - ineffassign - - gosimple + - misspell + - revive + - staticcheck + - unconvert - unused - - gocritic - + - usetesting + - whitespace + settings: + misspell: + locale: US + staticcheck: + checks: + - all + - -SA1008 + - -SA1019 + - -SA4000 + - -SA9004 + - -ST1000 + - -ST1005 + - -ST1016 + - -ST1021 + - -ST1020 + - -U1000 + exclusions: + generated: lax + rules: + - path: (.+)\.go$ + text: "empty-block:" + - path: (.+)\.go$ + text: "unused-parameter:" + - path: (.+)\.go$ + text: "dot-imports:" + - path: (.+)\.go$ + text: "singleCaseSwitch: should rewrite switch statement to if statement" + - path: (.+)\.go$ + text: "unlambda: replace" + - path: (.+)\.go$ + text: "captLocal:" + - path: (.+)\.go$ + text: "should have a package comment" + - path: (.+)\.go$ + text: "ifElseChain:" + - path: (.+)\.go$ + text: "elseif:" + - path: (.+)\.go$ + text: "Error return value of" + - path: (.+)\.go$ + text: "unnecessary conversion" + - path: (.+)\.go$ + text: "Error return value is not 
checked" issues: - exclude-use-default: false - exclude: - # todo fix these when we get enough time. - - "singleCaseSwitch: should rewrite switch statement to if statement" - - "unlambda: replace" - - "captLocal:" - - "ifElseChain:" - - "elseif:" - - "should have a package comment" + max-issues-per-linter: 100 + max-same-issues: 100 +formatters: + enable: + - gofumpt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go index ad8eada4a..33811b98f 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go @@ -251,7 +251,6 @@ func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefi // Close current connection before looping further. closeResponse(resp) - } }(notificationInfoCh) diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go index 8c84e4f27..045e3c38e 100644 --- a/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go @@ -90,6 +90,7 @@ type BucketVersioningConfiguration struct { // Requires versioning to be enabled ExcludedPrefixes []ExcludedPrefix `xml:",omitempty"` ExcludeFolders bool `xml:",omitempty"` + PurgeOnDelete string `xml:",omitempty"` } // Various supported states diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go index 9041d99e9..5864f0260 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go @@ -135,16 +135,16 @@ func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string { res := map[string][]string{} for _, g := range grants { - 
switch { - case g.Permission == "READ": + switch g.Permission { + case "READ": res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID) - case g.Permission == "WRITE": + case "WRITE": res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID) - case g.Permission == "READ_ACP": + case "READ_ACP": res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID) - case g.Permission == "WRITE_ACP": + case "WRITE_ACP": res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID) - case g.Permission == "FULL_CONTROL": + case "FULL_CONTROL": res["X-Amz-Grant-Full-Control"] = append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID) } } diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go index da387d7ec..26d35c4c2 100644 --- a/vendor/github.com/minio/minio-go/v7/api-list.go +++ b/vendor/github.com/minio/minio-go/v7/api-list.go @@ -524,7 +524,6 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts } return } - } }(resultCh) return resultCh diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go index 3ff3b69ef..82c0ae9e4 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go @@ -350,7 +350,6 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b // Part number always starts with '1'. var partNumber int for partNumber = 1; partNumber <= totalPartsCount; partNumber++ { - // Proceed to upload the part. 
if partNumber == totalPartsCount { partSize = lastPartSize diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go index 523431351..5b4443ec5 100644 --- a/vendor/github.com/minio/minio-go/v7/api-remove.go +++ b/vendor/github.com/minio/minio-go/v7/api-remove.go @@ -392,10 +392,7 @@ func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh defer close(resultCh) // Loop over entries by 1000 and call MultiDelete requests - for { - if finish { - break - } + for !finish { count := 0 var batch []ObjectInfo diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go index 5e015fb82..08a5a7b6e 100644 --- a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go +++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go @@ -194,7 +194,6 @@ func (l *ListVersionsResult) UnmarshalXML(d *xml.Decoder, _ xml.StartElement) (e default: return errors.New("unrecognized option:" + tagName) } - } } return nil diff --git a/vendor/github.com/minio/minio-go/v7/api-select.go b/vendor/github.com/minio/minio-go/v7/api-select.go index 628d967ff..4fb4db9ba 100644 --- a/vendor/github.com/minio/minio-go/v7/api-select.go +++ b/vendor/github.com/minio/minio-go/v7/api-select.go @@ -609,7 +609,6 @@ func (s *SelectResults) start(pipeWriter *io.PipeWriter) { closeResponse(s.resp) return } - } }() } @@ -669,7 +668,6 @@ func extractHeader(body io.Reader, myHeaders http.Header) error { } myHeaders.Set(headerTypeName, headerValueName) - } return nil } diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index d7b065fdc..b64f57615 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -155,7 +155,7 @@ type Options struct { // Global constants. 
const ( libraryName = "minio-go" - libraryVersion = "v7.0.88" + libraryVersion = "v7.0.89" ) // User Agent should always following the below style. @@ -598,7 +598,7 @@ func (c *Client) do(req *http.Request) (resp *http.Response, err error) { // If trace is enabled, dump http request and response, // except when the traceErrorsOnly enabled and the response's status code is ok - if c.isTraceEnabled && !(c.traceErrorsOnly && resp.StatusCode == http.StatusOK) { + if c.isTraceEnabled && (!c.traceErrorsOnly || resp.StatusCode != http.StatusOK) { err = c.dumpHTTP(req, resp) if err != nil { return nil, err diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go index cd0a641bd..415b07095 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go @@ -104,6 +104,8 @@ type STSAssumeRoleOptions struct { RoleARN string RoleSessionName string ExternalID string + + TokenRevokeType string // Optional, used for token revokation (MinIO only extension) } // NewSTSAssumeRole returns a pointer to a new @@ -161,6 +163,9 @@ func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssume if opts.ExternalID != "" { v.Set("ExternalId", opts.ExternalID) } + if opts.TokenRevokeType != "" { + v.Set("TokenRevokeType", opts.TokenRevokeType) + } u, err := url.Parse(endpoint) if err != nil { diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go index 0021f9315..162f460ee 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go @@ -69,6 +69,9 @@ type CustomTokenIdentity struct { // RequestedExpiry is to set the validity of the generated credentials // (this value bounded by 
server). RequestedExpiry time.Duration + + // Optional, used for token revokation + TokenRevokeType string } // RetrieveWithCredContext with Retrieve optionally cred context @@ -98,6 +101,9 @@ func (c *CustomTokenIdentity) RetrieveWithCredContext(cc *CredContext) (value Va if c.RequestedExpiry != 0 { v.Set("DurationSeconds", fmt.Sprintf("%d", int(c.RequestedExpiry.Seconds()))) } + if c.TokenRevokeType != "" { + v.Set("TokenRevokeType", c.TokenRevokeType) + } u.RawQuery = v.Encode() diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go index e63997e6e..31fe10ae0 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go @@ -73,6 +73,9 @@ type LDAPIdentity struct { // RequestedExpiry is the configured expiry duration for credentials // requested from LDAP. RequestedExpiry time.Duration + + // Optional, used for token revokation + TokenRevokeType string } // NewLDAPIdentity returns new credentials object that uses LDAP @@ -152,6 +155,9 @@ func (k *LDAPIdentity) RetrieveWithCredContext(cc *CredContext) (value Value, er if k.RequestedExpiry != 0 { v.Set("DurationSeconds", fmt.Sprintf("%d", int(k.RequestedExpiry.Seconds()))) } + if k.TokenRevokeType != "" { + v.Set("TokenRevokeType", k.TokenRevokeType) + } req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode())) if err != nil { diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go index c904bbeac..2a35a51a4 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go @@ -80,6 +80,9 @@ type STSCertificateIdentity struct { // Certificate is the client certificate that is used for 
// STS authentication. Certificate tls.Certificate + + // Optional, used for token revokation + TokenRevokeType string } // NewSTSCertificateIdentity returns a STSCertificateIdentity that authenticates @@ -122,6 +125,9 @@ func (i *STSCertificateIdentity) RetrieveWithCredContext(cc *CredContext) (Value queryValues := url.Values{} queryValues.Set("Action", "AssumeRoleWithCertificate") queryValues.Set("Version", STSVersion) + if i.TokenRevokeType != "" { + queryValues.Set("TokenRevokeType", i.TokenRevokeType) + } endpointURL.RawQuery = queryValues.Encode() req, err := http.NewRequest(http.MethodPost, endpointURL.String(), nil) diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go index 235258893..a9987255e 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go @@ -93,6 +93,9 @@ type STSWebIdentity struct { // roleSessionName is the identifier for the assumed role session. 
roleSessionName string + + // Optional, used for token revokation + TokenRevokeType string } // NewSTSWebIdentity returns a pointer to a new @@ -135,7 +138,7 @@ func WithPolicy(policy string) func(*STSWebIdentity) { } func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string, policy string, - getWebIDTokenExpiry func() (*WebIdentityToken, error), + getWebIDTokenExpiry func() (*WebIdentityToken, error), tokenRevokeType string, ) (AssumeRoleWithWebIdentityResponse, error) { idToken, err := getWebIDTokenExpiry() if err != nil { @@ -168,6 +171,9 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession v.Set("Policy", policy) } v.Set("Version", STSVersion) + if tokenRevokeType != "" { + v.Set("TokenRevokeType", tokenRevokeType) + } u, err := url.Parse(endpoint) if err != nil { @@ -236,7 +242,7 @@ func (m *STSWebIdentity) RetrieveWithCredContext(cc *CredContext) (Value, error) return Value{}, errors.New("STS endpoint unknown") } - a, err := getWebIdentityCredentials(client, stsEndpoint, m.RoleARN, m.roleSessionName, m.Policy, m.GetWebIDTokenExpiry) + a, err := getWebIdentityCredentials(client, stsEndpoint, m.RoleARN, m.roleSessionName, m.Policy, m.GetWebIDTokenExpiry, m.TokenRevokeType) if err != nil { return Value{}, err } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go index 344af2b78..7ed98b0d1 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go @@ -192,7 +192,7 @@ func (t Transition) IsDaysNull() bool { // IsDateNull returns true if date field is null func (t Transition) IsDateNull() bool { - return t.Date.Time.IsZero() + return t.Date.IsZero() } // IsNull returns true if no storage-class is set. 
@@ -323,7 +323,7 @@ type ExpirationDate struct { // MarshalXML encodes expiration date if it is non-zero and encodes // empty string otherwise func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { - if eDate.Time.IsZero() { + if eDate.IsZero() { return nil } return e.EncodeElement(eDate.Format(time.RFC3339), startElement) @@ -392,7 +392,7 @@ func (e Expiration) IsDaysNull() bool { // IsDateNull returns true if date field is null func (e Expiration) IsDateNull() bool { - return e.Date.Time.IsZero() + return e.Date.IsZero() } // IsDeleteMarkerExpirationEnabled returns true if the auto-expiration of delete marker is enabled diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go index 151ca21e8..31f29bcb1 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go @@ -283,7 +283,6 @@ func (b *Configuration) AddTopic(topicConfig Config) bool { for _, n := range b.TopicConfigs { // If new config matches existing one if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter { - existingConfig := set.NewStringSet() for _, v := range n.Events { existingConfig.Add(string(v)) @@ -308,7 +307,6 @@ func (b *Configuration) AddQueue(queueConfig Config) bool { newQueueConfig := QueueConfig{Config: queueConfig, Queue: queueConfig.Arn.String()} for _, n := range b.QueueConfigs { if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter { - existingConfig := set.NewStringSet() for _, v := range n.Events { existingConfig.Add(string(v)) @@ -333,7 +331,6 @@ func (b *Configuration) AddLambda(lambdaConfig Config) bool { newLambdaConfig := LambdaConfig{Config: lambdaConfig, Lambda: lambdaConfig.Arn.String()} for _, n := range b.LambdaConfigs { if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == 
n.Filter { - existingConfig := set.NewStringSet() for _, v := range n.Events { existingConfig.Add(string(v)) @@ -372,7 +369,7 @@ func (b *Configuration) RemoveTopicByArnEventsPrefixSuffix(arn Arn, events []Eve removeIndex := -1 for i, v := range b.TopicConfigs { // if it matches events and filters, mark the index for deletion - if v.Topic == arn.String() && v.Config.Equal(events, prefix, suffix) { + if v.Topic == arn.String() && v.Equal(events, prefix, suffix) { removeIndex = i break // since we have at most one matching config } @@ -400,7 +397,7 @@ func (b *Configuration) RemoveQueueByArnEventsPrefixSuffix(arn Arn, events []Eve removeIndex := -1 for i, v := range b.QueueConfigs { // if it matches events and filters, mark the index for deletion - if v.Queue == arn.String() && v.Config.Equal(events, prefix, suffix) { + if v.Queue == arn.String() && v.Equal(events, prefix, suffix) { removeIndex = i break // since we have at most one matching config } @@ -428,7 +425,7 @@ func (b *Configuration) RemoveLambdaByArnEventsPrefixSuffix(arn Arn, events []Ev removeIndex := -1 for i, v := range b.LambdaConfigs { // if it matches events and filters, mark the index for deletion - if v.Lambda == arn.String() && v.Config.Equal(events, prefix, suffix) { + if v.Lambda == arn.String() && v.Equal(events, prefix, suffix) { removeIndex = i break // since we have at most one matching config } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go index 0239782e5..55636ad48 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go @@ -988,10 +988,10 @@ func (q ReplQueueStats) QStats() (r ReplQStats) { // MetricsV2 represents replication metrics for a bucket. 
type MetricsV2 struct { - Uptime int64 `json:"uptime"` - CurrentStats Metrics `json:"currStats"` - QueueStats ReplQueueStats `json:"queueStats"` - DowntimeInfo DowntimeInfo `json:"downtimeInfo"` + Uptime int64 `json:"uptime"` + CurrentStats Metrics `json:"currStats"` + QueueStats ReplQueueStats `json:"queueStats"` + DowntimeInfo map[string]DowntimeInfo `json:"downtimeInfo"` } // DowntimeInfo represents the downtime info diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go index 77540e2d8..e18002b8d 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go @@ -212,7 +212,6 @@ func (s *StreamingUSReader) Read(buf []byte) (int, error) { } return 0, err } - } } return s.buf.Read(buf) diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go index 1c2f1dc9d..fcd0dfd76 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go @@ -387,7 +387,6 @@ func (s *StreamingReader) Read(buf []byte) (int, error) { } return 0, err } - } } return s.buf.Read(buf) diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go index fa4f8c91e..f65c36c7d 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go @@ -148,7 +148,7 @@ func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost b // Prepare auth header. 
authHeader := new(bytes.Buffer) - authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID)) + fmt.Fprintf(authHeader, "%s %s:", signV2Algorithm, accessKeyID) encoder := base64.NewEncoder(base64.StdEncoding, authHeader) encoder.Write(hm.Sum(nil)) encoder.Close() diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go index ffd251451..09ece53a0 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go @@ -128,8 +128,8 @@ func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) strin for _, k := range headers { buf.WriteString(k) buf.WriteByte(':') - switch { - case k == "host": + switch k { + case "host": buf.WriteString(getHostAddr(&req)) buf.WriteByte('\n') default: diff --git a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/config/config.go b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/config/config.go index fbdd6918d..4d6de02c5 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/config/config.go +++ b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/config/config.go @@ -27,6 +27,7 @@ type Config struct { FavoriteStorageDrivers map[string]map[string]interface{} `mapstructure:"favorite_storage_drivers"` Version string `mapstructure:"version"` VersionString string `mapstructure:"version_string"` + Edition string `mapstructure:"edition"` Product string `mapstructure:"product"` ProductName string `mapstructure:"product_name"` ProductVersion string `mapstructure:"product_version"` diff --git a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/status.go b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/status.go index 8f3fadae1..c14afc3e4 100644 --- 
a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/status.go +++ b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocdav/status.go @@ -34,6 +34,7 @@ func (s *svc) doStatus(w http.ResponseWriter, r *http.Request) { NeedsDBUpgrade: false, Version: s.c.Version, VersionString: s.c.VersionString, + Edition: s.c.Edition, ProductName: s.c.ProductName, ProductVersion: s.c.ProductVersion, Product: s.c.Product, diff --git a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocs/handlers/cloud/capabilities/capabilities.go b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocs/handlers/cloud/capabilities/capabilities.go index 054bf074e..d8b4fcf33 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocs/handlers/cloud/capabilities/capabilities.go +++ b/vendor/github.com/opencloud-eu/reva/v2/internal/http/services/owncloud/ocs/handlers/cloud/capabilities/capabilities.go @@ -69,6 +69,9 @@ func (h *Handler) Init(c *config.Config) { if h.c.Capabilities.Core.Status.VersionString == "" { h.c.Capabilities.Core.Status.VersionString = "10.0.11" // TODO make build determined } + if h.c.Capabilities.Core.Status.Edition == "" { + h.c.Capabilities.Core.Status.Edition = "" // TODO make build determined + } if h.c.Capabilities.Core.Status.ProductName == "" { h.c.Capabilities.Core.Status.ProductName = "reva" // TODO make build determined } @@ -217,6 +220,7 @@ func (h *Handler) Init(c *config.Config) { Minor: 0, Micro: 11, String: "10.0.11", + Edition: "", Product: "reva", ProductVersion: "", } diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/logger/logger.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/logger/logger.go index 13e9272e1..f798b29d0 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/logger/logger.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/logger/logger.go @@ -50,6 +50,7 @@ type Option func(l *zerolog.Logger) // New creates a new logger. 
func New(opts ...Option) *zerolog.Logger { // create a default logger + zerolog.SetGlobalLevel(zerolog.TraceLevel) zl := zerolog.New(os.Stderr).With().Timestamp().Caller().Logger() for _, opt := range opts { opt(&zl) @@ -127,7 +128,7 @@ type LogConf struct { func fromConfig(conf *LogConf) (*zerolog.Logger, error) { if conf.Level == "" { - conf.Level = zerolog.DebugLevel.String() + conf.Level = zerolog.InfoLevel.String() } var opts []Option diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/micro/ocdav/option.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/micro/ocdav/option.go index ea99267a7..72ed1c9b7 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/micro/ocdav/option.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/micro/ocdav/option.go @@ -297,6 +297,13 @@ func VersionString(val string) Option { } } +// Edition provides a function to set the Edition config option. +func Edition(val string) Option { + return func(o *Options) { + o.config.Edition = val + } +} + // Product provides a function to set the Product config option. 
func Product(val string) Option { return func(o *Options) { diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/owncloud/ocs/capabilities.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/owncloud/ocs/capabilities.go index 3dd9ab93b..8d754cd75 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/owncloud/ocs/capabilities.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/owncloud/ocs/capabilities.go @@ -142,6 +142,7 @@ type Status struct { NeedsDBUpgrade ocsBool `json:"needsDbUpgrade" xml:"needsDbUpgrade"` Version string `json:"version" xml:"version"` VersionString string `json:"versionstring" xml:"versionstring"` + Edition string `json:"edition" xml:"edition"` ProductName string `json:"productname" xml:"productname"` Product string `json:"product" xml:"product"` ProductVersion string `json:"productversion" xml:"productversion"` @@ -308,6 +309,7 @@ type Version struct { Minor int `json:"minor" xml:"minor"` Micro int `json:"micro" xml:"micro"` // = patch level String string `json:"string" xml:"string"` + Edition string `json:"edition" xml:"edition"` Product string `json:"product" xml:"product"` ProductVersion string `json:"productversion" xml:"productversion"` } diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/rhttp/datatx/manager/tus/tus.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/rhttp/datatx/manager/tus/tus.go index a93bc3f7d..7504786e2 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/rhttp/datatx/manager/tus/tus.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/rhttp/datatx/manager/tus/tus.go @@ -242,16 +242,35 @@ type tusdLogger struct { // Handle handles the record func (l tusdLogger) Handle(_ context.Context, r slog.Record) error { + var logev *zerolog.Event switch r.Level { case slog.LevelDebug: - l.log.Debug().Msg(r.Message) + logev = l.log.Debug() case slog.LevelInfo: - l.log.Info().Msg(r.Message) + logev = l.log.Info() case slog.LevelWarn: - l.log.Warn().Msg(r.Message) + logev = l.log.Warn() case slog.LevelError: - 
l.log.Error().Msg(r.Message) + logev = l.log.Error() } + r.Attrs(func(a slog.Attr) bool { + // Resolve the Attr's value before doing anything else. + a.Value = a.Value.Resolve() + // Ignore empty Attrs. + if a.Equal(slog.Attr{}) { + return true + } + switch a.Value.Kind() { + case slog.KindBool: + logev = logev.Bool(a.Key, a.Value.Bool()) + case slog.KindInt64: + logev = logev.Int64(a.Key, a.Value.Int64()) + default: + logev = logev.Str(a.Key, a.Value.String()) + } + return true + }) + logev.Msg(r.Message) return nil } @@ -262,7 +281,7 @@ func (l tusdLogger) Enabled(_ context.Context, _ slog.Level) bool { return true func (l tusdLogger) WithAttrs(attr []slog.Attr) slog.Handler { fields := make(map[string]interface{}, len(attr)) for _, a := range attr { - fields[a.Key] = a.Value + fields[a.Key] = a.Value.String() } c := l.log.With().Fields(fields).Logger() sLog := tusdLogger{log: &c} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/rhttp/datatx/utils/download/range.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/rhttp/datatx/utils/download/range.go index 063a67981..c7cfa5d31 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/rhttp/datatx/utils/download/range.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/rhttp/datatx/utils/download/range.go @@ -29,15 +29,13 @@ import ( // taken from https://golang.org/src/net/http/fs.go -// ErrSeeker is returned by ServeContent's sizeFunc when the content -// doesn't seek properly. The underlying Seeker's error text isn't -// included in the sizeFunc reply so it's not sent over HTTP to end -// users. -var ErrSeeker = errors.New("seeker can't seek") +// ErrInvalidRange is returned by serveContent's parseRange if the Range is +// malformed or invalid. +var ErrInvalidRange = errors.New("invalid range") // ErrNoOverlap is returned by serveContent's parseRange if first-byte-pos of // all of the byte-range-spec values is greater than the content size. 
-var ErrNoOverlap = errors.New("invalid range: failed to overlap") +var ErrNoOverlap = fmt.Errorf("%w: failed to overlap", ErrInvalidRange) // HTTPRange specifies the byte range to be sent to the client. type HTTPRange struct { @@ -65,7 +63,7 @@ func ParseRange(s string, size int64) ([]HTTPRange, error) { } const b = "bytes=" if !strings.HasPrefix(s, b) { - return nil, errors.New("invalid range") + return nil, ErrInvalidRange } ranges := []HTTPRange{} noOverlap := false @@ -76,7 +74,7 @@ func ParseRange(s string, size int64) ([]HTTPRange, error) { } i := strings.Index(ra, "-") if i < 0 { - return nil, errors.New("invalid range") + return nil, ErrInvalidRange } start, end := textproto.TrimString(ra[:i]), textproto.TrimString(ra[i+1:]) var r HTTPRange @@ -85,7 +83,7 @@ func ParseRange(s string, size int64) ([]HTTPRange, error) { // range start relative to the end of the file. i, err := strconv.ParseInt(end, 10, 64) if err != nil { - return nil, errors.New("invalid range") + return nil, ErrInvalidRange } if i > size { i = size @@ -95,7 +93,7 @@ func ParseRange(s string, size int64) ([]HTTPRange, error) { } else { i, err := strconv.ParseInt(start, 10, 64) if err != nil || i < 0 { - return nil, errors.New("invalid range") + return nil, ErrInvalidRange } if i >= size { // If the range begins after the size of the content, @@ -110,7 +108,7 @@ func ParseRange(s string, size int64) ([]HTTPRange, error) { } else { i, err := strconv.ParseInt(end, 10, 64) if err != nil || r.Start > i { - return nil, errors.New("invalid range") + return nil, ErrInvalidRange } if i >= size { i = size - 1 @@ -146,7 +144,7 @@ func RangesMIMESize(ranges []HTTPRange, contentType string, contentSize int64) ( _, _ = mw.CreatePart(ra.MimeHeader(contentType, contentSize)) encSize += ra.Length } - mw.Close() + _ = mw.Close() encSize += int64(w) return } diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup/lookup.go 
b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup/lookup.go index 11f75887c..3c1b45f40 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup/lookup.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup/lookup.go @@ -25,6 +25,7 @@ import ( "os" "path/filepath" "strings" + "time" user "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" @@ -72,6 +73,7 @@ type Lookup struct { Options *options.Options IDCache IDCache + IDHistoryCache IDCache metadataBackend metadata.Backend userMapper usermapper.Mapper tm node.TimeManager @@ -79,10 +81,15 @@ type Lookup struct { // New returns a new Lookup instance func New(b metadata.Backend, um usermapper.Mapper, o *options.Options, tm node.TimeManager) *Lookup { + idHistoryConf := o.Options.IDCache + idHistoryConf.Database = o.Options.IDCache.Table + "_history" + idHistoryConf.TTL = 1 * time.Minute + lu := &Lookup{ Options: o, metadataBackend: b, - IDCache: NewStoreIDCache(&o.Options), + IDCache: NewStoreIDCache(o.Options.IDCache), + IDHistoryCache: NewStoreIDCache(idHistoryConf), userMapper: um, tm: tm, } diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup/store_idcache.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup/store_idcache.go index 2075196a0..7534c9da5 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup/store_idcache.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup/store_idcache.go @@ -25,7 +25,7 @@ import ( microstore "go-micro.dev/v4/store" "github.com/opencloud-eu/reva/v2/pkg/appctx" - "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options" + "github.com/opencloud-eu/reva/v2/pkg/storage/cache" "github.com/opencloud-eu/reva/v2/pkg/store" ) @@ -34,31 +34,33 @@ type StoreIDCache struct { } // NewMemoryIDCache returns a new MemoryIDCache -func NewStoreIDCache(o 
*options.Options) *StoreIDCache { +func NewStoreIDCache(c cache.Config) *StoreIDCache { return &StoreIDCache{ cache: store.Create( - store.Store(o.IDCache.Store), - store.Size(o.IDCache.Size), - microstore.Nodes(o.IDCache.Nodes...), - microstore.Database(o.IDCache.Database), - microstore.Table(o.IDCache.Table), - store.DisablePersistence(o.IDCache.DisablePersistence), - store.Authentication(o.IDCache.AuthUsername, o.IDCache.AuthPassword), + store.Store(c.Store), + store.Size(c.Size), + microstore.Nodes(c.Nodes...), + microstore.Database(c.Database), + microstore.Table(c.Table), + store.DisablePersistence(c.DisablePersistence), + store.Authentication(c.AuthUsername, c.AuthPassword), ), } } // Delete removes an entry from the cache func (c *StoreIDCache) Delete(_ context.Context, spaceID, nodeID string) error { + var rerr error v, err := c.cache.Read(cacheKey(spaceID, nodeID)) if err == nil { - err := c.cache.Delete(reverseCacheKey(string(v[0].Value))) - if err != nil { - return err - } + rerr = c.cache.Delete(reverseCacheKey(string(v[0].Value))) } - return c.cache.Delete(cacheKey(spaceID, nodeID)) + err = c.cache.Delete(cacheKey(spaceID, nodeID)) + if err != nil { + return err + } + return rerr } // DeleteByPath removes an entry from the cache diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options/options.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options/options.go index 60341a4b4..b5dadc883 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options/options.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options/options.go @@ -44,24 +44,28 @@ type Options struct { WatchType string `mapstructure:"watch_type"` WatchPath string `mapstructure:"watch_path"` WatchFolderKafkaBrokers string `mapstructure:"watch_folder_kafka_brokers"` + + // InotifyWatcher specific options + InotifyStatsFrequency time.Duration `mapstructure:"inotify_stats_frequency"` } // New returns a new Options 
instance for the given configuration func New(m map[string]interface{}) (*Options, error) { - o := &Options{} - if err := mapstructure.Decode(m, o); err != nil { - err = errors.Wrap(err, "error decoding conf") - return nil, err - } - // default to hybrid metadatabackend for posixfs if _, ok := m["metadata_backend"]; !ok { m["metadata_backend"] = "hybrid" } + if _, ok := m["scan_debounce_delay"]; !ok { + m["scan_debounce_delay"] = 10 * time.Millisecond + } + if _, ok := m["inotify_stats_frequency"]; !ok { + m["inotify_stats_frequency"] = 5 * time.Minute + } - // debounced scan delay - if o.ScanDebounceDelay == 0 { - o.ScanDebounceDelay = 10 * time.Millisecond + o := &Options{} + if err := mapstructure.Decode(m, o); err != nil { + err = errors.Wrap(err, "error decoding conf") + return nil, err } do, err := decomposedoptions.New(m) diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/trashbin/trashbin.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/trashbin/trashbin.go index b4cf11653..3b39a2a43 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/trashbin/trashbin.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/trashbin/trashbin.go @@ -164,17 +164,22 @@ func (tb *Trashbin) MoveToTrash(ctx context.Context, n *node.Node, path string) return err } - // purge metadata + // 1. "Forget" the node if err = tb.lu.IDCache.DeleteByPath(ctx, path); err != nil { return err } - err = tb.lu.MetadataBackend().Purge(ctx, n) + + // 2. Move the node to the trash + itemTrashPath := filepath.Join(trashPath, "files", key+".trashitem") + err = os.Rename(path, itemTrashPath) if err != nil { return err } - itemTrashPath := filepath.Join(trashPath, "files", key+".trashitem") - return os.Rename(path, itemTrashPath) + // 3. Purge the node from the metadata backend. 
This will not delete the xattrs from the + // node as it has already been moved but still remove it from the file metadata cache so + // that the metadata is no longer available when reading the node. + return tb.lu.MetadataBackend().Purge(ctx, n) } // ListRecycle returns the list of available recycle items @@ -315,7 +320,7 @@ func (tb *Trashbin) RestoreRecycleItem(ctx context.Context, spaceID string, key, } // TODO the decomposed trash also checks the permissions on the restore node - _, id, _, err := tb.lu.MetadataBackend().IdentifyPath(ctx, trashPath) + _, id, _, _, err := tb.lu.MetadataBackend().IdentifyPath(ctx, trashPath) if err != nil { return nil, err } @@ -325,7 +330,7 @@ func (tb *Trashbin) RestoreRecycleItem(ctx context.Context, spaceID string, key, } // update parent id in case it was restored to a different location - _, parentID, _, err := tb.lu.MetadataBackend().IdentifyPath(ctx, filepath.Dir(restorePath)) + _, parentID, _, _, err := tb.lu.MetadataBackend().IdentifyPath(ctx, filepath.Dir(restorePath)) if err != nil { return nil, err } diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/assimilation.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/assimilation.go index 73f683fff..b9be8a7f9 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/assimilation.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/assimilation.go @@ -243,17 +243,21 @@ func (t *Tree) Scan(path string, action EventAction, isDir bool) error { case ActionMoveFrom: t.log.Debug().Str("path", path).Bool("isDir", isDir).Msg("scanning path (ActionMoveFrom)") // 6. 
file/directory moved out of the watched directory - // -> update directory - err := t.HandleFileDelete(path) - if err != nil { - t.log.Error().Err(err).Str("path", path).Bool("isDir", isDir).Msg("failed to handle deleted item") - } - err = t.setDirty(filepath.Dir(path), true) - if err != nil { - t.log.Error().Err(err).Str("path", path).Bool("isDir", isDir).Msg("failed to mark directory as dirty") + // -> remove from caches + + // remember the id of the moved away item + spaceID, nodeID, err := t.lookup.IDsForPath(context.Background(), path) + if err == nil { + err = t.lookup.IDHistoryCache.Set(context.Background(), spaceID, nodeID, path) + if err != nil { + t.log.Error().Err(err).Str("path", path).Msg("failed to cache the id of the moved item") + } } - go func() { _ = t.WarmupIDCache(filepath.Dir(path), false, true) }() + err = t.HandleFileDelete(path, false) // Do not send a item-trashed SSE in case of moves. They trigger a item-renamed event instead. + if err != nil { + t.log.Error().Err(err).Str("path", path).Bool("isDir", isDir).Msg("failed to handle moved away item") + } case ActionDelete: t.log.Debug().Str("path", path).Bool("isDir", isDir).Msg("handling deleted item") @@ -261,7 +265,7 @@ func (t *Tree) Scan(path string, action EventAction, isDir bool) error { // 7. 
Deleted file or directory // -> update parent and all children - err := t.HandleFileDelete(path) + err := t.HandleFileDelete(path, true) if err != nil { t.log.Error().Err(err).Str("path", path).Bool("isDir", isDir).Msg("failed to handle deleted item") } @@ -276,12 +280,20 @@ func (t *Tree) Scan(path string, action EventAction, isDir bool) error { return nil } -func (t *Tree) HandleFileDelete(path string) error { +func (t *Tree) HandleFileDelete(path string, sendSSE bool) error { spaceID, id, err := t.lookup.IDsForPath(context.Background(), path) if err != nil { return err } n := node.NewBaseNode(spaceID, id, t.lookup) + if n.InternalPath() != path { + return fmt.Errorf("internal path does not match path") + } + _, err = os.Stat(path) + if err == nil || !os.IsNotExist(err) { + t.log.Info().Str("path", path).Msg("file that was about to be cleared still exists/exists again. We'll leave it alone") + return nil + } // purge metadata if err := t.lookup.IDCache.DeleteByPath(context.Background(), path); err != nil { @@ -291,6 +303,10 @@ func (t *Tree) HandleFileDelete(path string) error { t.log.Error().Err(err).Str("path", path).Msg("could not purge metadata") } + if !sendSSE { + return nil + } + parentNode, err := t.getNodeForPath(filepath.Dir(path)) if err != nil { return err @@ -355,6 +371,7 @@ func (t *Tree) findSpaceId(path string) (string, node.Attributes, error) { } func (t *Tree) assimilate(item scanItem) error { + t.log.Debug().Str("path", item.Path).Bool("rescan", item.ForceRescan).Bool("recurse", item.Recurse).Msg("assimilate") var err error // First find the space id @@ -383,17 +400,20 @@ func (t *Tree) assimilate(item scanItem) error { } // check for the id attribute again after grabbing the lock, maybe the file was assimilated/created by us in the meantime - _, id, mtime, err := t.lookup.MetadataBackend().IdentifyPath(context.Background(), item.Path) + _, id, parentID, mtime, err := t.lookup.MetadataBackend().IdentifyPath(context.Background(), item.Path) if 
err != nil { return err } if id != "" { // the file has an id set, we already know it from the past - n := node.NewBaseNode(spaceID, id, t.lookup) + // n := node.NewBaseNode(spaceID, id, t.lookup) previousPath, ok := t.lookup.GetCachedID(context.Background(), spaceID, id) - previousParentID, _ := t.lookup.MetadataBackend().Get(context.Background(), n, prefixes.ParentidAttr) + if previousPath == "" || !ok { + previousPath, ok = t.lookup.IDHistoryCache.Get(context.Background(), spaceID, id) + } + // previousParentID, _ := t.lookup.MetadataBackend().Get(context.Background(), n, prefixes.ParentidAttr) // compare metadata mtime with actual mtime. if it matches AND the path hasn't changed (move operation) // we can skip the assimilation because the file was handled by us @@ -405,7 +425,7 @@ func (t *Tree) assimilate(item scanItem) error { } // was it moved or copied/restored with a clashing id? - if ok && len(previousParentID) > 0 && previousPath != item.Path { + if ok && len(parentID) > 0 && previousPath != item.Path { _, err := os.Stat(previousPath) if err == nil { // this id clashes with an existing item -> clear metadata and re-assimilate @@ -445,13 +465,13 @@ func (t *Tree) assimilate(item scanItem) error { }() } - parentID := attrs.String(prefixes.ParentidAttr) + newParentID := attrs.String(prefixes.ParentidAttr) if len(parentID) > 0 { ref := &provider.Reference{ ResourceId: &provider.ResourceId{ StorageId: t.options.MountID, SpaceId: spaceID, - OpaqueId: parentID, + OpaqueId: newParentID, }, Path: filepath.Base(item.Path), } @@ -459,7 +479,7 @@ func (t *Tree) assimilate(item scanItem) error { ResourceId: &provider.ResourceId{ StorageId: t.options.MountID, SpaceId: spaceID, - OpaqueId: string(previousParentID), + OpaqueId: parentID, }, Path: filepath.Base(previousPath), } @@ -615,54 +635,56 @@ assimilate: n.SpaceRoot = &node.Node{BaseNode: node.BaseNode{SpaceID: spaceID, ID: spaceID}} - go func() { - // Copy the previous current version to a revision - currentNode 
:= node.NewBaseNode(n.SpaceID, n.ID+node.CurrentIDDelimiter, t.lookup) - currentPath := currentNode.InternalPath() - stat, err := os.Stat(currentPath) - if err != nil { - t.log.Error().Err(err).Str("path", path).Str("currentPath", currentPath).Msg("could not stat current path") - return - } - revisionPath := t.lookup.VersionPath(n.SpaceID, n.ID, stat.ModTime().UTC().Format(time.RFC3339Nano)) + if t.options.EnableFSRevisions { + go func() { + // Copy the previous current version to a revision + currentNode := node.NewBaseNode(n.SpaceID, n.ID+node.CurrentIDDelimiter, t.lookup) + currentPath := currentNode.InternalPath() + stat, err := os.Stat(currentPath) + if err != nil { + t.log.Error().Err(err).Str("path", path).Str("currentPath", currentPath).Msg("could not stat current path") + return + } + revisionPath := t.lookup.VersionPath(n.SpaceID, n.ID, stat.ModTime().UTC().Format(time.RFC3339Nano)) - err = os.Rename(currentPath, revisionPath) - if err != nil { - t.log.Error().Err(err).Str("path", path).Str("revisionPath", revisionPath).Msg("could not create revision") - return - } + err = os.Rename(currentPath, revisionPath) + if err != nil { + t.log.Error().Err(err).Str("path", path).Str("revisionPath", revisionPath).Msg("could not create revision") + return + } - // Copy the new version to the current version - w, err := os.OpenFile(currentPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600) - if err != nil { - t.log.Error().Err(err).Str("path", path).Str("currentPath", currentPath).Msg("could not open current path for writing") - return - } - defer w.Close() - r, err := os.OpenFile(n.InternalPath(), os.O_RDONLY, 0600) - if err != nil { - t.log.Error().Err(err).Str("path", path).Msg("could not open file for reading") - return - } - defer r.Close() + // Copy the new version to the current version + w, err := os.OpenFile(currentPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600) + if err != nil { + t.log.Error().Err(err).Str("path", path).Str("currentPath", 
currentPath).Msg("could not open current path for writing") + return + } + defer w.Close() + r, err := os.OpenFile(n.InternalPath(), os.O_RDONLY, 0600) + if err != nil { + t.log.Error().Err(err).Str("path", path).Msg("could not open file for reading") + return + } + defer r.Close() - _, err = io.Copy(w, r) - if err != nil { - t.log.Error().Err(err).Str("currentPath", currentPath).Str("path", path).Msg("could not copy new version to current version") - return - } + _, err = io.Copy(w, r) + if err != nil { + t.log.Error().Err(err).Str("currentPath", currentPath).Str("path", path).Msg("could not copy new version to current version") + return + } - err = t.lookup.CopyMetadata(context.Background(), n, currentNode, func(attributeName string, value []byte) (newValue []byte, copy bool) { - return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || - attributeName == prefixes.TypeAttr || - attributeName == prefixes.BlobIDAttr || - attributeName == prefixes.BlobsizeAttr - }, false) - if err != nil { - t.log.Error().Err(err).Str("currentPath", currentPath).Str("path", path).Msg("failed to copy xattrs to 'current' file") - return - } - }() + err = t.lookup.CopyMetadata(context.Background(), n, currentNode, func(attributeName string, value []byte) (newValue []byte, copy bool) { + return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || + attributeName == prefixes.TypeAttr || + attributeName == prefixes.BlobIDAttr || + attributeName == prefixes.BlobsizeAttr + }, false) + if err != nil { + t.log.Error().Err(err).Str("currentPath", currentPath).Str("path", path).Msg("failed to copy xattrs to 'current' file") + return + } + }() + } err = t.Propagate(context.Background(), n, 0) if err != nil { @@ -735,7 +757,7 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error { sizes[path] += 0 // Make sure to set the size to 0 for empty directories } - nodeSpaceID, id, _, err := 
t.lookup.MetadataBackend().IdentifyPath(context.Background(), path) + nodeSpaceID, id, _, _, err := t.lookup.MetadataBackend().IdentifyPath(context.Background(), path) if err == nil && len(id) > 0 { if len(nodeSpaceID) > 0 { spaceID = nodeSpaceID @@ -757,7 +779,7 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error { break } - spaceID, _, _, err = t.lookup.MetadataBackend().IdentifyPath(context.Background(), spaceCandidate) + spaceID, _, _, _, err = t.lookup.MetadataBackend().IdentifyPath(context.Background(), spaceCandidate) if err == nil && len(spaceID) > 0 { err = scopeSpace(path) if err != nil { @@ -791,7 +813,11 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error { t.log.Error().Err(err).Str("path", path).Msg("could not assimilate item") } } - return t.setDirty(path, false) + + if info.IsDir() { + return t.setDirty(path, false) + } + return nil }) for dir, size := range sizes { diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/inotifywatcher.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/inotifywatcher.go index e1a1332ec..eb2c965fb 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/inotifywatcher.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/inotifywatcher.go @@ -22,24 +22,40 @@ package tree import ( "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "time" + "github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options" "github.com/pablodz/inotifywaitgo/inotifywaitgo" "github.com/rs/zerolog" ) type InotifyWatcher struct { - tree *Tree - log *zerolog.Logger + tree *Tree + options *options.Options + log *zerolog.Logger } -func NewInotifyWatcher(tree *Tree, log *zerolog.Logger) (*InotifyWatcher, error) { +func NewInotifyWatcher(tree *Tree, o *options.Options, log *zerolog.Logger) (*InotifyWatcher, error) { return &InotifyWatcher{ - tree: tree, - log: log, + tree: tree, + options: o, + log: log, }, 
nil } func (iw *InotifyWatcher) Watch(path string) { + if iw.options.InotifyStatsFrequency > 0 { + go func() { + for { + iw.printStats() + time.Sleep(iw.options.InotifyStatsFrequency) + } + }() + } events := make(chan inotifywaitgo.FileEvent) errors := make(chan error) @@ -104,3 +120,119 @@ func (iw *InotifyWatcher) Watch(path string) { } } } + +// InotifyUsage holds the number of inotify watches and instances. +type InotifyUsage struct { + Watches int + Instances int + MaxWatches int + MaxInstances int +} + +func countInotifyFDs(pid string) (int, int, error) { + fds, err := os.ReadDir(filepath.Join("/proc", pid, "fd")) + if err != nil { + if os.IsNotExist(err) { + return 0, 0, nil // Process may have exited, treat as 0. + } + return 0, 0, fmt.Errorf("failed to read /proc/%s/fd: %w", pid, err) + } + + watches := 0 + instances := 0 + for _, fd := range fds { + if !fd.IsDir() { + if fd.Type()&os.ModeSymlink == 0 { + continue + } + + link, err := os.Readlink(filepath.Join("/proc", pid, "fd", fd.Name())) + if err != nil || (link != "inotify" && link != "anon_inode:inotify") { + continue + } + + instances++ + fdinfoPath := filepath.Join("/proc", pid, "fdinfo", fd.Name()) + content, err := os.ReadFile(fdinfoPath) + if err != nil { + return 0, 0, fmt.Errorf("failed to read %s: %w", fdinfoPath, err) + } + + lines := strings.SplitSeq(string(content), "\n") + for line := range lines { + if strings.HasPrefix(line, "inotify") { + watches++ + } + } + } + } + return watches, instances, nil +} + +func GetInotifyUsageFromProc() (InotifyUsage, error) { + usage := InotifyUsage{} + var err error + + usage.MaxWatches, err = readProcFile("sys/fs/inotify/max_user_watches") + if err != nil { + return usage, fmt.Errorf("failed to read max_user_watches: %w", err) + } + usage.MaxInstances, err = readProcFile("sys/fs/inotify/max_user_instances") + if err != nil { + return usage, fmt.Errorf("failed to read max_user_instances: %w", err) + } + + dirs, err := os.ReadDir("/proc") + if err != nil 
{ + return usage, fmt.Errorf("failed to read /proc: %w", err) + } + + totalWatches := 0 + totalInstances := 0 + for _, dir := range dirs { + if dir.IsDir() { + pid := dir.Name() + if _, err := strconv.Atoi(pid); err == nil { + watches, instances, err := countInotifyFDs(pid) + if err != nil { + continue + } + totalWatches += watches + totalInstances += instances + } + } + } + + usage.Watches = totalWatches + usage.Instances = totalInstances + return usage, nil +} + +func readProcFile(filename string) (int, error) { + filePath := filepath.Join("/proc", filename) + content, err := os.ReadFile(filePath) + if err != nil { + return 0, err + } + i, err := strconv.Atoi(strings.TrimSpace(string(content))) + if err != nil { + return 0, fmt.Errorf("failed to parse %s: %w", filename, err) + } + return i, nil +} + +func (iw *InotifyWatcher) printStats() { + t := time.Now() + usage, err := GetInotifyUsageFromProc() + if err != nil { + iw.log.Error().Err(err).Msg("failed to get inotify usage") + return + } + d := time.Since(t) + + iw.log.Info(). + Str("watches", fmt.Sprintf("%d/%d (%.2f%%)", usage.Watches, usage.MaxWatches, float64(usage.Watches)/float64(usage.MaxWatches)*100)). + Str("instances", fmt.Sprintf("%d/%d (%.2f%%)", usage.Instances, usage.MaxInstances, float64(usage.Instances)/float64(usage.MaxInstances)*100)). + Str("duration", d.String()). 
+ Msg("Inotify usage stats") +} diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/inotifywatcher_default.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/inotifywatcher_default.go index 96f83c4be..ebe5019a2 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/inotifywatcher_default.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/inotifywatcher_default.go @@ -7,6 +7,7 @@ package tree import ( "github.com/opencloud-eu/reva/v2/pkg/errtypes" + "github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options" "github.com/rs/zerolog" ) @@ -17,6 +18,6 @@ type NullWatcher struct{} func (*NullWatcher) Watch(path string) {} // NewInotifyWatcher returns a new inotify watcher -func NewInotifyWatcher(tree *Tree, log *zerolog.Logger) (*NullWatcher, error) { +func NewInotifyWatcher(_ *Tree, _ *options.Options, _ *zerolog.Logger) (*NullWatcher, error) { return nil, errtypes.NotSupported("inotify watcher is not supported on this platform") } diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/tree.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/tree.go index 590db5f10..41dcb9aed 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/tree.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/tree.go @@ -129,7 +129,7 @@ func New(lu node.PathLookup, bs node.Blobstore, um usermapper.Mapper, trashbin * return nil, err } default: - t.watcher, err = NewInotifyWatcher(t, log) + t.watcher, err = NewInotifyWatcher(t, o, log) if err != nil { return nil, err } @@ -299,9 +299,11 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) return errors.Wrap(err, "Decomposedfs: Move: error deleting target node "+newNode.ID) } } - - // we are moving the node to a new parent, any target has been removed - // bring old node to the new parent + oldParent := oldNode.ParentPath() + newParent := 
newNode.ParentPath() + if newNode.ID == "" { + newNode.ID = oldNode.ID + } // update target parentid and name attribs := node.Attributes{} @@ -311,29 +313,25 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) return errors.Wrap(err, "Decomposedfs: could not update old node attributes") } - // rename node - err = os.Rename( - filepath.Join(oldNode.ParentPath(), oldNode.Name), - filepath.Join(newNode.ParentPath(), newNode.Name), - ) - if err != nil { - return errors.Wrap(err, "Decomposedfs: could not move child") - } - // update the id cache - if newNode.ID == "" { - newNode.ID = oldNode.ID - } // invalidate old tree err = t.lookup.IDCache.DeleteByPath(ctx, filepath.Join(oldNode.ParentPath(), oldNode.Name)) if err != nil { return err } - if err := t.lookup.CacheID(ctx, newNode.SpaceID, newNode.ID, filepath.Join(newNode.ParentPath(), newNode.Name)); err != nil { t.log.Error().Err(err).Str("spaceID", newNode.SpaceID).Str("id", newNode.ID).Str("path", filepath.Join(newNode.ParentPath(), newNode.Name)).Msg("could not cache id") } + // rename node + err = os.Rename( + filepath.Join(oldParent, oldNode.Name), + filepath.Join(newParent, newNode.Name), + ) + if err != nil { + return errors.Wrap(err, "Decomposedfs: could not move child") + } + // rename the lock (if it exists) if _, err := os.Stat(lockFilePath); err == nil { err = os.Rename(lockFilePath, newNode.LockFilePath()) diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/hybrid_backend.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/hybrid_backend.go index 7f3244b81..6e299cbc5 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/hybrid_backend.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/hybrid_backend.go @@ -45,13 +45,14 @@ func NewHybridBackend(offloadLimit int, metadataPathFunc MetadataPathFunc, o cac func (HybridBackend) Name() string 
{ return "hybrid" } // IdentifyPath returns the space id, node id and mtime of a file -func (b HybridBackend) IdentifyPath(_ context.Context, path string) (string, string, time.Time, error) { +func (b HybridBackend) IdentifyPath(_ context.Context, path string) (string, string, string, time.Time, error) { spaceID, _ := xattr.Get(path, prefixes.SpaceIDAttr) id, _ := xattr.Get(path, prefixes.IDAttr) + parentID, _ := xattr.Get(path, prefixes.ParentidAttr) mtimeAttr, _ := xattr.Get(path, prefixes.MTimeAttr) mtime, _ := time.Parse(time.RFC3339Nano, string(mtimeAttr)) - return string(spaceID), string(id), mtime, nil + return string(spaceID), string(id), string(parentID), mtime, nil } // Get an extended attribute value for the given key diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/messagepack_backend.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/messagepack_backend.go index 40de764fe..023a184db 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/messagepack_backend.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/messagepack_backend.go @@ -54,33 +54,34 @@ func NewMessagePackBackend(o cache.Config) MessagePackBackend { func (MessagePackBackend) Name() string { return "messagepack" } // IdentifyPath returns the id and mtime of a file -func (b MessagePackBackend) IdentifyPath(_ context.Context, path string) (string, string, time.Time, error) { +func (b MessagePackBackend) IdentifyPath(_ context.Context, path string) (string, string, string, time.Time, error) { metaPath := filepath.Clean(path + ".mpk") source, err := os.Open(metaPath) // // No cached entry found. 
Read from storage and store in cache if err != nil { - return "", "", time.Time{}, err + return "", "", "", time.Time{}, err } msgBytes, err := io.ReadAll(source) if err != nil || len(msgBytes) == 0 { - return "", "", time.Time{}, err + return "", "", "", time.Time{}, err } attribs := map[string][]byte{} err = msgpack.Unmarshal(msgBytes, &attribs) if err != nil { - return "", "", time.Time{}, err + return "", "", "", time.Time{}, err } spaceID := attribs[prefixes.IDAttr] id := attribs[prefixes.IDAttr] + parentID := attribs[prefixes.ParentidAttr] mtimeAttr := attribs[prefixes.MTimeAttr] mtime, err := time.Parse(time.RFC3339Nano, string(mtimeAttr)) if err != nil { - return "", "", time.Time{}, err + return "", "", "", time.Time{}, err } - return string(spaceID), string(id), mtime, nil + return string(spaceID), string(id), string(parentID), mtime, nil } // All reads all extended attributes for a node diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/metadata.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/metadata.go index 715f03ff2..1350b6b5a 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/metadata.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/metadata.go @@ -47,7 +47,7 @@ type MetadataNode interface { // Backend defines the interface for file attribute backends type Backend interface { Name() string - IdentifyPath(ctx context.Context, path string) (string, string, time.Time, error) + IdentifyPath(ctx context.Context, path string) (string, string, string, time.Time, error) All(ctx context.Context, n MetadataNode) (map[string][]byte, error) AllWithLockedSource(ctx context.Context, n MetadataNode, source io.Reader) (map[string][]byte, error) @@ -74,8 +74,8 @@ type NullBackend struct{} func (NullBackend) Name() string { return "null" } // IdentifyPath returns the ids and mtime of a file -func (NullBackend) 
IdentifyPath(ctx context.Context, path string) (string, string, time.Time, error) { - return "", "", time.Time{}, errUnconfiguredError +func (NullBackend) IdentifyPath(ctx context.Context, path string) (string, string, string, time.Time, error) { + return "", "", "", time.Time{}, errUnconfiguredError } // All reads all extended attributes for a node diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/xattrs_backend.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/xattrs_backend.go index 8fbd4940e..259c47663 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/xattrs_backend.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/xattrs_backend.go @@ -51,13 +51,14 @@ func NewXattrsBackend(o cache.Config) XattrsBackend { func (XattrsBackend) Name() string { return "xattrs" } // IdentifyPath returns the space id, node id and mtime of a file -func (b XattrsBackend) IdentifyPath(_ context.Context, path string) (string, string, time.Time, error) { +func (b XattrsBackend) IdentifyPath(_ context.Context, path string) (string, string, string, time.Time, error) { spaceID, _ := xattr.Get(path, prefixes.SpaceIDAttr) id, _ := xattr.Get(path, prefixes.IDAttr) + parentID, _ := xattr.Get(path, prefixes.ParentidAttr) mtimeAttr, _ := xattr.Get(path, prefixes.MTimeAttr) mtime, _ := time.Parse(time.RFC3339Nano, string(mtimeAttr)) - return string(spaceID), string(id), mtime, nil + return string(spaceID), string(id), string(parentID), mtime, nil } // Get an extended attribute value for the given key diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/node.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/node.go index d087380cc..aabeed272 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/node.go +++ 
b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/node.go @@ -178,7 +178,9 @@ type BaseNode struct { SpaceID string ID string - lu PathLookup + lu PathLookup + internalPathID string + internalPath string } func NewBaseNode(spaceID, nodeID string, lu PathLookup) *BaseNode { @@ -194,7 +196,13 @@ func (n *BaseNode) GetID() string { return n.ID } // InternalPath returns the internal path of the Node func (n *BaseNode) InternalPath() string { - return n.lu.InternalPath(n.SpaceID, n.ID) + if len(n.internalPath) > 0 && n.ID == n.internalPathID { + return n.internalPath + } + + n.internalPath = n.lu.InternalPath(n.SpaceID, n.ID) + n.internalPathID = n.ID + return n.internalPath } // Node represents a node in the tree and provides methods to get a Parent or Child instance @@ -209,6 +217,7 @@ type Node struct { SpaceRoot *Node xattrsCache map[string][]byte + disabled *bool nodeType *provider.ResourceType } @@ -912,7 +921,7 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi ri.Opaque = utils.AppendPlainToOpaque(ri.Opaque, "scantime", date.Format(time.RFC3339Nano)) } - sublog.Debug(). + sublog.Trace(). Interface("ri", ri). 
Msg("AsResourceInfo") @@ -978,7 +987,7 @@ func (n *Node) readQuotaIntoOpaque(ctx context.Context, ri *provider.ResourceInf appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("nodepath", n.InternalPath()).Str("quota", v).Msg("malformed quota") } case metadata.IsAttrUnset(err): - appctx.GetLogger(ctx).Debug().Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("nodepath", n.InternalPath()).Msg("quota not set") + appctx.GetLogger(ctx).Trace().Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("nodepath", n.InternalPath()).Msg("quota not set") default: appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("nodepath", n.InternalPath()).Msg("could not read quota") } @@ -996,10 +1005,17 @@ func (n *Node) HasPropagation(ctx context.Context) (propagation bool) { // only used to check if a space is disabled // FIXME confusing with the trash logic func (n *Node) IsDisabled(ctx context.Context) bool { - if _, err := n.GetDTime(ctx); err == nil { - return true + if n.disabled != nil { + return *n.disabled } - return false + if _, err := n.GetDTime(ctx); err == nil { + v := true + n.disabled = &v + } else { + v := false + n.disabled = &v + } + return *n.disabled } // GetTreeSize reads the treesize from the extended attributes @@ -1116,7 +1132,7 @@ func (n *Node) ReadUserPermissions(ctx context.Context, u *userpb.User) (ap *pro } } - appctx.GetLogger(ctx).Debug().Interface("permissions", ap).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Interface("user", u).Msg("returning aggregated permissions") + appctx.GetLogger(ctx).Trace().Interface("permissions", ap).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Interface("user", u).Msg("returning aggregated permissions") return ap, false, nil } diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload.go index ed70486a7..2462eaa3b 100644 --- 
a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload.go @@ -73,8 +73,12 @@ func (fs *Decomposedfs) Upload(ctx context.Context, req storage.UploadRequest, u if err != nil { return &provider.ResourceInfo{}, errors.Wrap(err, "Decomposedfs: error opening assembled file") } - defer fd.Close() - defer os.RemoveAll(assembledFile) + defer func() { + _ = fd.Close() + }() + defer func() { + _ = os.RemoveAll(assembledFile) + }() req.Body = fd size, err := session.WriteChunk(ctx, 0, req.Body) @@ -347,6 +351,7 @@ func (fs *Decomposedfs) UseIn(composer *tusd.StoreComposer) { composer.UseTerminater(fs) composer.UseConcater(fs) composer.UseLengthDeferrer(fs) + composer.UseContentServer(fs) } // To implement the core tus.io protocol as specified in https://tus.io/protocols/resumable-upload.html#core-protocol @@ -354,10 +359,16 @@ func (fs *Decomposedfs) UseIn(composer *tusd.StoreComposer) { // - the upload needs to implement the tusd.Upload interface: WriteChunk, GetInfo, GetReader and FinishUpload // NewUpload returns a new tus Upload instance -func (fs *Decomposedfs) NewUpload(ctx context.Context, info tusd.FileInfo) (tusd.Upload, error) { +func (fs *Decomposedfs) NewUpload(_ context.Context, _ tusd.FileInfo) (tusd.Upload, error) { return nil, fmt.Errorf("not implemented, use InitiateUpload on the CS3 API to start a new upload") } +// AsServableUpload returns a ServableUpload +// which implements the tusd.ServableUpload interface and +func (fs *Decomposedfs) AsServableUpload(u tusd.Upload) tusd.ServableUpload { + return u.(*upload.DecomposedFsSession) +} + // GetUpload returns the Upload for the given upload id func (fs *Decomposedfs) GetUpload(ctx context.Context, id string) (tusd.Upload, error) { var ul tusd.Upload diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/session.go 
b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/session.go index d7223a3b1..2f5c5d9a2 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/session.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/session.go @@ -32,6 +32,7 @@ import ( userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" typespb "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" + "github.com/opencloud-eu/reva/v2/pkg/appctx" ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx" "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node" @@ -46,49 +47,49 @@ type DecomposedFsSession struct { } // Context returns a context with the user, logger and lockid used when initiating the upload session -func (s *DecomposedFsSession) Context(ctx context.Context) context.Context { // restore logger from file info - sub := s.store.log.With().Int("pid", os.Getpid()).Logger() +func (session *DecomposedFsSession) Context(ctx context.Context) context.Context { // restore logger from file info + sub := session.store.log.With().Int("pid", os.Getpid()).Logger() ctx = appctx.WithLogger(ctx, &sub) - ctx = ctxpkg.ContextSetLockID(ctx, s.lockID()) - ctx = ctxpkg.ContextSetUser(ctx, s.executantUser()) - return ctxpkg.ContextSetInitiator(ctx, s.InitiatorID()) + ctx = ctxpkg.ContextSetLockID(ctx, session.lockID()) + ctx = ctxpkg.ContextSetUser(ctx, session.executantUser()) + return ctxpkg.ContextSetInitiator(ctx, session.InitiatorID()) } -func (s *DecomposedFsSession) lockID() string { - return s.info.MetaData["lockid"] +func (session *DecomposedFsSession) lockID() string { + return session.info.MetaData["lockid"] } -func (s *DecomposedFsSession) executantUser() *userpb.User { +func (session *DecomposedFsSession) executantUser() *userpb.User { var o *typespb.Opaque - _ = json.Unmarshal([]byte(s.info.Storage["UserOpaque"]), &o) + _ = 
json.Unmarshal([]byte(session.info.Storage["UserOpaque"]), &o) return &userpb.User{ Id: &userpb.UserId{ - Type: userpb.UserType(userpb.UserType_value[s.info.Storage["UserType"]]), - Idp: s.info.Storage["Idp"], - OpaqueId: s.info.Storage["UserId"], + Type: userpb.UserType(userpb.UserType_value[session.info.Storage["UserType"]]), + Idp: session.info.Storage["Idp"], + OpaqueId: session.info.Storage["UserId"], }, - Username: s.info.Storage["UserName"], - DisplayName: s.info.Storage["UserDisplayName"], + Username: session.info.Storage["UserName"], + DisplayName: session.info.Storage["UserDisplayName"], Opaque: o, } } // Purge deletes the upload session metadata and written binary data -func (s *DecomposedFsSession) Purge(ctx context.Context) error { +func (session *DecomposedFsSession) Purge(ctx context.Context) error { _, span := tracer.Start(ctx, "Purge") defer span.End() - sessionPath := sessionPath(s.store.root, s.info.ID) + sessionPath := sessionPath(session.store.root, session.info.ID) if err := os.Remove(sessionPath); err != nil { return err } - if err := os.Remove(s.binPath()); err != nil { + if err := os.Remove(session.binPath()); err != nil { return err } return nil } // TouchBin creates a file to contain the binary data. It's size will be used to keep track of the tus upload offset. 
-func (s *DecomposedFsSession) TouchBin() error { - file, err := os.OpenFile(s.binPath(), os.O_CREATE|os.O_WRONLY, defaultFilePerm) +func (session *DecomposedFsSession) TouchBin() error { + file, err := os.OpenFile(session.binPath(), os.O_CREATE|os.O_WRONLY, defaultFilePerm) if err != nil { return err } @@ -98,17 +99,17 @@ func (s *DecomposedFsSession) TouchBin() error { // Persist writes the upload session metadata to disk // events can update the scan outcome and the finished event might read an empty file because of race conditions // so we need to lock the file while writing and use atomic writes -func (s *DecomposedFsSession) Persist(ctx context.Context) error { +func (session *DecomposedFsSession) Persist(ctx context.Context) error { _, span := tracer.Start(ctx, "Persist") defer span.End() - sessionPath := sessionPath(s.store.root, s.info.ID) + sessionPath := sessionPath(session.store.root, session.info.ID) // create folder structure (if needed) if err := os.MkdirAll(filepath.Dir(sessionPath), 0700); err != nil { return err } var d []byte - d, err := json.Marshal(s.info) + d, err := json.Marshal(session.info) if err != nil { return err } @@ -116,28 +117,28 @@ func (s *DecomposedFsSession) Persist(ctx context.Context) error { } // ToFileInfo returns tus compatible FileInfo so the tus handler can access the upload offset -func (s *DecomposedFsSession) ToFileInfo() tusd.FileInfo { - return s.info +func (session *DecomposedFsSession) ToFileInfo() tusd.FileInfo { + return session.info } // ProviderID returns the provider id -func (s *DecomposedFsSession) ProviderID() string { - return s.info.MetaData["providerID"] +func (session *DecomposedFsSession) ProviderID() string { + return session.info.MetaData["providerID"] } // SpaceID returns the space id -func (s *DecomposedFsSession) SpaceID() string { - return s.info.Storage["SpaceRoot"] +func (session *DecomposedFsSession) SpaceID() string { + return session.info.Storage["SpaceRoot"] } // NodeID returns the node id 
-func (s *DecomposedFsSession) NodeID() string { - return s.info.Storage["NodeId"] +func (session *DecomposedFsSession) NodeID() string { + return session.info.Storage["NodeId"] } // NodeParentID returns the nodes parent id -func (s *DecomposedFsSession) NodeParentID() string { - return s.info.Storage["NodeParentId"] +func (session *DecomposedFsSession) NodeParentID() string { + return session.info.Storage["NodeParentId"] } // NodeExists returns wether or not the node existed during InitiateUpload. @@ -148,63 +149,63 @@ func (s *DecomposedFsSession) NodeParentID() string { // A node should be created as part of InitiateUpload. When listing a directory // we can decide if we want to skip the entry, or expose uploed progress // information. But that is a bigger change and might involve client work. -func (s *DecomposedFsSession) NodeExists() bool { - return s.info.Storage["NodeExists"] == "true" +func (session *DecomposedFsSession) NodeExists() bool { + return session.info.Storage["NodeExists"] == "true" } // HeaderIfMatch returns the if-match header for the upload session -func (s *DecomposedFsSession) HeaderIfMatch() string { - return s.info.MetaData["if-match"] +func (session *DecomposedFsSession) HeaderIfMatch() string { + return session.info.MetaData["if-match"] } // HeaderIfNoneMatch returns the if-none-match header for the upload session -func (s *DecomposedFsSession) HeaderIfNoneMatch() string { - return s.info.MetaData["if-none-match"] +func (session *DecomposedFsSession) HeaderIfNoneMatch() string { + return session.info.MetaData["if-none-match"] } // HeaderIfUnmodifiedSince returns the if-unmodified-since header for the upload session -func (s *DecomposedFsSession) HeaderIfUnmodifiedSince() string { - return s.info.MetaData["if-unmodified-since"] +func (session *DecomposedFsSession) HeaderIfUnmodifiedSince() string { + return session.info.MetaData["if-unmodified-since"] } // Node returns the node for the session -func (s *DecomposedFsSession) Node(ctx 
context.Context) (*node.Node, error) { - return node.ReadNode(ctx, s.store.lu, s.SpaceID(), s.info.Storage["NodeId"], false, nil, true) +func (session *DecomposedFsSession) Node(ctx context.Context) (*node.Node, error) { + return node.ReadNode(ctx, session.store.lu, session.SpaceID(), session.info.Storage["NodeId"], false, nil, true) } // ID returns the upload session id -func (s *DecomposedFsSession) ID() string { - return s.info.ID +func (session *DecomposedFsSession) ID() string { + return session.info.ID } // Filename returns the name of the node which is not the same as the name af the file being uploaded for legacy chunked uploads -func (s *DecomposedFsSession) Filename() string { - return s.info.Storage["NodeName"] +func (session *DecomposedFsSession) Filename() string { + return session.info.Storage["NodeName"] } // Chunk returns the chunk name when a legacy chunked upload was started -func (s *DecomposedFsSession) Chunk() string { - return s.info.Storage["Chunk"] +func (session *DecomposedFsSession) Chunk() string { + return session.info.Storage["Chunk"] } // SetMetadata is used to fill the upload metadata that will be exposed to the end user -func (s *DecomposedFsSession) SetMetadata(key, value string) { - s.info.MetaData[key] = value +func (session *DecomposedFsSession) SetMetadata(key, value string) { + session.info.MetaData[key] = value } // SetStorageValue is used to set metadata only relevant for the upload session implementation -func (s *DecomposedFsSession) SetStorageValue(key, value string) { - s.info.Storage[key] = value +func (session *DecomposedFsSession) SetStorageValue(key, value string) { + session.info.Storage[key] = value } // SetSize will set the upload size of the underlying tus info. -func (s *DecomposedFsSession) SetSize(size int64) { - s.info.Size = size +func (session *DecomposedFsSession) SetSize(size int64) { + session.info.Size = size } // SetSizeIsDeferred is uset to change the SizeIsDeferred property of the underlying tus info. 
-func (s *DecomposedFsSession) SetSizeIsDeferred(value bool) { - s.info.SizeIsDeferred = value +func (session *DecomposedFsSession) SetSizeIsDeferred(value bool) { + session.info.SizeIsDeferred = value } // Dir returns the directory to which the upload is made @@ -227,115 +228,115 @@ func (s *DecomposedFsSession) SetSizeIsDeferred(value bool) { // // I think we can safely determine the path later, right before emitting the // event. And maybe make it configurable, because only audit needs it, anyway. -func (s *DecomposedFsSession) Dir() string { - return s.info.Storage["Dir"] +func (session *DecomposedFsSession) Dir() string { + return session.info.Storage["Dir"] } // Size returns the upload size -func (s *DecomposedFsSession) Size() int64 { - return s.info.Size +func (session *DecomposedFsSession) Size() int64 { + return session.info.Size } // SizeDiff returns the size diff that was calculated after postprocessing -func (s *DecomposedFsSession) SizeDiff() int64 { - sizeDiff, _ := strconv.ParseInt(s.info.MetaData["sizeDiff"], 10, 64) +func (session *DecomposedFsSession) SizeDiff() int64 { + sizeDiff, _ := strconv.ParseInt(session.info.MetaData["sizeDiff"], 10, 64) return sizeDiff } // Reference returns a reference that can be used to access the uploaded resource -func (s *DecomposedFsSession) Reference() provider.Reference { +func (session *DecomposedFsSession) Reference() provider.Reference { return provider.Reference{ ResourceId: &provider.ResourceId{ - StorageId: s.info.MetaData["providerID"], - SpaceId: s.info.Storage["SpaceRoot"], - OpaqueId: s.info.Storage["NodeId"], + StorageId: session.info.MetaData["providerID"], + SpaceId: session.info.Storage["SpaceRoot"], + OpaqueId: session.info.Storage["NodeId"], }, // Path is not used } } // Executant returns the id of the user that initiated the upload session -func (s *DecomposedFsSession) Executant() userpb.UserId { +func (session *DecomposedFsSession) Executant() userpb.UserId { return userpb.UserId{ - Type: 
userpb.UserType(userpb.UserType_value[s.info.Storage["UserType"]]), - Idp: s.info.Storage["Idp"], - OpaqueId: s.info.Storage["UserId"], + Type: userpb.UserType(userpb.UserType_value[session.info.Storage["UserType"]]), + Idp: session.info.Storage["Idp"], + OpaqueId: session.info.Storage["UserId"], } } // SetExecutant is used to remember the user that initiated the upload session -func (s *DecomposedFsSession) SetExecutant(u *userpb.User) { - s.info.Storage["Idp"] = u.GetId().GetIdp() - s.info.Storage["UserId"] = u.GetId().GetOpaqueId() - s.info.Storage["UserType"] = utils.UserTypeToString(u.GetId().Type) - s.info.Storage["UserName"] = u.GetUsername() - s.info.Storage["UserDisplayName"] = u.GetDisplayName() +func (session *DecomposedFsSession) SetExecutant(u *userpb.User) { + session.info.Storage["Idp"] = u.GetId().GetIdp() + session.info.Storage["UserId"] = u.GetId().GetOpaqueId() + session.info.Storage["UserType"] = utils.UserTypeToString(u.GetId().Type) + session.info.Storage["UserName"] = u.GetUsername() + session.info.Storage["UserDisplayName"] = u.GetDisplayName() b, _ := json.Marshal(u.GetOpaque()) - s.info.Storage["UserOpaque"] = string(b) + session.info.Storage["UserOpaque"] = string(b) } // Offset returns the current upload offset -func (s *DecomposedFsSession) Offset() int64 { - return s.info.Offset +func (session *DecomposedFsSession) Offset() int64 { + return session.info.Offset } // SpaceOwner returns the id of the space owner -func (s *DecomposedFsSession) SpaceOwner() *userpb.UserId { +func (session *DecomposedFsSession) SpaceOwner() *userpb.UserId { return &userpb.UserId{ // idp and type do not seem to be consumed and the node currently only stores the user id anyway - OpaqueId: s.info.Storage["SpaceOwnerOrManager"], + OpaqueId: session.info.Storage["SpaceOwnerOrManager"], } } // Expires returns the time the upload session expires -func (s *DecomposedFsSession) Expires() time.Time { +func (session *DecomposedFsSession) Expires() time.Time { var t 
time.Time - if value, ok := s.info.MetaData["expires"]; ok { + if value, ok := session.info.MetaData["expires"]; ok { t, _ = utils.MTimeToTime(value) } return t } // MTime returns the mtime to use for the uploaded file -func (s *DecomposedFsSession) MTime() time.Time { +func (session *DecomposedFsSession) MTime() time.Time { var t time.Time - if value, ok := s.info.MetaData["mtime"]; ok { + if value, ok := session.info.MetaData["mtime"]; ok { t, _ = utils.MTimeToTime(value) } return t } // IsProcessing returns true if all bytes have been received. The session then has entered postprocessing state. -func (s *DecomposedFsSession) IsProcessing() bool { +func (session *DecomposedFsSession) IsProcessing() bool { // We might need a more sophisticated way to determine processing status soon - return s.info.Size == s.info.Offset && s.info.MetaData["scanResult"] == "" + return session.info.Size == session.info.Offset && session.info.MetaData["scanResult"] == "" } // binPath returns the path to the file storing the binary data. 
-func (s *DecomposedFsSession) binPath() string { - return filepath.Join(s.store.root, "uploads", s.info.ID) +func (session *DecomposedFsSession) binPath() string { + return filepath.Join(session.store.root, "uploads", session.info.ID) } // InitiatorID returns the id of the initiating client -func (s *DecomposedFsSession) InitiatorID() string { - return s.info.MetaData["initiatorid"] +func (session *DecomposedFsSession) InitiatorID() string { + return session.info.MetaData["initiatorid"] } // SetScanData sets virus scan data to the upload session -func (s *DecomposedFsSession) SetScanData(result string, date time.Time) { - s.info.MetaData["scanResult"] = result - s.info.MetaData["scanDate"] = date.Format(time.RFC3339) +func (session *DecomposedFsSession) SetScanData(result string, date time.Time) { + session.info.MetaData["scanResult"] = result + session.info.MetaData["scanDate"] = date.Format(time.RFC3339) } // ScanData returns the virus scan data -func (s *DecomposedFsSession) ScanData() (string, time.Time) { - date := s.info.MetaData["scanDate"] +func (session *DecomposedFsSession) ScanData() (string, time.Time) { + date := session.info.MetaData["scanDate"] if date == "" { return "", time.Time{} } d, _ := time.Parse(time.RFC3339, date) - return s.info.MetaData["scanResult"], d + return session.info.MetaData["scanResult"], d } // sessionPath returns the path to the .info file storing the file's info. 
diff --git a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/upload.go b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/upload.go index c37f5216b..1b334a48e 100644 --- a/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/upload.go +++ b/vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/upload.go @@ -44,15 +44,15 @@ import ( "github.com/opencloud-eu/reva/v2/pkg/errtypes" "github.com/opencloud-eu/reva/v2/pkg/events" "github.com/opencloud-eu/reva/v2/pkg/rhttp/datatx/metrics" + "github.com/opencloud-eu/reva/v2/pkg/rhttp/datatx/utils/download" "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes" "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node" "github.com/opencloud-eu/reva/v2/pkg/utils" ) var ( - tracer trace.Tracer - ErrAlreadyExists = tusd.NewError("ERR_ALREADY_EXISTS", "file already exists", http.StatusConflict) - defaultFilePerm = os.FileMode(0664) + tracer trace.Tracer + defaultFilePerm = os.FileMode(0664) ) func init() { @@ -60,7 +60,7 @@ func init() { } // WriteChunk writes the stream from the reader to the given offset of the upload -func (session *DecomposedFsSession) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) { +func (session *DecomposedFsSession) WriteChunk(ctx context.Context, _ int64, src io.Reader) (int64, error) { ctx, span := tracer.Start(session.Context(ctx), "WriteChunk") defer span.End() _, subspan := tracer.Start(ctx, "os.OpenFile") @@ -69,7 +69,9 @@ func (session *DecomposedFsSession) WriteChunk(ctx context.Context, offset int64 if err != nil { return 0, err } - defer file.Close() + defer func() { + _ = file.Close() + }() // calculate cheksum here? needed for the TUS checksum extension. https://tus.io/protocols/resumable-upload.html#checksum // TODO but how do we get the `Upload-Checksum`? WriteChunk() only has a context, offset and the reader ... 
@@ -259,7 +261,9 @@ func (session *DecomposedFsSession) ConcatUploads(_ context.Context, uploads []t if err != nil { return err } - defer file.Close() + defer func() { + _ = file.Close() + }() for _, partialUpload := range uploads { fileUpload := partialUpload.(*DecomposedFsSession) @@ -268,7 +272,9 @@ func (session *DecomposedFsSession) ConcatUploads(_ context.Context, uploads []t if err != nil { return err } - defer src.Close() + defer func() { + _ = src.Close() + }() if _, err := io.Copy(file, src); err != nil { return err @@ -298,9 +304,9 @@ func (session *DecomposedFsSession) Finalize(ctx context.Context) (err error) { } func checkHash(expected string, h hash.Hash) error { - hash := hex.EncodeToString(h.Sum(nil)) - if expected != hash { - return errtypes.ChecksumMismatch(fmt.Sprintf("invalid checksum: expected %s got %x", expected, hash)) + shash := hex.EncodeToString(h.Sum(nil)) + if expected != shash { + return errtypes.ChecksumMismatch(fmt.Sprintf("invalid checksum: expected %s got %x", expected, shash)) } return nil } @@ -399,6 +405,57 @@ func (session *DecomposedFsSession) URL(_ context.Context) (string, error) { return joinurl(session.store.tknopts.DataGatewayEndpoint, tkn), nil } +// ServeContent serves the content of the upload and implements the http.ServeContent interface needed by tusd, +// it is used by the tusd handler to serve the content of the upload and supports range requests +func (session *DecomposedFsSession) ServeContent(ctx context.Context, w http.ResponseWriter, req *http.Request) error { + _, span := tracer.Start(session.Context(ctx), "ServeContent") + defer span.End() + + f, err := os.Open(session.binPath()) + if err != nil { + return err + } + defer func() { + _ = f.Close() + }() + + info, err := f.Stat() + if err != nil { + return err + } + + var r io.Reader = f + if err := func() error { + if req.Header.Get("Range") == "" { + return nil + } + + ranges, err := download.ParseRange(req.Header.Get("Range"), info.Size()) + switch { + 
case len(ranges) == 0: + fallthrough + case errors.Is(err, download.ErrInvalidRange): + // ignore invalid range and return the whole file + return nil + case err != nil: + return err + } + + r = io.NewSectionReader(f, ranges[0].Start, ranges[0].Length) + w.WriteHeader(http.StatusPartialContent) + w.Header().Set("Content-Range", ranges[0].ContentRange(info.Size())) + return nil + }(); err != nil { + return err + } + + if _, err := io.Copy(w, r); err != nil { + return err + } + + return nil +} + // replace with url.JoinPath after switching to go1.19 func joinurl(paths ...string) string { var s strings.Builder diff --git a/vendor/github.com/tus/tusd/v2/pkg/handler/unrouted_handler.go b/vendor/github.com/tus/tusd/v2/pkg/handler/unrouted_handler.go index 1e1998a79..9cb7be627 100644 --- a/vendor/github.com/tus/tusd/v2/pkg/handler/unrouted_handler.go +++ b/vendor/github.com/tus/tusd/v2/pkg/handler/unrouted_handler.go @@ -34,7 +34,6 @@ const ( var ( reForwardedHost = regexp.MustCompile(`host="?([^;"]+)`) reForwardedProto = regexp.MustCompile(`proto=(https?)`) - reMimeType = regexp.MustCompile(`^[a-z]+\/[a-z0-9\-\+\.]+$`) // We only allow certain URL-safe characters in upload IDs. URL-safe in this means // that their are allowed in a URI's path component according to RFC 3986. // See https://datatracker.ietf.org/doc/html/rfc3986#section-3.3 @@ -1104,7 +1103,10 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) } handler.sendResp(c, resp) - io.Copy(w, src) + if _, err := io.Copy(w, src); err != nil { + handler.sendError(c, err) + return + } src.Close() } @@ -1112,9 +1114,9 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) // mimeInlineBrowserWhitelist is a map containing MIME types which should be // allowed to be rendered by browser inline, instead of being forced to be // downloaded. For example, HTML or SVG files are not allowed, since they may -// contain malicious JavaScript. 
In a similiar fashion PDF is not on this list +// contain malicious JavaScript. In a similar fashion, PDF is not on this list // as their parsers commonly contain vulnerabilities which can be exploited. -// The values of this map does not convey any meaning and are therefore just +// The values of this map do not convey any meaning and are therefore just // empty structs. var mimeInlineBrowserWhitelist = map[string]struct{}{ "text/plain": {}, @@ -1125,14 +1127,17 @@ var mimeInlineBrowserWhitelist = map[string]struct{}{ "image/bmp": {}, "image/webp": {}, - "audio/wave": {}, - "audio/wav": {}, - "audio/x-wav": {}, - "audio/x-pn-wav": {}, - "audio/webm": {}, - "video/webm": {}, - "audio/ogg": {}, - "video/ogg": {}, + "audio/wave": {}, + "audio/wav": {}, + "audio/x-wav": {}, + "audio/x-pn-wav": {}, + "audio/webm": {}, + "audio/ogg": {}, + + "video/mp4": {}, + "video/webm": {}, + "video/ogg": {}, + "application/ogg": {}, } @@ -1140,23 +1145,22 @@ var mimeInlineBrowserWhitelist = map[string]struct{}{ // Content-Disposition headers for a given upload. These values should be used // in responses for GET requests to ensure that only non-malicious file types // are shown directly in the browser. It will extract the file name and type -// from the "fileame" and "filetype". +// from the "filename" and "filetype". // See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition func filterContentType(info FileInfo) (contentType string, contentDisposition string) { filetype := info.MetaData["filetype"] - if reMimeType.MatchString(filetype) { - // If the filetype from metadata is well formed, we forward use this - // for the Content-Type header. However, only whitelisted mime types - // will be allowed to be shown inline in the browser + if ft, _, err := mime.ParseMediaType(filetype); err == nil { + // If the filetype from metadata is well-formed, we forward use this for the Content-Type header. 
+ // However, only allowlisted mime types will be allowed to be shown inline in the browser contentType = filetype - if _, isWhitelisted := mimeInlineBrowserWhitelist[filetype]; isWhitelisted { + if _, isWhitelisted := mimeInlineBrowserWhitelist[ft]; isWhitelisted { contentDisposition = "inline" } else { contentDisposition = "attachment" } } else { - // If the filetype from the metadata is not well formed, we use a + // If the filetype from the metadata is not well-formed, we use a // default type and force the browser to download the content. contentType = "application/octet-stream" contentDisposition = "attachment" diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go index a6c647013..7b93f692b 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go @@ -44,15 +44,19 @@ var ( // // It implements the [resolver.Resolver] interface. type delegatingResolver struct { - target resolver.Target // parsed target URI to be resolved - cc resolver.ClientConn // gRPC ClientConn - targetResolver resolver.Resolver // resolver for the target URI, based on its scheme - proxyResolver resolver.Resolver // resolver for the proxy URI; nil if no proxy is configured - proxyURL *url.URL // proxy URL, derived from proxy environment and target + target resolver.Target // parsed target URI to be resolved + cc resolver.ClientConn // gRPC ClientConn + proxyURL *url.URL // proxy URL, derived from proxy environment and target mu sync.Mutex // protects all the fields below targetResolverState *resolver.State // state of the target resolver proxyAddrs []resolver.Address // resolved proxy addresses; empty if no proxy is configured + + // childMu serializes calls into child resolvers. 
It also protects access to + // the following fields. + childMu sync.Mutex + targetResolver resolver.Resolver // resolver for the target URI, based on its scheme + proxyResolver resolver.Resolver // resolver for the proxy URI; nil if no proxy is configured } // nopResolver is a resolver that does nothing. @@ -111,6 +115,10 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti logger.Infof("Proxy URL detected : %s", r.proxyURL) } + // Resolver updates from one child may trigger calls into the other. Block + // updates until the children are initialized. + r.childMu.Lock() + defer r.childMu.Unlock() // When the scheme is 'dns' and target resolution on client is not enabled, // resolution should be handled by the proxy, not the client. Therefore, we // bypass the target resolver and store the unresolved target address. @@ -165,11 +173,15 @@ func (r *delegatingResolver) proxyURIResolver(opts resolver.BuildOptions) (resol } func (r *delegatingResolver) ResolveNow(o resolver.ResolveNowOptions) { + r.childMu.Lock() + defer r.childMu.Unlock() r.targetResolver.ResolveNow(o) r.proxyResolver.ResolveNow(o) } func (r *delegatingResolver) Close() { + r.childMu.Lock() + defer r.childMu.Unlock() r.targetResolver.Close() r.targetResolver = nil @@ -267,11 +279,17 @@ func (r *delegatingResolver) updateProxyResolverState(state resolver.State) erro err := r.updateClientConnStateLocked() // Another possible approach was to block until updates are received from // both resolvers. But this is not used because calling `New()` triggers - // `Build()` for the first resolver, which calls `UpdateState()`. And the + // `Build()` for the first resolver, which calls `UpdateState()`. And the // second resolver hasn't sent an update yet, so it would cause `New()` to // block indefinitely. 
if err != nil { - r.targetResolver.ResolveNow(resolver.ResolveNowOptions{}) + go func() { + r.childMu.Lock() + defer r.childMu.Unlock() + if r.targetResolver != nil { + r.targetResolver.ResolveNow(resolver.ResolveNowOptions{}) + } + }() } return err } @@ -291,7 +309,13 @@ func (r *delegatingResolver) updateTargetResolverState(state resolver.State) err r.targetResolverState = &state err := r.updateClientConnStateLocked() if err != nil { - r.proxyResolver.ResolveNow(resolver.ResolveNowOptions{}) + go func() { + r.childMu.Lock() + defer r.childMu.Unlock() + if r.proxyResolver != nil { + r.proxyResolver.ResolveNow(resolver.ResolveNowOptions{}) + } + }() } return nil } diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index ada5b9bb7..975b49970 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -18,6 +18,12 @@ package resolver +import ( + "encoding/base64" + "sort" + "strings" +) + type addressMapEntry struct { addr Address value any @@ -137,66 +143,61 @@ func (a *AddressMap) Values() []any { return ret } -type endpointNode struct { - addrs map[string]struct{} -} - -// Equal returns whether the unordered set of addrs are the same between the -// endpoint nodes. -func (en *endpointNode) Equal(en2 *endpointNode) bool { - if len(en.addrs) != len(en2.addrs) { - return false - } - for addr := range en.addrs { - if _, ok := en2.addrs[addr]; !ok { - return false - } - } - return true -} - -func toEndpointNode(endpoint Endpoint) endpointNode { - en := make(map[string]struct{}) - for _, addr := range endpoint.Addresses { - en[addr.Addr] = struct{}{} - } - return endpointNode{ - addrs: en, - } -} +type endpointMapKey string // EndpointMap is a map of endpoints to arbitrary values keyed on only the // unordered set of address strings within an endpoint. This map is not thread // safe, thus it is unsafe to access concurrently. 
Must be created via // NewEndpointMap; do not construct directly. type EndpointMap struct { - endpoints map[*endpointNode]any + endpoints map[endpointMapKey]endpointData +} + +type endpointData struct { + // decodedKey stores the original key to avoid decoding when iterating on + // EndpointMap keys. + decodedKey Endpoint + value any } // NewEndpointMap creates a new EndpointMap. func NewEndpointMap() *EndpointMap { return &EndpointMap{ - endpoints: make(map[*endpointNode]any), + endpoints: make(map[endpointMapKey]endpointData), } } +// encodeEndpoint returns a string that uniquely identifies the unordered set of +// addresses within an endpoint. +func encodeEndpoint(e Endpoint) endpointMapKey { + addrs := make([]string, 0, len(e.Addresses)) + // base64 encoding the address strings restricts the characters present + // within the strings. This allows us to use a delimiter without the need of + // escape characters. + for _, addr := range e.Addresses { + addrs = append(addrs, base64.StdEncoding.EncodeToString([]byte(addr.Addr))) + } + sort.Strings(addrs) + // " " should not appear in base64 encoded strings. + return endpointMapKey(strings.Join(addrs, " ")) +} + // Get returns the value for the address in the map, if present. func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { - en := toEndpointNode(e) - if endpoint := em.find(en); endpoint != nil { - return em.endpoints[endpoint], true + val, found := em.endpoints[encodeEndpoint(e)] + if found { + return val.value, true } return nil, false } // Set updates or adds the value to the address in the map. func (em *EndpointMap) Set(e Endpoint, value any) { - en := toEndpointNode(e) - if endpoint := em.find(en); endpoint != nil { - em.endpoints[endpoint] = value - return + en := encodeEndpoint(e) + em.endpoints[en] = endpointData{ + decodedKey: Endpoint{Addresses: e.Addresses}, + value: value, } - em.endpoints[&en] = value } // Len returns the number of entries in the map. 
@@ -211,12 +212,8 @@ func (em *EndpointMap) Len() int { // used for EndpointMap accesses. func (em *EndpointMap) Keys() []Endpoint { ret := make([]Endpoint, 0, len(em.endpoints)) - for en := range em.endpoints { - var endpoint Endpoint - for addr := range en.addrs { - endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) - } - ret = append(ret, endpoint) + for _, en := range em.endpoints { + ret = append(ret, en.decodedKey) } return ret } @@ -225,27 +222,13 @@ func (em *EndpointMap) Keys() []Endpoint { func (em *EndpointMap) Values() []any { ret := make([]any, 0, len(em.endpoints)) for _, val := range em.endpoints { - ret = append(ret, val) + ret = append(ret, val.value) } return ret } -// find returns a pointer to the endpoint node in em if the endpoint node is -// already present. If not found, nil is returned. The comparisons are done on -// the unordered set of addresses within an endpoint. -func (em EndpointMap) find(e endpointNode) *endpointNode { - for endpoint := range em.endpoints { - if e.Equal(endpoint) { - return endpoint - } - } - return nil -} - // Delete removes the specified endpoint from the map. 
func (em *EndpointMap) Delete(e Endpoint) { - en := toEndpointNode(e) - if entry := em.find(en); entry != nil { - delete(em.endpoints, entry) - } + en := encodeEndpoint(e) + delete(em.endpoints, en) } diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index 945e24ff8..80e16a327 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -134,12 +134,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { return nil } if s.Endpoints == nil { - s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) - for _, a := range s.Addresses { - ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} - ep.Addresses[0].BalancerAttributes = nil - s.Endpoints = append(s.Endpoints, ep) - } + s.Endpoints = addressesToEndpoints(s.Addresses) } ccr.addChannelzTraceEvent(s) ccr.curState = s @@ -172,7 +167,11 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { ccr.cc.mu.Unlock() return } - s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} + s := resolver.State{ + Addresses: addrs, + ServiceConfig: ccr.curState.ServiceConfig, + Endpoints: addressesToEndpoints(addrs), + } ccr.addChannelzTraceEvent(s) ccr.curState = s ccr.mu.Unlock() @@ -210,3 +209,13 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } + +func addressesToEndpoints(addrs []resolver.Address) []resolver.Endpoint { + endpoints := make([]resolver.Endpoint, 0, len(addrs)) + for _, a := range addrs { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + endpoints = append(endpoints, ep) + } + return endpoints +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go 
b/vendor/google.golang.org/grpc/rpc_util.go index a8ddb0af5..ad20e9dff 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -870,13 +870,19 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompress return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the message: %v", err) } - out, err := mem.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)), pool) + // Read at most one byte more than the limit from the decompressor. + // Unless the limit is MaxInt64, in which case, that's impossible, so + // apply no limit. + if limit := int64(maxReceiveMessageSize); limit < math.MaxInt64 { + dcReader = io.LimitReader(dcReader, limit+1) + } + out, err := mem.ReadAll(dcReader, pool) if err != nil { out.Free() return nil, status.Errorf(codes.Internal, "grpc: failed to read decompressed data: %v", err) } - if out.Len() == maxReceiveMessageSize && !atEOF(dcReader) { + if out.Len() > maxReceiveMessageSize { out.Free() return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max %d", maxReceiveMessageSize) } @@ -885,12 +891,6 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompress return nil, status.Errorf(codes.Internal, "grpc: no decompressor available for compressed payload") } -// atEOF reads data from r and returns true if zero bytes could be read and r.Read returns EOF. -func atEOF(dcReader io.Reader) bool { - n, err := dcReader.Read(make([]byte, 1)) - return n == 0 && err == io.EOF -} - type recvCompressor interface { RecvCompress() string } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 783c41f78..3c148a814 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.71.0" +const Version = "1.71.1" diff --git a/vendor/modules.txt b/vendor/modules.txt index 6ba60bc55..ae6536bbd 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -844,8 +844,8 @@ github.com/klauspost/compress/s2 github.com/klauspost/compress/snappy github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash -# github.com/klauspost/cpuid/v2 v2.2.9 -## explicit; go 1.20 +# github.com/klauspost/cpuid/v2 v2.2.10 +## explicit; go 1.22 github.com/klauspost/cpuid/v2 # github.com/kovidgoyal/imaging v1.6.4 ## explicit; go 1.21 @@ -922,7 +922,7 @@ github.com/mattn/go-isatty # github.com/mattn/go-runewidth v0.0.16 ## explicit; go 1.9 github.com/mattn/go-runewidth -# github.com/mattn/go-sqlite3 v1.14.24 +# github.com/mattn/go-sqlite3 v1.14.27 ## explicit; go 1.19 github.com/mattn/go-sqlite3 # github.com/maxymania/go-system v0.0.0-20170110133659-647cc364bf0b @@ -947,8 +947,8 @@ github.com/minio/highwayhash # github.com/minio/md5-simd v1.1.2 ## explicit; go 1.14 github.com/minio/md5-simd -# github.com/minio/minio-go/v7 v7.0.88 -## explicit; go 1.22 +# github.com/minio/minio-go/v7 v7.0.89 +## explicit; go 1.23.0 github.com/minio/minio-go/v7 github.com/minio/minio-go/v7/pkg/cors github.com/minio/minio-go/v7/pkg/credentials @@ -1198,7 +1198,7 @@ github.com/open-policy-agent/opa/v1/types github.com/open-policy-agent/opa/v1/util github.com/open-policy-agent/opa/v1/util/decoding github.com/open-policy-agent/opa/v1/version -# github.com/opencloud-eu/reva/v2 v2.29.1 +# github.com/opencloud-eu/reva/v2 v2.31.0 ## explicit; go 1.24.1 github.com/opencloud-eu/reva/v2/cmd/revad/internal/grace github.com/opencloud-eu/reva/v2/cmd/revad/runtime @@ -1858,7 +1858,7 @@ github.com/trustelem/zxcvbn/internal/mathutils github.com/trustelem/zxcvbn/match github.com/trustelem/zxcvbn/matching github.com/trustelem/zxcvbn/scoring -# github.com/tus/tusd/v2 v2.7.1 +# github.com/tus/tusd/v2 v2.8.0 ## explicit; go 1.23.0 github.com/tus/tusd/v2/pkg/handler # 
github.com/unrolled/secure v1.16.0 => github.com/DeepDiver1975/secure v0.0.0-20240611112133-abc838fb797c @@ -2266,8 +2266,8 @@ golang.org/x/tools/internal/versions ## explicit; go 1.17 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 -## explicit; go 1.21 +# google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb +## explicit; go 1.23.0 google.golang.org/genproto/protobuf/field_mask # google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb ## explicit; go 1.23.0 @@ -2278,7 +2278,7 @@ google.golang.org/genproto/googleapis/api/httpbody ## explicit; go 1.23.0 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.71.0 +# google.golang.org/grpc v1.71.1 ## explicit; go 1.22.0 google.golang.org/grpc google.golang.org/grpc/attributes