Merge pull request #599 from opencloud-eu/revaBump2.30

[full-ci] reva bump 2.31.0
Viktor Scharf, committed via GitHub, 2025-04-07 16:32:14 +02:00
67 changed files with 1203 additions and 730 deletions

View File

@@ -1,5 +1,4 @@
# This is an example goreleaser.yaml file with some sane defaults.
# Make sure to check the documentation at http://goreleaser.com
version: 2
builds:
-
@@ -27,16 +26,7 @@ builds:
archives:
-
id: cpuid
name_template: "cpuid-{{ .Os }}_{{ .Arch }}_{{ .Version }}"
replacements:
aix: AIX
darwin: OSX
linux: Linux
windows: Windows
386: i386
amd64: x86_64
freebsd: FreeBSD
netbsd: NetBSD
name_template: "cpuid-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
format_overrides:
- goos: windows
format: zip
@@ -44,8 +34,6 @@ archives:
- LICENSE
checksum:
name_template: 'checksums.txt'
snapshot:
name_template: "{{ .Tag }}-next"
changelog:
sort: asc
filters:
@@ -58,7 +46,7 @@ changelog:
nfpms:
-
file_name_template: "cpuid_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
file_name_template: "cpuid_package_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
vendor: Klaus Post
homepage: https://github.com/klauspost/cpuid
maintainer: Klaus Post <klauspost@gmail.com>
@@ -67,8 +55,3 @@ nfpms:
formats:
- deb
- rpm
replacements:
darwin: Darwin
linux: Linux
freebsd: FreeBSD
amd64: x86_64

View File

@@ -282,7 +282,9 @@ Exit Code 1
| AMXINT8 | Tile computational operations on 8-bit integers |
| AMXFP16 | Tile computational operations on FP16 numbers |
| AMXFP8 | Tile computational operations on FP8 numbers |
| AMXCOMPLEX | Tile computational operations on complex numbers |
| AMXTILE | Tile architecture |
| AMXTF32 | Matrix Multiplication of TF32 Tiles into Packed Single Precision Tile |
| APX_F | Intel APX |
| AVX | AVX functions |
| AVX10 | If set, the Intel AVX10 Converged Vector ISA is supported |
@@ -480,12 +482,16 @@ Exit Code 1
| DCPOP | Data cache clean to Point of Persistence (DC CVAP) |
| EVTSTRM | Generic timer |
| FCMA | Floating-point complex number addition and multiplication |
| FHM | FMLAL and FMLSL instructions |
| FP | Single-precision and double-precision floating point |
| FPHP | Half-precision floating point |
| GPA | Generic Pointer Authentication |
| JSCVT | Javascript-style double->int convert (FJCVTZS) |
| LRCPC | Weaker release consistency (LDAPR, etc) |
| PMULL | Polynomial Multiply instructions (PMULL/PMULL2) |
| RNDR | Random Number instructions |
| TLB | Outer Shareable and TLB range maintenance instructions |
| TS | Flag manipulation instructions |
| SHA1 | SHA-1 instructions (SHA1C, etc) |
| SHA2 | SHA-2 instructions (SHA256H, etc) |
| SHA3 | SHA-3 instructions (EOR3, RAXI, XAR, BCAX) |

View File

@@ -83,6 +83,8 @@ const (
AMXINT8 // Tile computational operations on 8-bit integers
AMXFP8 // Tile computational operations on FP8 numbers
AMXTILE // Tile architecture
AMXTF32 // Matrix Multiplication of TF32 Tiles into Packed Single Precision Tile
AMXCOMPLEX // Tile computational operations on complex numbers
APX_F // Intel APX
AVX // AVX functions
AVX10 // If set, the Intel AVX10 Converged Vector ISA is supported
@@ -282,12 +284,16 @@ const (
DCPOP // Data cache clean to Point of Persistence (DC CVAP)
EVTSTRM // Generic timer
FCMA // Floating-point complex number addition and multiplication
FHM // FMLAL and FMLSL instructions
FP // Single-precision and double-precision floating point
FPHP // Half-precision floating point
GPA // Generic Pointer Authentication
JSCVT // Javascript-style double->int convert (FJCVTZS)
LRCPC // Weaker release consistency (LDAPR, etc)
PMULL // Polynomial Multiply instructions (PMULL/PMULL2)
RNDR // Random Number instructions
TLB // Outer Shareable and TLB range maintenance instructions
TS // Flag manipulation instructions
SHA1 // SHA-1 instructions (SHA1C, etc)
SHA2 // SHA-2 instructions (SHA256H, etc)
SHA3 // SHA-3 instructions (EOR3, RAXI, XAR, BCAX)
@@ -532,7 +538,7 @@ func (c CPUInfo) Ia32TscAux() uint32 {
return ecx
}
// SveLengths returns arm SVE vector and predicate lengths.
// SveLengths returns arm SVE vector and predicate lengths in bits.
// Will return 0, 0 if SVE is not enabled or otherwise unable to detect.
func (c CPUInfo) SveLengths() (vl, pl uint64) {
if !c.Has(SVE) {
@@ -1284,6 +1290,8 @@ func support() flagSet {
// CPUID.(EAX=7, ECX=1).EDX
fs.setIf(edx1&(1<<4) != 0, AVXVNNIINT8)
fs.setIf(edx1&(1<<5) != 0, AVXNECONVERT)
fs.setIf(edx1&(1<<7) != 0, AMXTF32)
fs.setIf(edx1&(1<<8) != 0, AMXCOMPLEX)
fs.setIf(edx1&(1<<10) != 0, AVXVNNIINT16)
fs.setIf(edx1&(1<<14) != 0, PREFETCHI)
fs.setIf(edx1&(1<<19) != 0, AVX10)
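
Once these bits land, the new flags are queryable through the package's public API. A minimal sketch — cpuid.CPU, Has, and the FeatureID Stringer are existing klauspost/cpuid/v2 APIs; only the two flag names are new in this diff:

package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	// Has reports whether the detected CPU advertises the feature.
	for _, f := range []cpuid.FeatureID{cpuid.AMXTF32, cpuid.AMXCOMPLEX} {
		fmt.Printf("%s supported: %v\n", f, cpuid.CPU.Has(f))
	}
}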

View File

@@ -157,6 +157,10 @@ func addInfo(c *CPUInfo, safe bool) {
// x--------------------------------------------------x
// | Name | bits | visible |
// |--------------------------------------------------|
// | RNDR | [63-60] | y |
// |--------------------------------------------------|
// | TLB | [59-56] | y |
// |--------------------------------------------------|
// | TS | [55-52] | y |
// |--------------------------------------------------|
// | FHM | [51-48] | y |
@@ -182,12 +186,10 @@ func addInfo(c *CPUInfo, safe bool) {
// | AES | [7-4] | y |
// x--------------------------------------------------x
// if instAttrReg0&(0xf<<52) != 0 {
// fmt.Println("TS")
// }
// if instAttrReg0&(0xf<<48) != 0 {
// fmt.Println("FHM")
// }
f.setIf(instAttrReg0&(0xf<<60) != 0, RNDR)
f.setIf(instAttrReg0&(0xf<<56) != 0, TLB)
f.setIf(instAttrReg0&(0xf<<52) != 0, TS)
f.setIf(instAttrReg0&(0xf<<48) != 0, FHM)
f.setIf(instAttrReg0&(0xf<<44) != 0, ASIMDDP)
f.setIf(instAttrReg0&(0xf<<40) != 0, SM4)
f.setIf(instAttrReg0&(0xf<<36) != 0, SM3)
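
Each feature occupies a four-bit field of ID_AA64ISAR0_EL1, and a non-zero field value means the feature is implemented, which is what the repeated masks above test. A sketch of the pattern (fieldImplemented is a hypothetical helper, not part of the diff):

// Hypothetical helper mirroring the nibble test used above.
func fieldImplemented(reg uint64, lowBit uint) bool {
	return (reg>>lowBit)&0xf != 0
}

// fieldImplemented(instAttrReg0, 60) corresponds to the RNDR check,
// fieldImplemented(instAttrReg0, 48) to the FHM check, and so on.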

View File

@@ -17,223 +17,229 @@ func _() {
_ = x[AMXINT8-7]
_ = x[AMXFP8-8]
_ = x[AMXTILE-9]
_ = x[APX_F-10]
_ = x[AVX-11]
_ = x[AVX10-12]
_ = x[AVX10_128-13]
_ = x[AVX10_256-14]
_ = x[AVX10_512-15]
_ = x[AVX2-16]
_ = x[AVX512BF16-17]
_ = x[AVX512BITALG-18]
_ = x[AVX512BW-19]
_ = x[AVX512CD-20]
_ = x[AVX512DQ-21]
_ = x[AVX512ER-22]
_ = x[AVX512F-23]
_ = x[AVX512FP16-24]
_ = x[AVX512IFMA-25]
_ = x[AVX512PF-26]
_ = x[AVX512VBMI-27]
_ = x[AVX512VBMI2-28]
_ = x[AVX512VL-29]
_ = x[AVX512VNNI-30]
_ = x[AVX512VP2INTERSECT-31]
_ = x[AVX512VPOPCNTDQ-32]
_ = x[AVXIFMA-33]
_ = x[AVXNECONVERT-34]
_ = x[AVXSLOW-35]
_ = x[AVXVNNI-36]
_ = x[AVXVNNIINT8-37]
_ = x[AVXVNNIINT16-38]
_ = x[BHI_CTRL-39]
_ = x[BMI1-40]
_ = x[BMI2-41]
_ = x[CETIBT-42]
_ = x[CETSS-43]
_ = x[CLDEMOTE-44]
_ = x[CLMUL-45]
_ = x[CLZERO-46]
_ = x[CMOV-47]
_ = x[CMPCCXADD-48]
_ = x[CMPSB_SCADBS_SHORT-49]
_ = x[CMPXCHG8-50]
_ = x[CPBOOST-51]
_ = x[CPPC-52]
_ = x[CX16-53]
_ = x[EFER_LMSLE_UNS-54]
_ = x[ENQCMD-55]
_ = x[ERMS-56]
_ = x[F16C-57]
_ = x[FLUSH_L1D-58]
_ = x[FMA3-59]
_ = x[FMA4-60]
_ = x[FP128-61]
_ = x[FP256-62]
_ = x[FSRM-63]
_ = x[FXSR-64]
_ = x[FXSROPT-65]
_ = x[GFNI-66]
_ = x[HLE-67]
_ = x[HRESET-68]
_ = x[HTT-69]
_ = x[HWA-70]
_ = x[HYBRID_CPU-71]
_ = x[HYPERVISOR-72]
_ = x[IA32_ARCH_CAP-73]
_ = x[IA32_CORE_CAP-74]
_ = x[IBPB-75]
_ = x[IBPB_BRTYPE-76]
_ = x[IBRS-77]
_ = x[IBRS_PREFERRED-78]
_ = x[IBRS_PROVIDES_SMP-79]
_ = x[IBS-80]
_ = x[IBSBRNTRGT-81]
_ = x[IBSFETCHSAM-82]
_ = x[IBSFFV-83]
_ = x[IBSOPCNT-84]
_ = x[IBSOPCNTEXT-85]
_ = x[IBSOPSAM-86]
_ = x[IBSRDWROPCNT-87]
_ = x[IBSRIPINVALIDCHK-88]
_ = x[IBS_FETCH_CTLX-89]
_ = x[IBS_OPDATA4-90]
_ = x[IBS_OPFUSE-91]
_ = x[IBS_PREVENTHOST-92]
_ = x[IBS_ZEN4-93]
_ = x[IDPRED_CTRL-94]
_ = x[INT_WBINVD-95]
_ = x[INVLPGB-96]
_ = x[KEYLOCKER-97]
_ = x[KEYLOCKERW-98]
_ = x[LAHF-99]
_ = x[LAM-100]
_ = x[LBRVIRT-101]
_ = x[LZCNT-102]
_ = x[MCAOVERFLOW-103]
_ = x[MCDT_NO-104]
_ = x[MCOMMIT-105]
_ = x[MD_CLEAR-106]
_ = x[MMX-107]
_ = x[MMXEXT-108]
_ = x[MOVBE-109]
_ = x[MOVDIR64B-110]
_ = x[MOVDIRI-111]
_ = x[MOVSB_ZL-112]
_ = x[MOVU-113]
_ = x[MPX-114]
_ = x[MSRIRC-115]
_ = x[MSRLIST-116]
_ = x[MSR_PAGEFLUSH-117]
_ = x[NRIPS-118]
_ = x[NX-119]
_ = x[OSXSAVE-120]
_ = x[PCONFIG-121]
_ = x[POPCNT-122]
_ = x[PPIN-123]
_ = x[PREFETCHI-124]
_ = x[PSFD-125]
_ = x[RDPRU-126]
_ = x[RDRAND-127]
_ = x[RDSEED-128]
_ = x[RDTSCP-129]
_ = x[RRSBA_CTRL-130]
_ = x[RTM-131]
_ = x[RTM_ALWAYS_ABORT-132]
_ = x[SBPB-133]
_ = x[SERIALIZE-134]
_ = x[SEV-135]
_ = x[SEV_64BIT-136]
_ = x[SEV_ALTERNATIVE-137]
_ = x[SEV_DEBUGSWAP-138]
_ = x[SEV_ES-139]
_ = x[SEV_RESTRICTED-140]
_ = x[SEV_SNP-141]
_ = x[SGX-142]
_ = x[SGXLC-143]
_ = x[SHA-144]
_ = x[SME-145]
_ = x[SME_COHERENT-146]
_ = x[SPEC_CTRL_SSBD-147]
_ = x[SRBDS_CTRL-148]
_ = x[SRSO_MSR_FIX-149]
_ = x[SRSO_NO-150]
_ = x[SRSO_USER_KERNEL_NO-151]
_ = x[SSE-152]
_ = x[SSE2-153]
_ = x[SSE3-154]
_ = x[SSE4-155]
_ = x[SSE42-156]
_ = x[SSE4A-157]
_ = x[SSSE3-158]
_ = x[STIBP-159]
_ = x[STIBP_ALWAYSON-160]
_ = x[STOSB_SHORT-161]
_ = x[SUCCOR-162]
_ = x[SVM-163]
_ = x[SVMDA-164]
_ = x[SVMFBASID-165]
_ = x[SVML-166]
_ = x[SVMNP-167]
_ = x[SVMPF-168]
_ = x[SVMPFT-169]
_ = x[SYSCALL-170]
_ = x[SYSEE-171]
_ = x[TBM-172]
_ = x[TDX_GUEST-173]
_ = x[TLB_FLUSH_NESTED-174]
_ = x[TME-175]
_ = x[TOPEXT-176]
_ = x[TSCRATEMSR-177]
_ = x[TSXLDTRK-178]
_ = x[VAES-179]
_ = x[VMCBCLEAN-180]
_ = x[VMPL-181]
_ = x[VMSA_REGPROT-182]
_ = x[VMX-183]
_ = x[VPCLMULQDQ-184]
_ = x[VTE-185]
_ = x[WAITPKG-186]
_ = x[WBNOINVD-187]
_ = x[WRMSRNS-188]
_ = x[X87-189]
_ = x[XGETBV1-190]
_ = x[XOP-191]
_ = x[XSAVE-192]
_ = x[XSAVEC-193]
_ = x[XSAVEOPT-194]
_ = x[XSAVES-195]
_ = x[AESARM-196]
_ = x[ARMCPUID-197]
_ = x[ASIMD-198]
_ = x[ASIMDDP-199]
_ = x[ASIMDHP-200]
_ = x[ASIMDRDM-201]
_ = x[ATOMICS-202]
_ = x[CRC32-203]
_ = x[DCPOP-204]
_ = x[EVTSTRM-205]
_ = x[FCMA-206]
_ = x[FP-207]
_ = x[FPHP-208]
_ = x[GPA-209]
_ = x[JSCVT-210]
_ = x[LRCPC-211]
_ = x[PMULL-212]
_ = x[SHA1-213]
_ = x[SHA2-214]
_ = x[SHA3-215]
_ = x[SHA512-216]
_ = x[SM3-217]
_ = x[SM4-218]
_ = x[SVE-219]
_ = x[lastID-220]
_ = x[AMXTF32-10]
_ = x[AMXCOMPLEX-11]
_ = x[APX_F-12]
_ = x[AVX-13]
_ = x[AVX10-14]
_ = x[AVX10_128-15]
_ = x[AVX10_256-16]
_ = x[AVX10_512-17]
_ = x[AVX2-18]
_ = x[AVX512BF16-19]
_ = x[AVX512BITALG-20]
_ = x[AVX512BW-21]
_ = x[AVX512CD-22]
_ = x[AVX512DQ-23]
_ = x[AVX512ER-24]
_ = x[AVX512F-25]
_ = x[AVX512FP16-26]
_ = x[AVX512IFMA-27]
_ = x[AVX512PF-28]
_ = x[AVX512VBMI-29]
_ = x[AVX512VBMI2-30]
_ = x[AVX512VL-31]
_ = x[AVX512VNNI-32]
_ = x[AVX512VP2INTERSECT-33]
_ = x[AVX512VPOPCNTDQ-34]
_ = x[AVXIFMA-35]
_ = x[AVXNECONVERT-36]
_ = x[AVXSLOW-37]
_ = x[AVXVNNI-38]
_ = x[AVXVNNIINT8-39]
_ = x[AVXVNNIINT16-40]
_ = x[BHI_CTRL-41]
_ = x[BMI1-42]
_ = x[BMI2-43]
_ = x[CETIBT-44]
_ = x[CETSS-45]
_ = x[CLDEMOTE-46]
_ = x[CLMUL-47]
_ = x[CLZERO-48]
_ = x[CMOV-49]
_ = x[CMPCCXADD-50]
_ = x[CMPSB_SCADBS_SHORT-51]
_ = x[CMPXCHG8-52]
_ = x[CPBOOST-53]
_ = x[CPPC-54]
_ = x[CX16-55]
_ = x[EFER_LMSLE_UNS-56]
_ = x[ENQCMD-57]
_ = x[ERMS-58]
_ = x[F16C-59]
_ = x[FLUSH_L1D-60]
_ = x[FMA3-61]
_ = x[FMA4-62]
_ = x[FP128-63]
_ = x[FP256-64]
_ = x[FSRM-65]
_ = x[FXSR-66]
_ = x[FXSROPT-67]
_ = x[GFNI-68]
_ = x[HLE-69]
_ = x[HRESET-70]
_ = x[HTT-71]
_ = x[HWA-72]
_ = x[HYBRID_CPU-73]
_ = x[HYPERVISOR-74]
_ = x[IA32_ARCH_CAP-75]
_ = x[IA32_CORE_CAP-76]
_ = x[IBPB-77]
_ = x[IBPB_BRTYPE-78]
_ = x[IBRS-79]
_ = x[IBRS_PREFERRED-80]
_ = x[IBRS_PROVIDES_SMP-81]
_ = x[IBS-82]
_ = x[IBSBRNTRGT-83]
_ = x[IBSFETCHSAM-84]
_ = x[IBSFFV-85]
_ = x[IBSOPCNT-86]
_ = x[IBSOPCNTEXT-87]
_ = x[IBSOPSAM-88]
_ = x[IBSRDWROPCNT-89]
_ = x[IBSRIPINVALIDCHK-90]
_ = x[IBS_FETCH_CTLX-91]
_ = x[IBS_OPDATA4-92]
_ = x[IBS_OPFUSE-93]
_ = x[IBS_PREVENTHOST-94]
_ = x[IBS_ZEN4-95]
_ = x[IDPRED_CTRL-96]
_ = x[INT_WBINVD-97]
_ = x[INVLPGB-98]
_ = x[KEYLOCKER-99]
_ = x[KEYLOCKERW-100]
_ = x[LAHF-101]
_ = x[LAM-102]
_ = x[LBRVIRT-103]
_ = x[LZCNT-104]
_ = x[MCAOVERFLOW-105]
_ = x[MCDT_NO-106]
_ = x[MCOMMIT-107]
_ = x[MD_CLEAR-108]
_ = x[MMX-109]
_ = x[MMXEXT-110]
_ = x[MOVBE-111]
_ = x[MOVDIR64B-112]
_ = x[MOVDIRI-113]
_ = x[MOVSB_ZL-114]
_ = x[MOVU-115]
_ = x[MPX-116]
_ = x[MSRIRC-117]
_ = x[MSRLIST-118]
_ = x[MSR_PAGEFLUSH-119]
_ = x[NRIPS-120]
_ = x[NX-121]
_ = x[OSXSAVE-122]
_ = x[PCONFIG-123]
_ = x[POPCNT-124]
_ = x[PPIN-125]
_ = x[PREFETCHI-126]
_ = x[PSFD-127]
_ = x[RDPRU-128]
_ = x[RDRAND-129]
_ = x[RDSEED-130]
_ = x[RDTSCP-131]
_ = x[RRSBA_CTRL-132]
_ = x[RTM-133]
_ = x[RTM_ALWAYS_ABORT-134]
_ = x[SBPB-135]
_ = x[SERIALIZE-136]
_ = x[SEV-137]
_ = x[SEV_64BIT-138]
_ = x[SEV_ALTERNATIVE-139]
_ = x[SEV_DEBUGSWAP-140]
_ = x[SEV_ES-141]
_ = x[SEV_RESTRICTED-142]
_ = x[SEV_SNP-143]
_ = x[SGX-144]
_ = x[SGXLC-145]
_ = x[SHA-146]
_ = x[SME-147]
_ = x[SME_COHERENT-148]
_ = x[SPEC_CTRL_SSBD-149]
_ = x[SRBDS_CTRL-150]
_ = x[SRSO_MSR_FIX-151]
_ = x[SRSO_NO-152]
_ = x[SRSO_USER_KERNEL_NO-153]
_ = x[SSE-154]
_ = x[SSE2-155]
_ = x[SSE3-156]
_ = x[SSE4-157]
_ = x[SSE42-158]
_ = x[SSE4A-159]
_ = x[SSSE3-160]
_ = x[STIBP-161]
_ = x[STIBP_ALWAYSON-162]
_ = x[STOSB_SHORT-163]
_ = x[SUCCOR-164]
_ = x[SVM-165]
_ = x[SVMDA-166]
_ = x[SVMFBASID-167]
_ = x[SVML-168]
_ = x[SVMNP-169]
_ = x[SVMPF-170]
_ = x[SVMPFT-171]
_ = x[SYSCALL-172]
_ = x[SYSEE-173]
_ = x[TBM-174]
_ = x[TDX_GUEST-175]
_ = x[TLB_FLUSH_NESTED-176]
_ = x[TME-177]
_ = x[TOPEXT-178]
_ = x[TSCRATEMSR-179]
_ = x[TSXLDTRK-180]
_ = x[VAES-181]
_ = x[VMCBCLEAN-182]
_ = x[VMPL-183]
_ = x[VMSA_REGPROT-184]
_ = x[VMX-185]
_ = x[VPCLMULQDQ-186]
_ = x[VTE-187]
_ = x[WAITPKG-188]
_ = x[WBNOINVD-189]
_ = x[WRMSRNS-190]
_ = x[X87-191]
_ = x[XGETBV1-192]
_ = x[XOP-193]
_ = x[XSAVE-194]
_ = x[XSAVEC-195]
_ = x[XSAVEOPT-196]
_ = x[XSAVES-197]
_ = x[AESARM-198]
_ = x[ARMCPUID-199]
_ = x[ASIMD-200]
_ = x[ASIMDDP-201]
_ = x[ASIMDHP-202]
_ = x[ASIMDRDM-203]
_ = x[ATOMICS-204]
_ = x[CRC32-205]
_ = x[DCPOP-206]
_ = x[EVTSTRM-207]
_ = x[FCMA-208]
_ = x[FHM-209]
_ = x[FP-210]
_ = x[FPHP-211]
_ = x[GPA-212]
_ = x[JSCVT-213]
_ = x[LRCPC-214]
_ = x[PMULL-215]
_ = x[RNDR-216]
_ = x[TLB-217]
_ = x[TS-218]
_ = x[SHA1-219]
_ = x[SHA2-220]
_ = x[SHA3-221]
_ = x[SHA512-222]
_ = x[SM3-223]
_ = x[SM4-224]
_ = x[SVE-225]
_ = x[lastID-226]
_ = x[firstID-0]
}
const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXFP8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXFP8AMXTILEAMXTF32AMXCOMPLEXAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFHMFPFPHPGPAJSCVTLRCPCPMULLRNDRTLBTSSHA1SHA2SHA3SHA512SM3SM4SVElastID"
var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 61, 68, 73, 76, 81, 90, 99, 108, 112, 122, 134, 142, 150, 158, 166, 173, 183, 193, 201, 211, 222, 230, 240, 258, 273, 280, 292, 299, 306, 317, 329, 337, 341, 345, 351, 356, 364, 369, 375, 379, 388, 406, 414, 421, 425, 429, 443, 449, 453, 457, 466, 470, 474, 479, 484, 488, 492, 499, 503, 506, 512, 515, 518, 528, 538, 551, 564, 568, 579, 583, 597, 614, 617, 627, 638, 644, 652, 663, 671, 683, 699, 713, 724, 734, 749, 757, 768, 778, 785, 794, 804, 808, 811, 818, 823, 834, 841, 848, 856, 859, 865, 870, 879, 886, 894, 898, 901, 907, 914, 927, 932, 934, 941, 948, 954, 958, 967, 971, 976, 982, 988, 994, 1004, 1007, 1023, 1027, 1036, 1039, 1048, 1063, 1076, 1082, 1096, 1103, 1106, 1111, 1114, 1117, 1129, 1143, 1153, 1165, 1172, 1191, 1194, 1198, 1202, 1206, 1211, 1216, 1221, 1226, 1240, 1251, 1257, 1260, 1265, 1274, 1278, 1283, 1288, 1294, 1301, 1306, 1309, 1318, 1334, 1337, 1343, 1353, 1361, 1365, 1374, 1378, 1390, 1393, 1403, 1406, 1413, 1421, 1428, 1431, 1438, 1441, 1446, 1452, 1460, 1466, 1472, 1480, 1485, 1492, 1499, 1507, 1514, 1519, 1524, 1531, 1535, 1537, 1541, 1544, 1549, 1554, 1559, 1563, 1567, 1571, 1577, 1580, 1583, 1586, 1592}
var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 61, 68, 75, 85, 90, 93, 98, 107, 116, 125, 129, 139, 151, 159, 167, 175, 183, 190, 200, 210, 218, 228, 239, 247, 257, 275, 290, 297, 309, 316, 323, 334, 346, 354, 358, 362, 368, 373, 381, 386, 392, 396, 405, 423, 431, 438, 442, 446, 460, 466, 470, 474, 483, 487, 491, 496, 501, 505, 509, 516, 520, 523, 529, 532, 535, 545, 555, 568, 581, 585, 596, 600, 614, 631, 634, 644, 655, 661, 669, 680, 688, 700, 716, 730, 741, 751, 766, 774, 785, 795, 802, 811, 821, 825, 828, 835, 840, 851, 858, 865, 873, 876, 882, 887, 896, 903, 911, 915, 918, 924, 931, 944, 949, 951, 958, 965, 971, 975, 984, 988, 993, 999, 1005, 1011, 1021, 1024, 1040, 1044, 1053, 1056, 1065, 1080, 1093, 1099, 1113, 1120, 1123, 1128, 1131, 1134, 1146, 1160, 1170, 1182, 1189, 1208, 1211, 1215, 1219, 1223, 1228, 1233, 1238, 1243, 1257, 1268, 1274, 1277, 1282, 1291, 1295, 1300, 1305, 1311, 1318, 1323, 1326, 1335, 1351, 1354, 1360, 1370, 1378, 1382, 1391, 1395, 1407, 1410, 1420, 1423, 1430, 1438, 1445, 1448, 1455, 1458, 1463, 1469, 1477, 1483, 1489, 1497, 1502, 1509, 1516, 1524, 1531, 1536, 1541, 1548, 1552, 1555, 1557, 1561, 1564, 1569, 1574, 1579, 1583, 1586, 1588, 1592, 1596, 1600, 1606, 1609, 1612, 1615, 1621}
func (i FeatureID) String() string {
if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) {

View File

@@ -96,9 +96,11 @@ func tryToFillCPUInfoFomSysctl(c *CPUInfo) {
setFeature(c, "hw.optional.arm.FEAT_DPB", DCPOP)
// setFeature(c, "", EVTSTRM)
setFeature(c, "hw.optional.arm.FEAT_FCMA", FCMA)
setFeature(c, "hw.optional.arm.FEAT_FHM", FHM)
setFeature(c, "hw.optional.arm.FEAT_FP", FP)
setFeature(c, "hw.optional.arm.FEAT_FP16", FPHP)
setFeature(c, "hw.optional.arm.FEAT_PAuth", GPA)
setFeature(c, "hw.optional.arm.FEAT_RNG", RNDR)
setFeature(c, "hw.optional.arm.FEAT_JSCVT", JSCVT)
setFeature(c, "hw.optional.arm.FEAT_LRCPC", LRCPC)
setFeature(c, "hw.optional.arm.FEAT_PMULL", PMULL)
@@ -106,6 +108,10 @@ func tryToFillCPUInfoFomSysctl(c *CPUInfo) {
setFeature(c, "hw.optional.arm.FEAT_SHA256", SHA2)
setFeature(c, "hw.optional.arm.FEAT_SHA3", SHA3)
setFeature(c, "hw.optional.arm.FEAT_SHA512", SHA512)
setFeature(c, "hw.optional.arm.FEAT_TLBIOS", TLB)
setFeature(c, "hw.optional.arm.FEAT_TLBIRANGE", TLB)
setFeature(c, "hw.optional.arm.FEAT_FlagM", TS)
setFeature(c, "hw.optional.arm.FEAT_FlagM2", TS)
// setFeature(c, "", SM3)
// setFeature(c, "", SM4)
setFeature(c, "hw.optional.arm.FEAT_SVE", SVE)

View File

@@ -39,6 +39,80 @@ const (
hwcap_SHA512 = 1 << 21
hwcap_SVE = 1 << 22
hwcap_ASIMDFHM = 1 << 23
hwcap_DIT = 1 << 24
hwcap_USCAT = 1 << 25
hwcap_ILRCPC = 1 << 26
hwcap_FLAGM = 1 << 27
hwcap_SSBS = 1 << 28
hwcap_SB = 1 << 29
hwcap_PACA = 1 << 30
hwcap_PACG = 1 << 31
hwcap_GCS = 1 << 32
hwcap2_DCPODP = 1 << 0
hwcap2_SVE2 = 1 << 1
hwcap2_SVEAES = 1 << 2
hwcap2_SVEPMULL = 1 << 3
hwcap2_SVEBITPERM = 1 << 4
hwcap2_SVESHA3 = 1 << 5
hwcap2_SVESM4 = 1 << 6
hwcap2_FLAGM2 = 1 << 7
hwcap2_FRINT = 1 << 8
hwcap2_SVEI8MM = 1 << 9
hwcap2_SVEF32MM = 1 << 10
hwcap2_SVEF64MM = 1 << 11
hwcap2_SVEBF16 = 1 << 12
hwcap2_I8MM = 1 << 13
hwcap2_BF16 = 1 << 14
hwcap2_DGH = 1 << 15
hwcap2_RNG = 1 << 16
hwcap2_BTI = 1 << 17
hwcap2_MTE = 1 << 18
hwcap2_ECV = 1 << 19
hwcap2_AFP = 1 << 20
hwcap2_RPRES = 1 << 21
hwcap2_MTE3 = 1 << 22
hwcap2_SME = 1 << 23
hwcap2_SME_I16I64 = 1 << 24
hwcap2_SME_F64F64 = 1 << 25
hwcap2_SME_I8I32 = 1 << 26
hwcap2_SME_F16F32 = 1 << 27
hwcap2_SME_B16F32 = 1 << 28
hwcap2_SME_F32F32 = 1 << 29
hwcap2_SME_FA64 = 1 << 30
hwcap2_WFXT = 1 << 31
hwcap2_EBF16 = 1 << 32
hwcap2_SVE_EBF16 = 1 << 33
hwcap2_CSSC = 1 << 34
hwcap2_RPRFM = 1 << 35
hwcap2_SVE2P1 = 1 << 36
hwcap2_SME2 = 1 << 37
hwcap2_SME2P1 = 1 << 38
hwcap2_SME_I16I32 = 1 << 39
hwcap2_SME_BI32I32 = 1 << 40
hwcap2_SME_B16B16 = 1 << 41
hwcap2_SME_F16F16 = 1 << 42
hwcap2_MOPS = 1 << 43
hwcap2_HBC = 1 << 44
hwcap2_SVE_B16B16 = 1 << 45
hwcap2_LRCPC3 = 1 << 46
hwcap2_LSE128 = 1 << 47
hwcap2_FPMR = 1 << 48
hwcap2_LUT = 1 << 49
hwcap2_FAMINMAX = 1 << 50
hwcap2_F8CVT = 1 << 51
hwcap2_F8FMA = 1 << 52
hwcap2_F8DP4 = 1 << 53
hwcap2_F8DP2 = 1 << 54
hwcap2_F8E4M3 = 1 << 55
hwcap2_F8E5M2 = 1 << 56
hwcap2_SME_LUTV2 = 1 << 57
hwcap2_SME_F8F16 = 1 << 58
hwcap2_SME_F8F32 = 1 << 59
hwcap2_SME_SF8FMA = 1 << 60
hwcap2_SME_SF8DP4 = 1 << 61
hwcap2_SME_SF8DP2 = 1 << 62
hwcap2_POE = 1 << 63
)
func detectOS(c *CPUInfo) bool {
@@ -104,11 +178,15 @@ func detectOS(c *CPUInfo) bool {
c.featureSet.setIf(isSet(hwcap, hwcap_DCPOP), DCPOP)
c.featureSet.setIf(isSet(hwcap, hwcap_EVTSTRM), EVTSTRM)
c.featureSet.setIf(isSet(hwcap, hwcap_FCMA), FCMA)
c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDFHM), FHM)
c.featureSet.setIf(isSet(hwcap, hwcap_FP), FP)
c.featureSet.setIf(isSet(hwcap, hwcap_FPHP), FPHP)
c.featureSet.setIf(isSet(hwcap, hwcap_JSCVT), JSCVT)
c.featureSet.setIf(isSet(hwcap, hwcap_LRCPC), LRCPC)
c.featureSet.setIf(isSet(hwcap, hwcap_PMULL), PMULL)
c.featureSet.setIf(isSet(hwcap, hwcap2_RNG), RNDR)
// c.featureSet.setIf(isSet(hwcap, hwcap_), TLB)
// c.featureSet.setIf(isSet(hwcap, hwcap_), TS)
c.featureSet.setIf(isSet(hwcap, hwcap_SHA1), SHA1)
c.featureSet.setIf(isSet(hwcap, hwcap_SHA2), SHA2)
c.featureSet.setIf(isSet(hwcap, hwcap_SHA3), SHA3)
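
The isSet helper is outside this hunk; presumably it is a plain bit test along these lines (sketch, assumed shape):

// Assumed shape of the helper used above: true when the given hwcap bit is set.
func isSet(hwc uint64, value uint64) bool {
	return hwc&value != 0
}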

View File

@@ -35,7 +35,7 @@ This package follows the official [Golang Release Policy](https://golang.org/doc
- [Android](#android)
- [ARM](#arm)
- [Cross Compile](#cross-compile)
- [Google Cloud Platform](#google-cloud-platform)
- [Compiling](#compiling)
- [Linux](#linux)
- [Alpine](#alpine)
- [Fedora](#fedora)
@@ -70,7 +70,6 @@ This package can be installed with the `go get` command:
_go-sqlite3_ is *cgo* package.
If you want to build your app using go-sqlite3, you need gcc.
However, after you have built and installed _go-sqlite3_ with `go install github.com/mattn/go-sqlite3` (which requires gcc), you can build your app without relying on gcc in the future.
***Important: because this is a `CGO` enabled package, you are required to set the environment variable `CGO_ENABLED=1` and have a `gcc` compiler present within your path.***
@@ -228,11 +227,7 @@ Steps:
Please refer to the project's [README](https://github.com/FiloSottile/homebrew-musl-cross#readme) for further information.
# Google Cloud Platform
Building on GCP is not possible because Google Cloud Platform does not allow `gcc` to be executed.
Please work only with compiled final binaries.
# Compiling
## Linux

View File

@@ -345,7 +345,8 @@ func callbackRetText(ctx *C.sqlite3_context, v reflect.Value) error {
if v.Type().Kind() != reflect.String {
return fmt.Errorf("cannot convert %s to TEXT", v.Type())
}
C._sqlite3_result_text(ctx, C.CString(v.Interface().(string)))
cstr := C.CString(v.Interface().(string))
C._sqlite3_result_text(ctx, cstr)
return nil
}
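
Binding the C allocation to a name is what makes it possible to release it after the call. The rendered diff does not show a free, but the usual cgo pattern — assuming the _sqlite3_result_text wrapper copies its argument, as sqlite3_result_text does with SQLITE_TRANSIENT — would be:

// Hypothetical completion of the pattern, not the upstream code.
cstr := C.CString(v.Interface().(string))
defer C.free(unsafe.Pointer(cstr)) // needs <stdlib.h> in the cgo preamble and "unsafe" imported
C._sqlite3_result_text(ctx, cstr)
return nil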

View File

@@ -381,7 +381,7 @@ type SQLiteStmt struct {
s *C.sqlite3_stmt
t string
closed bool
cls bool
cls bool // True if the statement was created by SQLiteConn.Query
}
// SQLiteResult implements sql.Result.
@@ -393,12 +393,12 @@ type SQLiteResult struct {
// SQLiteRows implements driver.Rows.
type SQLiteRows struct {
s *SQLiteStmt
nc int
nc int32 // Number of columns
cls bool // True if we need to close the parent statement in Close
cols []string
decltype []string
cls bool
closed bool
ctx context.Context // no better alternative to pass context into Next() method
closemu sync.Mutex
}
type functionInfo struct {
@@ -929,6 +929,7 @@ func (c *SQLiteConn) query(ctx context.Context, query string, args []driver.Name
s.(*SQLiteStmt).cls = true
na := s.NumInput()
if len(args)-start < na {
s.Close()
return nil, fmt.Errorf("not enough args to execute query: want %d got %d", na, len(args)-start)
}
// consume the number of arguments used in the current
@@ -2007,14 +2008,12 @@ func (s *SQLiteStmt) query(ctx context.Context, args []driver.NamedValue) (drive
rows := &SQLiteRows{
s: s,
nc: int(C.sqlite3_column_count(s.s)),
nc: int32(C.sqlite3_column_count(s.s)),
cls: s.cls,
cols: nil,
decltype: nil,
cls: s.cls,
closed: false,
ctx: ctx,
}
runtime.SetFinalizer(rows, (*SQLiteRows).Close)
return rows, nil
}
@@ -2111,24 +2110,28 @@ func (s *SQLiteStmt) Readonly() bool {
// Close the rows.
func (rc *SQLiteRows) Close() error {
rc.s.mu.Lock()
if rc.s.closed || rc.closed {
rc.s.mu.Unlock()
rc.closemu.Lock()
defer rc.closemu.Unlock()
s := rc.s
if s == nil {
return nil
}
rc.s = nil // remove reference to SQLiteStmt
s.mu.Lock()
if s.closed {
s.mu.Unlock()
return nil
}
rc.closed = true
if rc.cls {
rc.s.mu.Unlock()
return rc.s.Close()
s.mu.Unlock()
return s.Close()
}
rv := C.sqlite3_reset(rc.s.s)
rv := C.sqlite3_reset(s.s)
if rv != C.SQLITE_OK {
rc.s.mu.Unlock()
return rc.s.c.lastError()
s.mu.Unlock()
return s.c.lastError()
}
rc.s.mu.Unlock()
rc.s = nil
runtime.SetFinalizer(rc, nil)
s.mu.Unlock()
return nil
}
@@ -2136,9 +2139,9 @@ func (rc *SQLiteRows) Close() error {
func (rc *SQLiteRows) Columns() []string {
rc.s.mu.Lock()
defer rc.s.mu.Unlock()
if rc.s.s != nil && rc.nc != len(rc.cols) {
if rc.s.s != nil && int(rc.nc) != len(rc.cols) {
rc.cols = make([]string, rc.nc)
for i := 0; i < rc.nc; i++ {
for i := 0; i < int(rc.nc); i++ {
rc.cols[i] = C.GoString(C.sqlite3_column_name(rc.s.s, C.int(i)))
}
}
@@ -2148,7 +2151,7 @@ func (rc *SQLiteRows) Columns() []string {
func (rc *SQLiteRows) declTypes() []string {
if rc.s.s != nil && rc.decltype == nil {
rc.decltype = make([]string, rc.nc)
for i := 0; i < rc.nc; i++ {
for i := 0; i < int(rc.nc); i++ {
rc.decltype[i] = strings.ToLower(C.GoString(C.sqlite3_column_decltype(rc.s.s, C.int(i))))
}
}

View File

@@ -5,7 +5,11 @@
#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY
#include <stdio.h>
#ifndef USE_LIBSQLITE3
#include "sqlite3-binding.h"
#else
#include <sqlite3.h>
#endif
extern int unlock_notify_wait(sqlite3 *db);

View File

@@ -12,7 +12,11 @@ package sqlite3
#cgo CFLAGS: -DSQLITE_ENABLE_UNLOCK_NOTIFY
#include <stdlib.h>
#ifndef USE_LIBSQLITE3
#include "sqlite3-binding.h"
#else
#include <sqlite3.h>
#endif
extern void unlock_notify_callback(void *arg, int argc);
*/

View File

@@ -1,27 +1,72 @@
linters-settings:
misspell:
locale: US
version: "2"
linters:
disable-all: true
enable:
- typecheck
- goimports
- misspell
- revive
- durationcheck
- gocritic
- gomodguard
- govet
- ineffassign
- gosimple
- misspell
- revive
- staticcheck
- unconvert
- unused
- gocritic
- usetesting
- whitespace
settings:
misspell:
locale: US
staticcheck:
checks:
- all
- -SA1008
- -SA1019
- -SA4000
- -SA9004
- -ST1000
- -ST1005
- -ST1016
- -ST1021
- -ST1020
- -U1000
exclusions:
generated: lax
rules:
- path: (.+)\.go$
text: "empty-block:"
- path: (.+)\.go$
text: "unused-parameter:"
- path: (.+)\.go$
text: "dot-imports:"
- path: (.+)\.go$
text: "singleCaseSwitch: should rewrite switch statement to if statement"
- path: (.+)\.go$
text: "unlambda: replace"
- path: (.+)\.go$
text: "captLocal:"
- path: (.+)\.go$
text: "should have a package comment"
- path: (.+)\.go$
text: "ifElseChain:"
- path: (.+)\.go$
text: "elseif:"
- path: (.+)\.go$
text: "Error return value of"
- path: (.+)\.go$
text: "unnecessary conversion"
- path: (.+)\.go$
text: "Error return value is not checked"
issues:
exclude-use-default: false
exclude:
# todo fix these when we get enough time.
- "singleCaseSwitch: should rewrite switch statement to if statement"
- "unlambda: replace"
- "captLocal:"
- "ifElseChain:"
- "elseif:"
- "should have a package comment"
max-issues-per-linter: 100
max-same-issues: 100
formatters:
enable:
- gofumpt
- goimports
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$

View File

@@ -251,7 +251,6 @@ func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefi
// Close current connection before looping further.
closeResponse(resp)
}
}(notificationInfoCh)

View File

@@ -90,6 +90,7 @@ type BucketVersioningConfiguration struct {
// Requires versioning to be enabled
ExcludedPrefixes []ExcludedPrefix `xml:",omitempty"`
ExcludeFolders bool `xml:",omitempty"`
PurgeOnDelete string `xml:",omitempty"`
}
// Various supported states

View File

@@ -135,16 +135,16 @@ func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string {
res := map[string][]string{}
for _, g := range grants {
switch {
case g.Permission == "READ":
switch g.Permission {
case "READ":
res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID)
case g.Permission == "WRITE":
case "WRITE":
res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID)
case g.Permission == "READ_ACP":
case "READ_ACP":
res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID)
case g.Permission == "WRITE_ACP":
case "WRITE_ACP":
res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID)
case g.Permission == "FULL_CONTROL":
case "FULL_CONTROL":
res["X-Amz-Grant-Full-Control"] = append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID)
}
}

View File

@@ -524,7 +524,6 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts
}
return
}
}
}(resultCh)
return resultCh

View File

@@ -350,7 +350,6 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
// Part number always starts with '1'.
var partNumber int
for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
// Proceed to upload the part.
if partNumber == totalPartsCount {
partSize = lastPartSize

View File

@@ -392,10 +392,7 @@ func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh
defer close(resultCh)
// Loop over entries by 1000 and call MultiDelete requests
for {
if finish {
break
}
for !finish {
count := 0
var batch []ObjectInfo

View File

@@ -194,7 +194,6 @@ func (l *ListVersionsResult) UnmarshalXML(d *xml.Decoder, _ xml.StartElement) (e
default:
return errors.New("unrecognized option:" + tagName)
}
}
}
return nil

View File

@@ -609,7 +609,6 @@ func (s *SelectResults) start(pipeWriter *io.PipeWriter) {
closeResponse(s.resp)
return
}
}
}()
}
@@ -669,7 +668,6 @@ func extractHeader(body io.Reader, myHeaders http.Header) error {
}
myHeaders.Set(headerTypeName, headerValueName)
}
return nil
}

View File

@@ -155,7 +155,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "v7.0.88"
libraryVersion = "v7.0.89"
)
// User Agent should always follow the below style.
@@ -598,7 +598,7 @@ func (c *Client) do(req *http.Request) (resp *http.Response, err error) {
// If trace is enabled, dump http request and response,
// except when the traceErrorsOnly enabled and the response's status code is ok
if c.isTraceEnabled && !(c.traceErrorsOnly && resp.StatusCode == http.StatusOK) {
if c.isTraceEnabled && (!c.traceErrorsOnly || resp.StatusCode != http.StatusOK) {
err = c.dumpHTTP(req, resp)
if err != nil {
return nil, err
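
The rewritten condition is a straight De Morgan transformation, ¬(A ∧ B) ≡ ¬A ∨ ¬B, with A = c.traceErrorsOnly and B = resp.StatusCode == http.StatusOK. A throwaway check that the two forms agree on all inputs:

// Exhaustive equivalence check over all four input combinations.
for _, a := range []bool{false, true} {
	for _, b := range []bool{false, true} {
		if !(a && b) != (!a || !b) {
			panic("not equivalent") // never reached
		}
	}
}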

View File

@@ -104,6 +104,8 @@ type STSAssumeRoleOptions struct {
RoleARN string
RoleSessionName string
ExternalID string
TokenRevokeType string // Optional, used for token revocation (MinIO-only extension)
}
// NewSTSAssumeRole returns a pointer to a new
@@ -161,6 +163,9 @@ func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssume
if opts.ExternalID != "" {
v.Set("ExternalId", opts.ExternalID)
}
if opts.TokenRevokeType != "" {
v.Set("TokenRevokeType", opts.TokenRevokeType)
}
u, err := url.Parse(endpoint)
if err != nil {
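
The new field rides along on the existing constructor. A hedged usage sketch — NewSTSAssumeRole, AccessKey, and SecretKey are existing minio-go APIs; the endpoint and the revoke-type value are placeholders, and the field itself is the MinIO-only extension added here:

creds, err := credentials.NewSTSAssumeRole("https://sts.minio.example.com", credentials.STSAssumeRoleOptions{
	AccessKey:       "ACCESS",
	SecretKey:       "SECRET",
	TokenRevokeType: "placeholder", // hypothetical value; interpretation is server-defined
})
if err != nil {
	// handle error
}
_ = creds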

View File

@@ -69,6 +69,9 @@ type CustomTokenIdentity struct {
// RequestedExpiry is to set the validity of the generated credentials
// (this value bounded by server).
RequestedExpiry time.Duration
// Optional, used for token revocation
TokenRevokeType string
}
// RetrieveWithCredContext with Retrieve optionally cred context
@@ -98,6 +101,9 @@ func (c *CustomTokenIdentity) RetrieveWithCredContext(cc *CredContext) (value Va
if c.RequestedExpiry != 0 {
v.Set("DurationSeconds", fmt.Sprintf("%d", int(c.RequestedExpiry.Seconds())))
}
if c.TokenRevokeType != "" {
v.Set("TokenRevokeType", c.TokenRevokeType)
}
u.RawQuery = v.Encode()

View File

@@ -73,6 +73,9 @@ type LDAPIdentity struct {
// RequestedExpiry is the configured expiry duration for credentials
// requested from LDAP.
RequestedExpiry time.Duration
// Optional, used for token revocation
TokenRevokeType string
}
// NewLDAPIdentity returns new credentials object that uses LDAP
@@ -152,6 +155,9 @@ func (k *LDAPIdentity) RetrieveWithCredContext(cc *CredContext) (value Value, er
if k.RequestedExpiry != 0 {
v.Set("DurationSeconds", fmt.Sprintf("%d", int(k.RequestedExpiry.Seconds())))
}
if k.TokenRevokeType != "" {
v.Set("TokenRevokeType", k.TokenRevokeType)
}
req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
if err != nil {

View File

@@ -80,6 +80,9 @@ type STSCertificateIdentity struct {
// Certificate is the client certificate that is used for
// STS authentication.
Certificate tls.Certificate
// Optional, used for token revocation
TokenRevokeType string
}
// NewSTSCertificateIdentity returns a STSCertificateIdentity that authenticates
@@ -122,6 +125,9 @@ func (i *STSCertificateIdentity) RetrieveWithCredContext(cc *CredContext) (Value
queryValues := url.Values{}
queryValues.Set("Action", "AssumeRoleWithCertificate")
queryValues.Set("Version", STSVersion)
if i.TokenRevokeType != "" {
queryValues.Set("TokenRevokeType", i.TokenRevokeType)
}
endpointURL.RawQuery = queryValues.Encode()
req, err := http.NewRequest(http.MethodPost, endpointURL.String(), nil)

View File

@@ -93,6 +93,9 @@ type STSWebIdentity struct {
// roleSessionName is the identifier for the assumed role session.
roleSessionName string
// Optional, used for token revocation
TokenRevokeType string
}
// NewSTSWebIdentity returns a pointer to a new
@@ -135,7 +138,7 @@ func WithPolicy(policy string) func(*STSWebIdentity) {
}
func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string, policy string,
getWebIDTokenExpiry func() (*WebIdentityToken, error),
getWebIDTokenExpiry func() (*WebIdentityToken, error), tokenRevokeType string,
) (AssumeRoleWithWebIdentityResponse, error) {
idToken, err := getWebIDTokenExpiry()
if err != nil {
@@ -168,6 +171,9 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
v.Set("Policy", policy)
}
v.Set("Version", STSVersion)
if tokenRevokeType != "" {
v.Set("TokenRevokeType", tokenRevokeType)
}
u, err := url.Parse(endpoint)
if err != nil {
@@ -236,7 +242,7 @@ func (m *STSWebIdentity) RetrieveWithCredContext(cc *CredContext) (Value, error)
return Value{}, errors.New("STS endpoint unknown")
}
a, err := getWebIdentityCredentials(client, stsEndpoint, m.RoleARN, m.roleSessionName, m.Policy, m.GetWebIDTokenExpiry)
a, err := getWebIdentityCredentials(client, stsEndpoint, m.RoleARN, m.roleSessionName, m.Policy, m.GetWebIDTokenExpiry, m.TokenRevokeType)
if err != nil {
return Value{}, err
}

View File

@@ -192,7 +192,7 @@ func (t Transition) IsDaysNull() bool {
// IsDateNull returns true if date field is null
func (t Transition) IsDateNull() bool {
return t.Date.Time.IsZero()
return t.Date.IsZero()
}
// IsNull returns true if no storage-class is set.
@@ -323,7 +323,7 @@ type ExpirationDate struct {
// MarshalXML encodes expiration date if it is non-zero and encodes
// empty string otherwise
func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
if eDate.Time.IsZero() {
if eDate.IsZero() {
return nil
}
return e.EncodeElement(eDate.Format(time.RFC3339), startElement)
@@ -392,7 +392,7 @@ func (e Expiration) IsDaysNull() bool {
// IsDateNull returns true if date field is null
func (e Expiration) IsDateNull() bool {
return e.Date.Time.IsZero()
return e.Date.IsZero()
}
// IsDeleteMarkerExpirationEnabled returns true if the auto-expiration of delete marker is enabled
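
The shorter call works because ExpirationDate embeds time.Time, so IsZero is promoted to the outer type. A minimal sketch (the one-field embedding below matches how minio-go declares these date wrappers):

type ExpirationDate struct{ time.Time }

var d ExpirationDate
fmt.Println(d.Time.IsZero()) // explicit form: true
fmt.Println(d.IsZero())      // promoted method, identical result: true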

View File

@@ -283,7 +283,6 @@ func (b *Configuration) AddTopic(topicConfig Config) bool {
for _, n := range b.TopicConfigs {
// If new config matches existing one
if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter {
existingConfig := set.NewStringSet()
for _, v := range n.Events {
existingConfig.Add(string(v))
@@ -308,7 +307,6 @@ func (b *Configuration) AddQueue(queueConfig Config) bool {
newQueueConfig := QueueConfig{Config: queueConfig, Queue: queueConfig.Arn.String()}
for _, n := range b.QueueConfigs {
if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter {
existingConfig := set.NewStringSet()
for _, v := range n.Events {
existingConfig.Add(string(v))
@@ -333,7 +331,6 @@ func (b *Configuration) AddLambda(lambdaConfig Config) bool {
newLambdaConfig := LambdaConfig{Config: lambdaConfig, Lambda: lambdaConfig.Arn.String()}
for _, n := range b.LambdaConfigs {
if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter {
existingConfig := set.NewStringSet()
for _, v := range n.Events {
existingConfig.Add(string(v))
@@ -372,7 +369,7 @@ func (b *Configuration) RemoveTopicByArnEventsPrefixSuffix(arn Arn, events []Eve
removeIndex := -1
for i, v := range b.TopicConfigs {
// if it matches events and filters, mark the index for deletion
if v.Topic == arn.String() && v.Config.Equal(events, prefix, suffix) {
if v.Topic == arn.String() && v.Equal(events, prefix, suffix) {
removeIndex = i
break // since we have at most one matching config
}
@@ -400,7 +397,7 @@ func (b *Configuration) RemoveQueueByArnEventsPrefixSuffix(arn Arn, events []Eve
removeIndex := -1
for i, v := range b.QueueConfigs {
// if it matches events and filters, mark the index for deletion
if v.Queue == arn.String() && v.Config.Equal(events, prefix, suffix) {
if v.Queue == arn.String() && v.Equal(events, prefix, suffix) {
removeIndex = i
break // since we have at most one matching config
}
@@ -428,7 +425,7 @@ func (b *Configuration) RemoveLambdaByArnEventsPrefixSuffix(arn Arn, events []Ev
removeIndex := -1
for i, v := range b.LambdaConfigs {
// if it matches events and filters, mark the index for deletion
if v.Lambda == arn.String() && v.Config.Equal(events, prefix, suffix) {
if v.Lambda == arn.String() && v.Equal(events, prefix, suffix) {
removeIndex = i
break // since we have at most one matching config
}

View File

@@ -988,10 +988,10 @@ func (q ReplQueueStats) QStats() (r ReplQStats) {
// MetricsV2 represents replication metrics for a bucket.
type MetricsV2 struct {
Uptime int64 `json:"uptime"`
CurrentStats Metrics `json:"currStats"`
QueueStats ReplQueueStats `json:"queueStats"`
DowntimeInfo DowntimeInfo `json:"downtimeInfo"`
Uptime int64 `json:"uptime"`
CurrentStats Metrics `json:"currStats"`
QueueStats ReplQueueStats `json:"queueStats"`
DowntimeInfo map[string]DowntimeInfo `json:"downtimeInfo"`
}
// DowntimeInfo represents the downtime info

View File

@@ -212,7 +212,6 @@ func (s *StreamingUSReader) Read(buf []byte) (int, error) {
}
return 0, err
}
}
}
return s.buf.Read(buf)

View File

@@ -387,7 +387,6 @@ func (s *StreamingReader) Read(buf []byte) (int, error) {
}
return 0, err
}
}
}
return s.buf.Read(buf)

View File

@@ -148,7 +148,7 @@ func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost b
// Prepare auth header.
authHeader := new(bytes.Buffer)
authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID))
fmt.Fprintf(authHeader, "%s %s:", signV2Algorithm, accessKeyID)
encoder := base64.NewEncoder(base64.StdEncoding, authHeader)
encoder.Write(hm.Sum(nil))
encoder.Close()

View File

@@ -128,8 +128,8 @@ func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) strin
for _, k := range headers {
buf.WriteString(k)
buf.WriteByte(':')
switch {
case k == "host":
switch k {
case "host":
buf.WriteString(getHostAddr(&req))
buf.WriteByte('\n')
default:

View File

@@ -27,6 +27,7 @@ type Config struct {
FavoriteStorageDrivers map[string]map[string]interface{} `mapstructure:"favorite_storage_drivers"`
Version string `mapstructure:"version"`
VersionString string `mapstructure:"version_string"`
Edition string `mapstructure:"edition"`
Product string `mapstructure:"product"`
ProductName string `mapstructure:"product_name"`
ProductVersion string `mapstructure:"product_version"`

View File

@@ -34,6 +34,7 @@ func (s *svc) doStatus(w http.ResponseWriter, r *http.Request) {
NeedsDBUpgrade: false,
Version: s.c.Version,
VersionString: s.c.VersionString,
Edition: s.c.Edition,
ProductName: s.c.ProductName,
ProductVersion: s.c.ProductVersion,
Product: s.c.Product,

View File

@@ -69,6 +69,9 @@ func (h *Handler) Init(c *config.Config) {
if h.c.Capabilities.Core.Status.VersionString == "" {
h.c.Capabilities.Core.Status.VersionString = "10.0.11" // TODO make build determined
}
if h.c.Capabilities.Core.Status.Edition == "" {
h.c.Capabilities.Core.Status.Edition = "" // TODO make build determined
}
if h.c.Capabilities.Core.Status.ProductName == "" {
h.c.Capabilities.Core.Status.ProductName = "reva" // TODO make build determined
}
@@ -217,6 +220,7 @@ func (h *Handler) Init(c *config.Config) {
Minor: 0,
Micro: 11,
String: "10.0.11",
Edition: "",
Product: "reva",
ProductVersion: "",
}

View File

@@ -50,6 +50,7 @@ type Option func(l *zerolog.Logger)
// New creates a new logger.
func New(opts ...Option) *zerolog.Logger {
// create a default logger
zerolog.SetGlobalLevel(zerolog.TraceLevel)
zl := zerolog.New(os.Stderr).With().Timestamp().Caller().Logger()
for _, opt := range opts {
opt(&zl)
@@ -127,7 +128,7 @@ type LogConf struct {
func fromConfig(conf *LogConf) (*zerolog.Logger, error) {
if conf.Level == "" {
conf.Level = zerolog.DebugLevel.String()
conf.Level = zerolog.InfoLevel.String()
}
var opts []Option
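
With the global floor lowered to Trace inside New, filtering is effectively decided by the per-logger level, whose config default moves from debug to info. A small sketch of how zerolog resolves the two (SetGlobalLevel and Level are standard zerolog APIs):

zerolog.SetGlobalLevel(zerolog.TraceLevel)            // global floor: let everything through
zl := zerolog.New(os.Stderr).Level(zerolog.InfoLevel) // per-logger level, e.g. from LogConf
zl.Debug().Msg("dropped") // below info: filtered out
zl.Info().Msg("emitted")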

View File

@@ -297,6 +297,13 @@ func VersionString(val string) Option {
}
}
// Edition provides a function to set the Edition config option.
func Edition(val string) Option {
return func(o *Options) {
o.config.Edition = val
}
}
// Product provides a function to set the Product config option.
func Product(val string) Option {
return func(o *Options) {

View File

@@ -142,6 +142,7 @@ type Status struct {
NeedsDBUpgrade ocsBool `json:"needsDbUpgrade" xml:"needsDbUpgrade"`
Version string `json:"version" xml:"version"`
VersionString string `json:"versionstring" xml:"versionstring"`
Edition string `json:"edition" xml:"edition"`
ProductName string `json:"productname" xml:"productname"`
Product string `json:"product" xml:"product"`
ProductVersion string `json:"productversion" xml:"productversion"`
@@ -308,6 +309,7 @@ type Version struct {
Minor int `json:"minor" xml:"minor"`
Micro int `json:"micro" xml:"micro"` // = patch level
String string `json:"string" xml:"string"`
Edition string `json:"edition" xml:"edition"`
Product string `json:"product" xml:"product"`
ProductVersion string `json:"productversion" xml:"productversion"`
}

View File

@@ -242,16 +242,35 @@ type tusdLogger struct {
// Handle handles the record
func (l tusdLogger) Handle(_ context.Context, r slog.Record) error {
var logev *zerolog.Event
switch r.Level {
case slog.LevelDebug:
l.log.Debug().Msg(r.Message)
logev = l.log.Debug()
case slog.LevelInfo:
l.log.Info().Msg(r.Message)
logev = l.log.Info()
case slog.LevelWarn:
l.log.Warn().Msg(r.Message)
logev = l.log.Warn()
case slog.LevelError:
l.log.Error().Msg(r.Message)
logev = l.log.Error()
}
r.Attrs(func(a slog.Attr) bool {
// Resolve the Attr's value before doing anything else.
a.Value = a.Value.Resolve()
// Ignore empty Attrs.
if a.Equal(slog.Attr{}) {
return true
}
switch a.Value.Kind() {
case slog.KindBool:
logev = logev.Bool(a.Key, a.Value.Bool())
case slog.KindInt64:
logev = logev.Int64(a.Key, a.Value.Int64())
default:
logev = logev.Str(a.Key, a.Value.String())
}
return true
})
logev.Msg(r.Message)
return nil
}
@@ -262,7 +281,7 @@ func (l tusdLogger) Enabled(_ context.Context, _ slog.Level) bool { return true
func (l tusdLogger) WithAttrs(attr []slog.Attr) slog.Handler {
fields := make(map[string]interface{}, len(attr))
for _, a := range attr {
fields[a.Key] = a.Value
fields[a.Key] = a.Value.String()
}
c := l.log.With().Fields(fields).Logger()
sLog := tusdLogger{log: &c}
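
Taken together, the handler now forwards structured attributes instead of dropping them. A hedged usage sketch — tusdLogger implements the slog.Handler methods shown above; a WithGroup method, also required by the interface, is assumed to exist elsewhere in the file:

h := tusdLogger{log: &zl} // zl: some zerolog.Logger
logger := slog.New(h)
logger.Info("upload finished", "bytes", int64(1024), "ok", true)
// -> a zerolog info event carrying Int64("bytes", 1024) and Bool("ok", true)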

View File

@@ -29,15 +29,13 @@ import (
// taken from https://golang.org/src/net/http/fs.go
// ErrSeeker is returned by ServeContent's sizeFunc when the content
// doesn't seek properly. The underlying Seeker's error text isn't
// included in the sizeFunc reply so it's not sent over HTTP to end
// users.
var ErrSeeker = errors.New("seeker can't seek")
// ErrInvalidRange is returned by serveContent's parseRange if the Range is
// malformed or invalid.
var ErrInvalidRange = errors.New("invalid range")
// ErrNoOverlap is returned by serveContent's parseRange if first-byte-pos of
// all of the byte-range-spec values is greater than the content size.
var ErrNoOverlap = errors.New("invalid range: failed to overlap")
var ErrNoOverlap = fmt.Errorf("%w: failed to overlap", ErrInvalidRange)
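
Because ErrNoOverlap now wraps ErrInvalidRange via %w, callers can match both with a single sentinel check:

// errors.Is walks the wrap chain, so one check covers both sentinels.
if errors.Is(err, ErrInvalidRange) {
	// true for ErrInvalidRange itself and for ErrNoOverlap
}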
// HTTPRange specifies the byte range to be sent to the client.
type HTTPRange struct {
@@ -65,7 +63,7 @@ func ParseRange(s string, size int64) ([]HTTPRange, error) {
}
const b = "bytes="
if !strings.HasPrefix(s, b) {
return nil, errors.New("invalid range")
return nil, ErrInvalidRange
}
ranges := []HTTPRange{}
noOverlap := false
@@ -76,7 +74,7 @@ func ParseRange(s string, size int64) ([]HTTPRange, error) {
}
i := strings.Index(ra, "-")
if i < 0 {
return nil, errors.New("invalid range")
return nil, ErrInvalidRange
}
start, end := textproto.TrimString(ra[:i]), textproto.TrimString(ra[i+1:])
var r HTTPRange
@@ -85,7 +83,7 @@ func ParseRange(s string, size int64) ([]HTTPRange, error) {
// range start relative to the end of the file.
i, err := strconv.ParseInt(end, 10, 64)
if err != nil {
return nil, errors.New("invalid range")
return nil, ErrInvalidRange
}
if i > size {
i = size
@@ -95,7 +93,7 @@ func ParseRange(s string, size int64) ([]HTTPRange, error) {
} else {
i, err := strconv.ParseInt(start, 10, 64)
if err != nil || i < 0 {
return nil, errors.New("invalid range")
return nil, ErrInvalidRange
}
if i >= size {
// If the range begins after the size of the content,
@@ -110,7 +108,7 @@ func ParseRange(s string, size int64) ([]HTTPRange, error) {
} else {
i, err := strconv.ParseInt(end, 10, 64)
if err != nil || r.Start > i {
return nil, errors.New("invalid range")
return nil, ErrInvalidRange
}
if i >= size {
i = size - 1
@@ -146,7 +144,7 @@ func RangesMIMESize(ranges []HTTPRange, contentType string, contentSize int64) (
_, _ = mw.CreatePart(ra.MimeHeader(contentType, contentSize))
encSize += ra.Length
}
mw.Close()
_ = mw.Close()
encSize += int64(w)
return
}

View File

@@ -25,6 +25,7 @@ import (
"os"
"path/filepath"
"strings"
"time"
user "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
@@ -72,6 +73,7 @@ type Lookup struct {
Options *options.Options
IDCache IDCache
IDHistoryCache IDCache
metadataBackend metadata.Backend
userMapper usermapper.Mapper
tm node.TimeManager
@@ -79,10 +81,15 @@ type Lookup struct {
// New returns a new Lookup instance
func New(b metadata.Backend, um usermapper.Mapper, o *options.Options, tm node.TimeManager) *Lookup {
idHistoryConf := o.Options.IDCache
idHistoryConf.Database = o.Options.IDCache.Table + "_history"
idHistoryConf.TTL = 1 * time.Minute
lu := &Lookup{
Options: o,
metadataBackend: b,
IDCache: NewStoreIDCache(&o.Options),
IDCache: NewStoreIDCache(o.Options.IDCache),
IDHistoryCache: NewStoreIDCache(idHistoryConf),
userMapper: um,
tm: tm,
}

View File

@@ -25,7 +25,7 @@ import (
microstore "go-micro.dev/v4/store"
"github.com/opencloud-eu/reva/v2/pkg/appctx"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options"
"github.com/opencloud-eu/reva/v2/pkg/storage/cache"
"github.com/opencloud-eu/reva/v2/pkg/store"
)
@@ -34,31 +34,33 @@ type StoreIDCache struct {
}
// NewMemoryIDCache returns a new MemoryIDCache
func NewStoreIDCache(o *options.Options) *StoreIDCache {
func NewStoreIDCache(c cache.Config) *StoreIDCache {
return &StoreIDCache{
cache: store.Create(
store.Store(o.IDCache.Store),
store.Size(o.IDCache.Size),
microstore.Nodes(o.IDCache.Nodes...),
microstore.Database(o.IDCache.Database),
microstore.Table(o.IDCache.Table),
store.DisablePersistence(o.IDCache.DisablePersistence),
store.Authentication(o.IDCache.AuthUsername, o.IDCache.AuthPassword),
store.Store(c.Store),
store.Size(c.Size),
microstore.Nodes(c.Nodes...),
microstore.Database(c.Database),
microstore.Table(c.Table),
store.DisablePersistence(c.DisablePersistence),
store.Authentication(c.AuthUsername, c.AuthPassword),
),
}
}
// Delete removes an entry from the cache
func (c *StoreIDCache) Delete(_ context.Context, spaceID, nodeID string) error {
var rerr error
v, err := c.cache.Read(cacheKey(spaceID, nodeID))
if err == nil {
err := c.cache.Delete(reverseCacheKey(string(v[0].Value)))
if err != nil {
return err
}
rerr = c.cache.Delete(reverseCacheKey(string(v[0].Value)))
}
return c.cache.Delete(cacheKey(spaceID, nodeID))
err = c.cache.Delete(cacheKey(spaceID, nodeID))
if err != nil {
return err
}
return rerr
}
// DeleteByPath removes an entry from the cache

View File

@@ -44,24 +44,28 @@ type Options struct {
WatchType string `mapstructure:"watch_type"`
WatchPath string `mapstructure:"watch_path"`
WatchFolderKafkaBrokers string `mapstructure:"watch_folder_kafka_brokers"`
// InotifyWatcher specific options
InotifyStatsFrequency time.Duration `mapstructure:"inotify_stats_frequency"`
}
// New returns a new Options instance for the given configuration
func New(m map[string]interface{}) (*Options, error) {
o := &Options{}
if err := mapstructure.Decode(m, o); err != nil {
err = errors.Wrap(err, "error decoding conf")
return nil, err
}
// default to the hybrid metadata backend for posixfs
if _, ok := m["metadata_backend"]; !ok {
m["metadata_backend"] = "hybrid"
}
if _, ok := m["scan_debounce_delay"]; !ok {
m["scan_debounce_delay"] = 10 * time.Millisecond
}
if _, ok := m["inotify_stats_frequency"]; !ok {
m["inotify_stats_frequency"] = 5 * time.Minute
}
// debounced scan delay
if o.ScanDebounceDelay == 0 {
o.ScanDebounceDelay = 10 * time.Millisecond
o := &Options{}
if err := mapstructure.Decode(m, o); err != nil {
err = errors.Wrap(err, "error decoding conf")
return nil, err
}
do, err := decomposedoptions.New(m)

View File

@@ -164,17 +164,22 @@ func (tb *Trashbin) MoveToTrash(ctx context.Context, n *node.Node, path string)
return err
}
// purge metadata
// 1. "Forget" the node
if err = tb.lu.IDCache.DeleteByPath(ctx, path); err != nil {
return err
}
err = tb.lu.MetadataBackend().Purge(ctx, n)
// 2. Move the node to the trash
itemTrashPath := filepath.Join(trashPath, "files", key+".trashitem")
err = os.Rename(path, itemTrashPath)
if err != nil {
return err
}
itemTrashPath := filepath.Join(trashPath, "files", key+".trashitem")
return os.Rename(path, itemTrashPath)
// 3. Purge the node from the metadata backend. This will not delete the xattrs from the
// node as it has already been moved but still remove it from the file metadata cache so
// that the metadata is no longer available when reading the node.
return tb.lu.MetadataBackend().Purge(ctx, n)
}
// ListRecycle returns the list of available recycle items
@@ -315,7 +320,7 @@ func (tb *Trashbin) RestoreRecycleItem(ctx context.Context, spaceID string, key,
}
// TODO the decomposed trash also checks the permissions on the restore node
_, id, _, err := tb.lu.MetadataBackend().IdentifyPath(ctx, trashPath)
_, id, _, _, err := tb.lu.MetadataBackend().IdentifyPath(ctx, trashPath)
if err != nil {
return nil, err
}
@@ -325,7 +330,7 @@ func (tb *Trashbin) RestoreRecycleItem(ctx context.Context, spaceID string, key,
}
// update parent id in case it was restored to a different location
_, parentID, _, err := tb.lu.MetadataBackend().IdentifyPath(ctx, filepath.Dir(restorePath))
_, parentID, _, _, err := tb.lu.MetadataBackend().IdentifyPath(ctx, filepath.Dir(restorePath))
if err != nil {
return nil, err
}

View File

@@ -243,17 +243,21 @@ func (t *Tree) Scan(path string, action EventAction, isDir bool) error {
case ActionMoveFrom:
t.log.Debug().Str("path", path).Bool("isDir", isDir).Msg("scanning path (ActionMoveFrom)")
// 6. file/directory moved out of the watched directory
// -> update directory
err := t.HandleFileDelete(path)
if err != nil {
t.log.Error().Err(err).Str("path", path).Bool("isDir", isDir).Msg("failed to handle deleted item")
}
err = t.setDirty(filepath.Dir(path), true)
if err != nil {
t.log.Error().Err(err).Str("path", path).Bool("isDir", isDir).Msg("failed to mark directory as dirty")
// -> remove from caches
// remember the id of the moved away item
spaceID, nodeID, err := t.lookup.IDsForPath(context.Background(), path)
if err == nil {
err = t.lookup.IDHistoryCache.Set(context.Background(), spaceID, nodeID, path)
if err != nil {
t.log.Error().Err(err).Str("path", path).Msg("failed to cache the id of the moved item")
}
}
go func() { _ = t.WarmupIDCache(filepath.Dir(path), false, true) }()
err = t.HandleFileDelete(path, false) // Do not send a item-trashed SSE in case of moves. They trigger a item-renamed event instead.
if err != nil {
t.log.Error().Err(err).Str("path", path).Bool("isDir", isDir).Msg("failed to handle moved away item")
}
case ActionDelete:
t.log.Debug().Str("path", path).Bool("isDir", isDir).Msg("handling deleted item")
@@ -261,7 +265,7 @@ func (t *Tree) Scan(path string, action EventAction, isDir bool) error {
// 7. Deleted file or directory
// -> update parent and all children
err := t.HandleFileDelete(path)
err := t.HandleFileDelete(path, true)
if err != nil {
t.log.Error().Err(err).Str("path", path).Bool("isDir", isDir).Msg("failed to handle deleted item")
}
@@ -276,12 +280,20 @@ func (t *Tree) Scan(path string, action EventAction, isDir bool) error {
return nil
}
func (t *Tree) HandleFileDelete(path string) error {
func (t *Tree) HandleFileDelete(path string, sendSSE bool) error {
spaceID, id, err := t.lookup.IDsForPath(context.Background(), path)
if err != nil {
return err
}
n := node.NewBaseNode(spaceID, id, t.lookup)
if n.InternalPath() != path {
return fmt.Errorf("internal path does not match path")
}
_, err = os.Stat(path)
if err == nil || !os.IsNotExist(err) {
t.log.Info().Str("path", path).Msg("file that was about to be cleared still exists/exists again. We'll leave it alone")
return nil
}
// purge metadata
if err := t.lookup.IDCache.DeleteByPath(context.Background(), path); err != nil {
@@ -291,6 +303,10 @@ func (t *Tree) HandleFileDelete(path string) error {
t.log.Error().Err(err).Str("path", path).Msg("could not purge metadata")
}
if !sendSSE {
return nil
}
parentNode, err := t.getNodeForPath(filepath.Dir(path))
if err != nil {
return err
@@ -355,6 +371,7 @@ func (t *Tree) findSpaceId(path string) (string, node.Attributes, error) {
}
func (t *Tree) assimilate(item scanItem) error {
t.log.Debug().Str("path", item.Path).Bool("rescan", item.ForceRescan).Bool("recurse", item.Recurse).Msg("assimilate")
var err error
// First find the space id
@@ -383,17 +400,20 @@ func (t *Tree) assimilate(item scanItem) error {
}
// check for the id attribute again after grabbing the lock, maybe the file was assimilated/created by us in the meantime
_, id, mtime, err := t.lookup.MetadataBackend().IdentifyPath(context.Background(), item.Path)
_, id, parentID, mtime, err := t.lookup.MetadataBackend().IdentifyPath(context.Background(), item.Path)
if err != nil {
return err
}
if id != "" {
// the file has an id set, we already know it from the past
n := node.NewBaseNode(spaceID, id, t.lookup)
// n := node.NewBaseNode(spaceID, id, t.lookup)
previousPath, ok := t.lookup.GetCachedID(context.Background(), spaceID, id)
previousParentID, _ := t.lookup.MetadataBackend().Get(context.Background(), n, prefixes.ParentidAttr)
if previousPath == "" || !ok {
previousPath, ok = t.lookup.IDHistoryCache.Get(context.Background(), spaceID, id)
}
// previousParentID, _ := t.lookup.MetadataBackend().Get(context.Background(), n, prefixes.ParentidAttr)
// compare metadata mtime with actual mtime. if it matches AND the path hasn't changed (move operation)
// we can skip the assimilation because the file was handled by us
@@ -405,7 +425,7 @@ func (t *Tree) assimilate(item scanItem) error {
}
// was it moved or copied/restored with a clashing id?
if ok && len(previousParentID) > 0 && previousPath != item.Path {
if ok && len(parentID) > 0 && previousPath != item.Path {
_, err := os.Stat(previousPath)
if err == nil {
// this id clashes with an existing item -> clear metadata and re-assimilate
@@ -445,13 +465,13 @@ func (t *Tree) assimilate(item scanItem) error {
}()
}
parentID := attrs.String(prefixes.ParentidAttr)
newParentID := attrs.String(prefixes.ParentidAttr)
if len(parentID) > 0 {
ref := &provider.Reference{
ResourceId: &provider.ResourceId{
StorageId: t.options.MountID,
SpaceId: spaceID,
OpaqueId: parentID,
OpaqueId: newParentID,
},
Path: filepath.Base(item.Path),
}
@@ -459,7 +479,7 @@ func (t *Tree) assimilate(item scanItem) error {
ResourceId: &provider.ResourceId{
StorageId: t.options.MountID,
SpaceId: spaceID,
OpaqueId: string(previousParentID),
OpaqueId: parentID,
},
Path: filepath.Base(previousPath),
}
@@ -615,54 +635,56 @@ assimilate:
n.SpaceRoot = &node.Node{BaseNode: node.BaseNode{SpaceID: spaceID, ID: spaceID}}
go func() {
// Copy the previous current version to a revision
currentNode := node.NewBaseNode(n.SpaceID, n.ID+node.CurrentIDDelimiter, t.lookup)
currentPath := currentNode.InternalPath()
stat, err := os.Stat(currentPath)
if err != nil {
t.log.Error().Err(err).Str("path", path).Str("currentPath", currentPath).Msg("could not stat current path")
return
}
revisionPath := t.lookup.VersionPath(n.SpaceID, n.ID, stat.ModTime().UTC().Format(time.RFC3339Nano))
if t.options.EnableFSRevisions {
go func() {
// Copy the previous current version to a revision
currentNode := node.NewBaseNode(n.SpaceID, n.ID+node.CurrentIDDelimiter, t.lookup)
currentPath := currentNode.InternalPath()
stat, err := os.Stat(currentPath)
if err != nil {
t.log.Error().Err(err).Str("path", path).Str("currentPath", currentPath).Msg("could not stat current path")
return
}
revisionPath := t.lookup.VersionPath(n.SpaceID, n.ID, stat.ModTime().UTC().Format(time.RFC3339Nano))
err = os.Rename(currentPath, revisionPath)
if err != nil {
t.log.Error().Err(err).Str("path", path).Str("revisionPath", revisionPath).Msg("could not create revision")
return
}
err = os.Rename(currentPath, revisionPath)
if err != nil {
t.log.Error().Err(err).Str("path", path).Str("revisionPath", revisionPath).Msg("could not create revision")
return
}
// Copy the new version to the current version
w, err := os.OpenFile(currentPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
if err != nil {
t.log.Error().Err(err).Str("path", path).Str("currentPath", currentPath).Msg("could not open current path for writing")
return
}
defer w.Close()
r, err := os.OpenFile(n.InternalPath(), os.O_RDONLY, 0600)
if err != nil {
t.log.Error().Err(err).Str("path", path).Msg("could not open file for reading")
return
}
defer r.Close()
// Copy the new version to the current version
w, err := os.OpenFile(currentPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
if err != nil {
t.log.Error().Err(err).Str("path", path).Str("currentPath", currentPath).Msg("could not open current path for writing")
return
}
defer w.Close()
r, err := os.OpenFile(n.InternalPath(), os.O_RDONLY, 0600)
if err != nil {
t.log.Error().Err(err).Str("path", path).Msg("could not open file for reading")
return
}
defer r.Close()
_, err = io.Copy(w, r)
if err != nil {
t.log.Error().Err(err).Str("currentPath", currentPath).Str("path", path).Msg("could not copy new version to current version")
return
}
_, err = io.Copy(w, r)
if err != nil {
t.log.Error().Err(err).Str("currentPath", currentPath).Str("path", path).Msg("could not copy new version to current version")
return
}
err = t.lookup.CopyMetadata(context.Background(), n, currentNode, func(attributeName string, value []byte) (newValue []byte, copy bool) {
return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) ||
attributeName == prefixes.TypeAttr ||
attributeName == prefixes.BlobIDAttr ||
attributeName == prefixes.BlobsizeAttr
}, false)
if err != nil {
t.log.Error().Err(err).Str("currentPath", currentPath).Str("path", path).Msg("failed to copy xattrs to 'current' file")
return
}
}()
err = t.lookup.CopyMetadata(context.Background(), n, currentNode, func(attributeName string, value []byte) (newValue []byte, copy bool) {
return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) ||
attributeName == prefixes.TypeAttr ||
attributeName == prefixes.BlobIDAttr ||
attributeName == prefixes.BlobsizeAttr
}, false)
if err != nil {
t.log.Error().Err(err).Str("currentPath", currentPath).Str("path", path).Msg("failed to copy xattrs to 'current' file")
return
}
}()
}
err = t.Propagate(context.Background(), n, 0)
if err != nil {
@@ -735,7 +757,7 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error {
sizes[path] += 0 // Make sure to set the size to 0 for empty directories
}
nodeSpaceID, id, _, err := t.lookup.MetadataBackend().IdentifyPath(context.Background(), path)
nodeSpaceID, id, _, _, err := t.lookup.MetadataBackend().IdentifyPath(context.Background(), path)
if err == nil && len(id) > 0 {
if len(nodeSpaceID) > 0 {
spaceID = nodeSpaceID
@@ -757,7 +779,7 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error {
break
}
spaceID, _, _, err = t.lookup.MetadataBackend().IdentifyPath(context.Background(), spaceCandidate)
spaceID, _, _, _, err = t.lookup.MetadataBackend().IdentifyPath(context.Background(), spaceCandidate)
if err == nil && len(spaceID) > 0 {
err = scopeSpace(path)
if err != nil {
@@ -791,7 +813,11 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error {
t.log.Error().Err(err).Str("path", path).Msg("could not assimilate item")
}
}
return t.setDirty(path, false)
if info.IsDir() {
return t.setDirty(path, false)
}
return nil
})
for dir, size := range sizes {

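One detail worth calling out in the ActionMoveFrom branch: the departing item's id is written to the id-history cache before caches and metadata are purged, so the matching move-in event can still resolve the id to its previous path. A toy sketch of that set-before-purge ordering, with a plain map standing in for t.lookup.IDHistoryCache:

package main

import "fmt"

// historyCache is a stand-in for the IDHistoryCache used above: it maps
// (spaceID, nodeID) to the last known path of an item that left the tree.
type historyCache map[string]string

func (h historyCache) Set(spaceID, nodeID, path string) { h[spaceID+"!"+nodeID] = path }

func (h historyCache) Get(spaceID, nodeID string) (string, bool) {
	p, ok := h[spaceID+"!"+nodeID]
	return p, ok
}

func main() {
	cache := historyCache{}

	// 1. moved out of the watched directory: remember the id first ...
	cache.Set("space-1", "node-42", "/watched/old/name")

	// 2. ... then purge caches/metadata without emitting an item-trashed SSE,
	//    because the follow-up move-in emits item-renamed instead.

	// 3. the move-in handler can now detect the move:
	if prev, ok := cache.Get("space-1", "node-42"); ok {
		fmt.Println("move detected, previous path:", prev)
	}
}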
View File

@@ -22,24 +22,40 @@ package tree
import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options"
"github.com/pablodz/inotifywaitgo/inotifywaitgo"
"github.com/rs/zerolog"
)
type InotifyWatcher struct {
tree *Tree
log *zerolog.Logger
tree *Tree
options *options.Options
log *zerolog.Logger
}
func NewInotifyWatcher(tree *Tree, log *zerolog.Logger) (*InotifyWatcher, error) {
func NewInotifyWatcher(tree *Tree, o *options.Options, log *zerolog.Logger) (*InotifyWatcher, error) {
return &InotifyWatcher{
tree: tree,
log: log,
tree: tree,
options: o,
log: log,
}, nil
}
func (iw *InotifyWatcher) Watch(path string) {
if iw.options.InotifyStatsFrequency > 0 {
go func() {
for {
iw.printStats()
time.Sleep(iw.options.InotifyStatsFrequency)
}
}()
}
events := make(chan inotifywaitgo.FileEvent)
errors := make(chan error)
@@ -104,3 +120,119 @@ func (iw *InotifyWatcher) Watch(path string) {
}
}
}
// InotifyUsage holds the current and maximum numbers of inotify watches and instances.
type InotifyUsage struct {
Watches int
Instances int
MaxWatches int
MaxInstances int
}
func countInotifyFDs(pid string) (int, int, error) {
fds, err := os.ReadDir(filepath.Join("/proc", pid, "fd"))
if err != nil {
if os.IsNotExist(err) {
return 0, 0, nil // Process may have exited, treat as 0.
}
return 0, 0, fmt.Errorf("failed to read /proc/%s/fd: %w", pid, err)
}
watches := 0
instances := 0
for _, fd := range fds {
if !fd.IsDir() {
if fd.Type()&os.ModeSymlink == 0 {
continue
}
link, err := os.Readlink(filepath.Join("/proc", pid, "fd", fd.Name()))
if err != nil || (link != "inotify" && link != "anon_inode:inotify") {
continue
}
instances++
fdinfoPath := filepath.Join("/proc", pid, "fdinfo", fd.Name())
content, err := os.ReadFile(fdinfoPath)
if err != nil {
return 0, 0, fmt.Errorf("failed to read %s: %w", fdinfoPath, err)
}
lines := strings.SplitSeq(string(content), "\n")
for line := range lines {
if strings.HasPrefix(line, "inotify") {
watches++
}
}
}
}
return watches, instances, nil
}
func GetInotifyUsageFromProc() (InotifyUsage, error) {
usage := InotifyUsage{}
var err error
usage.MaxWatches, err = readProcFile("sys/fs/inotify/max_user_watches")
if err != nil {
return usage, fmt.Errorf("failed to read max_user_watches: %w", err)
}
usage.MaxInstances, err = readProcFile("sys/fs/inotify/max_user_instances")
if err != nil {
return usage, fmt.Errorf("failed to read max_user_instances: %w", err)
}
dirs, err := os.ReadDir("/proc")
if err != nil {
return usage, fmt.Errorf("failed to read /proc: %w", err)
}
totalWatches := 0
totalInstances := 0
for _, dir := range dirs {
if dir.IsDir() {
pid := dir.Name()
if _, err := strconv.Atoi(pid); err == nil {
watches, instances, err := countInotifyFDs(pid)
if err != nil {
continue
}
totalWatches += watches
totalInstances += instances
}
}
}
usage.Watches = totalWatches
usage.Instances = totalInstances
return usage, nil
}
func readProcFile(filename string) (int, error) {
filePath := filepath.Join("/proc", filename)
content, err := os.ReadFile(filePath)
if err != nil {
return 0, err
}
i, err := strconv.Atoi(strings.TrimSpace(string(content)))
if err != nil {
return 0, fmt.Errorf("failed to parse %s: %w", filename, err)
}
return i, nil
}
func (iw *InotifyWatcher) printStats() {
t := time.Now()
usage, err := GetInotifyUsageFromProc()
if err != nil {
iw.log.Error().Err(err).Msg("failed to get inotify usage")
return
}
d := time.Since(t)
iw.log.Info().
Str("watches", fmt.Sprintf("%d/%d (%.2f%%)", usage.Watches, usage.MaxWatches, float64(usage.Watches)/float64(usage.MaxWatches)*100)).
Str("instances", fmt.Sprintf("%d/%d (%.2f%%)", usage.Instances, usage.MaxInstances, float64(usage.Instances)/float64(usage.MaxInstances)*100)).
Str("duration", d.String()).
Msg("Inotify usage stats")
}

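printStats compares the counted watches and instances against the kernel-wide limits read from /proc. Those limits can be inspected in isolation with nothing but the standard library; a Linux-only sketch reading the same files readProcFile composes:

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// readLimit reads a single integer from a /proc file, returning -1 on error.
func readLimit(path string) int {
	b, err := os.ReadFile(path)
	if err != nil {
		return -1
	}
	n, err := strconv.Atoi(strings.TrimSpace(string(b)))
	if err != nil {
		return -1
	}
	return n
}

func main() {
	fmt.Println("max_user_watches:", readLimit("/proc/sys/fs/inotify/max_user_watches"))
	fmt.Println("max_user_instances:", readLimit("/proc/sys/fs/inotify/max_user_instances"))
}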
View File

@@ -7,6 +7,7 @@ package tree
import (
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options"
"github.com/rs/zerolog"
)
@@ -17,6 +18,6 @@ type NullWatcher struct{}
func (*NullWatcher) Watch(path string) {}
// NewInotifyWatcher returns an error because the inotify watcher is not supported on this platform
func NewInotifyWatcher(tree *Tree, log *zerolog.Logger) (*NullWatcher, error) {
func NewInotifyWatcher(_ *Tree, _ *options.Options, _ *zerolog.Logger) (*NullWatcher, error) {
return nil, errtypes.NotSupported("inotify watcher is not supported on this platform")
}

View File

@@ -129,7 +129,7 @@ func New(lu node.PathLookup, bs node.Blobstore, um usermapper.Mapper, trashbin *
return nil, err
}
default:
t.watcher, err = NewInotifyWatcher(t, log)
t.watcher, err = NewInotifyWatcher(t, o, log)
if err != nil {
return nil, err
}
@@ -299,9 +299,11 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node)
return errors.Wrap(err, "Decomposedfs: Move: error deleting target node "+newNode.ID)
}
}
// we are moving the node to a new parent, any target has been removed
// bring old node to the new parent
oldParent := oldNode.ParentPath()
newParent := newNode.ParentPath()
if newNode.ID == "" {
newNode.ID = oldNode.ID
}
// update target parentid and name
attribs := node.Attributes{}
@@ -311,29 +313,25 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node)
return errors.Wrap(err, "Decomposedfs: could not update old node attributes")
}
// rename node
err = os.Rename(
filepath.Join(oldNode.ParentPath(), oldNode.Name),
filepath.Join(newNode.ParentPath(), newNode.Name),
)
if err != nil {
return errors.Wrap(err, "Decomposedfs: could not move child")
}
// update the id cache
if newNode.ID == "" {
newNode.ID = oldNode.ID
}
// invalidate old tree
err = t.lookup.IDCache.DeleteByPath(ctx, filepath.Join(oldNode.ParentPath(), oldNode.Name))
if err != nil {
return err
}
if err := t.lookup.CacheID(ctx, newNode.SpaceID, newNode.ID, filepath.Join(newNode.ParentPath(), newNode.Name)); err != nil {
t.log.Error().Err(err).Str("spaceID", newNode.SpaceID).Str("id", newNode.ID).Str("path", filepath.Join(newNode.ParentPath(), newNode.Name)).Msg("could not cache id")
}
// rename node
err = os.Rename(
filepath.Join(oldParent, oldNode.Name),
filepath.Join(newParent, newNode.Name),
)
if err != nil {
return errors.Wrap(err, "Decomposedfs: could not move child")
}
// rename the lock (if it exists)
if _, err := os.Stat(lockFilePath); err == nil {
err = os.Rename(lockFilePath, newNode.LockFilePath())

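The reordered Move above pins the node id, writes the updated attributes, swaps the id-cache entry and only then renames on disk. A compressed, self-contained sketch of that sequence, with a plain map standing in for lookup.IDCache (ids and names are made up):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "move-sketch")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	oldPath := filepath.Join(dir, "old-name")
	newPath := filepath.Join(dir, "new-name")
	if err := os.WriteFile(oldPath, []byte("content"), 0600); err != nil {
		panic(err)
	}

	idCache := map[string]string{"node-42": oldPath} // stand-in for lookup.IDCache

	// 1. invalidate the old tree entry and cache the new location
	//    (mirrors IDCache.DeleteByPath followed by CacheID) ...
	delete(idCache, "node-42")
	idCache["node-42"] = newPath

	// 2. ... then perform the actual rename on disk.
	if err := os.Rename(oldPath, newPath); err != nil {
		fmt.Println("could not move child:", err)
		return
	}
	fmt.Println("node-42 now resolves to", idCache["node-42"])
}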
View File

@@ -45,13 +45,14 @@ func NewHybridBackend(offloadLimit int, metadataPathFunc MetadataPathFunc, o cac
func (HybridBackend) Name() string { return "hybrid" }
// IdentifyPath returns the space id, node id, parent id and mtime of a file
func (b HybridBackend) IdentifyPath(_ context.Context, path string) (string, string, time.Time, error) {
func (b HybridBackend) IdentifyPath(_ context.Context, path string) (string, string, string, time.Time, error) {
spaceID, _ := xattr.Get(path, prefixes.SpaceIDAttr)
id, _ := xattr.Get(path, prefixes.IDAttr)
parentID, _ := xattr.Get(path, prefixes.ParentidAttr)
mtimeAttr, _ := xattr.Get(path, prefixes.MTimeAttr)
mtime, _ := time.Parse(time.RFC3339Nano, string(mtimeAttr))
return string(spaceID), string(id), mtime, nil
return string(spaceID), string(id), string(parentID), mtime, nil
}
// Get an extended attribute value for the given key

View File

@@ -54,33 +54,34 @@ func NewMessagePackBackend(o cache.Config) MessagePackBackend {
func (MessagePackBackend) Name() string { return "messagepack" }
// IdentifyPath returns the space id, node id, parent id and mtime of a file
func (b MessagePackBackend) IdentifyPath(_ context.Context, path string) (string, string, time.Time, error) {
func (b MessagePackBackend) IdentifyPath(_ context.Context, path string) (string, string, string, time.Time, error) {
metaPath := filepath.Clean(path + ".mpk")
source, err := os.Open(metaPath)
// No cached entry found. Read from storage and store in cache
if err != nil {
return "", "", time.Time{}, err
return "", "", "", time.Time{}, err
}
msgBytes, err := io.ReadAll(source)
if err != nil || len(msgBytes) == 0 {
return "", "", time.Time{}, err
return "", "", "", time.Time{}, err
}
attribs := map[string][]byte{}
err = msgpack.Unmarshal(msgBytes, &attribs)
if err != nil {
return "", "", time.Time{}, err
return "", "", "", time.Time{}, err
}
spaceID := attribs[prefixes.SpaceIDAttr]
id := attribs[prefixes.IDAttr]
parentID := attribs[prefixes.ParentidAttr]
mtimeAttr := attribs[prefixes.MTimeAttr]
mtime, err := time.Parse(time.RFC3339Nano, string(mtimeAttr))
if err != nil {
return "", "", time.Time{}, err
return "", "", "", time.Time{}, err
}
return string(spaceID), string(id), mtime, nil
return string(spaceID), string(id), string(parentID), mtime, nil
}
// All reads all extended attributes for a node

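The messagepack backend's IdentifyPath boils down to: read the <path>.mpk sidecar, unmarshal one map, pick three keys. A self-contained round trip of that map encoding — the msgpack import path and the attribute keys here are assumptions for illustration; the real keys come from the prefixes package:

package main

import (
	"fmt"

	// Assumption: any msgpack library with Marshal/Unmarshal helpers works
	// here; the diff only shows the msgpack.Unmarshal call, not the import.
	"github.com/vmihailenco/msgpack/v5"
)

func main() {
	attribs := map[string][]byte{
		"user.oc.space.id": []byte("space-1"), // illustrative keys; the real
		"user.oc.id":       []byte("node-42"), // ones are prefixes.SpaceIDAttr,
		"user.oc.parentid": []byte("node-7"),  // prefixes.IDAttr, prefixes.ParentidAttr
	}
	blob, err := msgpack.Marshal(attribs)
	if err != nil {
		panic(err)
	}

	decoded := map[string][]byte{}
	if err := msgpack.Unmarshal(blob, &decoded); err != nil {
		panic(err)
	}
	fmt.Printf("space=%s id=%s parent=%s\n",
		decoded["user.oc.space.id"], decoded["user.oc.id"], decoded["user.oc.parentid"])
}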
View File

@@ -47,7 +47,7 @@ type MetadataNode interface {
// Backend defines the interface for file attribute backends
type Backend interface {
Name() string
IdentifyPath(ctx context.Context, path string) (string, string, time.Time, error)
IdentifyPath(ctx context.Context, path string) (string, string, string, time.Time, error)
All(ctx context.Context, n MetadataNode) (map[string][]byte, error)
AllWithLockedSource(ctx context.Context, n MetadataNode, source io.Reader) (map[string][]byte, error)
@@ -74,8 +74,8 @@ type NullBackend struct{}
func (NullBackend) Name() string { return "null" }
// IdentifyPath returns the ids and mtime of a file
func (NullBackend) IdentifyPath(ctx context.Context, path string) (string, string, time.Time, error) {
return "", "", time.Time{}, errUnconfiguredError
func (NullBackend) IdentifyPath(ctx context.Context, path string) (string, string, string, time.Time, error) {
return "", "", "", time.Time{}, errUnconfiguredError
}
// All reads all extended attributes for a node

View File

@@ -51,13 +51,14 @@ func NewXattrsBackend(o cache.Config) XattrsBackend {
func (XattrsBackend) Name() string { return "xattrs" }
// IdentifyPath returns the space id, node id, parent id and mtime of a file
func (b XattrsBackend) IdentifyPath(_ context.Context, path string) (string, string, time.Time, error) {
func (b XattrsBackend) IdentifyPath(_ context.Context, path string) (string, string, string, time.Time, error) {
spaceID, _ := xattr.Get(path, prefixes.SpaceIDAttr)
id, _ := xattr.Get(path, prefixes.IDAttr)
parentID, _ := xattr.Get(path, prefixes.ParentidAttr)
mtimeAttr, _ := xattr.Get(path, prefixes.MTimeAttr)
mtime, _ := time.Parse(time.RFC3339Nano, string(mtimeAttr))
return string(spaceID), string(id), mtime, nil
return string(spaceID), string(id), string(parentID), mtime, nil
}
// Get an extended attribute value for the given key

View File

@@ -178,7 +178,9 @@ type BaseNode struct {
SpaceID string
ID string
lu PathLookup
lu PathLookup
internalPathID string
internalPath string
}
func NewBaseNode(spaceID, nodeID string, lu PathLookup) *BaseNode {
@@ -194,7 +196,13 @@ func (n *BaseNode) GetID() string { return n.ID }
// InternalPath returns the internal path of the Node
func (n *BaseNode) InternalPath() string {
return n.lu.InternalPath(n.SpaceID, n.ID)
if len(n.internalPath) > 0 && n.ID == n.internalPathID {
return n.internalPath
}
n.internalPath = n.lu.InternalPath(n.SpaceID, n.ID)
n.internalPathID = n.ID
return n.internalPath
}
// Node represents a node in the tree and provides methods to get a Parent or Child instance
@@ -209,6 +217,7 @@ type Node struct {
SpaceRoot *Node
xattrsCache map[string][]byte
disabled *bool
nodeType *provider.ResourceType
}
@@ -912,7 +921,7 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi
ri.Opaque = utils.AppendPlainToOpaque(ri.Opaque, "scantime", date.Format(time.RFC3339Nano))
}
sublog.Debug().
sublog.Trace().
Interface("ri", ri).
Msg("AsResourceInfo")
@@ -978,7 +987,7 @@ func (n *Node) readQuotaIntoOpaque(ctx context.Context, ri *provider.ResourceInf
appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("nodepath", n.InternalPath()).Str("quota", v).Msg("malformed quota")
}
case metadata.IsAttrUnset(err):
appctx.GetLogger(ctx).Debug().Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("nodepath", n.InternalPath()).Msg("quota not set")
appctx.GetLogger(ctx).Trace().Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("nodepath", n.InternalPath()).Msg("quota not set")
default:
appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("nodepath", n.InternalPath()).Msg("could not read quota")
}
@@ -996,10 +1005,17 @@ func (n *Node) HasPropagation(ctx context.Context) (propagation bool) {
// only used to check if a space is disabled
// FIXME confusing with the trash logic
func (n *Node) IsDisabled(ctx context.Context) bool {
if _, err := n.GetDTime(ctx); err == nil {
return true
if n.disabled != nil {
return *n.disabled
}
return false
if _, err := n.GetDTime(ctx); err == nil {
v := true
n.disabled = &v
} else {
v := false
n.disabled = &v
}
return *n.disabled
}
// GetTreeSize reads the treesize from the extended attributes
@@ -1116,7 +1132,7 @@ func (n *Node) ReadUserPermissions(ctx context.Context, u *userpb.User) (ap *pro
}
}
appctx.GetLogger(ctx).Debug().Interface("permissions", ap).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Interface("user", u).Msg("returning aggregated permissions")
appctx.GetLogger(ctx).Trace().Interface("permissions", ap).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Interface("user", u).Msg("returning aggregated permissions")
return ap, false, nil
}

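InternalPath above memoizes the computed path keyed by the node id: repeated calls skip the lookup, while a changed id invalidates the cached value automatically. A toy reconstruction of just that caching behaviour (the path format is invented; the real one comes from lu.InternalPath):

package main

import "fmt"

type baseNode struct {
	SpaceID        string
	ID             string
	internalPathID string
	internalPath   string
}

// InternalPath caches its result together with the id it was computed for.
func (n *baseNode) InternalPath() string {
	if len(n.internalPath) > 0 && n.ID == n.internalPathID {
		return n.internalPath // cache hit: id unchanged since last computation
	}
	n.internalPath = "/spaces/" + n.SpaceID + "/nodes/" + n.ID // stand-in for lu.InternalPath
	n.internalPathID = n.ID
	return n.internalPath
}

func main() {
	n := &baseNode{SpaceID: "space-1", ID: "node-42"}
	fmt.Println(n.InternalPath()) // computed
	fmt.Println(n.InternalPath()) // served from the cache
	n.ID = "node-43"
	fmt.Println(n.InternalPath()) // recomputed because the id changed
}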
View File

@@ -73,8 +73,12 @@ func (fs *Decomposedfs) Upload(ctx context.Context, req storage.UploadRequest, u
if err != nil {
return &provider.ResourceInfo{}, errors.Wrap(err, "Decomposedfs: error opening assembled file")
}
defer fd.Close()
defer os.RemoveAll(assembledFile)
defer func() {
_ = fd.Close()
}()
defer func() {
_ = os.RemoveAll(assembledFile)
}()
req.Body = fd
size, err := session.WriteChunk(ctx, 0, req.Body)
@@ -347,6 +351,7 @@ func (fs *Decomposedfs) UseIn(composer *tusd.StoreComposer) {
composer.UseTerminater(fs)
composer.UseConcater(fs)
composer.UseLengthDeferrer(fs)
composer.UseContentServer(fs)
}
// To implement the core tus.io protocol as specified in https://tus.io/protocols/resumable-upload.html#core-protocol
@@ -354,10 +359,16 @@ func (fs *Decomposedfs) UseIn(composer *tusd.StoreComposer) {
// - the upload needs to implement the tusd.Upload interface: WriteChunk, GetInfo, GetReader and FinishUpload
// NewUpload returns a new tus Upload instance
func (fs *Decomposedfs) NewUpload(ctx context.Context, info tusd.FileInfo) (tusd.Upload, error) {
func (fs *Decomposedfs) NewUpload(_ context.Context, _ tusd.FileInfo) (tusd.Upload, error) {
return nil, fmt.Errorf("not implemented, use InitiateUpload on the CS3 API to start a new upload")
}
// AsServableUpload returns a ServableUpload
// which implements the tusd.ServableUpload interface
func (fs *Decomposedfs) AsServableUpload(u tusd.Upload) tusd.ServableUpload {
return u.(*upload.DecomposedFsSession)
}
// GetUpload returns the Upload for the given upload id
func (fs *Decomposedfs) GetUpload(ctx context.Context, id string) (tusd.Upload, error) {
var ul tusd.Upload

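AsServableUpload works because this store only ever hands tusd *upload.DecomposedFsSession values, so the type assertion cannot fail at runtime. A stripped-down sketch of the same delegation pattern; the interfaces below are simplified stand-ins, not the tusd definitions:

package main

import "fmt"

// Simplified stand-ins for the tusd interfaces used in the diff.
type Upload interface{ ID() string }

type ServableUpload interface {
	Upload
	ServeContent() string // stands in for the range-aware HTTP serving hook
}

// session plays the role of *upload.DecomposedFsSession.
type session struct{ id string }

func (s *session) ID() string           { return s.id }
func (s *session) ServeContent() string { return "bytes of " + s.id }

// AsServableUpload mirrors the assertion in the diff: every Upload this store
// hands out is a *session, so the cast is safe by construction.
func AsServableUpload(u Upload) ServableUpload {
	return u.(*session)
}

func main() {
	var u Upload = &session{id: "upload-1"}
	fmt.Println(AsServableUpload(u).ServeContent())
}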
View File

@@ -32,6 +32,7 @@ import (
userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
typespb "github.com/cs3org/go-cs3apis/cs3/types/v1beta1"
"github.com/opencloud-eu/reva/v2/pkg/appctx"
ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
@@ -46,49 +47,49 @@ type DecomposedFsSession struct {
}
// Context returns a context with the user, logger and lockid used when initiating the upload session
func (s *DecomposedFsSession) Context(ctx context.Context) context.Context { // restore logger from file info
sub := s.store.log.With().Int("pid", os.Getpid()).Logger()
func (session *DecomposedFsSession) Context(ctx context.Context) context.Context { // restore logger from file info
sub := session.store.log.With().Int("pid", os.Getpid()).Logger()
ctx = appctx.WithLogger(ctx, &sub)
ctx = ctxpkg.ContextSetLockID(ctx, s.lockID())
ctx = ctxpkg.ContextSetUser(ctx, s.executantUser())
return ctxpkg.ContextSetInitiator(ctx, s.InitiatorID())
ctx = ctxpkg.ContextSetLockID(ctx, session.lockID())
ctx = ctxpkg.ContextSetUser(ctx, session.executantUser())
return ctxpkg.ContextSetInitiator(ctx, session.InitiatorID())
}
func (s *DecomposedFsSession) lockID() string {
return s.info.MetaData["lockid"]
func (session *DecomposedFsSession) lockID() string {
return session.info.MetaData["lockid"]
}
func (s *DecomposedFsSession) executantUser() *userpb.User {
func (session *DecomposedFsSession) executantUser() *userpb.User {
var o *typespb.Opaque
_ = json.Unmarshal([]byte(s.info.Storage["UserOpaque"]), &o)
_ = json.Unmarshal([]byte(session.info.Storage["UserOpaque"]), &o)
return &userpb.User{
Id: &userpb.UserId{
Type: userpb.UserType(userpb.UserType_value[s.info.Storage["UserType"]]),
Idp: s.info.Storage["Idp"],
OpaqueId: s.info.Storage["UserId"],
Type: userpb.UserType(userpb.UserType_value[session.info.Storage["UserType"]]),
Idp: session.info.Storage["Idp"],
OpaqueId: session.info.Storage["UserId"],
},
Username: s.info.Storage["UserName"],
DisplayName: s.info.Storage["UserDisplayName"],
Username: session.info.Storage["UserName"],
DisplayName: session.info.Storage["UserDisplayName"],
Opaque: o,
}
}
// Purge deletes the upload session metadata and written binary data
func (s *DecomposedFsSession) Purge(ctx context.Context) error {
func (session *DecomposedFsSession) Purge(ctx context.Context) error {
_, span := tracer.Start(ctx, "Purge")
defer span.End()
sessionPath := sessionPath(s.store.root, s.info.ID)
sessionPath := sessionPath(session.store.root, session.info.ID)
if err := os.Remove(sessionPath); err != nil {
return err
}
if err := os.Remove(s.binPath()); err != nil {
if err := os.Remove(session.binPath()); err != nil {
return err
}
return nil
}
// TouchBin creates a file to contain the binary data. Its size will be used to keep track of the tus upload offset.
func (s *DecomposedFsSession) TouchBin() error {
file, err := os.OpenFile(s.binPath(), os.O_CREATE|os.O_WRONLY, defaultFilePerm)
func (session *DecomposedFsSession) TouchBin() error {
file, err := os.OpenFile(session.binPath(), os.O_CREATE|os.O_WRONLY, defaultFilePerm)
if err != nil {
return err
}
@@ -98,17 +99,17 @@ func (s *DecomposedFsSession) TouchBin() error {
// Persist writes the upload session metadata to disk
// events can update the scan outcome and the finished event might read an empty file because of race conditions
// so we need to lock the file while writing and use atomic writes
func (s *DecomposedFsSession) Persist(ctx context.Context) error {
func (session *DecomposedFsSession) Persist(ctx context.Context) error {
_, span := tracer.Start(ctx, "Persist")
defer span.End()
sessionPath := sessionPath(s.store.root, s.info.ID)
sessionPath := sessionPath(session.store.root, session.info.ID)
// create folder structure (if needed)
if err := os.MkdirAll(filepath.Dir(sessionPath), 0700); err != nil {
return err
}
var d []byte
d, err := json.Marshal(s.info)
d, err := json.Marshal(session.info)
if err != nil {
return err
}
@@ -116,28 +117,28 @@ func (s *DecomposedFsSession) Persist(ctx context.Context) error {
}
// ToFileInfo returns tus compatible FileInfo so the tus handler can access the upload offset
func (s *DecomposedFsSession) ToFileInfo() tusd.FileInfo {
return s.info
func (session *DecomposedFsSession) ToFileInfo() tusd.FileInfo {
return session.info
}
// ProviderID returns the provider id
func (s *DecomposedFsSession) ProviderID() string {
return s.info.MetaData["providerID"]
func (session *DecomposedFsSession) ProviderID() string {
return session.info.MetaData["providerID"]
}
// SpaceID returns the space id
func (s *DecomposedFsSession) SpaceID() string {
return s.info.Storage["SpaceRoot"]
func (session *DecomposedFsSession) SpaceID() string {
return session.info.Storage["SpaceRoot"]
}
// NodeID returns the node id
func (s *DecomposedFsSession) NodeID() string {
return s.info.Storage["NodeId"]
func (session *DecomposedFsSession) NodeID() string {
return session.info.Storage["NodeId"]
}
// NodeParentID returns the node's parent id
func (s *DecomposedFsSession) NodeParentID() string {
return s.info.Storage["NodeParentId"]
func (session *DecomposedFsSession) NodeParentID() string {
return session.info.Storage["NodeParentId"]
}
// NodeExists returns whether or not the node existed during InitiateUpload.
@@ -148,63 +149,63 @@ func (s *DecomposedFsSession) NodeParentID() string {
// A node should be created as part of InitiateUpload. When listing a directory
// we can decide if we want to skip the entry, or expose upload progress
// information. But that is a bigger change and might involve client work.
func (s *DecomposedFsSession) NodeExists() bool {
return s.info.Storage["NodeExists"] == "true"
func (session *DecomposedFsSession) NodeExists() bool {
return session.info.Storage["NodeExists"] == "true"
}
// HeaderIfMatch returns the if-match header for the upload session
func (s *DecomposedFsSession) HeaderIfMatch() string {
return s.info.MetaData["if-match"]
func (session *DecomposedFsSession) HeaderIfMatch() string {
return session.info.MetaData["if-match"]
}
// HeaderIfNoneMatch returns the if-none-match header for the upload session
func (s *DecomposedFsSession) HeaderIfNoneMatch() string {
return s.info.MetaData["if-none-match"]
func (session *DecomposedFsSession) HeaderIfNoneMatch() string {
return session.info.MetaData["if-none-match"]
}
// HeaderIfUnmodifiedSince returns the if-unmodified-since header for the upload session
func (s *DecomposedFsSession) HeaderIfUnmodifiedSince() string {
return s.info.MetaData["if-unmodified-since"]
func (session *DecomposedFsSession) HeaderIfUnmodifiedSince() string {
return session.info.MetaData["if-unmodified-since"]
}
// Node returns the node for the session
func (s *DecomposedFsSession) Node(ctx context.Context) (*node.Node, error) {
return node.ReadNode(ctx, s.store.lu, s.SpaceID(), s.info.Storage["NodeId"], false, nil, true)
func (session *DecomposedFsSession) Node(ctx context.Context) (*node.Node, error) {
return node.ReadNode(ctx, session.store.lu, session.SpaceID(), session.info.Storage["NodeId"], false, nil, true)
}
// ID returns the upload session id
func (s *DecomposedFsSession) ID() string {
return s.info.ID
func (session *DecomposedFsSession) ID() string {
return session.info.ID
}
// Filename returns the name of the node, which is not the same as the name of the file being uploaded for legacy chunked uploads
func (s *DecomposedFsSession) Filename() string {
return s.info.Storage["NodeName"]
func (session *DecomposedFsSession) Filename() string {
return session.info.Storage["NodeName"]
}
// Chunk returns the chunk name when a legacy chunked upload was started
func (s *DecomposedFsSession) Chunk() string {
return s.info.Storage["Chunk"]
func (session *DecomposedFsSession) Chunk() string {
return session.info.Storage["Chunk"]
}
// SetMetadata is used to fill the upload metadata that will be exposed to the end user
func (s *DecomposedFsSession) SetMetadata(key, value string) {
s.info.MetaData[key] = value
func (session *DecomposedFsSession) SetMetadata(key, value string) {
session.info.MetaData[key] = value
}
// SetStorageValue is used to set metadata only relevant for the upload session implementation
func (s *DecomposedFsSession) SetStorageValue(key, value string) {
s.info.Storage[key] = value
func (session *DecomposedFsSession) SetStorageValue(key, value string) {
session.info.Storage[key] = value
}
// SetSize will set the upload size of the underlying tus info.
func (s *DecomposedFsSession) SetSize(size int64) {
s.info.Size = size
func (session *DecomposedFsSession) SetSize(size int64) {
session.info.Size = size
}
// SetSizeIsDeferred is used to change the SizeIsDeferred property of the underlying tus info.
func (s *DecomposedFsSession) SetSizeIsDeferred(value bool) {
s.info.SizeIsDeferred = value
func (session *DecomposedFsSession) SetSizeIsDeferred(value bool) {
session.info.SizeIsDeferred = value
}
// Dir returns the directory to which the upload is made
@@ -227,115 +228,115 @@ func (s *DecomposedFsSession) SetSizeIsDeferred(value bool) {
//
// I think we can safely determine the path later, right before emitting the
// event. And maybe make it configurable, because only audit needs it, anyway.
func (s *DecomposedFsSession) Dir() string {
return s.info.Storage["Dir"]
func (session *DecomposedFsSession) Dir() string {
return session.info.Storage["Dir"]
}
// Size returns the upload size
func (s *DecomposedFsSession) Size() int64 {
return s.info.Size
func (session *DecomposedFsSession) Size() int64 {
return session.info.Size
}
// SizeDiff returns the size diff that was calculated after postprocessing
func (s *DecomposedFsSession) SizeDiff() int64 {
sizeDiff, _ := strconv.ParseInt(s.info.MetaData["sizeDiff"], 10, 64)
func (session *DecomposedFsSession) SizeDiff() int64 {
sizeDiff, _ := strconv.ParseInt(session.info.MetaData["sizeDiff"], 10, 64)
return sizeDiff
}
// Reference returns a reference that can be used to access the uploaded resource
func (s *DecomposedFsSession) Reference() provider.Reference {
func (session *DecomposedFsSession) Reference() provider.Reference {
return provider.Reference{
ResourceId: &provider.ResourceId{
StorageId: s.info.MetaData["providerID"],
SpaceId: s.info.Storage["SpaceRoot"],
OpaqueId: s.info.Storage["NodeId"],
StorageId: session.info.MetaData["providerID"],
SpaceId: session.info.Storage["SpaceRoot"],
OpaqueId: session.info.Storage["NodeId"],
},
// Path is not used
}
}
// Executant returns the id of the user that initiated the upload session
func (s *DecomposedFsSession) Executant() userpb.UserId {
func (session *DecomposedFsSession) Executant() userpb.UserId {
return userpb.UserId{
Type: userpb.UserType(userpb.UserType_value[s.info.Storage["UserType"]]),
Idp: s.info.Storage["Idp"],
OpaqueId: s.info.Storage["UserId"],
Type: userpb.UserType(userpb.UserType_value[session.info.Storage["UserType"]]),
Idp: session.info.Storage["Idp"],
OpaqueId: session.info.Storage["UserId"],
}
}
// SetExecutant is used to remember the user that initiated the upload session
func (s *DecomposedFsSession) SetExecutant(u *userpb.User) {
s.info.Storage["Idp"] = u.GetId().GetIdp()
s.info.Storage["UserId"] = u.GetId().GetOpaqueId()
s.info.Storage["UserType"] = utils.UserTypeToString(u.GetId().Type)
s.info.Storage["UserName"] = u.GetUsername()
s.info.Storage["UserDisplayName"] = u.GetDisplayName()
func (session *DecomposedFsSession) SetExecutant(u *userpb.User) {
session.info.Storage["Idp"] = u.GetId().GetIdp()
session.info.Storage["UserId"] = u.GetId().GetOpaqueId()
session.info.Storage["UserType"] = utils.UserTypeToString(u.GetId().Type)
session.info.Storage["UserName"] = u.GetUsername()
session.info.Storage["UserDisplayName"] = u.GetDisplayName()
b, _ := json.Marshal(u.GetOpaque())
s.info.Storage["UserOpaque"] = string(b)
session.info.Storage["UserOpaque"] = string(b)
}
// Offset returns the current upload offset
func (s *DecomposedFsSession) Offset() int64 {
return s.info.Offset
func (session *DecomposedFsSession) Offset() int64 {
return session.info.Offset
}
// SpaceOwner returns the id of the space owner
func (s *DecomposedFsSession) SpaceOwner() *userpb.UserId {
func (session *DecomposedFsSession) SpaceOwner() *userpb.UserId {
return &userpb.UserId{
// idp and type do not seem to be consumed and the node currently only stores the user id anyway
OpaqueId: s.info.Storage["SpaceOwnerOrManager"],
OpaqueId: session.info.Storage["SpaceOwnerOrManager"],
}
}
// Expires returns the time the upload session expires
func (s *DecomposedFsSession) Expires() time.Time {
func (session *DecomposedFsSession) Expires() time.Time {
var t time.Time
if value, ok := s.info.MetaData["expires"]; ok {
if value, ok := session.info.MetaData["expires"]; ok {
t, _ = utils.MTimeToTime(value)
}
return t
}
// MTime returns the mtime to use for the uploaded file
func (s *DecomposedFsSession) MTime() time.Time {
func (session *DecomposedFsSession) MTime() time.Time {
var t time.Time
if value, ok := s.info.MetaData["mtime"]; ok {
if value, ok := session.info.MetaData["mtime"]; ok {
t, _ = utils.MTimeToTime(value)
}
return t
}
// IsProcessing returns true if all bytes have been received. The session then has entered postprocessing state.
func (s *DecomposedFsSession) IsProcessing() bool {
func (session *DecomposedFsSession) IsProcessing() bool {
// We might need a more sophisticated way to determine processing status soon
return s.info.Size == s.info.Offset && s.info.MetaData["scanResult"] == ""
return session.info.Size == session.info.Offset && session.info.MetaData["scanResult"] == ""
}
// binPath returns the path to the file storing the binary data.
func (s *DecomposedFsSession) binPath() string {
return filepath.Join(s.store.root, "uploads", s.info.ID)
func (session *DecomposedFsSession) binPath() string {
return filepath.Join(session.store.root, "uploads", session.info.ID)
}
// InitiatorID returns the id of the initiating client
func (s *DecomposedFsSession) InitiatorID() string {
return s.info.MetaData["initiatorid"]
func (session *DecomposedFsSession) InitiatorID() string {
return session.info.MetaData["initiatorid"]
}
// SetScanData sets virus scan data to the upload session
func (s *DecomposedFsSession) SetScanData(result string, date time.Time) {
s.info.MetaData["scanResult"] = result
s.info.MetaData["scanDate"] = date.Format(time.RFC3339)
func (session *DecomposedFsSession) SetScanData(result string, date time.Time) {
session.info.MetaData["scanResult"] = result
session.info.MetaData["scanDate"] = date.Format(time.RFC3339)
}
// ScanData returns the virus scan data
func (s *DecomposedFsSession) ScanData() (string, time.Time) {
date := s.info.MetaData["scanDate"]
func (session *DecomposedFsSession) ScanData() (string, time.Time) {
date := session.info.MetaData["scanDate"]
if date == "" {
return "", time.Time{}
}
d, _ := time.Parse(time.RFC3339, date)
return s.info.MetaData["scanResult"], d
return session.info.MetaData["scanResult"], d
}
// sessionPath returns the path to the .info file storing the file's info.

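SetScanData and ScanData round-trip the virus-scan outcome through the session metadata as strings, and a missing scan date deliberately maps to the zero time. A compressed sketch of that round trip, with a plain map in place of the tus FileInfo metadata:

package main

import (
	"fmt"
	"time"
)

func main() {
	meta := map[string]string{} // stand-in for session.info.MetaData

	// SetScanData equivalent: store result and RFC3339-formatted date.
	meta["scanResult"] = "clean"
	meta["scanDate"] = time.Now().UTC().Format(time.RFC3339)

	// ScanData equivalent: an empty date yields the zero time.
	date := meta["scanDate"]
	if date == "" {
		fmt.Println("", time.Time{})
		return
	}
	d, _ := time.Parse(time.RFC3339, date)
	fmt.Println(meta["scanResult"], d.Format(time.RFC3339))
}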
View File

@@ -44,15 +44,15 @@ import (
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/opencloud-eu/reva/v2/pkg/events"
"github.com/opencloud-eu/reva/v2/pkg/rhttp/datatx/metrics"
"github.com/opencloud-eu/reva/v2/pkg/rhttp/datatx/utils/download"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/utils"
)
var (
tracer trace.Tracer
ErrAlreadyExists = tusd.NewError("ERR_ALREADY_EXISTS", "file already exists", http.StatusConflict)
defaultFilePerm = os.FileMode(0664)
tracer trace.Tracer
defaultFilePerm = os.FileMode(0664)
)
func init() {
@@ -60,7 +60,7 @@ func init() {
}
// WriteChunk writes the stream from the reader to the given offset of the upload
func (session *DecomposedFsSession) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
func (session *DecomposedFsSession) WriteChunk(ctx context.Context, _ int64, src io.Reader) (int64, error) {
ctx, span := tracer.Start(session.Context(ctx), "WriteChunk")
defer span.End()
_, subspan := tracer.Start(ctx, "os.OpenFile")
@@ -69,7 +69,9 @@ func (session *DecomposedFsSession) WriteChunk(ctx context.Context, offset int64
if err != nil {
return 0, err
}
defer file.Close()
defer func() {
_ = file.Close()
}()
// calculate checksum here? needed for the TUS checksum extension. https://tus.io/protocols/resumable-upload.html#checksum
// TODO but how do we get the `Upload-Checksum`? WriteChunk() only has a context, offset and the reader ...
@@ -259,7 +261,9 @@ func (session *DecomposedFsSession) ConcatUploads(_ context.Context, uploads []t
if err != nil {
return err
}
defer file.Close()
defer func() {
_ = file.Close()
}()
for _, partialUpload := range uploads {
fileUpload := partialUpload.(*DecomposedFsSession)
@@ -268,7 +272,9 @@ func (session *DecomposedFsSession) ConcatUploads(_ context.Context, uploads []t
if err != nil {
return err
}
defer src.Close()
defer func() {
_ = src.Close()
}()
if _, err := io.Copy(file, src); err != nil {
return err
@@ -298,9 +304,9 @@ func (session *DecomposedFsSession) Finalize(ctx context.Context) (err error) {
}
func checkHash(expected string, h hash.Hash) error {
hash := hex.EncodeToString(h.Sum(nil))
if expected != hash {
return errtypes.ChecksumMismatch(fmt.Sprintf("invalid checksum: expected %s got %x", expected, hash))
shash := hex.EncodeToString(h.Sum(nil))
if expected != shash {
return errtypes.ChecksumMismatch(fmt.Sprintf("invalid checksum: expected %s got %s", expected, shash))
}
return nil
}
@@ -399,6 +405,57 @@ func (session *DecomposedFsSession) URL(_ context.Context) (string, error) {
return joinurl(session.store.tknopts.DataGatewayEndpoint, tkn), nil
}
// ServeContent serves the content of the upload and implements the content-serving hook needed by tusd;
// it is used by the tusd handler to serve the content of the upload and supports range requests
func (session *DecomposedFsSession) ServeContent(ctx context.Context, w http.ResponseWriter, req *http.Request) error {
_, span := tracer.Start(session.Context(ctx), "ServeContent")
defer span.End()
f, err := os.Open(session.binPath())
if err != nil {
return err
}
defer func() {
_ = f.Close()
}()
info, err := f.Stat()
if err != nil {
return err
}
var r io.Reader = f
if err := func() error {
if req.Header.Get("Range") == "" {
return nil
}
ranges, err := download.ParseRange(req.Header.Get("Range"), info.Size())
switch {
case len(ranges) == 0:
fallthrough
case errors.Is(err, download.ErrInvalidRange):
// ignore invalid range and return the whole file
return nil
case err != nil:
return err
}
r = io.NewSectionReader(f, ranges[0].Start, ranges[0].Length)
w.Header().Set("Content-Range", ranges[0].ContentRange(info.Size()))
w.WriteHeader(http.StatusPartialContent)
return nil
}(); err != nil {
return err
}
if _, err := io.Copy(w, r); err != nil {
return err
}
return nil
}
// replace with url.JoinPath after switching to go1.19
func joinurl(paths ...string) string {
var s strings.Builder

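ServeContent honours only the first parsed byte range and falls back to the whole file when the Range header is invalid; the partial read itself is just io.NewSectionReader over the opened file. A self-contained illustration of that slice-and-copy, with a fixed range standing in for the parsed header:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// Stand-in for the opened upload file; an io.ReaderAt is all
	// io.NewSectionReader needs.
	content := strings.NewReader("0123456789abcdef")

	// Equivalent of "Range: bytes=4-9": start 4, length 6.
	var r io.Reader = io.NewSectionReader(content, 4, 6)

	out := new(strings.Builder)
	if _, err := io.Copy(out, r); err != nil {
		panic(err)
	}
	fmt.Printf("206 Partial Content, Content-Range: bytes 4-9/16, body=%q\n", out.String())
}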
View File

@@ -34,7 +34,6 @@ const (
var (
reForwardedHost = regexp.MustCompile(`host="?([^;"]+)`)
reForwardedProto = regexp.MustCompile(`proto=(https?)`)
reMimeType = regexp.MustCompile(`^[a-z]+\/[a-z0-9\-\+\.]+$`)
// We only allow certain URL-safe characters in upload IDs. URL-safe here means
// that they are allowed in a URI's path component according to RFC 3986.
// See https://datatracker.ietf.org/doc/html/rfc3986#section-3.3
@@ -1104,7 +1103,10 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
}
handler.sendResp(c, resp)
io.Copy(w, src)
if _, err := io.Copy(w, src); err != nil {
handler.sendError(c, err)
return
}
src.Close()
}
@@ -1112,9 +1114,9 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
// mimeInlineBrowserWhitelist is a map containing MIME types which should be
// allowed to be rendered by browser inline, instead of being forced to be
// downloaded. For example, HTML or SVG files are not allowed, since they may
// contain malicious JavaScript. In a similiar fashion PDF is not on this list
// contain malicious JavaScript. In a similar fashion, PDF is not on this list
// as their parsers commonly contain vulnerabilities which can be exploited.
// The values of this map does not convey any meaning and are therefore just
// The values of this map do not convey any meaning and are therefore just
// empty structs.
var mimeInlineBrowserWhitelist = map[string]struct{}{
"text/plain": {},
@@ -1125,14 +1127,17 @@ var mimeInlineBrowserWhitelist = map[string]struct{}{
"image/bmp": {},
"image/webp": {},
"audio/wave": {},
"audio/wav": {},
"audio/x-wav": {},
"audio/x-pn-wav": {},
"audio/webm": {},
"video/webm": {},
"audio/ogg": {},
"video/ogg": {},
"audio/wave": {},
"audio/wav": {},
"audio/x-wav": {},
"audio/x-pn-wav": {},
"audio/webm": {},
"audio/ogg": {},
"video/mp4": {},
"video/webm": {},
"video/ogg": {},
"application/ogg": {},
}
@@ -1140,23 +1145,22 @@ var mimeInlineBrowserWhitelist = map[string]struct{}{
// Content-Disposition headers for a given upload. These values should be used
// in responses for GET requests to ensure that only non-malicious file types
// are shown directly in the browser. It will extract the file name and type
// from the "fileame" and "filetype".
// from the "filename" and "filetype".
// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition
func filterContentType(info FileInfo) (contentType string, contentDisposition string) {
filetype := info.MetaData["filetype"]
if reMimeType.MatchString(filetype) {
// If the filetype from metadata is well formed, we forward use this
// for the Content-Type header. However, only whitelisted mime types
// will be allowed to be shown inline in the browser
if ft, _, err := mime.ParseMediaType(filetype); err == nil {
// If the filetype from metadata is well-formed, we use it for the Content-Type header.
// However, only allowlisted mime types will be allowed to be shown inline in the browser
contentType = filetype
if _, isWhitelisted := mimeInlineBrowserWhitelist[filetype]; isWhitelisted {
if _, isWhitelisted := mimeInlineBrowserWhitelist[ft]; isWhitelisted {
contentDisposition = "inline"
} else {
contentDisposition = "attachment"
}
} else {
// If the filetype from the metadata is not well formed, we use a
// If the filetype from the metadata is not well-formed, we use a
// default type and force the browser to download the content.
contentType = "application/octet-stream"
contentDisposition = "attachment"
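With the reMimeType regexp dropped, the allowlist lookup is keyed on the parsed, normalised media type from mime.ParseMediaType rather than the raw header value. A small probe of how the parser treats typical inputs:

package main

import (
	"fmt"
	"mime"
)

func main() {
	for _, raw := range []string{
		"text/plain; charset=utf-8", // parameters are split off for the lookup
		"TEXT/HTML",                 // the media type is lower-cased
		"not a mime type",           // malformed -> error -> octet-stream fallback
	} {
		ft, params, err := mime.ParseMediaType(raw)
		fmt.Printf("%-30q -> type=%q params=%v err=%v\n", raw, ft, params, err)
	}
}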