mirror of
https://github.com/opencloud-eu/opencloud.git
synced 2026-05-02 00:44:53 -05:00
bump reva to 4eb591e
Signed-off-by: Jörn Friedrich Dreyer <jfd@butonic.de>
This commit is contained in:
+202
@@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
+20
@@ -0,0 +1,20 @@
|
||||
|
||||
## crc64nvme
|
||||
|
||||
This Golang package calculates CRC64 checksums using carryless-multiplication accelerated with SIMD instructions for both ARM and x86. It is based on the NVME polynomial as specified in the [NVM Express® NVM Command Set Specification](https://nvmexpress.org/wp-content/uploads/NVM-Express-NVM-Command-Set-Specification-1.0d-2023.12.28-Ratified.pdf).
|
||||
|
||||
The code is based on the [crc64fast-nvme](https://github.com/awesomized/crc64fast-nvme.git) package in Rust and is released under the Apache 2.0 license.
|
||||
|
||||
For more background on the exact technique used, see this [Fast CRC Computation for Generic Polynomials Using PCLMULQDQ Instruction](https://web.archive.org/web/20131224125630/https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf) paper.
|
||||
|
||||
### Performance
|
||||
|
||||
To follow.
|
||||
|
||||
### Requirements
|
||||
|
||||
All Go versions >= 1.22 are supported.
|
||||
|
||||
### Contributing
|
||||
|
||||
Contributions are welcome, please send PRs for any enhancements.
|
||||
+180
@@ -0,0 +1,180 @@
|
||||
// Copyright (c) 2025 Minio Inc. All rights reserved.
|
||||
// Use of this source code is governed by a license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Package crc64nvme implements the 64-bit cyclic redundancy check with NVME polynomial.
|
||||
package crc64nvme
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"hash"
|
||||
"sync"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
	// Size is the size of a CRC-64 checksum in bytes.
	Size = 8

	// NVME is the NVME polynomial (reversed, as used by Go).
	NVME = 0x9a6c9329ac4bc9b5
)

var (
	// nvmeTable is the precalculated single-byte lookup table for the NVME polynomial.
	nvmeTable = makeTable(NVME)
)

// table is a 256-word table representing the polynomial for efficient processing.
type table [256]uint64

var (
	// slicing8TablesBuildOnce guards the lazy, one-time construction of
	// slicing8TableNVME.
	slicing8TablesBuildOnce sync.Once
	// slicing8TableNVME holds the slicing-by-8 helper tables; nil until
	// buildSlicing8TablesOnce has run.
	slicing8TableNVME *[8]table
)
|
||||
|
||||
// buildSlicing8TablesOnce builds the slicing-by-8 tables exactly once, so the
// construction cost is only paid by code paths that actually need them.
func buildSlicing8TablesOnce() {
	slicing8TablesBuildOnce.Do(buildSlicing8Tables)
}
|
||||
|
||||
// buildSlicing8Tables constructs the slicing-by-8 helper tables for the NVME
// polynomial. Call it through buildSlicing8TablesOnce; it is not safe to run
// concurrently on its own.
func buildSlicing8Tables() {
	slicing8TableNVME = makeSlicingBy8Table(makeTable(NVME))
}
|
||||
|
||||
func makeTable(poly uint64) *table {
|
||||
t := new(table)
|
||||
for i := 0; i < 256; i++ {
|
||||
crc := uint64(i)
|
||||
for j := 0; j < 8; j++ {
|
||||
if crc&1 == 1 {
|
||||
crc = (crc >> 1) ^ poly
|
||||
} else {
|
||||
crc >>= 1
|
||||
}
|
||||
}
|
||||
t[i] = crc
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func makeSlicingBy8Table(t *table) *[8]table {
|
||||
var helperTable [8]table
|
||||
helperTable[0] = *t
|
||||
for i := 0; i < 256; i++ {
|
||||
crc := t[i]
|
||||
for j := 1; j < 8; j++ {
|
||||
crc = t[crc&0xff] ^ (crc >> 8)
|
||||
helperTable[j][i] = crc
|
||||
}
|
||||
}
|
||||
return &helperTable
|
||||
}
|
||||
|
||||
// digest represents the partial evaluation of a checksum.
type digest struct {
	// crc is the running CRC-64 value over all bytes written so far.
	crc uint64
}
|
||||
|
||||
// New creates a new hash.Hash64 computing the CRC-64 checksum using the
|
||||
// NVME polynomial. Its Sum method will lay the
|
||||
// value out in big-endian byte order. The returned Hash64 also
|
||||
// implements [encoding.BinaryMarshaler] and [encoding.BinaryUnmarshaler] to
|
||||
// marshal and unmarshal the internal state of the hash.
|
||||
func New() hash.Hash64 { return &digest{0} }
|
||||
|
||||
// Size returns the number of bytes Sum will append (always 8).
func (d *digest) Size() int { return Size }

// BlockSize returns 1: the CRC can consume input one byte at a time.
func (d *digest) BlockSize() int { return 1 }

// Reset restores the digest to its initial (zero) state.
func (d *digest) Reset() { d.crc = 0 }
|
||||
|
||||
const (
	// magic identifies marshaled CRC-64 state (same format as hash/crc64).
	magic = "crc\x02"
	// marshaledSize is len(magic) + 8 bytes of table checksum + 8 bytes of CRC state.
	marshaledSize = len(magic) + 8 + 8
)
|
||||
|
||||
func (d *digest) MarshalBinary() ([]byte, error) {
|
||||
b := make([]byte, 0, marshaledSize)
|
||||
b = append(b, magic...)
|
||||
b = binary.BigEndian.AppendUint64(b, tableSum)
|
||||
b = binary.BigEndian.AppendUint64(b, d.crc)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (d *digest) UnmarshalBinary(b []byte) error {
|
||||
if len(b) < len(magic) || string(b[:len(magic)]) != magic {
|
||||
return errors.New("hash/crc64: invalid hash state identifier")
|
||||
}
|
||||
if len(b) != marshaledSize {
|
||||
return errors.New("hash/crc64: invalid hash state size")
|
||||
}
|
||||
if tableSum != binary.BigEndian.Uint64(b[4:]) {
|
||||
return errors.New("hash/crc64: tables do not match")
|
||||
}
|
||||
d.crc = binary.BigEndian.Uint64(b[12:])
|
||||
return nil
|
||||
}
|
||||
|
||||
func update(crc uint64, p []byte) uint64 {
|
||||
if hasAsm && len(p) > 127 {
|
||||
ptr := unsafe.Pointer(&p[0])
|
||||
if align := (uintptr(ptr)+15)&^0xf - uintptr(ptr); align > 0 {
|
||||
// Align to 16-byte boundary.
|
||||
crc = update(crc, p[:align])
|
||||
p = p[align:]
|
||||
}
|
||||
runs := len(p) / 128
|
||||
crc = updateAsm(crc, p[:128*runs])
|
||||
return update(crc, p[128*runs:])
|
||||
}
|
||||
|
||||
buildSlicing8TablesOnce()
|
||||
crc = ^crc
|
||||
// table comparison is somewhat expensive, so avoid it for small sizes
|
||||
for len(p) >= 64 {
|
||||
var helperTable = slicing8TableNVME
|
||||
// Update using slicing-by-8
|
||||
for len(p) > 8 {
|
||||
crc ^= binary.LittleEndian.Uint64(p)
|
||||
crc = helperTable[7][crc&0xff] ^
|
||||
helperTable[6][(crc>>8)&0xff] ^
|
||||
helperTable[5][(crc>>16)&0xff] ^
|
||||
helperTable[4][(crc>>24)&0xff] ^
|
||||
helperTable[3][(crc>>32)&0xff] ^
|
||||
helperTable[2][(crc>>40)&0xff] ^
|
||||
helperTable[1][(crc>>48)&0xff] ^
|
||||
helperTable[0][crc>>56]
|
||||
p = p[8:]
|
||||
}
|
||||
}
|
||||
// For reminders or small sizes
|
||||
for _, v := range p {
|
||||
crc = nvmeTable[byte(crc)^v] ^ (crc >> 8)
|
||||
}
|
||||
return ^crc
|
||||
}
|
||||
|
||||
// Update returns the result of adding the bytes in p to the crc,
// using the NVME polynomial.
func Update(crc uint64, p []byte) uint64 {
	return update(crc, p)
}
|
||||
|
||||
// Write adds more data to the running hash. It always consumes all of p and
// never returns an error, as required by hash.Hash.
func (d *digest) Write(p []byte) (n int, err error) {
	d.crc = update(d.crc, p)
	return len(p), nil
}
|
||||
|
||||
func (d *digest) Sum64() uint64 { return d.crc }
|
||||
|
||||
func (d *digest) Sum(in []byte) []byte {
|
||||
s := d.Sum64()
|
||||
return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
|
||||
}
|
||||
|
||||
// Checksum returns the CRC-64 checksum of data
// using the NVME polynomial.
func Checksum(data []byte) uint64 { return update(0, data) }

// tableSum is a checksum identifying the NVME table. MarshalBinary embeds it
// in serialized state and UnmarshalBinary verifies it, so state is only ever
// restored into a matching implementation.
const tableSum = 0x8ddd9ee4402c7163
|
||||
+15
@@ -0,0 +1,15 @@
|
||||
// Copyright (c) 2025 Minio Inc. All rights reserved.
|
||||
// Use of this source code is governed by a license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
//go:build !noasm && !appengine && !gccgo
|
||||
|
||||
package crc64nvme
|
||||
|
||||
import (
|
||||
"github.com/klauspost/cpuid/v2"
|
||||
)
|
||||
|
||||
// hasAsm reports whether this CPU has the SSE2, SSE4 and carryless-multiply
// (CLMUL) features required by the PCLMULQDQ-based assembly implementation.
var hasAsm = cpuid.CPU.Supports(cpuid.SSE2, cpuid.CLMUL, cpuid.SSE4)

// updateAsm folds p into crc using SIMD carryless multiplication; implemented
// in assembly. Callers pass 16-byte-aligned buffers whose length is a
// multiple of 128 (see update).
func updateAsm(crc uint64, p []byte) (checksum uint64)
|
||||
+157
@@ -0,0 +1,157 @@
|
||||
// Copyright (c) 2025 Minio Inc. All rights reserved.
|
||||
// Use of this source code is governed by a license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
//go:build !noasm && !appengine && !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
TEXT ·updateAsm(SB), $0-40
|
||||
MOVQ crc+0(FP), AX // checksum
|
||||
MOVQ p_base+8(FP), SI // start pointer
|
||||
MOVQ p_len+16(FP), CX // length of buffer
|
||||
NOTQ AX
|
||||
SHRQ $7, CX
|
||||
CMPQ CX, $1
|
||||
JLT skip128
|
||||
|
||||
VMOVDQA 0x00(SI), X0
|
||||
VMOVDQA 0x10(SI), X1
|
||||
VMOVDQA 0x20(SI), X2
|
||||
VMOVDQA 0x30(SI), X3
|
||||
VMOVDQA 0x40(SI), X4
|
||||
VMOVDQA 0x50(SI), X5
|
||||
VMOVDQA 0x60(SI), X6
|
||||
VMOVDQA 0x70(SI), X7
|
||||
MOVQ AX, X8
|
||||
PXOR X8, X0
|
||||
CMPQ CX, $1
|
||||
JE tail128
|
||||
|
||||
MOVQ $0xa1ca681e733f9c40, AX
|
||||
MOVQ AX, X8
|
||||
MOVQ $0x5f852fb61e8d92dc, AX
|
||||
PINSRQ $0x1, AX, X9
|
||||
|
||||
loop128:
|
||||
ADDQ $128, SI
|
||||
SUBQ $1, CX
|
||||
VMOVDQA X0, X10
|
||||
PCLMULQDQ $0x00, X8, X10
|
||||
PCLMULQDQ $0x11, X9, X0
|
||||
PXOR X10, X0
|
||||
PXOR 0(SI), X0
|
||||
VMOVDQA X1, X10
|
||||
PCLMULQDQ $0x00, X8, X10
|
||||
PCLMULQDQ $0x11, X9, X1
|
||||
PXOR X10, X1
|
||||
PXOR 0x10(SI), X1
|
||||
VMOVDQA X2, X10
|
||||
PCLMULQDQ $0x00, X8, X10
|
||||
PCLMULQDQ $0x11, X9, X2
|
||||
PXOR X10, X2
|
||||
PXOR 0x20(SI), X2
|
||||
VMOVDQA X3, X10
|
||||
PCLMULQDQ $0x00, X8, X10
|
||||
PCLMULQDQ $0x11, X9, X3
|
||||
PXOR X10, X3
|
||||
PXOR 0x30(SI), X3
|
||||
VMOVDQA X4, X10
|
||||
PCLMULQDQ $0x00, X8, X10
|
||||
PCLMULQDQ $0x11, X9, X4
|
||||
PXOR X10, X4
|
||||
PXOR 0x40(SI), X4
|
||||
VMOVDQA X5, X10
|
||||
PCLMULQDQ $0x00, X8, X10
|
||||
PCLMULQDQ $0x11, X9, X5
|
||||
PXOR X10, X5
|
||||
PXOR 0x50(SI), X5
|
||||
VMOVDQA X6, X10
|
||||
PCLMULQDQ $0x00, X8, X10
|
||||
PCLMULQDQ $0x11, X9, X6
|
||||
PXOR X10, X6
|
||||
PXOR 0x60(SI), X6
|
||||
VMOVDQA X7, X10
|
||||
PCLMULQDQ $0x00, X8, X10
|
||||
PCLMULQDQ $0x11, X9, X7
|
||||
PXOR X10, X7
|
||||
PXOR 0x70(SI), X7
|
||||
CMPQ CX, $1
|
||||
JGT loop128
|
||||
|
||||
tail128:
|
||||
MOVQ $0xd083dd594d96319d, AX
|
||||
MOVQ AX, X11
|
||||
PCLMULQDQ $0x00, X0, X11
|
||||
MOVQ $0x946588403d4adcbc, AX
|
||||
PINSRQ $0x1, AX, X12
|
||||
PCLMULQDQ $0x11, X12, X0
|
||||
PXOR X11, X7
|
||||
PXOR X0, X7
|
||||
MOVQ $0x3c255f5ebc414423, AX
|
||||
MOVQ AX, X11
|
||||
PCLMULQDQ $0x00, X1, X11
|
||||
MOVQ $0x34f5a24e22d66e90, AX
|
||||
PINSRQ $0x1, AX, X12
|
||||
PCLMULQDQ $0x11, X12, X1
|
||||
PXOR X11, X1
|
||||
PXOR X7, X1
|
||||
MOVQ $0x7b0ab10dd0f809fe, AX
|
||||
MOVQ AX, X11
|
||||
PCLMULQDQ $0x00, X2, X11
|
||||
MOVQ $0x03363823e6e791e5, AX
|
||||
PINSRQ $0x1, AX, X12
|
||||
PCLMULQDQ $0x11, X12, X2
|
||||
PXOR X11, X2
|
||||
PXOR X1, X2
|
||||
MOVQ $0x0c32cdb31e18a84a, AX
|
||||
MOVQ AX, X11
|
||||
PCLMULQDQ $0x00, X3, X11
|
||||
MOVQ $0x62242240ace5045a, AX
|
||||
PINSRQ $0x1, AX, X12
|
||||
PCLMULQDQ $0x11, X12, X3
|
||||
PXOR X11, X3
|
||||
PXOR X2, X3
|
||||
MOVQ $0xbdd7ac0ee1a4a0f0, AX
|
||||
MOVQ AX, X11
|
||||
PCLMULQDQ $0x00, X4, X11
|
||||
MOVQ $0xa3ffdc1fe8e82a8b, AX
|
||||
PINSRQ $0x1, AX, X12
|
||||
PCLMULQDQ $0x11, X12, X4
|
||||
PXOR X11, X4
|
||||
PXOR X3, X4
|
||||
MOVQ $0xb0bc2e589204f500, AX
|
||||
MOVQ AX, X11
|
||||
PCLMULQDQ $0x00, X5, X11
|
||||
MOVQ $0xe1e0bb9d45d7a44c, AX
|
||||
PINSRQ $0x1, AX, X12
|
||||
PCLMULQDQ $0x11, X12, X5
|
||||
PXOR X11, X5
|
||||
PXOR X4, X5
|
||||
MOVQ $0xeadc41fd2ba3d420, AX
|
||||
MOVQ AX, X11
|
||||
PCLMULQDQ $0x00, X6, X11
|
||||
MOVQ $0x21e9761e252621ac, AX
|
||||
PINSRQ $0x1, AX, X12
|
||||
PCLMULQDQ $0x11, X12, X6
|
||||
PXOR X11, X6
|
||||
PXOR X5, X6
|
||||
MOVQ AX, X5
|
||||
PCLMULQDQ $0x00, X6, X5
|
||||
PSHUFD $0xee, X6, X6
|
||||
PXOR X5, X6
|
||||
MOVQ $0x27ecfa329aef9f77, AX
|
||||
MOVQ AX, X4
|
||||
PCLMULQDQ $0x00, X4, X6
|
||||
PEXTRQ $0, X6, BX
|
||||
MOVQ $0x34d926535897936b, AX
|
||||
MOVQ AX, X4
|
||||
PCLMULQDQ $0x00, X4, X6
|
||||
PXOR X5, X6
|
||||
PEXTRQ $1, X6, AX
|
||||
XORQ BX, AX
|
||||
|
||||
skip128:
|
||||
NOTQ AX
|
||||
MOVQ AX, checksum+32(FP)
|
||||
RET
|
||||
+15
@@ -0,0 +1,15 @@
|
||||
// Copyright (c) 2025 Minio Inc. All rights reserved.
|
||||
// Use of this source code is governed by a license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
//go:build !noasm && !appengine && !gccgo
|
||||
|
||||
package crc64nvme
|
||||
|
||||
import (
|
||||
"github.com/klauspost/cpuid/v2"
|
||||
)
|
||||
|
||||
// hasAsm reports whether this CPU has the ASIMD and polynomial-multiply
// (PMULL) features required by the NEON-based assembly implementation.
var hasAsm = cpuid.CPU.Supports(cpuid.ASIMD) && cpuid.CPU.Supports(cpuid.PMULL)

// updateAsm folds p into crc using SIMD carryless multiplication; implemented
// in assembly. Callers pass 16-byte-aligned buffers whose length is a
// multiple of 128 (see update).
func updateAsm(crc uint64, p []byte) (checksum uint64)
|
||||
+157
@@ -0,0 +1,157 @@
|
||||
// Copyright (c) 2025 Minio Inc. All rights reserved.
|
||||
// Use of this source code is governed by a license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
//go:build !noasm && !appengine && !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
TEXT ·updateAsm(SB), $0-40
|
||||
MOVD crc+0(FP), R0 // checksum
|
||||
MOVD p_base+8(FP), R1 // start pointer
|
||||
MOVD p_len+16(FP), R2 // length of buffer
|
||||
MOVD $·const(SB), R3 // constants
|
||||
MVN R0, R0
|
||||
LSR $7, R2, R2
|
||||
CMP $1, R2
|
||||
BLT skip128
|
||||
|
||||
FLDPQ (R1), (F0, F1)
|
||||
FLDPQ 32(R1), (F2, F3)
|
||||
FLDPQ 64(R1), (F4, F5)
|
||||
FLDPQ 96(R1), (F6, F7)
|
||||
FMOVD R0, F8
|
||||
VMOVI $0, V9.B16
|
||||
VMOV V9.D[0], V8.D[1]
|
||||
VEOR V8.B16, V0.B16, V0.B16
|
||||
CMP $1, R2
|
||||
BEQ tail128
|
||||
|
||||
MOVD 112(R3), R4
|
||||
MOVD 120(R3), R5
|
||||
FMOVD R4, F8
|
||||
VDUP R5, V9.D2
|
||||
|
||||
loop128:
|
||||
ADD $128, R1, R1
|
||||
SUB $1, R2, R2
|
||||
VPMULL V0.D1, V8.D1, V10.Q1
|
||||
VPMULL2 V0.D2, V9.D2, V0.Q1
|
||||
FLDPQ (R1), (F11, F12)
|
||||
VEOR3 V0.B16, V11.B16, V10.B16, V0.B16
|
||||
VPMULL V1.D1, V8.D1, V10.Q1
|
||||
VPMULL2 V1.D2, V9.D2, V1.Q1
|
||||
VEOR3 V1.B16, V12.B16, V10.B16, V1.B16
|
||||
VPMULL V2.D1, V8.D1, V10.Q1
|
||||
VPMULL2 V2.D2, V9.D2, V2.Q1
|
||||
FLDPQ 32(R1), (F11, F12)
|
||||
VEOR3 V2.B16, V11.B16, V10.B16, V2.B16
|
||||
VPMULL V3.D1, V8.D1, V10.Q1
|
||||
VPMULL2 V3.D2, V9.D2, V3.Q1
|
||||
VEOR3 V3.B16, V12.B16, V10.B16, V3.B16
|
||||
VPMULL V4.D1, V8.D1, V10.Q1
|
||||
VPMULL2 V4.D2, V9.D2, V4.Q1
|
||||
FLDPQ 64(R1), (F11, F12)
|
||||
VEOR3 V4.B16, V11.B16, V10.B16, V4.B16
|
||||
VPMULL V5.D1, V8.D1, V10.Q1
|
||||
VPMULL2 V5.D2, V9.D2, V5.Q1
|
||||
VEOR3 V5.B16, V12.B16, V10.B16, V5.B16
|
||||
VPMULL V6.D1, V8.D1, V10.Q1
|
||||
VPMULL2 V6.D2, V9.D2, V6.Q1
|
||||
FLDPQ 96(R1), (F11, F12)
|
||||
VEOR3 V6.B16, V11.B16, V10.B16, V6.B16
|
||||
VPMULL V7.D1, V8.D1, V10.Q1
|
||||
VPMULL2 V7.D2, V9.D2, V7.Q1
|
||||
VEOR3 V7.B16, V12.B16, V10.B16, V7.B16
|
||||
CMP $1, R2
|
||||
BHI loop128
|
||||
|
||||
tail128:
|
||||
MOVD (R3), R4
|
||||
FMOVD R4, F11
|
||||
VPMULL V0.D1, V11.D1, V11.Q1
|
||||
MOVD 8(R3), R4
|
||||
VDUP R4, V12.D2
|
||||
VPMULL2 V0.D2, V12.D2, V0.Q1
|
||||
VEOR3 V0.B16, V7.B16, V11.B16, V7.B16
|
||||
MOVD 16(R3), R4
|
||||
FMOVD R4, F11
|
||||
VPMULL V1.D1, V11.D1, V11.Q1
|
||||
MOVD 24(R3), R4
|
||||
VDUP R4, V12.D2
|
||||
VPMULL2 V1.D2, V12.D2, V1.Q1
|
||||
VEOR3 V1.B16, V11.B16, V7.B16, V1.B16
|
||||
MOVD 32(R3), R4
|
||||
FMOVD R4, F11
|
||||
VPMULL V2.D1, V11.D1, V11.Q1
|
||||
MOVD 40(R3), R4
|
||||
VDUP R4, V12.D2
|
||||
VPMULL2 V2.D2, V12.D2, V2.Q1
|
||||
VEOR3 V2.B16, V11.B16, V1.B16, V2.B16
|
||||
MOVD 48(R3), R4
|
||||
FMOVD R4, F11
|
||||
VPMULL V3.D1, V11.D1, V11.Q1
|
||||
MOVD 56(R3), R4
|
||||
VDUP R4, V12.D2
|
||||
VPMULL2 V3.D2, V12.D2, V3.Q1
|
||||
VEOR3 V3.B16, V11.B16, V2.B16, V3.B16
|
||||
MOVD 64(R3), R4
|
||||
FMOVD R4, F11
|
||||
VPMULL V4.D1, V11.D1, V11.Q1
|
||||
MOVD 72(R3), R4
|
||||
VDUP R4, V12.D2
|
||||
VPMULL2 V4.D2, V12.D2, V4.Q1
|
||||
VEOR3 V4.B16, V11.B16, V3.B16, V4.B16
|
||||
MOVD 80(R3), R4
|
||||
FMOVD R4, F11
|
||||
VPMULL V5.D1, V11.D1, V11.Q1
|
||||
MOVD 88(R3), R4
|
||||
VDUP R4, V12.D2
|
||||
VPMULL2 V5.D2, V12.D2, V5.Q1
|
||||
VEOR3 V5.B16, V11.B16, V4.B16, V5.B16
|
||||
MOVD 96(R3), R4
|
||||
FMOVD R4, F11
|
||||
VPMULL V6.D1, V11.D1, V11.Q1
|
||||
MOVD 104(R3), R4
|
||||
VDUP R4, V12.D2
|
||||
VPMULL2 V6.D2, V12.D2, V6.Q1
|
||||
VEOR3 V6.B16, V11.B16, V5.B16, V6.B16
|
||||
FMOVD R4, F5
|
||||
VPMULL V6.D1, V5.D1, V5.Q1
|
||||
VDUP V6.D[1], V6.D2
|
||||
VEOR V5.B8, V6.B8, V6.B8
|
||||
MOVD 128(R3), R4
|
||||
FMOVD R4, F4
|
||||
VPMULL V4.D1, V6.D1, V6.Q1
|
||||
FMOVD F6, R4
|
||||
MOVD 136(R3), R5
|
||||
FMOVD R5, F4
|
||||
VPMULL V4.D1, V6.D1, V6.Q1
|
||||
VEOR V6.B16, V5.B16, V6.B16
|
||||
VMOV V6.D[1], R5
|
||||
EOR R4, R5, R0
|
||||
|
||||
skip128:
|
||||
MVN R0, R0
|
||||
MOVD R0, checksum+32(FP)
|
||||
RET
|
||||
|
||||
DATA ·const+0x000(SB)/8, $0xd083dd594d96319d // K_959
|
||||
DATA ·const+0x008(SB)/8, $0x946588403d4adcbc // K_895
|
||||
DATA ·const+0x010(SB)/8, $0x3c255f5ebc414423 // K_831
|
||||
DATA ·const+0x018(SB)/8, $0x34f5a24e22d66e90 // K_767
|
||||
DATA ·const+0x020(SB)/8, $0x7b0ab10dd0f809fe // K_703
|
||||
DATA ·const+0x028(SB)/8, $0x03363823e6e791e5 // K_639
|
||||
DATA ·const+0x030(SB)/8, $0x0c32cdb31e18a84a // K_575
|
||||
DATA ·const+0x038(SB)/8, $0x62242240ace5045a // K_511
|
||||
DATA ·const+0x040(SB)/8, $0xbdd7ac0ee1a4a0f0 // K_447
|
||||
DATA ·const+0x048(SB)/8, $0xa3ffdc1fe8e82a8b // K_383
|
||||
DATA ·const+0x050(SB)/8, $0xb0bc2e589204f500 // K_319
|
||||
DATA ·const+0x058(SB)/8, $0xe1e0bb9d45d7a44c // K_255
|
||||
DATA ·const+0x060(SB)/8, $0xeadc41fd2ba3d420 // K_191
|
||||
DATA ·const+0x068(SB)/8, $0x21e9761e252621ac // K_127
|
||||
DATA ·const+0x070(SB)/8, $0xa1ca681e733f9c40 // K_1087
|
||||
DATA ·const+0x078(SB)/8, $0x5f852fb61e8d92dc // K_1023
|
||||
DATA ·const+0x080(SB)/8, $0x27ecfa329aef9f77 // MU
|
||||
DATA ·const+0x088(SB)/8, $0x34d926535897936b // POLY
|
||||
GLOBL ·const(SB), (NOPTR+RODATA), $144
|
||||
+11
@@ -0,0 +1,11 @@
|
||||
// Copyright (c) 2025 Minio Inc. All rights reserved.
|
||||
// Use of this source code is governed by a license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
//go:build (!amd64 || noasm || appengine || gccgo) && (!arm64 || noasm || appengine || gccgo)
|
||||
|
||||
package crc64nvme
|
||||
|
||||
// hasAsm is false on platforms without an assembly implementation, so update
// always takes the table-driven path.
var hasAsm = false

// updateAsm exists only to satisfy the reference in update; the hasAsm guard
// above guarantees it is never called on this build.
func updateAsm(crc uint64, p []byte) (checksum uint64) { panic("should not be reached") }
|
||||
+1
-1
@@ -253,7 +253,7 @@ The full API Reference is available here.
|
||||
|
||||
* [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go)
|
||||
* [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go)
|
||||
* [deletebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketencryption.go)
|
||||
* [removebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketencryption.go)
|
||||
|
||||
### Full Examples : Bucket replication Operations
|
||||
|
||||
|
||||
+6
-3
@@ -30,6 +30,7 @@ import (
|
||||
"github.com/google/uuid"
|
||||
"github.com/minio/minio-go/v7/pkg/encrypt"
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
"github.com/minio/minio-go/v7/pkg/tags"
|
||||
)
|
||||
|
||||
// CopyDestOptions represents options specified by user for CopyObject/ComposeObject APIs
|
||||
@@ -98,8 +99,8 @@ func (opts CopyDestOptions) Marshal(header http.Header) {
|
||||
const replaceDirective = "REPLACE"
|
||||
if opts.ReplaceTags {
|
||||
header.Set(amzTaggingHeaderDirective, replaceDirective)
|
||||
if tags := s3utils.TagEncode(opts.UserTags); tags != "" {
|
||||
header.Set(amzTaggingHeader, tags)
|
||||
if tags, _ := tags.NewTags(opts.UserTags, true); tags != nil {
|
||||
header.Set(amzTaggingHeader, tags.String())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -236,7 +237,9 @@ func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuc
|
||||
}
|
||||
|
||||
if len(dstOpts.UserTags) != 0 {
|
||||
headers.Set(amzTaggingHeader, s3utils.TagEncode(dstOpts.UserTags))
|
||||
if tags, _ := tags.NewTags(dstOpts.UserTags, true); tags != nil {
|
||||
headers.Set(amzTaggingHeader, tags.String())
|
||||
}
|
||||
}
|
||||
|
||||
reqMetadata := requestMetadata{
|
||||
|
||||
+1
-1
@@ -68,7 +68,7 @@ func (c *Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySr
|
||||
Bucket: dst.Bucket,
|
||||
Key: dst.Object,
|
||||
LastModified: cpObjRes.LastModified,
|
||||
ETag: trimEtag(resp.Header.Get("ETag")),
|
||||
ETag: trimEtag(cpObjRes.ETag),
|
||||
VersionID: resp.Header.Get(amzVersionID),
|
||||
Expiration: expTime,
|
||||
ExpirationRuleID: ruleID,
|
||||
|
||||
+10
-8
@@ -143,10 +143,11 @@ type UploadInfo struct {
|
||||
// Verified checksum values, if any.
|
||||
// Values are base64 (standard) encoded.
|
||||
// For multipart objects this is a checksum of the checksum of each part.
|
||||
ChecksumCRC32 string
|
||||
ChecksumCRC32C string
|
||||
ChecksumSHA1 string
|
||||
ChecksumSHA256 string
|
||||
ChecksumCRC32 string
|
||||
ChecksumCRC32C string
|
||||
ChecksumSHA1 string
|
||||
ChecksumSHA256 string
|
||||
ChecksumCRC64NVME string
|
||||
}
|
||||
|
||||
// RestoreInfo contains information of the restore operation of an archived object
|
||||
@@ -215,10 +216,11 @@ type ObjectInfo struct {
|
||||
Restore *RestoreInfo
|
||||
|
||||
// Checksum values
|
||||
ChecksumCRC32 string
|
||||
ChecksumCRC32C string
|
||||
ChecksumSHA1 string
|
||||
ChecksumSHA256 string
|
||||
ChecksumCRC32 string
|
||||
ChecksumCRC32C string
|
||||
ChecksumSHA1 string
|
||||
ChecksumSHA256 string
|
||||
ChecksumCRC64NVME string
|
||||
|
||||
Internal *struct {
|
||||
K int // Data blocks
|
||||
|
||||
+2
-2
@@ -318,7 +318,7 @@ func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
|
||||
response := <-o.resCh
|
||||
|
||||
// Return any error to the top level.
|
||||
if response.Error != nil {
|
||||
if response.Error != nil && response.Error != io.EOF {
|
||||
return response, response.Error
|
||||
}
|
||||
|
||||
@@ -340,7 +340,7 @@ func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
|
||||
// Data are ready on the wire, no need to reinitiate connection in lower level
|
||||
o.seekData = false
|
||||
|
||||
return response, nil
|
||||
return response, response.Error
|
||||
}
|
||||
|
||||
// setOffset - handles the setting of offsets for
|
||||
|
||||
+1
-1
@@ -140,7 +140,7 @@ func (c *Client) PresignedPostPolicy(ctx context.Context, p *PostPolicy) (u *url
|
||||
}
|
||||
|
||||
// Get credentials from the configured credentials provider.
|
||||
credValues, err := c.credsProvider.Get()
|
||||
credValues, err := c.credsProvider.GetWithContext(c.CredContext())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
+78
@@ -0,0 +1,78 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2024 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/goccy/go-json"
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
)
|
||||
|
||||
// PromptObject performs language model inference with the prompt and referenced object as context.
|
||||
// Inference is performed using a Lambda handler that can process the prompt and object.
|
||||
// Currently, this functionality is limited to certain MinIO servers.
|
||||
func (c *Client) PromptObject(ctx context.Context, bucketName, objectName, prompt string, opts PromptObjectOptions) (io.ReadCloser, error) {
|
||||
// Input validation.
|
||||
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
|
||||
return nil, ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "InvalidBucketName",
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
if err := s3utils.CheckValidObjectName(objectName); err != nil {
|
||||
return nil, ErrorResponse{
|
||||
StatusCode: http.StatusBadRequest,
|
||||
Code: "XMinioInvalidObjectName",
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
opts.AddLambdaArnToReqParams(opts.LambdaArn)
|
||||
opts.SetHeader("Content-Type", "application/json")
|
||||
opts.AddPromptArg("prompt", prompt)
|
||||
promptReqBytes, err := json.Marshal(opts.PromptArgs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Execute POST on bucket/object.
|
||||
resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
|
||||
bucketName: bucketName,
|
||||
objectName: objectName,
|
||||
queryValues: opts.toQueryValues(),
|
||||
customHeader: opts.Header(),
|
||||
contentSHA256Hex: sum256Hex(promptReqBytes),
|
||||
contentBody: bytes.NewReader(promptReqBytes),
|
||||
contentLength: int64(len(promptReqBytes)),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
defer closeResponse(resp)
|
||||
return nil, httpRespToErrorResponse(resp, bucketName, objectName)
|
||||
}
|
||||
|
||||
return resp.Body, nil
|
||||
}
|
||||
+84
@@ -0,0 +1,84 @@
|
||||
/*
|
||||
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
|
||||
* Copyright 2015-2024 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// PromptObjectOptions provides options to PromptObject call.
|
||||
// LambdaArn is the ARN of the Prompt Lambda to be invoked.
|
||||
// PromptArgs is a map of key-value pairs to be passed to the inference action on the Prompt Lambda.
|
||||
// "prompt" is a reserved key and should not be used as a key in PromptArgs.
|
||||
type PromptObjectOptions struct {
|
||||
LambdaArn string
|
||||
PromptArgs map[string]any
|
||||
headers map[string]string
|
||||
reqParams url.Values
|
||||
}
|
||||
|
||||
// Header returns the http.Header representation of the POST options.
|
||||
func (o PromptObjectOptions) Header() http.Header {
|
||||
headers := make(http.Header, len(o.headers))
|
||||
for k, v := range o.headers {
|
||||
headers.Set(k, v)
|
||||
}
|
||||
return headers
|
||||
}
|
||||
|
||||
// AddPromptArg Add a key value pair to the prompt arguments where the key is a string and
|
||||
// the value is a JSON serializable.
|
||||
func (o *PromptObjectOptions) AddPromptArg(key string, value any) {
|
||||
if o.PromptArgs == nil {
|
||||
o.PromptArgs = make(map[string]any)
|
||||
}
|
||||
o.PromptArgs[key] = value
|
||||
}
|
||||
|
||||
// AddLambdaArnToReqParams adds the lambdaArn to the request query string parameters.
|
||||
func (o *PromptObjectOptions) AddLambdaArnToReqParams(lambdaArn string) {
|
||||
if o.reqParams == nil {
|
||||
o.reqParams = make(url.Values)
|
||||
}
|
||||
o.reqParams.Add("lambdaArn", lambdaArn)
|
||||
}
|
||||
|
||||
// SetHeader adds a key value pair to the options. The
|
||||
// key-value pair will be part of the HTTP POST request
|
||||
// headers.
|
||||
func (o *PromptObjectOptions) SetHeader(key, value string) {
|
||||
if o.headers == nil {
|
||||
o.headers = make(map[string]string)
|
||||
}
|
||||
o.headers[http.CanonicalHeaderKey(key)] = value
|
||||
}
|
||||
|
||||
// toQueryValues - Convert the reqParams in Options to query string parameters.
|
||||
func (o *PromptObjectOptions) toQueryValues() url.Values {
|
||||
urlValues := make(url.Values)
|
||||
if o.reqParams != nil {
|
||||
for key, values := range o.reqParams {
|
||||
for _, value := range values {
|
||||
urlValues.Add(key, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return urlValues
|
||||
}
|
||||
+4
-1
@@ -85,7 +85,10 @@ func (c *Client) PutObjectFanOut(ctx context.Context, bucket string, fanOutData
|
||||
policy.SetEncryption(fanOutReq.SSE)
|
||||
|
||||
// Set checksum headers if any.
|
||||
policy.SetChecksum(fanOutReq.Checksum)
|
||||
err := policy.SetChecksum(fanOutReq.Checksum)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
url, formData, err := c.PresignedPostPolicy(ctx, policy)
|
||||
if err != nil {
|
||||
|
||||
+22
-26
@@ -83,10 +83,7 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
|
||||
// HTTPS connection.
|
||||
hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256)
|
||||
if len(hashSums) == 0 {
|
||||
if opts.UserMetadata == nil {
|
||||
opts.UserMetadata = make(map[string]string, 1)
|
||||
}
|
||||
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
|
||||
addAutoChecksumHeaders(&opts)
|
||||
}
|
||||
|
||||
// Initiate a new multipart upload.
|
||||
@@ -113,7 +110,6 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
|
||||
|
||||
// Create checksums
|
||||
// CRC32C is ~50% faster on AMD64 @ 30GB/s
|
||||
var crcBytes []byte
|
||||
customHeader := make(http.Header)
|
||||
crc := opts.AutoChecksum.Hasher()
|
||||
for partNumber <= totalPartsCount {
|
||||
@@ -154,7 +150,6 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
|
||||
crc.Write(buf[:length])
|
||||
cSum := crc.Sum(nil)
|
||||
customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
|
||||
crcBytes = append(crcBytes, cSum...)
|
||||
}
|
||||
|
||||
p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader}
|
||||
@@ -182,18 +177,21 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
|
||||
|
||||
// Loop over total uploaded parts to save them in
|
||||
// Parts array before completing the multipart request.
|
||||
allParts := make([]ObjectPart, 0, len(partsInfo))
|
||||
for i := 1; i < partNumber; i++ {
|
||||
part, ok := partsInfo[i]
|
||||
if !ok {
|
||||
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
|
||||
}
|
||||
allParts = append(allParts, part)
|
||||
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
|
||||
ETag: part.ETag,
|
||||
PartNumber: part.PartNumber,
|
||||
ChecksumCRC32: part.ChecksumCRC32,
|
||||
ChecksumCRC32C: part.ChecksumCRC32C,
|
||||
ChecksumSHA1: part.ChecksumSHA1,
|
||||
ChecksumSHA256: part.ChecksumSHA256,
|
||||
ETag: part.ETag,
|
||||
PartNumber: part.PartNumber,
|
||||
ChecksumCRC32: part.ChecksumCRC32,
|
||||
ChecksumCRC32C: part.ChecksumCRC32C,
|
||||
ChecksumSHA1: part.ChecksumSHA1,
|
||||
ChecksumSHA256: part.ChecksumSHA256,
|
||||
ChecksumCRC64NVME: part.ChecksumCRC64NVME,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -203,12 +201,8 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
|
||||
ServerSideEncryption: opts.ServerSideEncryption,
|
||||
AutoChecksum: opts.AutoChecksum,
|
||||
}
|
||||
if len(crcBytes) > 0 {
|
||||
// Add hash of hashes.
|
||||
crc.Reset()
|
||||
crc.Write(crcBytes)
|
||||
opts.UserMetadata = map[string]string{opts.AutoChecksum.Key(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
|
||||
}
|
||||
applyAutoChecksum(&opts, allParts)
|
||||
|
||||
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
@@ -354,10 +348,11 @@ func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart
|
||||
// Once successfully uploaded, return completed part.
|
||||
h := resp.Header
|
||||
objPart := ObjectPart{
|
||||
ChecksumCRC32: h.Get("x-amz-checksum-crc32"),
|
||||
ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"),
|
||||
ChecksumSHA1: h.Get("x-amz-checksum-sha1"),
|
||||
ChecksumSHA256: h.Get("x-amz-checksum-sha256"),
|
||||
ChecksumCRC32: h.Get(ChecksumCRC32.Key()),
|
||||
ChecksumCRC32C: h.Get(ChecksumCRC32C.Key()),
|
||||
ChecksumSHA1: h.Get(ChecksumSHA1.Key()),
|
||||
ChecksumSHA256: h.Get(ChecksumSHA256.Key()),
|
||||
ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()),
|
||||
}
|
||||
objPart.Size = p.size
|
||||
objPart.PartNumber = p.partNumber
|
||||
@@ -457,9 +452,10 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
|
||||
Expiration: expTime,
|
||||
ExpirationRuleID: ruleID,
|
||||
|
||||
ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256,
|
||||
ChecksumSHA1: completeMultipartUploadResult.ChecksumSHA1,
|
||||
ChecksumCRC32: completeMultipartUploadResult.ChecksumCRC32,
|
||||
ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C,
|
||||
ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256,
|
||||
ChecksumSHA1: completeMultipartUploadResult.ChecksumSHA1,
|
||||
ChecksumCRC32: completeMultipartUploadResult.ChecksumCRC32,
|
||||
ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C,
|
||||
ChecksumCRC64NVME: completeMultipartUploadResult.ChecksumCRC64NVME,
|
||||
}, nil
|
||||
}
|
||||
|
||||
+40
-61
@@ -52,7 +52,7 @@ func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objec
|
||||
} else {
|
||||
info, err = c.putObjectMultipartStreamOptionalChecksum(ctx, bucketName, objectName, reader, size, opts)
|
||||
}
|
||||
if err != nil {
|
||||
if err != nil && s3utils.IsGoogleEndpoint(*c.endpointURL) {
|
||||
errResp := ToErrorResponse(err)
|
||||
// Verify if multipart functionality is not available, if not
|
||||
// fall back to single PutObject operation.
|
||||
@@ -113,10 +113,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
|
||||
}
|
||||
withChecksum := c.trailingHeaderSupport
|
||||
if withChecksum {
|
||||
if opts.UserMetadata == nil {
|
||||
opts.UserMetadata = make(map[string]string, 1)
|
||||
}
|
||||
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
|
||||
addAutoChecksumHeaders(&opts)
|
||||
}
|
||||
// Initiate a new multipart upload.
|
||||
uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
|
||||
@@ -240,6 +237,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
|
||||
|
||||
// Gather the responses as they occur and update any
|
||||
// progress bar.
|
||||
allParts := make([]ObjectPart, 0, totalPartsCount)
|
||||
for u := 1; u <= totalPartsCount; u++ {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
@@ -248,16 +246,17 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
|
||||
if uploadRes.Error != nil {
|
||||
return UploadInfo{}, uploadRes.Error
|
||||
}
|
||||
|
||||
allParts = append(allParts, uploadRes.Part)
|
||||
// Update the totalUploadedSize.
|
||||
totalUploadedSize += uploadRes.Size
|
||||
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
|
||||
ETag: uploadRes.Part.ETag,
|
||||
PartNumber: uploadRes.Part.PartNumber,
|
||||
ChecksumCRC32: uploadRes.Part.ChecksumCRC32,
|
||||
ChecksumCRC32C: uploadRes.Part.ChecksumCRC32C,
|
||||
ChecksumSHA1: uploadRes.Part.ChecksumSHA1,
|
||||
ChecksumSHA256: uploadRes.Part.ChecksumSHA256,
|
||||
ETag: uploadRes.Part.ETag,
|
||||
PartNumber: uploadRes.Part.PartNumber,
|
||||
ChecksumCRC32: uploadRes.Part.ChecksumCRC32,
|
||||
ChecksumCRC32C: uploadRes.Part.ChecksumCRC32C,
|
||||
ChecksumSHA1: uploadRes.Part.ChecksumSHA1,
|
||||
ChecksumSHA256: uploadRes.Part.ChecksumSHA256,
|
||||
ChecksumCRC64NVME: uploadRes.Part.ChecksumCRC64NVME,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -275,15 +274,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
|
||||
AutoChecksum: opts.AutoChecksum,
|
||||
}
|
||||
if withChecksum {
|
||||
// Add hash of hashes.
|
||||
crc := opts.AutoChecksum.Hasher()
|
||||
for _, part := range complMultipartUpload.Parts {
|
||||
cs, err := base64.StdEncoding.DecodeString(part.Checksum(opts.AutoChecksum))
|
||||
if err == nil {
|
||||
crc.Write(cs)
|
||||
}
|
||||
}
|
||||
opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
|
||||
applyAutoChecksum(&opts, allParts)
|
||||
}
|
||||
|
||||
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
|
||||
@@ -312,10 +303,7 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
|
||||
}
|
||||
|
||||
if !opts.SendContentMd5 {
|
||||
if opts.UserMetadata == nil {
|
||||
opts.UserMetadata = make(map[string]string, 1)
|
||||
}
|
||||
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
|
||||
addAutoChecksumHeaders(&opts)
|
||||
}
|
||||
|
||||
// Calculate the optimal parts info for a given size.
|
||||
@@ -342,7 +330,6 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
|
||||
|
||||
// Create checksums
|
||||
// CRC32C is ~50% faster on AMD64 @ 30GB/s
|
||||
var crcBytes []byte
|
||||
customHeader := make(http.Header)
|
||||
crc := opts.AutoChecksum.Hasher()
|
||||
md5Hash := c.md5Hasher()
|
||||
@@ -389,7 +376,6 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
|
||||
crc.Write(buf[:length])
|
||||
cSum := crc.Sum(nil)
|
||||
customHeader.Set(opts.AutoChecksum.KeyCapitalized(), base64.StdEncoding.EncodeToString(cSum))
|
||||
crcBytes = append(crcBytes, cSum...)
|
||||
}
|
||||
|
||||
// Update progress reader appropriately to the latest offset
|
||||
@@ -420,18 +406,21 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
|
||||
|
||||
// Loop over total uploaded parts to save them in
|
||||
// Parts array before completing the multipart request.
|
||||
allParts := make([]ObjectPart, 0, len(partsInfo))
|
||||
for i := 1; i < partNumber; i++ {
|
||||
part, ok := partsInfo[i]
|
||||
if !ok {
|
||||
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
|
||||
}
|
||||
allParts = append(allParts, part)
|
||||
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
|
||||
ETag: part.ETag,
|
||||
PartNumber: part.PartNumber,
|
||||
ChecksumCRC32: part.ChecksumCRC32,
|
||||
ChecksumCRC32C: part.ChecksumCRC32C,
|
||||
ChecksumSHA1: part.ChecksumSHA1,
|
||||
ChecksumSHA256: part.ChecksumSHA256,
|
||||
ETag: part.ETag,
|
||||
PartNumber: part.PartNumber,
|
||||
ChecksumCRC32: part.ChecksumCRC32,
|
||||
ChecksumCRC32C: part.ChecksumCRC32C,
|
||||
ChecksumSHA1: part.ChecksumSHA1,
|
||||
ChecksumSHA256: part.ChecksumSHA256,
|
||||
ChecksumCRC64NVME: part.ChecksumCRC64NVME,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -442,12 +431,7 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
|
||||
ServerSideEncryption: opts.ServerSideEncryption,
|
||||
AutoChecksum: opts.AutoChecksum,
|
||||
}
|
||||
if len(crcBytes) > 0 {
|
||||
// Add hash of hashes.
|
||||
crc.Reset()
|
||||
crc.Write(crcBytes)
|
||||
opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
|
||||
}
|
||||
applyAutoChecksum(&opts, allParts)
|
||||
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
@@ -475,10 +459,7 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
|
||||
opts.AutoChecksum = opts.Checksum
|
||||
}
|
||||
if !opts.SendContentMd5 {
|
||||
if opts.UserMetadata == nil {
|
||||
opts.UserMetadata = make(map[string]string, 1)
|
||||
}
|
||||
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
|
||||
addAutoChecksumHeaders(&opts)
|
||||
}
|
||||
|
||||
// Cancel all when an error occurs.
|
||||
@@ -510,7 +491,6 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
|
||||
|
||||
// Create checksums
|
||||
// CRC32C is ~50% faster on AMD64 @ 30GB/s
|
||||
var crcBytes []byte
|
||||
crc := opts.AutoChecksum.Hasher()
|
||||
|
||||
// Total data read and written to server. should be equal to 'size' at the end of the call.
|
||||
@@ -570,7 +550,6 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
|
||||
crc.Write(buf[:length])
|
||||
cSum := crc.Sum(nil)
|
||||
customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
|
||||
crcBytes = append(crcBytes, cSum...)
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
@@ -630,18 +609,21 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
|
||||
|
||||
// Loop over total uploaded parts to save them in
|
||||
// Parts array before completing the multipart request.
|
||||
allParts := make([]ObjectPart, 0, len(partsInfo))
|
||||
for i := 1; i < partNumber; i++ {
|
||||
part, ok := partsInfo[i]
|
||||
if !ok {
|
||||
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
|
||||
}
|
||||
allParts = append(allParts, part)
|
||||
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
|
||||
ETag: part.ETag,
|
||||
PartNumber: part.PartNumber,
|
||||
ChecksumCRC32: part.ChecksumCRC32,
|
||||
ChecksumCRC32C: part.ChecksumCRC32C,
|
||||
ChecksumSHA1: part.ChecksumSHA1,
|
||||
ChecksumSHA256: part.ChecksumSHA256,
|
||||
ETag: part.ETag,
|
||||
PartNumber: part.PartNumber,
|
||||
ChecksumCRC32: part.ChecksumCRC32,
|
||||
ChecksumCRC32C: part.ChecksumCRC32C,
|
||||
ChecksumSHA1: part.ChecksumSHA1,
|
||||
ChecksumSHA256: part.ChecksumSHA256,
|
||||
ChecksumCRC64NVME: part.ChecksumCRC64NVME,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -652,12 +634,8 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
|
||||
ServerSideEncryption: opts.ServerSideEncryption,
|
||||
AutoChecksum: opts.AutoChecksum,
|
||||
}
|
||||
if len(crcBytes) > 0 {
|
||||
// Add hash of hashes.
|
||||
crc.Reset()
|
||||
crc.Write(crcBytes)
|
||||
opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
|
||||
}
|
||||
applyAutoChecksum(&opts, allParts)
|
||||
|
||||
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
@@ -823,9 +801,10 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string,
|
||||
ExpirationRuleID: ruleID,
|
||||
|
||||
// Checksum values
|
||||
ChecksumCRC32: h.Get("x-amz-checksum-crc32"),
|
||||
ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"),
|
||||
ChecksumSHA1: h.Get("x-amz-checksum-sha1"),
|
||||
ChecksumSHA256: h.Get("x-amz-checksum-sha256"),
|
||||
ChecksumCRC32: h.Get(ChecksumCRC32.Key()),
|
||||
ChecksumCRC32C: h.Get(ChecksumCRC32C.Key()),
|
||||
ChecksumSHA1: h.Get(ChecksumSHA1.Key()),
|
||||
ChecksumSHA256: h.Get(ChecksumSHA256.Key()),
|
||||
ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
+16
-19
@@ -30,6 +30,7 @@ import (
|
||||
|
||||
"github.com/minio/minio-go/v7/pkg/encrypt"
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
"github.com/minio/minio-go/v7/pkg/tags"
|
||||
"golang.org/x/net/http/httpguts"
|
||||
)
|
||||
|
||||
@@ -229,7 +230,9 @@ func (opts PutObjectOptions) Header() (header http.Header) {
|
||||
}
|
||||
|
||||
if len(opts.UserTags) != 0 {
|
||||
header.Set(amzTaggingHeader, s3utils.TagEncode(opts.UserTags))
|
||||
if tags, _ := tags.NewTags(opts.UserTags, true); tags != nil {
|
||||
header.Set(amzTaggingHeader, tags.String())
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range opts.UserMetadata {
|
||||
@@ -387,10 +390,7 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
|
||||
opts.AutoChecksum = opts.Checksum
|
||||
}
|
||||
if !opts.SendContentMd5 {
|
||||
if opts.UserMetadata == nil {
|
||||
opts.UserMetadata = make(map[string]string, 1)
|
||||
}
|
||||
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
|
||||
addAutoChecksumHeaders(&opts)
|
||||
}
|
||||
|
||||
// Initiate a new multipart upload.
|
||||
@@ -417,7 +417,6 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
|
||||
|
||||
// Create checksums
|
||||
// CRC32C is ~50% faster on AMD64 @ 30GB/s
|
||||
var crcBytes []byte
|
||||
customHeader := make(http.Header)
|
||||
crc := opts.AutoChecksum.Hasher()
|
||||
|
||||
@@ -443,7 +442,6 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
|
||||
crc.Write(buf[:length])
|
||||
cSum := crc.Sum(nil)
|
||||
customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
|
||||
crcBytes = append(crcBytes, cSum...)
|
||||
}
|
||||
|
||||
// Update progress reader appropriately to the latest offset
|
||||
@@ -475,18 +473,21 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
|
||||
|
||||
// Loop over total uploaded parts to save them in
|
||||
// Parts array before completing the multipart request.
|
||||
allParts := make([]ObjectPart, 0, len(partsInfo))
|
||||
for i := 1; i < partNumber; i++ {
|
||||
part, ok := partsInfo[i]
|
||||
if !ok {
|
||||
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
|
||||
}
|
||||
allParts = append(allParts, part)
|
||||
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
|
||||
ETag: part.ETag,
|
||||
PartNumber: part.PartNumber,
|
||||
ChecksumCRC32: part.ChecksumCRC32,
|
||||
ChecksumCRC32C: part.ChecksumCRC32C,
|
||||
ChecksumSHA1: part.ChecksumSHA1,
|
||||
ChecksumSHA256: part.ChecksumSHA256,
|
||||
ETag: part.ETag,
|
||||
PartNumber: part.PartNumber,
|
||||
ChecksumCRC32: part.ChecksumCRC32,
|
||||
ChecksumCRC32C: part.ChecksumCRC32C,
|
||||
ChecksumSHA1: part.ChecksumSHA1,
|
||||
ChecksumSHA256: part.ChecksumSHA256,
|
||||
ChecksumCRC64NVME: part.ChecksumCRC64NVME,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -497,12 +498,8 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
|
||||
ServerSideEncryption: opts.ServerSideEncryption,
|
||||
AutoChecksum: opts.AutoChecksum,
|
||||
}
|
||||
if len(crcBytes) > 0 {
|
||||
// Add hash of hashes.
|
||||
crc.Reset()
|
||||
crc.Write(crcBytes)
|
||||
opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
|
||||
}
|
||||
applyAutoChecksum(&opts, allParts)
|
||||
|
||||
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
|
||||
if err != nil {
|
||||
return UploadInfo{}, err
|
||||
|
||||
+8
@@ -213,6 +213,14 @@ type RemoveObjectError struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
func (err *RemoveObjectError) Error() string {
|
||||
// This should never happen as we will have a non-nil error with no underlying error.
|
||||
if err.Err == nil {
|
||||
return "unexpected remove object error result"
|
||||
}
|
||||
return err.Err.Error()
|
||||
}
|
||||
|
||||
// RemoveObjectResult - container of Multi Delete S3 API result
|
||||
type RemoveObjectResult struct {
|
||||
ObjectName string
|
||||
|
||||
+58
-12
@@ -18,6 +18,7 @@
|
||||
package minio
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"io"
|
||||
@@ -276,10 +277,45 @@ type ObjectPart struct {
|
||||
Size int64
|
||||
|
||||
// Checksum values of each part.
|
||||
ChecksumCRC32 string
|
||||
ChecksumCRC32C string
|
||||
ChecksumSHA1 string
|
||||
ChecksumSHA256 string
|
||||
ChecksumCRC32 string
|
||||
ChecksumCRC32C string
|
||||
ChecksumSHA1 string
|
||||
ChecksumSHA256 string
|
||||
ChecksumCRC64NVME string
|
||||
}
|
||||
|
||||
// Checksum will return the checksum for the given type.
|
||||
// Will return the empty string if not set.
|
||||
func (c ObjectPart) Checksum(t ChecksumType) string {
|
||||
switch {
|
||||
case t.Is(ChecksumCRC32C):
|
||||
return c.ChecksumCRC32C
|
||||
case t.Is(ChecksumCRC32):
|
||||
return c.ChecksumCRC32
|
||||
case t.Is(ChecksumSHA1):
|
||||
return c.ChecksumSHA1
|
||||
case t.Is(ChecksumSHA256):
|
||||
return c.ChecksumSHA256
|
||||
case t.Is(ChecksumCRC64NVME):
|
||||
return c.ChecksumCRC64NVME
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// ChecksumRaw returns the decoded checksum from the part.
|
||||
func (c ObjectPart) ChecksumRaw(t ChecksumType) ([]byte, error) {
|
||||
b64 := c.Checksum(t)
|
||||
if b64 == "" {
|
||||
return nil, errors.New("no checksum set")
|
||||
}
|
||||
decoded, err := base64.StdEncoding.DecodeString(b64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(decoded) != t.RawByteLen() {
|
||||
return nil, errors.New("checksum length mismatch")
|
||||
}
|
||||
return decoded, nil
|
||||
}
|
||||
|
||||
// ListObjectPartsResult container for ListObjectParts response.
|
||||
@@ -296,6 +332,12 @@ type ListObjectPartsResult struct {
|
||||
NextPartNumberMarker int
|
||||
MaxParts int
|
||||
|
||||
// ChecksumAlgorithm will be CRC32, CRC32C, etc.
|
||||
ChecksumAlgorithm string
|
||||
|
||||
// ChecksumType is FULL_OBJECT or COMPOSITE (assume COMPOSITE when unset)
|
||||
ChecksumType string
|
||||
|
||||
// Indicates whether the returned list of parts is truncated.
|
||||
IsTruncated bool
|
||||
ObjectParts []ObjectPart `xml:"Part"`
|
||||
@@ -320,10 +362,11 @@ type completeMultipartUploadResult struct {
|
||||
ETag string
|
||||
|
||||
// Checksum values, hash of hashes of parts.
|
||||
ChecksumCRC32 string
|
||||
ChecksumCRC32C string
|
||||
ChecksumSHA1 string
|
||||
ChecksumSHA256 string
|
||||
ChecksumCRC32 string
|
||||
ChecksumCRC32C string
|
||||
ChecksumSHA1 string
|
||||
ChecksumSHA256 string
|
||||
ChecksumCRC64NVME string
|
||||
}
|
||||
|
||||
// CompletePart sub container lists individual part numbers and their
|
||||
@@ -334,10 +377,11 @@ type CompletePart struct {
|
||||
ETag string
|
||||
|
||||
// Checksum values
|
||||
ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"`
|
||||
ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
|
||||
ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"`
|
||||
ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
|
||||
ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"`
|
||||
ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
|
||||
ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"`
|
||||
ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
|
||||
ChecksumCRC64NVME string `xml:",omitempty"`
|
||||
}
|
||||
|
||||
// Checksum will return the checksum for the given type.
|
||||
@@ -352,6 +396,8 @@ func (c CompletePart) Checksum(t ChecksumType) string {
|
||||
return c.ChecksumSHA1
|
||||
case t.Is(ChecksumSHA256):
|
||||
return c.ChecksumSHA256
|
||||
case t.Is(ChecksumCRC64NVME):
|
||||
return c.ChecksumCRC64NVME
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
+62
-4
@@ -92,6 +92,9 @@ type Client struct {
|
||||
// default to Auto.
|
||||
lookup BucketLookupType
|
||||
|
||||
// lookupFn is a custom function to return URL lookup type supported by the server.
|
||||
lookupFn func(u url.URL, bucketName string) BucketLookupType
|
||||
|
||||
// Factory for MD5 hash functions.
|
||||
md5Hasher func() md5simd.Hasher
|
||||
sha256Hasher func() md5simd.Hasher
|
||||
@@ -99,6 +102,7 @@ type Client struct {
|
||||
healthStatus int32
|
||||
|
||||
trailingHeaderSupport bool
|
||||
maxRetries int
|
||||
}
|
||||
|
||||
// Options for New method
|
||||
@@ -116,6 +120,25 @@ type Options struct {
|
||||
// function to perform region lookups appropriately.
|
||||
CustomRegionViaURL func(u url.URL) string
|
||||
|
||||
// Provide a custom function that returns BucketLookupType based
|
||||
// on the input URL, this is just like s3utils.IsVirtualHostSupported()
|
||||
// function but allows users to provide their own implementation.
|
||||
// Once this is set it overrides all settings for opts.BucketLookup
|
||||
// if this function returns BucketLookupAuto then default detection
|
||||
// via s3utils.IsVirtualHostSupported() is used, otherwise the
|
||||
// function is expected to return appropriate value as expected for
|
||||
// the URL the user wishes to honor.
|
||||
//
|
||||
// BucketName is passed additionally for the caller to ensure
|
||||
// handle situations where `bucketNames` have multiple `.` separators
|
||||
// in such case HTTPs certs will not work properly for *.<domain>
|
||||
// wildcards, so you need to specifically handle these situations
|
||||
// and not return bucket as part of DNS since those requests may fail.
|
||||
//
|
||||
// For better understanding look at s3utils.IsVirtualHostSupported()
|
||||
// implementation.
|
||||
BucketLookupViaURL func(u url.URL, bucketName string) BucketLookupType
|
||||
|
||||
// TrailingHeaders indicates server support of trailing headers.
|
||||
// Only supported for v4 signatures.
|
||||
TrailingHeaders bool
|
||||
@@ -123,12 +146,16 @@ type Options struct {
|
||||
// Custom hash routines. Leave nil to use standard.
|
||||
CustomMD5 func() md5simd.Hasher
|
||||
CustomSHA256 func() md5simd.Hasher
|
||||
|
||||
// Number of times a request is retried. Defaults to 10 retries if this option is not configured.
|
||||
// Set to 1 to disable retries.
|
||||
MaxRetries int
|
||||
}
|
||||
|
||||
// Global constants.
|
||||
const (
|
||||
libraryName = "minio-go"
|
||||
libraryVersion = "v7.0.78"
|
||||
libraryVersion = "v7.0.87"
|
||||
)
|
||||
|
||||
// User Agent should always following the below style.
|
||||
@@ -274,10 +301,16 @@ func privateNew(endpoint string, opts *Options) (*Client, error) {
|
||||
// Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined
|
||||
// by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints.
|
||||
clnt.lookup = opts.BucketLookup
|
||||
clnt.lookupFn = opts.BucketLookupViaURL
|
||||
|
||||
// healthcheck is not initialized
|
||||
clnt.healthStatus = unknown
|
||||
|
||||
clnt.maxRetries = MaxRetry
|
||||
if opts.MaxRetries > 0 {
|
||||
clnt.maxRetries = opts.MaxRetries
|
||||
}
|
||||
|
||||
// Return.
|
||||
return clnt, nil
|
||||
}
|
||||
@@ -592,7 +625,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
|
||||
|
||||
var retryable bool // Indicates if request can be retried.
|
||||
var bodySeeker io.Seeker // Extracted seeker from io.Reader.
|
||||
reqRetry := MaxRetry // Indicates how many times we can retry the request
|
||||
reqRetry := c.maxRetries // Indicates how many times we can retry the request
|
||||
|
||||
if metadata.contentBody != nil {
|
||||
// Check if body is seekable then it is retryable.
|
||||
@@ -798,7 +831,7 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request
|
||||
}
|
||||
|
||||
// Get credentials from the configured credentials provider.
|
||||
value, err := c.credsProvider.Get()
|
||||
value, err := c.credsProvider.GetWithContext(c.CredContext())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -993,6 +1026,18 @@ func (c *Client) makeTargetURL(bucketName, objectName, bucketLocation string, is
|
||||
|
||||
// returns true if virtual hosted style requests are to be used.
|
||||
func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool {
|
||||
if c.lookupFn != nil {
|
||||
lookup := c.lookupFn(url, bucketName)
|
||||
switch lookup {
|
||||
case BucketLookupDNS:
|
||||
return true
|
||||
case BucketLookupPath:
|
||||
return false
|
||||
}
|
||||
// if its auto then we fallback to default detection.
|
||||
return s3utils.IsVirtualHostSupported(url, bucketName)
|
||||
}
|
||||
|
||||
if bucketName == "" {
|
||||
return false
|
||||
}
|
||||
@@ -1000,11 +1045,24 @@ func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool
|
||||
if c.lookup == BucketLookupDNS {
|
||||
return true
|
||||
}
|
||||
|
||||
if c.lookup == BucketLookupPath {
|
||||
return false
|
||||
}
|
||||
|
||||
// default to virtual only for Amazon/Google storage. In all other cases use
|
||||
// default to virtual only for Amazon/Google storage. In all other cases use
|
||||
// path style requests
|
||||
return s3utils.IsVirtualHostSupported(url, bucketName)
|
||||
}
|
||||
|
||||
// CredContext returns the context for fetching credentials
|
||||
func (c *Client) CredContext() *credentials.CredContext {
|
||||
httpClient := c.httpClient
|
||||
if httpClient == nil {
|
||||
httpClient = http.DefaultClient
|
||||
}
|
||||
return &credentials.CredContext{
|
||||
Client: httpClient,
|
||||
Endpoint: c.endpointURL.String(),
|
||||
}
|
||||
}
|
||||
|
||||
+1
-1
@@ -212,7 +212,7 @@ func (c *Client) getBucketLocationRequest(ctx context.Context, bucketName string
|
||||
c.setUserAgent(req)
|
||||
|
||||
// Get credentials from the configured credentials provider.
|
||||
value, err := c.credsProvider.Get()
|
||||
value, err := c.credsProvider.GetWithContext(c.CredContext())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
+203
-6
@@ -21,11 +21,17 @@ import (
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"hash"
|
||||
"hash/crc32"
|
||||
"hash/crc64"
|
||||
"io"
|
||||
"math/bits"
|
||||
"net/http"
|
||||
"sort"
|
||||
|
||||
"github.com/minio/crc64nvme"
|
||||
)
|
||||
|
||||
// ChecksumType contains information about the checksum type.
|
||||
@@ -41,23 +47,41 @@ const (
|
||||
ChecksumCRC32
|
||||
// ChecksumCRC32C indicates a CRC32 checksum with Castagnoli table.
|
||||
ChecksumCRC32C
|
||||
// ChecksumCRC64NVME indicates CRC64 with 0xad93d23594c93659 polynomial.
|
||||
ChecksumCRC64NVME
|
||||
|
||||
// Keep after all valid checksums
|
||||
checksumLast
|
||||
|
||||
// ChecksumFullObject is a modifier that can be used on CRC32 and CRC32C
|
||||
// to indicate full object checksums.
|
||||
ChecksumFullObject
|
||||
|
||||
// checksumMask is a mask for valid checksum types.
|
||||
checksumMask = checksumLast - 1
|
||||
|
||||
// ChecksumNone indicates no checksum.
|
||||
ChecksumNone ChecksumType = 0
|
||||
|
||||
amzChecksumAlgo = "x-amz-checksum-algorithm"
|
||||
amzChecksumCRC32 = "x-amz-checksum-crc32"
|
||||
amzChecksumCRC32C = "x-amz-checksum-crc32c"
|
||||
amzChecksumSHA1 = "x-amz-checksum-sha1"
|
||||
amzChecksumSHA256 = "x-amz-checksum-sha256"
|
||||
// ChecksumFullObjectCRC32 indicates full object CRC32
|
||||
ChecksumFullObjectCRC32 = ChecksumCRC32 | ChecksumFullObject
|
||||
|
||||
// ChecksumFullObjectCRC32C indicates full object CRC32C
|
||||
ChecksumFullObjectCRC32C = ChecksumCRC32C | ChecksumFullObject
|
||||
|
||||
amzChecksumAlgo = "x-amz-checksum-algorithm"
|
||||
amzChecksumCRC32 = "x-amz-checksum-crc32"
|
||||
amzChecksumCRC32C = "x-amz-checksum-crc32c"
|
||||
amzChecksumSHA1 = "x-amz-checksum-sha1"
|
||||
amzChecksumSHA256 = "x-amz-checksum-sha256"
|
||||
amzChecksumCRC64NVME = "x-amz-checksum-crc64nvme"
|
||||
)
|
||||
|
||||
// Base returns the base type, without modifiers.
|
||||
func (c ChecksumType) Base() ChecksumType {
|
||||
return c & checksumMask
|
||||
}
|
||||
|
||||
// Is returns if c is all of t.
|
||||
func (c ChecksumType) Is(t ChecksumType) bool {
|
||||
return c&t == t
|
||||
@@ -75,10 +99,39 @@ func (c ChecksumType) Key() string {
|
||||
return amzChecksumSHA1
|
||||
case ChecksumSHA256:
|
||||
return amzChecksumSHA256
|
||||
case ChecksumCRC64NVME:
|
||||
return amzChecksumCRC64NVME
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// CanComposite will return if the checksum type can be used for composite multipart upload on AWS.
|
||||
func (c ChecksumType) CanComposite() bool {
|
||||
switch c & checksumMask {
|
||||
case ChecksumSHA256, ChecksumSHA1, ChecksumCRC32, ChecksumCRC32C:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// CanMergeCRC will return if the checksum type can be used for multipart upload on AWS.
|
||||
func (c ChecksumType) CanMergeCRC() bool {
|
||||
switch c & checksumMask {
|
||||
case ChecksumCRC32, ChecksumCRC32C, ChecksumCRC64NVME:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// FullObjectRequested will return if the checksum type indicates full object checksum was requested.
|
||||
func (c ChecksumType) FullObjectRequested() bool {
|
||||
switch c & (ChecksumFullObject | checksumMask) {
|
||||
case ChecksumFullObjectCRC32C, ChecksumFullObjectCRC32, ChecksumCRC64NVME:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// KeyCapitalized returns the capitalized key as used in HTTP headers.
|
||||
func (c ChecksumType) KeyCapitalized() string {
|
||||
return http.CanonicalHeaderKey(c.Key())
|
||||
@@ -93,10 +146,14 @@ func (c ChecksumType) RawByteLen() int {
|
||||
return sha1.Size
|
||||
case ChecksumSHA256:
|
||||
return sha256.Size
|
||||
case ChecksumCRC64NVME:
|
||||
return crc64.Size
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
const crc64NVMEPolynomial = 0xad93d23594c93659
|
||||
|
||||
// Hasher returns a hasher corresponding to the checksum type.
|
||||
// Returns nil if no checksum.
|
||||
func (c ChecksumType) Hasher() hash.Hash {
|
||||
@@ -109,13 +166,15 @@ func (c ChecksumType) Hasher() hash.Hash {
|
||||
return sha1.New()
|
||||
case ChecksumSHA256:
|
||||
return sha256.New()
|
||||
case ChecksumCRC64NVME:
|
||||
return crc64nvme.New()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsSet returns whether the type is valid and known.
|
||||
func (c ChecksumType) IsSet() bool {
|
||||
return bits.OnesCount32(uint32(c)) == 1
|
||||
return bits.OnesCount32(uint32(c&checksumMask)) == 1
|
||||
}
|
||||
|
||||
// SetDefault will set the checksum if not already set.
|
||||
@@ -125,6 +184,16 @@ func (c *ChecksumType) SetDefault(t ChecksumType) {
|
||||
}
|
||||
}
|
||||
|
||||
// EncodeToString the encoded hash value of the content provided in b.
|
||||
func (c ChecksumType) EncodeToString(b []byte) string {
|
||||
if !c.IsSet() {
|
||||
return ""
|
||||
}
|
||||
h := c.Hasher()
|
||||
h.Write(b)
|
||||
return base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
}
|
||||
|
||||
// String returns the type as a string.
|
||||
// CRC32, CRC32C, SHA1, and SHA256 for valid values.
|
||||
// Empty string for unset and "<invalid>" if not valid.
|
||||
@@ -140,6 +209,8 @@ func (c ChecksumType) String() string {
|
||||
return "SHA256"
|
||||
case ChecksumNone:
|
||||
return ""
|
||||
case ChecksumCRC64NVME:
|
||||
return "CRC64NVME"
|
||||
}
|
||||
return "<invalid>"
|
||||
}
|
||||
@@ -221,3 +292,129 @@ func (c Checksum) Raw() []byte {
|
||||
}
|
||||
return c.r
|
||||
}
|
||||
|
||||
// CompositeChecksum returns the composite checksum of all provided parts.
|
||||
func (c ChecksumType) CompositeChecksum(p []ObjectPart) (*Checksum, error) {
|
||||
if !c.CanComposite() {
|
||||
return nil, errors.New("cannot do composite checksum")
|
||||
}
|
||||
sort.Slice(p, func(i, j int) bool {
|
||||
return p[i].PartNumber < p[j].PartNumber
|
||||
})
|
||||
c = c.Base()
|
||||
crcBytes := make([]byte, 0, len(p)*c.RawByteLen())
|
||||
for _, part := range p {
|
||||
pCrc, err := part.ChecksumRaw(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
crcBytes = append(crcBytes, pCrc...)
|
||||
}
|
||||
h := c.Hasher()
|
||||
h.Write(crcBytes)
|
||||
return &Checksum{Type: c, r: h.Sum(nil)}, nil
|
||||
}
|
||||
|
||||
// FullObjectChecksum will return the full object checksum from provided parts.
|
||||
func (c ChecksumType) FullObjectChecksum(p []ObjectPart) (*Checksum, error) {
|
||||
if !c.CanMergeCRC() {
|
||||
return nil, errors.New("cannot merge this checksum type")
|
||||
}
|
||||
c = c.Base()
|
||||
sort.Slice(p, func(i, j int) bool {
|
||||
return p[i].PartNumber < p[j].PartNumber
|
||||
})
|
||||
|
||||
switch len(p) {
|
||||
case 0:
|
||||
return nil, errors.New("no parts given")
|
||||
case 1:
|
||||
check, err := p[0].ChecksumRaw(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Checksum{
|
||||
Type: c,
|
||||
r: check,
|
||||
}, nil
|
||||
}
|
||||
var merged uint32
|
||||
var merged64 uint64
|
||||
first, err := p[0].ChecksumRaw(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sz := p[0].Size
|
||||
switch c {
|
||||
case ChecksumCRC32, ChecksumCRC32C:
|
||||
merged = binary.BigEndian.Uint32(first)
|
||||
case ChecksumCRC64NVME:
|
||||
merged64 = binary.BigEndian.Uint64(first)
|
||||
}
|
||||
|
||||
poly32 := uint32(crc32.IEEE)
|
||||
if c.Is(ChecksumCRC32C) {
|
||||
poly32 = crc32.Castagnoli
|
||||
}
|
||||
for _, part := range p[1:] {
|
||||
if part.Size == 0 {
|
||||
continue
|
||||
}
|
||||
sz += part.Size
|
||||
pCrc, err := part.ChecksumRaw(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch c {
|
||||
case ChecksumCRC32, ChecksumCRC32C:
|
||||
merged = crc32Combine(poly32, merged, binary.BigEndian.Uint32(pCrc), part.Size)
|
||||
case ChecksumCRC64NVME:
|
||||
merged64 = crc64Combine(bits.Reverse64(crc64NVMEPolynomial), merged64, binary.BigEndian.Uint64(pCrc), part.Size)
|
||||
}
|
||||
}
|
||||
var tmp [8]byte
|
||||
switch c {
|
||||
case ChecksumCRC32, ChecksumCRC32C:
|
||||
binary.BigEndian.PutUint32(tmp[:], merged)
|
||||
return &Checksum{
|
||||
Type: c,
|
||||
r: tmp[:4],
|
||||
}, nil
|
||||
case ChecksumCRC64NVME:
|
||||
binary.BigEndian.PutUint64(tmp[:], merged64)
|
||||
return &Checksum{
|
||||
Type: c,
|
||||
r: tmp[:8],
|
||||
}, nil
|
||||
default:
|
||||
return nil, errors.New("unknown checksum type")
|
||||
}
|
||||
}
|
||||
|
||||
func addAutoChecksumHeaders(opts *PutObjectOptions) {
|
||||
if opts.UserMetadata == nil {
|
||||
opts.UserMetadata = make(map[string]string, 1)
|
||||
}
|
||||
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
|
||||
if opts.AutoChecksum.FullObjectRequested() {
|
||||
opts.UserMetadata["X-Amz-Checksum-Type"] = "FULL_OBJECT"
|
||||
}
|
||||
}
|
||||
|
||||
func applyAutoChecksum(opts *PutObjectOptions, allParts []ObjectPart) {
|
||||
if !opts.AutoChecksum.IsSet() {
|
||||
return
|
||||
}
|
||||
if opts.AutoChecksum.CanComposite() && !opts.AutoChecksum.Is(ChecksumFullObject) {
|
||||
// Add composite hash of hashes.
|
||||
crc, err := opts.AutoChecksum.CompositeChecksum(allParts)
|
||||
if err == nil {
|
||||
opts.UserMetadata = map[string]string{opts.AutoChecksum.Key(): crc.Encoded()}
|
||||
}
|
||||
} else if opts.AutoChecksum.CanMergeCRC() {
|
||||
crc, err := opts.AutoChecksum.FullObjectChecksum(allParts)
|
||||
if err == nil {
|
||||
opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): crc.Encoded(), "X-Amz-Checksum-Type": "FULL_OBJECT"}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
+429
-1557
File diff suppressed because it is too large
Load Diff
+32
-11
@@ -76,7 +76,8 @@ type AssumeRoleResult struct {
|
||||
type STSAssumeRole struct {
|
||||
Expiry
|
||||
|
||||
// Required http Client to use when connecting to MinIO STS service.
|
||||
// Optional http Client to use when connecting to MinIO STS service
|
||||
// (overrides default client in CredContext)
|
||||
Client *http.Client
|
||||
|
||||
// STS endpoint to fetch STS credentials.
|
||||
@@ -108,16 +109,10 @@ type STSAssumeRoleOptions struct {
|
||||
// NewSTSAssumeRole returns a pointer to a new
|
||||
// Credentials object wrapping the STSAssumeRole.
|
||||
func NewSTSAssumeRole(stsEndpoint string, opts STSAssumeRoleOptions) (*Credentials, error) {
|
||||
if stsEndpoint == "" {
|
||||
return nil, errors.New("STS endpoint cannot be empty")
|
||||
}
|
||||
if opts.AccessKey == "" || opts.SecretKey == "" {
|
||||
return nil, errors.New("AssumeRole credentials access/secretkey is mandatory")
|
||||
}
|
||||
return New(&STSAssumeRole{
|
||||
Client: &http.Client{
|
||||
Transport: http.DefaultTransport,
|
||||
},
|
||||
STSEndpoint: stsEndpoint,
|
||||
Options: opts,
|
||||
}), nil
|
||||
@@ -222,10 +217,30 @@ func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssume
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Retrieve retrieves credentials from the MinIO service.
|
||||
// Error will be returned if the request fails.
|
||||
func (m *STSAssumeRole) Retrieve() (Value, error) {
|
||||
a, err := getAssumeRoleCredentials(m.Client, m.STSEndpoint, m.Options)
|
||||
// RetrieveWithCredContext retrieves credentials from the MinIO service.
|
||||
// Error will be returned if the request fails, optional cred context.
|
||||
func (m *STSAssumeRole) RetrieveWithCredContext(cc *CredContext) (Value, error) {
|
||||
if cc == nil {
|
||||
cc = defaultCredContext
|
||||
}
|
||||
|
||||
client := m.Client
|
||||
if client == nil {
|
||||
client = cc.Client
|
||||
}
|
||||
if client == nil {
|
||||
client = defaultCredContext.Client
|
||||
}
|
||||
|
||||
stsEndpoint := m.STSEndpoint
|
||||
if stsEndpoint == "" {
|
||||
stsEndpoint = cc.Endpoint
|
||||
}
|
||||
if stsEndpoint == "" {
|
||||
return Value{}, errors.New("STS endpoint unknown")
|
||||
}
|
||||
|
||||
a, err := getAssumeRoleCredentials(client, stsEndpoint, m.Options)
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
@@ -241,3 +256,9 @@ func (m *STSAssumeRole) Retrieve() (Value, error) {
|
||||
SignerType: SignatureV4,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve retrieves credentials from the MinIO service.
|
||||
// Error will be returned if the request fails.
|
||||
func (m *STSAssumeRole) Retrieve() (Value, error) {
|
||||
return m.RetrieveWithCredContext(nil)
|
||||
}
|
||||
|
||||
+18
@@ -55,6 +55,24 @@ func NewChainCredentials(providers []Provider) *Credentials {
|
||||
})
|
||||
}
|
||||
|
||||
// RetrieveWithCredContext is like Retrieve with CredContext
|
||||
func (c *Chain) RetrieveWithCredContext(cc *CredContext) (Value, error) {
|
||||
for _, p := range c.Providers {
|
||||
creds, _ := p.RetrieveWithCredContext(cc)
|
||||
// Always prioritize non-anonymous providers, if any.
|
||||
if creds.AccessKeyID == "" && creds.SecretAccessKey == "" {
|
||||
continue
|
||||
}
|
||||
c.curr = p
|
||||
return creds, nil
|
||||
}
|
||||
// At this point we have exhausted all the providers and
|
||||
// are left without any credentials return anonymous.
|
||||
return Value{
|
||||
SignerType: SignatureAnonymous,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve returns the credentials value, returns no credentials(anonymous)
|
||||
// if no credentials provider returned any value.
|
||||
//
|
||||
|
||||
+47
-1
@@ -18,6 +18,7 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
@@ -30,6 +31,10 @@ const (
|
||||
defaultExpiryWindow = 0.8
|
||||
)
|
||||
|
||||
// defaultCredContext is used when the credential context doesn't
|
||||
// actually matter or the default context is suitable.
|
||||
var defaultCredContext = &CredContext{Client: http.DefaultClient}
|
||||
|
||||
// A Value is the S3 credentials value for individual credential fields.
|
||||
type Value struct {
|
||||
// S3 Access key ID
|
||||
@@ -52,8 +57,17 @@ type Value struct {
|
||||
// Value. A provider is required to manage its own Expired state, and what to
|
||||
// be expired means.
|
||||
type Provider interface {
|
||||
// RetrieveWithCredContext returns nil if it successfully retrieved the
|
||||
// value. Error is returned if the value were not obtainable, or empty.
|
||||
// optionally takes CredContext for additional context to retrieve credentials.
|
||||
RetrieveWithCredContext(cc *CredContext) (Value, error)
|
||||
|
||||
// Retrieve returns nil if it successfully retrieved the value.
|
||||
// Error is returned if the value were not obtainable, or empty.
|
||||
//
|
||||
// Deprecated: Retrieve() exists for historical compatibility and should not
|
||||
// be used. To get new credentials use the RetrieveWithCredContext function
|
||||
// to ensure the proper context (i.e. HTTP client) will be used.
|
||||
Retrieve() (Value, error)
|
||||
|
||||
// IsExpired returns if the credentials are no longer valid, and need
|
||||
@@ -61,6 +75,18 @@ type Provider interface {
|
||||
IsExpired() bool
|
||||
}
|
||||
|
||||
// CredContext is passed to the Retrieve function of a provider to provide
|
||||
// some additional context to retrieve credentials.
|
||||
type CredContext struct {
|
||||
// Client specifies the HTTP client that should be used if an HTTP
|
||||
// request is to be made to fetch the credentials.
|
||||
Client *http.Client
|
||||
|
||||
// Endpoint specifies the MinIO endpoint that will be used if no
|
||||
// explicit endpoint is provided.
|
||||
Endpoint string
|
||||
}
|
||||
|
||||
// A Expiry provides shared expiration logic to be used by credentials
|
||||
// providers to implement expiry functionality.
|
||||
//
|
||||
@@ -146,16 +172,36 @@ func New(provider Provider) *Credentials {
|
||||
//
|
||||
// If Credentials.Expire() was called the credentials Value will be force
|
||||
// expired, and the next call to Get() will cause them to be refreshed.
|
||||
//
|
||||
// Deprecated: Get() exists for historical compatibility and should not be
|
||||
// used. To get new credentials use the Credentials.GetWithContext function
|
||||
// to ensure the proper context (i.e. HTTP client) will be used.
|
||||
func (c *Credentials) Get() (Value, error) {
|
||||
return c.GetWithContext(nil)
|
||||
}
|
||||
|
||||
// GetWithContext returns the credentials value, or error if the
|
||||
// credentials Value failed to be retrieved.
|
||||
//
|
||||
// Will return the cached credentials Value if it has not expired. If the
|
||||
// credentials Value has expired the Provider's Retrieve() will be called
|
||||
// to refresh the credentials.
|
||||
//
|
||||
// If Credentials.Expire() was called the credentials Value will be force
|
||||
// expired, and the next call to Get() will cause them to be refreshed.
|
||||
func (c *Credentials) GetWithContext(cc *CredContext) (Value, error) {
|
||||
if c == nil {
|
||||
return Value{}, nil
|
||||
}
|
||||
if cc == nil {
|
||||
cc = defaultCredContext
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
if c.isExpired() {
|
||||
creds, err := c.provider.Retrieve()
|
||||
creds, err := c.provider.RetrieveWithCredContext(cc)
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
|
||||
+11
-2
@@ -37,8 +37,7 @@ func NewEnvAWS() *Credentials {
|
||||
return New(&EnvAWS{})
|
||||
}
|
||||
|
||||
// Retrieve retrieves the keys from the environment.
|
||||
func (e *EnvAWS) Retrieve() (Value, error) {
|
||||
func (e *EnvAWS) retrieve() (Value, error) {
|
||||
e.retrieved = false
|
||||
|
||||
id := os.Getenv("AWS_ACCESS_KEY_ID")
|
||||
@@ -65,6 +64,16 @@ func (e *EnvAWS) Retrieve() (Value, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve retrieves the keys from the environment.
|
||||
func (e *EnvAWS) Retrieve() (Value, error) {
|
||||
return e.retrieve()
|
||||
}
|
||||
|
||||
// RetrieveWithCredContext is like Retrieve (no-op input of Cred Context)
|
||||
func (e *EnvAWS) RetrieveWithCredContext(_ *CredContext) (Value, error) {
|
||||
return e.retrieve()
|
||||
}
|
||||
|
||||
// IsExpired returns if the credentials have been retrieved.
|
||||
func (e *EnvAWS) IsExpired() bool {
|
||||
return !e.retrieved
|
||||
|
||||
+11
-2
@@ -38,8 +38,7 @@ func NewEnvMinio() *Credentials {
|
||||
return New(&EnvMinio{})
|
||||
}
|
||||
|
||||
// Retrieve retrieves the keys from the environment.
|
||||
func (e *EnvMinio) Retrieve() (Value, error) {
|
||||
func (e *EnvMinio) retrieve() (Value, error) {
|
||||
e.retrieved = false
|
||||
|
||||
id := os.Getenv("MINIO_ROOT_USER")
|
||||
@@ -62,6 +61,16 @@ func (e *EnvMinio) Retrieve() (Value, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve retrieves the keys from the environment.
|
||||
func (e *EnvMinio) Retrieve() (Value, error) {
|
||||
return e.retrieve()
|
||||
}
|
||||
|
||||
// RetrieveWithCredContext is like Retrieve() (no-op input cred context)
|
||||
func (e *EnvMinio) RetrieveWithCredContext(_ *CredContext) (Value, error) {
|
||||
return e.retrieve()
|
||||
}
|
||||
|
||||
// IsExpired returns if the credentials have been retrieved.
|
||||
func (e *EnvMinio) IsExpired() bool {
|
||||
return !e.retrieved
|
||||
|
||||
+12
-3
@@ -71,9 +71,7 @@ func NewFileAWSCredentials(filename, profile string) *Credentials {
|
||||
})
|
||||
}
|
||||
|
||||
// Retrieve reads and extracts the shared credentials from the current
|
||||
// users home directory.
|
||||
func (p *FileAWSCredentials) Retrieve() (Value, error) {
|
||||
func (p *FileAWSCredentials) retrieve() (Value, error) {
|
||||
if p.Filename == "" {
|
||||
p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
|
||||
if p.Filename == "" {
|
||||
@@ -142,6 +140,17 @@ func (p *FileAWSCredentials) Retrieve() (Value, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve reads and extracts the shared credentials from the current
|
||||
// users home directory.
|
||||
func (p *FileAWSCredentials) Retrieve() (Value, error) {
|
||||
return p.retrieve()
|
||||
}
|
||||
|
||||
// RetrieveWithCredContext is like Retrieve(), cred context is no-op for File credentials
|
||||
func (p *FileAWSCredentials) RetrieveWithCredContext(_ *CredContext) (Value, error) {
|
||||
return p.retrieve()
|
||||
}
|
||||
|
||||
// loadProfiles loads from the file pointed to by shared credentials filename for profile.
|
||||
// The credentials retrieved from the profile will be returned or error. Error will be
|
||||
// returned if it fails to read from the file, or the data is invalid.
|
||||
|
||||
+12
-3
@@ -56,9 +56,7 @@ func NewFileMinioClient(filename, alias string) *Credentials {
|
||||
})
|
||||
}
|
||||
|
||||
// Retrieve reads and extracts the shared credentials from the current
|
||||
// users home directory.
|
||||
func (p *FileMinioClient) Retrieve() (Value, error) {
|
||||
func (p *FileMinioClient) retrieve() (Value, error) {
|
||||
if p.Filename == "" {
|
||||
if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok {
|
||||
p.Filename = value
|
||||
@@ -96,6 +94,17 @@ func (p *FileMinioClient) Retrieve() (Value, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve reads and extracts the shared credentials from the current
|
||||
// users home directory.
|
||||
func (p *FileMinioClient) Retrieve() (Value, error) {
|
||||
return p.retrieve()
|
||||
}
|
||||
|
||||
// RetrieveWithCredContext - is like Retrieve()
|
||||
func (p *FileMinioClient) RetrieveWithCredContext(_ *CredContext) (Value, error) {
|
||||
return p.retrieve()
|
||||
}
|
||||
|
||||
// IsExpired returns if the shared credentials have expired.
|
||||
func (p *FileMinioClient) IsExpired() bool {
|
||||
return !p.retrieved
|
||||
|
||||
+30
-14
@@ -49,7 +49,8 @@ const DefaultExpiryWindow = -1
|
||||
type IAM struct {
|
||||
Expiry
|
||||
|
||||
// Required http Client to use when connecting to IAM metadata service.
|
||||
// Optional http Client to use when connecting to IAM metadata service
|
||||
// (overrides default client in CredContext)
|
||||
Client *http.Client
|
||||
|
||||
// Custom endpoint to fetch IAM role credentials.
|
||||
@@ -90,17 +91,16 @@ const (
|
||||
// NewIAM returns a pointer to a new Credentials object wrapping the IAM.
|
||||
func NewIAM(endpoint string) *Credentials {
|
||||
return New(&IAM{
|
||||
Client: &http.Client{
|
||||
Transport: http.DefaultTransport,
|
||||
},
|
||||
Endpoint: endpoint,
|
||||
})
|
||||
}
|
||||
|
||||
// Retrieve retrieves credentials from the EC2 service.
|
||||
// Error will be returned if the request fails, or unable to extract
|
||||
// the desired
|
||||
func (m *IAM) Retrieve() (Value, error) {
|
||||
// RetrieveWithCredContext is like Retrieve with Cred Context
|
||||
func (m *IAM) RetrieveWithCredContext(cc *CredContext) (Value, error) {
|
||||
if cc == nil {
|
||||
cc = defaultCredContext
|
||||
}
|
||||
|
||||
token := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN")
|
||||
if token == "" {
|
||||
token = m.Container.AuthorizationToken
|
||||
@@ -144,7 +144,16 @@ func (m *IAM) Retrieve() (Value, error) {
|
||||
var roleCreds ec2RoleCredRespBody
|
||||
var err error
|
||||
|
||||
client := m.Client
|
||||
if client == nil {
|
||||
client = cc.Client
|
||||
}
|
||||
if client == nil {
|
||||
client = defaultCredContext.Client
|
||||
}
|
||||
|
||||
endpoint := m.Endpoint
|
||||
|
||||
switch {
|
||||
case identityFile != "":
|
||||
if len(endpoint) == 0 {
|
||||
@@ -160,7 +169,7 @@ func (m *IAM) Retrieve() (Value, error) {
|
||||
}
|
||||
|
||||
creds := &STSWebIdentity{
|
||||
Client: m.Client,
|
||||
Client: client,
|
||||
STSEndpoint: endpoint,
|
||||
GetWebIDTokenExpiry: func() (*WebIdentityToken, error) {
|
||||
token, err := os.ReadFile(identityFile)
|
||||
@@ -174,7 +183,7 @@ func (m *IAM) Retrieve() (Value, error) {
|
||||
roleSessionName: roleSessionName,
|
||||
}
|
||||
|
||||
stsWebIdentityCreds, err := creds.Retrieve()
|
||||
stsWebIdentityCreds, err := creds.RetrieveWithCredContext(cc)
|
||||
if err == nil {
|
||||
m.SetExpiration(creds.Expiration(), DefaultExpiryWindow)
|
||||
}
|
||||
@@ -185,11 +194,11 @@ func (m *IAM) Retrieve() (Value, error) {
|
||||
endpoint = fmt.Sprintf("%s%s", DefaultECSRoleEndpoint, relativeURI)
|
||||
}
|
||||
|
||||
roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
|
||||
roleCreds, err = getEcsTaskCredentials(client, endpoint, token)
|
||||
|
||||
case tokenFile != "" && fullURI != "":
|
||||
endpoint = fullURI
|
||||
roleCreds, err = getEKSPodIdentityCredentials(m.Client, endpoint, tokenFile)
|
||||
roleCreds, err = getEKSPodIdentityCredentials(client, endpoint, tokenFile)
|
||||
|
||||
case fullURI != "":
|
||||
if len(endpoint) == 0 {
|
||||
@@ -203,10 +212,10 @@ func (m *IAM) Retrieve() (Value, error) {
|
||||
}
|
||||
}
|
||||
|
||||
roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
|
||||
roleCreds, err = getEcsTaskCredentials(client, endpoint, token)
|
||||
|
||||
default:
|
||||
roleCreds, err = getCredentials(m.Client, endpoint)
|
||||
roleCreds, err = getCredentials(client, endpoint)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
@@ -224,6 +233,13 @@ func (m *IAM) Retrieve() (Value, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve retrieves credentials from the EC2 service.
|
||||
// Error will be returned if the request fails, or unable to extract
|
||||
// the desired
|
||||
func (m *IAM) Retrieve() (Value, error) {
|
||||
return m.RetrieveWithCredContext(nil)
|
||||
}
|
||||
|
||||
// A ec2RoleCredRespBody provides the shape for unmarshaling credential
|
||||
// request responses.
|
||||
type ec2RoleCredRespBody struct {
|
||||
|
||||
+5
@@ -59,6 +59,11 @@ func (s *Static) Retrieve() (Value, error) {
|
||||
return s.Value, nil
|
||||
}
|
||||
|
||||
// RetrieveWithCredContext returns the static credentials.
|
||||
func (s *Static) RetrieveWithCredContext(_ *CredContext) (Value, error) {
|
||||
return s.Retrieve()
|
||||
}
|
||||
|
||||
// IsExpired returns if the credentials are expired.
|
||||
//
|
||||
// For Static, the credentials never expired.
|
||||
|
||||
+31
-11
@@ -72,7 +72,8 @@ type ClientGrantsToken struct {
|
||||
type STSClientGrants struct {
|
||||
Expiry
|
||||
|
||||
// Required http Client to use when connecting to MinIO STS service.
|
||||
// Optional http Client to use when connecting to MinIO STS service.
|
||||
// (overrides default client in CredContext)
|
||||
Client *http.Client
|
||||
|
||||
// MinIO endpoint to fetch STS credentials.
|
||||
@@ -90,16 +91,10 @@ type STSClientGrants struct {
|
||||
// NewSTSClientGrants returns a pointer to a new
|
||||
// Credentials object wrapping the STSClientGrants.
|
||||
func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (*Credentials, error) {
|
||||
if stsEndpoint == "" {
|
||||
return nil, errors.New("STS endpoint cannot be empty")
|
||||
}
|
||||
if getClientGrantsTokenExpiry == nil {
|
||||
return nil, errors.New("Client grants access token and expiry retrieval function should be defined")
|
||||
}
|
||||
return New(&STSClientGrants{
|
||||
Client: &http.Client{
|
||||
Transport: http.DefaultTransport,
|
||||
},
|
||||
STSEndpoint: stsEndpoint,
|
||||
GetClientGrantsTokenExpiry: getClientGrantsTokenExpiry,
|
||||
}), nil
|
||||
@@ -162,10 +157,29 @@ func getClientGrantsCredentials(clnt *http.Client, endpoint string,
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Retrieve retrieves credentials from the MinIO service.
|
||||
// Error will be returned if the request fails.
|
||||
func (m *STSClientGrants) Retrieve() (Value, error) {
|
||||
a, err := getClientGrantsCredentials(m.Client, m.STSEndpoint, m.GetClientGrantsTokenExpiry)
|
||||
// RetrieveWithCredContext is like Retrieve() with cred context
|
||||
func (m *STSClientGrants) RetrieveWithCredContext(cc *CredContext) (Value, error) {
|
||||
if cc == nil {
|
||||
cc = defaultCredContext
|
||||
}
|
||||
|
||||
client := m.Client
|
||||
if client == nil {
|
||||
client = cc.Client
|
||||
}
|
||||
if client == nil {
|
||||
client = defaultCredContext.Client
|
||||
}
|
||||
|
||||
stsEndpoint := m.STSEndpoint
|
||||
if stsEndpoint == "" {
|
||||
stsEndpoint = cc.Endpoint
|
||||
}
|
||||
if stsEndpoint == "" {
|
||||
return Value{}, errors.New("STS endpoint unknown")
|
||||
}
|
||||
|
||||
a, err := getClientGrantsCredentials(client, stsEndpoint, m.GetClientGrantsTokenExpiry)
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
@@ -181,3 +195,9 @@ func (m *STSClientGrants) Retrieve() (Value, error) {
|
||||
SignerType: SignatureV4,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve retrieves credentials from the MinIO service.
|
||||
// Error will be returned if the request fails.
|
||||
func (m *STSClientGrants) Retrieve() (Value, error) {
|
||||
return m.RetrieveWithCredContext(nil)
|
||||
}
|
||||
|
||||
+31
-5
@@ -53,6 +53,8 @@ type AssumeRoleWithCustomTokenResponse struct {
|
||||
type CustomTokenIdentity struct {
|
||||
Expiry
|
||||
|
||||
// Optional http Client to use when connecting to MinIO STS service.
|
||||
// (overrides default client in CredContext)
|
||||
Client *http.Client
|
||||
|
||||
// MinIO server STS endpoint to fetch STS credentials.
|
||||
@@ -69,9 +71,21 @@ type CustomTokenIdentity struct {
|
||||
RequestedExpiry time.Duration
|
||||
}
|
||||
|
||||
// Retrieve - to satisfy Provider interface; fetches credentials from MinIO.
|
||||
func (c *CustomTokenIdentity) Retrieve() (value Value, err error) {
|
||||
u, err := url.Parse(c.STSEndpoint)
|
||||
// RetrieveWithCredContext with Retrieve optionally cred context
|
||||
func (c *CustomTokenIdentity) RetrieveWithCredContext(cc *CredContext) (value Value, err error) {
|
||||
if cc == nil {
|
||||
cc = defaultCredContext
|
||||
}
|
||||
|
||||
stsEndpoint := c.STSEndpoint
|
||||
if stsEndpoint == "" {
|
||||
stsEndpoint = cc.Endpoint
|
||||
}
|
||||
if stsEndpoint == "" {
|
||||
return Value{}, errors.New("STS endpoint unknown")
|
||||
}
|
||||
|
||||
u, err := url.Parse(stsEndpoint)
|
||||
if err != nil {
|
||||
return value, err
|
||||
}
|
||||
@@ -92,7 +106,15 @@ func (c *CustomTokenIdentity) Retrieve() (value Value, err error) {
|
||||
return value, err
|
||||
}
|
||||
|
||||
resp, err := c.Client.Do(req)
|
||||
client := c.Client
|
||||
if client == nil {
|
||||
client = cc.Client
|
||||
}
|
||||
if client == nil {
|
||||
client = defaultCredContext.Client
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return value, err
|
||||
}
|
||||
@@ -118,11 +140,15 @@ func (c *CustomTokenIdentity) Retrieve() (value Value, err error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve - to satisfy Provider interface; fetches credentials from MinIO.
|
||||
func (c *CustomTokenIdentity) Retrieve() (value Value, err error) {
|
||||
return c.RetrieveWithCredContext(nil)
|
||||
}
|
||||
|
||||
// NewCustomTokenCredentials - returns credentials using the
|
||||
// AssumeRoleWithCustomToken STS API.
|
||||
func NewCustomTokenCredentials(stsEndpoint, token, roleArn string, optFuncs ...CustomTokenOpt) (*Credentials, error) {
|
||||
c := CustomTokenIdentity{
|
||||
Client: &http.Client{Transport: http.DefaultTransport},
|
||||
STSEndpoint: stsEndpoint,
|
||||
Token: token,
|
||||
RoleArn: roleArn,
|
||||
|
||||
+33
-7
@@ -20,6 +20,7 @@ package credentials
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
@@ -55,7 +56,8 @@ type LDAPIdentityResult struct {
|
||||
type LDAPIdentity struct {
|
||||
Expiry
|
||||
|
||||
// Required http Client to use when connecting to MinIO STS service.
|
||||
// Optional http Client to use when connecting to MinIO STS service.
|
||||
// (overrides default client in CredContext)
|
||||
Client *http.Client
|
||||
|
||||
// Exported STS endpoint to fetch STS credentials.
|
||||
@@ -77,7 +79,6 @@ type LDAPIdentity struct {
|
||||
// Identity.
|
||||
func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string, optFuncs ...LDAPIdentityOpt) (*Credentials, error) {
|
||||
l := LDAPIdentity{
|
||||
Client: &http.Client{Transport: http.DefaultTransport},
|
||||
STSEndpoint: stsEndpoint,
|
||||
LDAPUsername: ldapUsername,
|
||||
LDAPPassword: ldapPassword,
|
||||
@@ -113,7 +114,6 @@ func LDAPIdentityExpiryOpt(d time.Duration) LDAPIdentityOpt {
|
||||
// Deprecated: Use the `LDAPIdentityPolicyOpt` with `NewLDAPIdentity` instead.
|
||||
func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, policy string) (*Credentials, error) {
|
||||
return New(&LDAPIdentity{
|
||||
Client: &http.Client{Transport: http.DefaultTransport},
|
||||
STSEndpoint: stsEndpoint,
|
||||
LDAPUsername: ldapUsername,
|
||||
LDAPPassword: ldapPassword,
|
||||
@@ -121,10 +121,22 @@ func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, p
|
||||
}), nil
|
||||
}
|
||||
|
||||
// Retrieve gets the credential by calling the MinIO STS API for
|
||||
// RetrieveWithCredContext gets the credential by calling the MinIO STS API for
|
||||
// LDAP on the configured stsEndpoint.
|
||||
func (k *LDAPIdentity) Retrieve() (value Value, err error) {
|
||||
u, err := url.Parse(k.STSEndpoint)
|
||||
func (k *LDAPIdentity) RetrieveWithCredContext(cc *CredContext) (value Value, err error) {
|
||||
if cc == nil {
|
||||
cc = defaultCredContext
|
||||
}
|
||||
|
||||
stsEndpoint := k.STSEndpoint
|
||||
if stsEndpoint == "" {
|
||||
stsEndpoint = cc.Endpoint
|
||||
}
|
||||
if stsEndpoint == "" {
|
||||
return Value{}, errors.New("STS endpoint unknown")
|
||||
}
|
||||
|
||||
u, err := url.Parse(stsEndpoint)
|
||||
if err != nil {
|
||||
return value, err
|
||||
}
|
||||
@@ -148,7 +160,15 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) {
|
||||
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
|
||||
resp, err := k.Client.Do(req)
|
||||
client := k.Client
|
||||
if client == nil {
|
||||
client = cc.Client
|
||||
}
|
||||
if client == nil {
|
||||
client = defaultCredContext.Client
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return value, err
|
||||
}
|
||||
@@ -188,3 +208,9 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) {
|
||||
SignerType: SignatureV4,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve gets the credential by calling the MinIO STS API for
|
||||
// LDAP on the configured stsEndpoint.
|
||||
func (k *LDAPIdentity) Retrieve() (value Value, err error) {
|
||||
return k.RetrieveWithCredContext(defaultCredContext)
|
||||
}
|
||||
|
||||
+57
-43
@@ -20,8 +20,8 @@ import (
|
||||
"crypto/tls"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
@@ -36,7 +36,12 @@ type CertificateIdentityOption func(*STSCertificateIdentity)
|
||||
// CertificateIdentityWithTransport returns a CertificateIdentityOption that
|
||||
// customizes the STSCertificateIdentity with the given http.RoundTripper.
|
||||
func CertificateIdentityWithTransport(t http.RoundTripper) CertificateIdentityOption {
|
||||
return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.Client.Transport = t })
|
||||
return CertificateIdentityOption(func(i *STSCertificateIdentity) {
|
||||
if i.Client == nil {
|
||||
i.Client = &http.Client{}
|
||||
}
|
||||
i.Client.Transport = t
|
||||
})
|
||||
}
|
||||
|
||||
// CertificateIdentityWithExpiry returns a CertificateIdentityOption that
|
||||
@@ -53,6 +58,10 @@ func CertificateIdentityWithExpiry(livetime time.Duration) CertificateIdentityOp
|
||||
type STSCertificateIdentity struct {
|
||||
Expiry
|
||||
|
||||
// Optional http Client to use when connecting to MinIO STS service.
|
||||
// (overrides default client in CredContext)
|
||||
Client *http.Client
|
||||
|
||||
// STSEndpoint is the base URL endpoint of the STS API.
|
||||
// For example, https://minio.local:9000
|
||||
STSEndpoint string
|
||||
@@ -68,50 +77,18 @@ type STSCertificateIdentity struct {
|
||||
// The default livetime is one hour.
|
||||
S3CredentialLivetime time.Duration
|
||||
|
||||
// Client is the HTTP client used to authenticate and fetch
|
||||
// S3 credentials.
|
||||
//
|
||||
// A custom TLS client configuration can be specified by
|
||||
// using a custom http.Transport:
|
||||
// Client: http.Client {
|
||||
// Transport: &http.Transport{
|
||||
// TLSClientConfig: &tls.Config{},
|
||||
// },
|
||||
// }
|
||||
Client http.Client
|
||||
// Certificate is the client certificate that is used for
|
||||
// STS authentication.
|
||||
Certificate tls.Certificate
|
||||
}
|
||||
|
||||
var _ Provider = (*STSWebIdentity)(nil) // compiler check
|
||||
|
||||
// NewSTSCertificateIdentity returns a STSCertificateIdentity that authenticates
|
||||
// to the given STS endpoint with the given TLS certificate and retrieves and
|
||||
// rotates S3 credentials.
|
||||
func NewSTSCertificateIdentity(endpoint string, certificate tls.Certificate, options ...CertificateIdentityOption) (*Credentials, error) {
|
||||
if endpoint == "" {
|
||||
return nil, errors.New("STS endpoint cannot be empty")
|
||||
}
|
||||
if _, err := url.Parse(endpoint); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
identity := &STSCertificateIdentity{
|
||||
STSEndpoint: endpoint,
|
||||
Client: http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
DialContext: (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).DialContext,
|
||||
ForceAttemptHTTP2: true,
|
||||
MaxIdleConns: 100,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
ExpectContinueTimeout: 5 * time.Second,
|
||||
TLSClientConfig: &tls.Config{
|
||||
Certificates: []tls.Certificate{certificate},
|
||||
},
|
||||
},
|
||||
},
|
||||
Certificate: certificate,
|
||||
}
|
||||
for _, option := range options {
|
||||
option(identity)
|
||||
@@ -119,10 +96,21 @@ func NewSTSCertificateIdentity(endpoint string, certificate tls.Certificate, opt
|
||||
return New(identity), nil
|
||||
}
|
||||
|
||||
// Retrieve fetches a new set of S3 credentials from the configured
|
||||
// STS API endpoint.
|
||||
func (i *STSCertificateIdentity) Retrieve() (Value, error) {
|
||||
endpointURL, err := url.Parse(i.STSEndpoint)
|
||||
// RetrieveWithCredContext is Retrieve with cred context
|
||||
func (i *STSCertificateIdentity) RetrieveWithCredContext(cc *CredContext) (Value, error) {
|
||||
if cc == nil {
|
||||
cc = defaultCredContext
|
||||
}
|
||||
|
||||
stsEndpoint := i.STSEndpoint
|
||||
if stsEndpoint == "" {
|
||||
stsEndpoint = cc.Endpoint
|
||||
}
|
||||
if stsEndpoint == "" {
|
||||
return Value{}, errors.New("STS endpoint unknown")
|
||||
}
|
||||
|
||||
endpointURL, err := url.Parse(stsEndpoint)
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
@@ -145,7 +133,28 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) {
|
||||
}
|
||||
req.Form.Add("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10))
|
||||
|
||||
resp, err := i.Client.Do(req)
|
||||
client := i.Client
|
||||
if client == nil {
|
||||
client = cc.Client
|
||||
}
|
||||
if client == nil {
|
||||
client = defaultCredContext.Client
|
||||
}
|
||||
|
||||
tr, ok := client.Transport.(*http.Transport)
|
||||
if !ok {
|
||||
return Value{}, fmt.Errorf("CredContext should contain an http.Transport value")
|
||||
}
|
||||
|
||||
// Clone the HTTP transport (patch the TLS client certificate)
|
||||
trCopy := tr.Clone()
|
||||
trCopy.TLSClientConfig.Certificates = []tls.Certificate{i.Certificate}
|
||||
|
||||
// Clone the HTTP client (patch the HTTP transport)
|
||||
clientCopy := *client
|
||||
clientCopy.Transport = trCopy
|
||||
|
||||
resp, err := clientCopy.Do(req)
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
@@ -193,6 +202,11 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve fetches a new set of S3 credentials from the configured STS API endpoint.
|
||||
func (i *STSCertificateIdentity) Retrieve() (Value, error) {
|
||||
return i.RetrieveWithCredContext(defaultCredContext)
|
||||
}
|
||||
|
||||
// Expiration returns the expiration time of the current S3 credentials.
|
||||
func (i *STSCertificateIdentity) Expiration() time.Time { return i.expiration }
|
||||
|
||||
|
||||
+39
-14
@@ -58,9 +58,10 @@ type WebIdentityResult struct {
|
||||
|
||||
// WebIdentityToken - web identity token with expiry.
|
||||
type WebIdentityToken struct {
|
||||
Token string
|
||||
AccessToken string
|
||||
Expiry int
|
||||
Token string
|
||||
AccessToken string
|
||||
RefreshToken string
|
||||
Expiry int
|
||||
}
|
||||
|
||||
// A STSWebIdentity retrieves credentials from MinIO service, and keeps track if
|
||||
@@ -68,7 +69,8 @@ type WebIdentityToken struct {
|
||||
type STSWebIdentity struct {
|
||||
Expiry
|
||||
|
||||
// Required http Client to use when connecting to MinIO STS service.
|
||||
// Optional http Client to use when connecting to MinIO STS service.
|
||||
// (overrides default client in CredContext)
|
||||
Client *http.Client
|
||||
|
||||
// Exported STS endpoint to fetch STS credentials.
|
||||
@@ -96,16 +98,10 @@ type STSWebIdentity struct {
|
||||
// NewSTSWebIdentity returns a pointer to a new
|
||||
// Credentials object wrapping the STSWebIdentity.
|
||||
func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error), opts ...func(*STSWebIdentity)) (*Credentials, error) {
|
||||
if stsEndpoint == "" {
|
||||
return nil, errors.New("STS endpoint cannot be empty")
|
||||
}
|
||||
if getWebIDTokenExpiry == nil {
|
||||
return nil, errors.New("Web ID token and expiry retrieval function should be defined")
|
||||
}
|
||||
i := &STSWebIdentity{
|
||||
Client: &http.Client{
|
||||
Transport: http.DefaultTransport,
|
||||
},
|
||||
STSEndpoint: stsEndpoint,
|
||||
GetWebIDTokenExpiry: getWebIDTokenExpiry,
|
||||
}
|
||||
@@ -161,6 +157,10 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
|
||||
// Usually set when server is using extended userInfo endpoint.
|
||||
v.Set("WebIdentityAccessToken", idToken.AccessToken)
|
||||
}
|
||||
if idToken.RefreshToken != "" {
|
||||
// Usually set when server is using extended userInfo endpoint.
|
||||
v.Set("WebIdentityRefreshToken", idToken.RefreshToken)
|
||||
}
|
||||
if idToken.Expiry > 0 {
|
||||
v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry))
|
||||
}
|
||||
@@ -214,10 +214,29 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Retrieve retrieves credentials from the MinIO service.
|
||||
// Error will be returned if the request fails.
|
||||
func (m *STSWebIdentity) Retrieve() (Value, error) {
|
||||
a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.RoleARN, m.roleSessionName, m.Policy, m.GetWebIDTokenExpiry)
|
||||
// RetrieveWithCredContext is like Retrieve with optional cred context.
|
||||
func (m *STSWebIdentity) RetrieveWithCredContext(cc *CredContext) (Value, error) {
|
||||
if cc == nil {
|
||||
cc = defaultCredContext
|
||||
}
|
||||
|
||||
client := m.Client
|
||||
if client == nil {
|
||||
client = cc.Client
|
||||
}
|
||||
if client == nil {
|
||||
client = defaultCredContext.Client
|
||||
}
|
||||
|
||||
stsEndpoint := m.STSEndpoint
|
||||
if stsEndpoint == "" {
|
||||
stsEndpoint = cc.Endpoint
|
||||
}
|
||||
if stsEndpoint == "" {
|
||||
return Value{}, errors.New("STS endpoint unknown")
|
||||
}
|
||||
|
||||
a, err := getWebIdentityCredentials(client, stsEndpoint, m.RoleARN, m.roleSessionName, m.Policy, m.GetWebIDTokenExpiry)
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
@@ -234,6 +253,12 @@ func (m *STSWebIdentity) Retrieve() (Value, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Retrieve retrieves credentials from the MinIO service.
|
||||
// Error will be returned if the request fails.
|
||||
func (m *STSWebIdentity) Retrieve() (Value, error) {
|
||||
return m.RetrieveWithCredContext(nil)
|
||||
}
|
||||
|
||||
// Expiration returns the expiration time of the credentials
|
||||
func (m *STSWebIdentity) Expiration() time.Time {
|
||||
return m.expiration
|
||||
|
||||
+26
@@ -434,12 +434,34 @@ func (de DelMarkerExpiration) MarshalXML(enc *xml.Encoder, start xml.StartElemen
|
||||
return enc.EncodeElement(delMarkerExp(de), start)
|
||||
}
|
||||
|
||||
// AllVersionsExpiration represents AllVersionsExpiration actions element in an ILM policy
|
||||
type AllVersionsExpiration struct {
|
||||
XMLName xml.Name `xml:"AllVersionsExpiration" json:"-"`
|
||||
Days int `xml:"Days,omitempty" json:"Days,omitempty"`
|
||||
DeleteMarker ExpireDeleteMarker `xml:"DeleteMarker,omitempty" json:"DeleteMarker,omitempty"`
|
||||
}
|
||||
|
||||
// IsNull returns true if days field is 0
|
||||
func (e AllVersionsExpiration) IsNull() bool {
|
||||
return e.Days == 0
|
||||
}
|
||||
|
||||
// MarshalXML satisfies xml.Marshaler to provide custom encoding
|
||||
func (e AllVersionsExpiration) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
|
||||
if e.IsNull() {
|
||||
return nil
|
||||
}
|
||||
type allVersionsExp AllVersionsExpiration
|
||||
return enc.EncodeElement(allVersionsExp(e), start)
|
||||
}
|
||||
|
||||
// MarshalJSON customizes json encoding by omitting empty values
|
||||
func (r Rule) MarshalJSON() ([]byte, error) {
|
||||
type rule struct {
|
||||
AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload,omitempty"`
|
||||
Expiration *Expiration `json:"Expiration,omitempty"`
|
||||
DelMarkerExpiration *DelMarkerExpiration `json:"DelMarkerExpiration,omitempty"`
|
||||
AllVersionsExpiration *AllVersionsExpiration `json:"AllVersionsExpiration,omitempty"`
|
||||
ID string `json:"ID"`
|
||||
RuleFilter *Filter `json:"Filter,omitempty"`
|
||||
NoncurrentVersionExpiration *NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration,omitempty"`
|
||||
@@ -475,6 +497,9 @@ func (r Rule) MarshalJSON() ([]byte, error) {
|
||||
if !r.NoncurrentVersionTransition.isNull() {
|
||||
newr.NoncurrentVersionTransition = &r.NoncurrentVersionTransition
|
||||
}
|
||||
if !r.AllVersionsExpiration.IsNull() {
|
||||
newr.AllVersionsExpiration = &r.AllVersionsExpiration
|
||||
}
|
||||
|
||||
return json.Marshal(newr)
|
||||
}
|
||||
@@ -485,6 +510,7 @@ type Rule struct {
|
||||
AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty" json:"AbortIncompleteMultipartUpload,omitempty"`
|
||||
Expiration Expiration `xml:"Expiration,omitempty" json:"Expiration,omitempty"`
|
||||
DelMarkerExpiration DelMarkerExpiration `xml:"DelMarkerExpiration,omitempty" json:"DelMarkerExpiration,omitempty"`
|
||||
AllVersionsExpiration AllVersionsExpiration `xml:"AllVersionsExpiration,omitempty" json:"AllVersionsExpiration,omitempty"`
|
||||
ID string `xml:"ID" json:"ID"`
|
||||
RuleFilter Filter `xml:"Filter,omitempty" json:"Filter,omitempty"`
|
||||
NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty" json:"NoncurrentVersionExpiration,omitempty"`
|
||||
|
||||
+11
-49
@@ -118,53 +118,53 @@ func GetRegionFromURL(endpointURL url.URL) string {
|
||||
if endpointURL == sentinelURL {
|
||||
return ""
|
||||
}
|
||||
if endpointURL.Host == "s3-external-1.amazonaws.com" {
|
||||
if endpointURL.Hostname() == "s3-external-1.amazonaws.com" {
|
||||
return ""
|
||||
}
|
||||
|
||||
// if elb's are used we cannot calculate which region it may be, just return empty.
|
||||
if elbAmazonRegex.MatchString(endpointURL.Host) || elbAmazonCnRegex.MatchString(endpointURL.Host) {
|
||||
if elbAmazonRegex.MatchString(endpointURL.Hostname()) || elbAmazonCnRegex.MatchString(endpointURL.Hostname()) {
|
||||
return ""
|
||||
}
|
||||
|
||||
// We check for FIPS dualstack matching first to avoid the non-greedy
|
||||
// regex for FIPS non-dualstack matching a dualstack URL
|
||||
parts := amazonS3HostFIPSDualStack.FindStringSubmatch(endpointURL.Host)
|
||||
parts := amazonS3HostFIPSDualStack.FindStringSubmatch(endpointURL.Hostname())
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
|
||||
parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Host)
|
||||
parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Hostname())
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
|
||||
parts = amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host)
|
||||
parts = amazonS3HostDualStack.FindStringSubmatch(endpointURL.Hostname())
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
|
||||
parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host)
|
||||
parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Hostname())
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
|
||||
parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host)
|
||||
parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Hostname())
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
|
||||
parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Host)
|
||||
parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Hostname())
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
|
||||
parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host)
|
||||
parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Hostname())
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
|
||||
parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Host)
|
||||
parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Hostname())
|
||||
if len(parts) > 1 {
|
||||
return parts[1]
|
||||
}
|
||||
@@ -218,7 +218,7 @@ func IsAmazonPrivateLinkEndpoint(endpointURL url.URL) bool {
|
||||
if endpointURL == sentinelURL {
|
||||
return false
|
||||
}
|
||||
return amazonS3HostPrivateLink.MatchString(endpointURL.Host)
|
||||
return amazonS3HostPrivateLink.MatchString(endpointURL.Hostname())
|
||||
}
|
||||
|
||||
// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint.
|
||||
@@ -261,44 +261,6 @@ func QueryEncode(v url.Values) string {
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// TagDecode - decodes canonical tag into map of key and value.
|
||||
func TagDecode(ctag string) map[string]string {
|
||||
if ctag == "" {
|
||||
return map[string]string{}
|
||||
}
|
||||
tags := strings.Split(ctag, "&")
|
||||
tagMap := make(map[string]string, len(tags))
|
||||
var err error
|
||||
for _, tag := range tags {
|
||||
kvs := strings.SplitN(tag, "=", 2)
|
||||
if len(kvs) == 0 {
|
||||
return map[string]string{}
|
||||
}
|
||||
if len(kvs) == 1 {
|
||||
return map[string]string{}
|
||||
}
|
||||
tagMap[kvs[0]], err = url.PathUnescape(kvs[1])
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
return tagMap
|
||||
}
|
||||
|
||||
// TagEncode - encodes tag values in their URL encoded form. In
|
||||
// addition to the percent encoding performed by urlEncodePath() used
|
||||
// here, it also percent encodes '/' (forward slash)
|
||||
func TagEncode(tags map[string]string) string {
|
||||
if tags == nil {
|
||||
return ""
|
||||
}
|
||||
values := url.Values{}
|
||||
for k, v := range tags {
|
||||
values[k] = []string{v}
|
||||
}
|
||||
return QueryEncode(values)
|
||||
}
|
||||
|
||||
// if object matches reserved string, no need to encode them
|
||||
var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
|
||||
|
||||
|
||||
+53
-18
@@ -85,7 +85,7 @@ func (p *PostPolicy) SetExpires(t time.Time) error {
|
||||
|
||||
// SetKey - Sets an object name for the policy based upload.
|
||||
func (p *PostPolicy) SetKey(key string) error {
|
||||
if strings.TrimSpace(key) == "" || key == "" {
|
||||
if strings.TrimSpace(key) == "" {
|
||||
return errInvalidArgument("Object name is empty.")
|
||||
}
|
||||
policyCond := policyCondition{
|
||||
@@ -118,7 +118,7 @@ func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
|
||||
|
||||
// SetBucket - Sets bucket at which objects will be uploaded to.
|
||||
func (p *PostPolicy) SetBucket(bucketName string) error {
|
||||
if strings.TrimSpace(bucketName) == "" || bucketName == "" {
|
||||
if strings.TrimSpace(bucketName) == "" {
|
||||
return errInvalidArgument("Bucket name is empty.")
|
||||
}
|
||||
policyCond := policyCondition{
|
||||
@@ -135,7 +135,7 @@ func (p *PostPolicy) SetBucket(bucketName string) error {
|
||||
|
||||
// SetCondition - Sets condition for credentials, date and algorithm
|
||||
func (p *PostPolicy) SetCondition(matchType, condition, value string) error {
|
||||
if strings.TrimSpace(value) == "" || value == "" {
|
||||
if strings.TrimSpace(value) == "" {
|
||||
return errInvalidArgument("No value specified for condition")
|
||||
}
|
||||
|
||||
@@ -156,7 +156,7 @@ func (p *PostPolicy) SetCondition(matchType, condition, value string) error {
|
||||
|
||||
// SetTagging - Sets tagging for the object for this policy based upload.
|
||||
func (p *PostPolicy) SetTagging(tagging string) error {
|
||||
if strings.TrimSpace(tagging) == "" || tagging == "" {
|
||||
if strings.TrimSpace(tagging) == "" {
|
||||
return errInvalidArgument("No tagging specified.")
|
||||
}
|
||||
_, err := tags.ParseObjectXML(strings.NewReader(tagging))
|
||||
@@ -178,7 +178,7 @@ func (p *PostPolicy) SetTagging(tagging string) error {
|
||||
// SetContentType - Sets content-type of the object for this policy
|
||||
// based upload.
|
||||
func (p *PostPolicy) SetContentType(contentType string) error {
|
||||
if strings.TrimSpace(contentType) == "" || contentType == "" {
|
||||
if strings.TrimSpace(contentType) == "" {
|
||||
return errInvalidArgument("No content type specified.")
|
||||
}
|
||||
policyCond := policyCondition{
|
||||
@@ -211,7 +211,7 @@ func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) erro
|
||||
|
||||
// SetContentDisposition - Sets content-disposition of the object for this policy
|
||||
func (p *PostPolicy) SetContentDisposition(contentDisposition string) error {
|
||||
if strings.TrimSpace(contentDisposition) == "" || contentDisposition == "" {
|
||||
if strings.TrimSpace(contentDisposition) == "" {
|
||||
return errInvalidArgument("No content disposition specified.")
|
||||
}
|
||||
policyCond := policyCondition{
|
||||
@@ -226,27 +226,44 @@ func (p *PostPolicy) SetContentDisposition(contentDisposition string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetContentEncoding - Sets content-encoding of the object for this policy
|
||||
func (p *PostPolicy) SetContentEncoding(contentEncoding string) error {
|
||||
if strings.TrimSpace(contentEncoding) == "" {
|
||||
return errInvalidArgument("No content encoding specified.")
|
||||
}
|
||||
policyCond := policyCondition{
|
||||
matchType: "eq",
|
||||
condition: "$Content-Encoding",
|
||||
value: contentEncoding,
|
||||
}
|
||||
if err := p.addNewPolicy(policyCond); err != nil {
|
||||
return err
|
||||
}
|
||||
p.formData["Content-Encoding"] = contentEncoding
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetContentLengthRange - Set new min and max content length
|
||||
// condition for all incoming uploads.
|
||||
func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
|
||||
if min > max {
|
||||
func (p *PostPolicy) SetContentLengthRange(minLen, maxLen int64) error {
|
||||
if minLen > maxLen {
|
||||
return errInvalidArgument("Minimum limit is larger than maximum limit.")
|
||||
}
|
||||
if min < 0 {
|
||||
if minLen < 0 {
|
||||
return errInvalidArgument("Minimum limit cannot be negative.")
|
||||
}
|
||||
if max <= 0 {
|
||||
if maxLen <= 0 {
|
||||
return errInvalidArgument("Maximum limit cannot be non-positive.")
|
||||
}
|
||||
p.contentLengthRange.min = min
|
||||
p.contentLengthRange.max = max
|
||||
p.contentLengthRange.min = minLen
|
||||
p.contentLengthRange.max = maxLen
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetSuccessActionRedirect - Sets the redirect success url of the object for this policy
|
||||
// based upload.
|
||||
func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error {
|
||||
if strings.TrimSpace(redirect) == "" || redirect == "" {
|
||||
if strings.TrimSpace(redirect) == "" {
|
||||
return errInvalidArgument("Redirect is empty")
|
||||
}
|
||||
policyCond := policyCondition{
|
||||
@@ -264,7 +281,7 @@ func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error {
|
||||
// SetSuccessStatusAction - Sets the status success code of the object for this policy
|
||||
// based upload.
|
||||
func (p *PostPolicy) SetSuccessStatusAction(status string) error {
|
||||
if strings.TrimSpace(status) == "" || status == "" {
|
||||
if strings.TrimSpace(status) == "" {
|
||||
return errInvalidArgument("Status is empty")
|
||||
}
|
||||
policyCond := policyCondition{
|
||||
@@ -282,10 +299,10 @@ func (p *PostPolicy) SetSuccessStatusAction(status string) error {
|
||||
// SetUserMetadata - Set user metadata as a key/value couple.
|
||||
// Can be retrieved through a HEAD request or an event.
|
||||
func (p *PostPolicy) SetUserMetadata(key, value string) error {
|
||||
if strings.TrimSpace(key) == "" || key == "" {
|
||||
if strings.TrimSpace(key) == "" {
|
||||
return errInvalidArgument("Key is empty")
|
||||
}
|
||||
if strings.TrimSpace(value) == "" || value == "" {
|
||||
if strings.TrimSpace(value) == "" {
|
||||
return errInvalidArgument("Value is empty")
|
||||
}
|
||||
headerName := fmt.Sprintf("x-amz-meta-%s", key)
|
||||
@@ -304,7 +321,7 @@ func (p *PostPolicy) SetUserMetadata(key, value string) error {
|
||||
// SetUserMetadataStartsWith - Set how an user metadata should starts with.
|
||||
// Can be retrieved through a HEAD request or an event.
|
||||
func (p *PostPolicy) SetUserMetadataStartsWith(key, value string) error {
|
||||
if strings.TrimSpace(key) == "" || key == "" {
|
||||
if strings.TrimSpace(key) == "" {
|
||||
return errInvalidArgument("Key is empty")
|
||||
}
|
||||
headerName := fmt.Sprintf("x-amz-meta-%s", key)
|
||||
@@ -321,11 +338,29 @@ func (p *PostPolicy) SetUserMetadataStartsWith(key, value string) error {
|
||||
}
|
||||
|
||||
// SetChecksum sets the checksum of the request.
|
||||
func (p *PostPolicy) SetChecksum(c Checksum) {
|
||||
func (p *PostPolicy) SetChecksum(c Checksum) error {
|
||||
if c.IsSet() {
|
||||
p.formData[amzChecksumAlgo] = c.Type.String()
|
||||
p.formData[c.Type.Key()] = c.Encoded()
|
||||
|
||||
policyCond := policyCondition{
|
||||
matchType: "eq",
|
||||
condition: fmt.Sprintf("$%s", amzChecksumAlgo),
|
||||
value: c.Type.String(),
|
||||
}
|
||||
if err := p.addNewPolicy(policyCond); err != nil {
|
||||
return err
|
||||
}
|
||||
policyCond = policyCondition{
|
||||
matchType: "eq",
|
||||
condition: fmt.Sprintf("$%s", c.Type.Key()),
|
||||
value: c.Encoded(),
|
||||
}
|
||||
if err := p.addNewPolicy(policyCond); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetEncryption - sets encryption headers for POST API
|
||||
|
||||
+5
-5
@@ -20,7 +20,7 @@ package minio
|
||||
import "time"
|
||||
|
||||
// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
|
||||
func (c *Client) newRetryTimerContinous(unit, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
|
||||
func (c *Client) newRetryTimerContinous(baseSleep, maxSleep time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
|
||||
attemptCh := make(chan int)
|
||||
|
||||
// normalize jitter to the range [0, 1.0]
|
||||
@@ -39,10 +39,10 @@ func (c *Client) newRetryTimerContinous(unit, cap time.Duration, jitter float64,
|
||||
if attempt > maxAttempt {
|
||||
attempt = maxAttempt
|
||||
}
|
||||
// sleep = random_between(0, min(cap, base * 2 ** attempt))
|
||||
sleep := unit * time.Duration(1<<uint(attempt))
|
||||
if sleep > cap {
|
||||
sleep = cap
|
||||
// sleep = random_between(0, min(maxSleep, base * 2 ** attempt))
|
||||
sleep := baseSleep * time.Duration(1<<uint(attempt))
|
||||
if sleep > maxSleep {
|
||||
sleep = maxSleep
|
||||
}
|
||||
if jitter != NoJitter {
|
||||
sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
|
||||
|
||||
+6
-5
@@ -45,7 +45,7 @@ var DefaultRetryCap = time.Second
|
||||
|
||||
// newRetryTimer creates a timer with exponentially increasing
|
||||
// delays until the maximum retry attempts are reached.
|
||||
func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, unit, cap time.Duration, jitter float64) <-chan int {
|
||||
func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, baseSleep, maxSleep time.Duration, jitter float64) <-chan int {
|
||||
attemptCh := make(chan int)
|
||||
|
||||
// computes the exponential backoff duration according to
|
||||
@@ -59,10 +59,10 @@ func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, unit, cap time
|
||||
jitter = MaxJitter
|
||||
}
|
||||
|
||||
// sleep = random_between(0, min(cap, base * 2 ** attempt))
|
||||
sleep := unit * time.Duration(1<<uint(attempt))
|
||||
if sleep > cap {
|
||||
sleep = cap
|
||||
// sleep = random_between(0, min(maxSleep, base * 2 ** attempt))
|
||||
sleep := baseSleep * time.Duration(1<<uint(attempt))
|
||||
if sleep > maxSleep {
|
||||
sleep = maxSleep
|
||||
}
|
||||
if jitter != NoJitter {
|
||||
sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
|
||||
@@ -112,6 +112,7 @@ func isS3CodeRetryable(s3Code string) (ok bool) {
|
||||
|
||||
// List of HTTP status codes which are retryable.
|
||||
var retryableHTTPStatusCodes = map[int]struct{}{
|
||||
http.StatusRequestTimeout: {},
|
||||
429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
|
||||
499: {}, // client closed request, retry. A non-standard status code introduced by nginx.
|
||||
http.StatusInternalServerError: {},
|
||||
|
||||
+12
@@ -32,6 +32,18 @@ var awsS3EndpointMap = map[string]awsS3Endpoint{
|
||||
"s3.us-east-2.amazonaws.com",
|
||||
"s3.dualstack.us-east-2.amazonaws.com",
|
||||
},
|
||||
"us-iso-east-1": {
|
||||
"s3.us-iso-east-1.c2s.ic.gov",
|
||||
"s3.dualstack.us-iso-east-1.c2s.ic.gov",
|
||||
},
|
||||
"us-isob-east-1": {
|
||||
"s3.us-isob-east-1.sc2s.sgov.gov",
|
||||
"s3.dualstack.us-isob-east-1.sc2s.sgov.gov",
|
||||
},
|
||||
"us-iso-west-1": {
|
||||
"s3.us-iso-west-1.c2s.ic.gov",
|
||||
"s3.dualstack.us-iso-west-1.c2s.ic.gov",
|
||||
},
|
||||
"us-west-2": {
|
||||
"s3.us-west-2.amazonaws.com",
|
||||
"s3.dualstack.us-west-2.amazonaws.com",
|
||||
|
||||
+157
-6
@@ -41,6 +41,7 @@ import (
|
||||
|
||||
md5simd "github.com/minio/md5-simd"
|
||||
"github.com/minio/minio-go/v7/pkg/s3utils"
|
||||
"github.com/minio/minio-go/v7/pkg/tags"
|
||||
)
|
||||
|
||||
func trimEtag(etag string) string {
|
||||
@@ -322,7 +323,13 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err
|
||||
userMetadata[strings.TrimPrefix(k, "X-Amz-Meta-")] = v[0]
|
||||
}
|
||||
}
|
||||
userTags := s3utils.TagDecode(h.Get(amzTaggingHeader))
|
||||
|
||||
userTags, err := tags.ParseObjectTags(h.Get(amzTaggingHeader))
|
||||
if err != nil {
|
||||
return ObjectInfo{}, ErrorResponse{
|
||||
Code: "InternalError",
|
||||
}
|
||||
}
|
||||
|
||||
var tagCount int
|
||||
if count := h.Get(amzTaggingCount); count != "" {
|
||||
@@ -373,15 +380,16 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err
|
||||
// which are not part of object metadata.
|
||||
Metadata: metadata,
|
||||
UserMetadata: userMetadata,
|
||||
UserTags: userTags,
|
||||
UserTags: userTags.ToMap(),
|
||||
UserTagCount: tagCount,
|
||||
Restore: restore,
|
||||
|
||||
// Checksum values
|
||||
ChecksumCRC32: h.Get("x-amz-checksum-crc32"),
|
||||
ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"),
|
||||
ChecksumSHA1: h.Get("x-amz-checksum-sha1"),
|
||||
ChecksumSHA256: h.Get("x-amz-checksum-sha256"),
|
||||
ChecksumCRC32: h.Get(ChecksumCRC32.Key()),
|
||||
ChecksumCRC32C: h.Get(ChecksumCRC32C.Key()),
|
||||
ChecksumSHA1: h.Get(ChecksumSHA1.Key()),
|
||||
ChecksumSHA256: h.Get(ChecksumSHA256.Key()),
|
||||
ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -698,3 +706,146 @@ func (h *hashReaderWrapper) Read(p []byte) (n int, err error) {
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Following is ported from C to Go in 2016 by Justin Ruggles, with minimal alteration.
|
||||
// Used uint for unsigned long. Used uint32 for input arguments in order to match
|
||||
// the Go hash/crc32 package. zlib CRC32 combine (https://github.com/madler/zlib)
|
||||
// Modified for hash/crc64 by Klaus Post, 2024.
|
||||
func gf2MatrixTimes(mat []uint64, vec uint64) uint64 {
|
||||
var sum uint64
|
||||
|
||||
for vec != 0 {
|
||||
if vec&1 != 0 {
|
||||
sum ^= mat[0]
|
||||
}
|
||||
vec >>= 1
|
||||
mat = mat[1:]
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
func gf2MatrixSquare(square, mat []uint64) {
|
||||
if len(square) != len(mat) {
|
||||
panic("square matrix size mismatch")
|
||||
}
|
||||
for n := range mat {
|
||||
square[n] = gf2MatrixTimes(mat, mat[n])
|
||||
}
|
||||
}
|
||||
|
||||
// crc32Combine returns the combined CRC-32 hash value of the two passed CRC-32
|
||||
// hash values crc1 and crc2. poly represents the generator polynomial
|
||||
// and len2 specifies the byte length that the crc2 hash covers.
|
||||
func crc32Combine(poly uint32, crc1, crc2 uint32, len2 int64) uint32 {
|
||||
// degenerate case (also disallow negative lengths)
|
||||
if len2 <= 0 {
|
||||
return crc1
|
||||
}
|
||||
|
||||
even := make([]uint64, 32) // even-power-of-two zeros operator
|
||||
odd := make([]uint64, 32) // odd-power-of-two zeros operator
|
||||
|
||||
// put operator for one zero bit in odd
|
||||
odd[0] = uint64(poly) // CRC-32 polynomial
|
||||
row := uint64(1)
|
||||
for n := 1; n < 32; n++ {
|
||||
odd[n] = row
|
||||
row <<= 1
|
||||
}
|
||||
|
||||
// put operator for two zero bits in even
|
||||
gf2MatrixSquare(even, odd)
|
||||
|
||||
// put operator for four zero bits in odd
|
||||
gf2MatrixSquare(odd, even)
|
||||
|
||||
// apply len2 zeros to crc1 (first square will put the operator for one
|
||||
// zero byte, eight zero bits, in even)
|
||||
crc1n := uint64(crc1)
|
||||
for {
|
||||
// apply zeros operator for this bit of len2
|
||||
gf2MatrixSquare(even, odd)
|
||||
if len2&1 != 0 {
|
||||
crc1n = gf2MatrixTimes(even, crc1n)
|
||||
}
|
||||
len2 >>= 1
|
||||
|
||||
// if no more bits set, then done
|
||||
if len2 == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// another iteration of the loop with odd and even swapped
|
||||
gf2MatrixSquare(odd, even)
|
||||
if len2&1 != 0 {
|
||||
crc1n = gf2MatrixTimes(odd, crc1n)
|
||||
}
|
||||
len2 >>= 1
|
||||
|
||||
// if no more bits set, then done
|
||||
if len2 == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// return combined crc
|
||||
crc1n ^= uint64(crc2)
|
||||
return uint32(crc1n)
|
||||
}
|
||||
|
||||
func crc64Combine(poly uint64, crc1, crc2 uint64, len2 int64) uint64 {
|
||||
// degenerate case (also disallow negative lengths)
|
||||
if len2 <= 0 {
|
||||
return crc1
|
||||
}
|
||||
|
||||
even := make([]uint64, 64) // even-power-of-two zeros operator
|
||||
odd := make([]uint64, 64) // odd-power-of-two zeros operator
|
||||
|
||||
// put operator for one zero bit in odd
|
||||
odd[0] = poly // CRC-64 polynomial
|
||||
row := uint64(1)
|
||||
for n := 1; n < 64; n++ {
|
||||
odd[n] = row
|
||||
row <<= 1
|
||||
}
|
||||
|
||||
// put operator for two zero bits in even
|
||||
gf2MatrixSquare(even, odd)
|
||||
|
||||
// put operator for four zero bits in odd
|
||||
gf2MatrixSquare(odd, even)
|
||||
|
||||
// apply len2 zeros to crc1 (first square will put the operator for one
|
||||
// zero byte, eight zero bits, in even)
|
||||
crc1n := crc1
|
||||
for {
|
||||
// apply zeros operator for this bit of len2
|
||||
gf2MatrixSquare(even, odd)
|
||||
if len2&1 != 0 {
|
||||
crc1n = gf2MatrixTimes(even, crc1n)
|
||||
}
|
||||
len2 >>= 1
|
||||
|
||||
// if no more bits set, then done
|
||||
if len2 == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// another iteration of the loop with odd and even swapped
|
||||
gf2MatrixSquare(odd, even)
|
||||
if len2&1 != 0 {
|
||||
crc1n = gf2MatrixTimes(odd, crc1n)
|
||||
}
|
||||
len2 >>= 1
|
||||
|
||||
// if no more bits set, then done
|
||||
if len2 == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// return combined crc
|
||||
crc1n ^= crc2
|
||||
return crc1n
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user