Bump reva deps (#8412)

* bump dependencies

Signed-off-by: Jörn Friedrich Dreyer <jfd@butonic.de>

* bump reva and add config options

Signed-off-by: Jörn Friedrich Dreyer <jfd@butonic.de>

---------

Signed-off-by: Jörn Friedrich Dreyer <jfd@butonic.de>
Jörn Friedrich Dreyer
2024-02-21 10:20:36 +01:00
committed by GitHub
parent c92ebf4b46
commit 5ed57cc09a
490 changed files with 19130 additions and 11163 deletions

View File

@@ -2,4 +2,5 @@
*.test
validator
golangci-lint
functional_tests
functional_tests
.idea

View File

@@ -16,9 +16,11 @@ lint:
vet:
@GO111MODULE=on go vet ./...
@echo "Installing staticcheck" && go install honnef.co/go/tools/cmd/staticcheck@latest
${GOPATH}/bin/staticcheck -tests=false -checks="all,-ST1000,-ST1003,-ST1016,-ST1020,-ST1021,-ST1022,-ST1023,-ST1005"
test:
@GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
@GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
examples:
@echo "Building s3 examples"
@@ -28,7 +30,7 @@ examples:
functional-test:
@GO111MODULE=on go build -race functional_tests.go
@SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full ./functional_tests
@SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full ./functional_tests
clean:
@echo "Cleaning up all the generated files"

View File

@@ -1,23 +1,28 @@
# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)
The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
The MinIO Go Client SDK provides straightforward APIs to access any Amazon S3 compatible object storage.
This quickstart guide will show you how to install the MinIO client SDK, connect to MinIO, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html).
This Quickstart Guide covers how to install the MinIO client SDK, connect to MinIO, and create a sample file uploader.
For a complete list of APIs and examples, see the [godoc documentation](https://pkg.go.dev/github.com/minio/minio-go/v7) or [Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html).
This document assumes that you have a working [Go development environment](https://golang.org/doc/install).
These examples presume a working [Go development environment](https://golang.org/doc/install) and the [MinIO `mc` command line tool](https://min.io/docs/minio/linux/reference/minio-mc.html).
## Download from Github
From your project directory:
```sh
go get github.com/minio/minio-go/v7
```
## Initialize MinIO Client
MinIO client requires the following four parameters specified to connect to an Amazon S3 compatible object storage.
## Initialize a MinIO Client Object
| Parameter | Description|
| :--- | :--- |
| endpoint | URL to object storage service. |
| _minio.Options_ | All the options such as credentials, custom transport etc. |
The MinIO client requires the following parameters to connect to an Amazon S3 compatible object storage:
| Parameter | Description |
| ----------------- | ---------------------------------------------------------- |
| `endpoint` | URL to object storage service. |
| `_minio.Options_` | All the options such as credentials, custom transport etc. |
```go
package main
@@ -48,13 +53,25 @@ func main() {
}
```
## Quick Start Example - File Uploader
This example program connects to an object storage server, creates a bucket and uploads a file to the bucket.
## Example - File Uploader
We will use the MinIO server running at [https://play.min.io](https://play.min.io) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public.
This sample code connects to an object storage server, creates a bucket, and uploads a file to the bucket.
It uses the MinIO `play` server, a public MinIO cluster located at [https://play.min.io](https://play.min.io).
The `play` server runs the latest stable version of MinIO and may be used for testing and development.
The access credentials shown in this example are open to the public and all data uploaded to `play` should be considered public and non-protected.
### FileUploader.go
This example does the following:
- Connects to the MinIO `play` server using the provided credentials.
- Creates a bucket named `testbucket`.
- Uploads a file named `testdata` from `/tmp`.
- Verifies the file was created using `mc ls`.
```go
// FileUploader.go MinIO example
package main
import (
@@ -81,8 +98,8 @@ func main() {
log.Fatalln(err)
}
// Make a new bucket called mymusic.
bucketName := "mymusic"
// Make a new bucket called testbucket.
bucketName := "testbucket"
location := "us-east-1"
err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: location})
@@ -98,12 +115,13 @@ func main() {
log.Printf("Successfully created %s\n", bucketName)
}
// Upload the zip file
objectName := "golden-oldies.zip"
filePath := "/tmp/golden-oldies.zip"
contentType := "application/zip"
// Upload the test file
// Change the value of filePath if the file is in another location
objectName := "testdata"
filePath := "/tmp/testdata"
contentType := "application/octet-stream"
// Upload the zip file with FPutObject
// Upload the test file with FPutObject
info, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType})
if err != nil {
log.Fatalln(err)
@@ -113,22 +131,51 @@ func main() {
}
```
### Run FileUploader
```sh
go run file-uploader.go
2016/08/13 17:03:28 Successfully created mymusic
2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
mc ls play/mymusic/
[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip
```
**1. Create a test file containing data:**
You can do this with `dd` on Linux or macOS systems:
```sh
dd if=/dev/urandom of=/tmp/testdata bs=2048 count=10
```
or `fsutil` on Windows:
```sh
fsutil file createnew "C:\Users\<username>\Desktop\sample.txt" 20480
```
**2. Run FileUploader with the following commands:**
```sh
go mod init example/FileUploader
go get github.com/minio/minio-go/v7
go get github.com/minio/minio-go/v7/pkg/credentials
go run FileUploader.go
```
The output resembles the following:
```sh
2023/11/01 14:27:55 Successfully created testbucket
2023/11/01 14:27:55 Successfully uploaded testdata of size 20480
```
**3. Verify the Uploaded File With `mc ls`:**
```sh
mc ls play/testbucket
[2023-11-01 14:27:55 UTC] 20KiB STANDARD testdata
```
## API Reference
The full API Reference is available here.
* [Complete API Reference](https://min.io/docs/minio/linux/developers/go/API.html)
### API Reference : Bucket Operations
* [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket)
* [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets)
* [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists)
@@ -137,10 +184,12 @@ The full API Reference is available here.
* [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads)
### API Reference : Bucket policy Operations
* [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy)
* [`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy)
### API Reference : Bucket notification Operations
* [`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification)
* [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification)
* [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification)
@@ -148,10 +197,12 @@ The full API Reference is available here.
* [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO Extension)
### API Reference : File Object Operations
* [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject)
* [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FGetObject)
### API Reference : Object Operations
* [`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject)
* [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject)
* [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming)
@@ -162,14 +213,15 @@ The full API Reference is available here.
* [`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload)
* [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent)
### API Reference : Presigned Operations
* [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject)
* [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject)
* [`PresignedHeadObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedHeadObject)
* [`PresignedPostPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPostPolicy)
### API Reference : Client custom settings
* [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo)
* [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn)
* [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff)
@@ -177,6 +229,7 @@ The full API Reference is available here.
## Full Examples
### Full Examples : Bucket Operations
* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
@@ -186,25 +239,30 @@ The full API Reference is available here.
* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
### Full Examples : Bucket policy Operations
* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
### Full Examples : Bucket lifecycle Operations
* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go)
* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go)
### Full Examples : Bucket encryption Operations
* [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go)
* [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go)
* [deletebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketencryption.go)
### Full Examples : Bucket replication Operations
* [setbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketreplication.go)
* [getbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketreplication.go)
* [removebucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketreplication.go)
### Full Examples : Bucket notification Operations
* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
@@ -212,10 +270,12 @@ The full API Reference is available here.
* [listennotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listen-notification.go) (MinIO Extension)
### Full Examples : File Object Operations
* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
### Full Examples : Object Operations
* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
@@ -225,22 +285,28 @@ The full API Reference is available here.
* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
### Full Examples : Encrypted Object Operations
* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
### Full Examples : Presigned Operations
* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
## Explore Further
* [Godoc Documentation](https://pkg.go.dev/github.com/minio/minio-go/v7)
* [Complete Documentation](https://min.io/docs/minio/kubernetes/upstream/index.html)
* [MinIO Go Client SDK API Reference](https://min.io/docs/minio/linux/developers/go/API.html)
## Contribute
[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
## License
This SDK is distributed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information.

View File

@@ -1,260 +0,0 @@
# MinIO Go SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge)
The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage service.
**Supported cloud storage providers:**
- AWS Signature Version 4
- Amazon S3
- MinIO
- AWS Signature Version 2
- Google Cloud Storage (compatibility mode)
- Openstack Swift + Swift3 middleware
- Ceph Object Gateway
- Riak CS
This guide covers how to install the MinIO client SDK, connect to MinIO, and walk through a file upload example. For the complete list of APIs and examples, see the [Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html).
This document assumes that you have a working [Go development environment](https://golang.org/doc/install).
## Download from Github
```sh
go get -u github.com/minio/minio-go
```
## Initialize the MinIO Client
The MinIO client requires the following four parameters to connect to an Amazon S3 compatible object storage:
| Parameter | Description |
| :--- | :--- |
| endpoint | URL to the object storage service. |
| accessKeyID | The access key is the user ID that uniquely identifies your account. |
| secretAccessKey | The secret key is the password for your account. |
| secure | Set to true to use HTTPS. |
```go
package main
import (
"log"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
)
func main() {
endpoint := "play.min.io"
accessKeyID := "Q3AM3UQ867SPQQA43P2F"
secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
useSSL := true
// Initialize the minio client object.
minioClient, err := minio.New(endpoint, &minio.Options{
Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
Secure: useSSL,
})
if err != nil {
log.Fatalln(err)
}
log.Printf("%#v\n", minioClient) // minioClient初使化成功
}
```
## Example - File Uploader
This example connects to an object storage service, creates a bucket, and uploads a file to the bucket.
This example uses the MinIO server running at [https://play.min.io](https://play.min.io); feel free to use this service for development and testing. The access credentials shown in this example are open to the public.
### FileUploader.go
```go
package main
import (
"context"
"log"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
)
func main() {
ctx := context.Background()
endpoint := "play.min.io"
accessKeyID := "Q3AM3UQ867SPQQA43P2F"
secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
useSSL := true
// Initialize the minio client object.
minioClient, err := minio.New(endpoint, &minio.Options{
Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
Secure: useSSL,
})
if err != nil {
log.Fatalln(err)
}
// Make a new bucket called mymusic.
bucketName := "mymusic"
location := "us-east-1"
err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: location})
if err != nil {
// Check to see if the bucket already exists.
exists, errBucketExists := minioClient.BucketExists(ctx, bucketName)
if errBucketExists == nil && exists {
log.Printf("We already own %s\n", bucketName)
} else {
log.Fatalln(err)
}
} else {
log.Printf("Successfully created %s\n", bucketName)
}
// Upload the zip file.
objectName := "golden-oldies.zip"
filePath := "/tmp/golden-oldies.zip"
contentType := "application/zip"
// Upload the zip file with FPutObject.
n, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType})
if err != nil {
log.Fatalln(err)
}
log.Printf("Successfully uploaded %s of size %d\n", objectName, n)
}
```
### Run FileUploader
```sh
go run file-uploader.go
2016/08/13 17:03:28 Successfully created mymusic
2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
mc ls play/mymusic/
[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip
```
## API Reference
The full API Reference is available here.
* [Complete API Reference](https://min.io/docs/minio/linux/developers/go/API.html)
### API Reference : Bucket Operations
* [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket)
* [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets)
* [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists)
* [`RemoveBucket`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveBucket)
* [`ListObjects`](https://min.io/docs/minio/linux/developers/go/API.html#ListObjects)
* [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads)
### API Reference : Bucket policy Operations
* [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy)
* [`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy)
### API Reference : Bucket notification Operations
* [`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification)
* [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification)
* [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification)
* [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification) (MinIO Extension)
* [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO Extension)
### API Reference : File Object Operations
* [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject)
* [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FGetObject)
### API Reference : Object Operations
* [`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject)
* [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject)
* [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming)
* [`StatObject`](https://min.io/docs/minio/linux/developers/go/API.html#StatObject)
* [`CopyObject`](https://min.io/docs/minio/linux/developers/go/API.html#CopyObject)
* [`RemoveObject`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObject)
* [`RemoveObjects`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObjects)
* [`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload)
* [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent)
### API Reference : Presigned Operations
* [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject)
* [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject)
* [`PresignedHeadObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedHeadObject)
* [`PresignedPostPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPostPolicy)
### API Reference : Client custom settings
* [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo)
* [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn)
* [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff)
## Full Examples
### Full Examples : Bucket Operations
* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
### Full Examples : Bucket policy Operations
* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
### Full Examples : Bucket lifecycle Operations
* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go)
* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go)
### Full Examples : Bucket encryption Operations
* [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go)
* [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go)
* [deletebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketencryption.go)
### Full Examples : Bucket replication Operations
* [setbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketreplication.go)
* [getbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketreplication.go)
* [removebucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketreplication.go)
### Full Examples : Bucket notification Operations
* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension)
* [listennotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listen-notification.go) (MinIO Extension)
### Full Examples : File Object Operations
* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go)
* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go)
### Full Examples : Object Operations
* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go)
* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go)
* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
### Full Examples : Encrypted Object Operations
* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
### Full Examples : Presigned Operations
* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
## Explore Further
* [Complete Documentation](https://min.io/docs/minio/kubernetes/upstream/index.html)
* [MinIO Go Client SDK API Reference](https://min.io/docs/minio/linux/developers/go/API.html)
## Contribute
[Contributors Guide](https://github.com/minio/minio-go/blob/master/docs/zh_CN/CONTRIBUTING.md)

View File

@@ -21,9 +21,10 @@ import (
"bytes"
"context"
"encoding/xml"
"io/ioutil"
"io"
"net/http"
"net/url"
"time"
"github.com/minio/minio-go/v7/pkg/lifecycle"
"github.com/minio/minio-go/v7/pkg/s3utils"
@@ -102,29 +103,36 @@ func (c *Client) removeBucketLifecycle(ctx context.Context, bucketName string) e
// GetBucketLifecycle fetches the bucket lifecycle configuration
func (c *Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lifecycle.Configuration, error) {
lc, _, err := c.GetBucketLifecycleWithInfo(ctx, bucketName)
return lc, err
}
// GetBucketLifecycleWithInfo fetches the bucket lifecycle configuration along with when it was last updated
func (c *Client) GetBucketLifecycleWithInfo(ctx context.Context, bucketName string) (*lifecycle.Configuration, time.Time, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, err
return nil, time.Time{}, err
}
bucketLifecycle, err := c.getBucketLifecycle(ctx, bucketName)
bucketLifecycle, updatedAt, err := c.getBucketLifecycle(ctx, bucketName)
if err != nil {
return nil, err
return nil, time.Time{}, err
}
config := lifecycle.NewConfiguration()
if err = xml.Unmarshal(bucketLifecycle, config); err != nil {
return nil, err
return nil, time.Time{}, err
}
return config, nil
return config, updatedAt, nil
}
// Request server for current bucket lifecycle.
func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, error) {
func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, time.Time, error) {
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
urlValues.Set("lifecycle", "")
urlValues.Set("withUpdatedAt", "true")
// Execute GET on bucket to get lifecycle.
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
@@ -134,14 +142,28 @@ func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]b
defer closeResponse(resp)
if err != nil {
return nil, err
return nil, time.Time{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp, bucketName, "")
return nil, time.Time{}, httpRespToErrorResponse(resp, bucketName, "")
}
}
return ioutil.ReadAll(resp.Body)
lcBytes, err := io.ReadAll(resp.Body)
if err != nil {
return nil, time.Time{}, err
}
const minIOLifecycleCfgUpdatedAt = "X-Minio-LifecycleConfig-UpdatedAt"
var updatedAt time.Time
if timeStr := resp.Header.Get(minIOLifecycleCfgUpdatedAt); timeStr != "" {
updatedAt, err = time.Parse(iso8601DateFormat, timeStr)
if err != nil {
return nil, time.Time{}, err
}
}
return lcBytes, updatedAt, nil
}
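For illustration, a minimal sketch of calling the new two-value API introduced above (assuming an initialized `*minio.Client` named `client`; the bucket name is hypothetical):
```go
package examples

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
)

// fetchLifecycle is a sketch only; client initialization is elided
// (see the README quickstart above).
func fetchLifecycle(client *minio.Client) {
	cfg, updatedAt, err := client.GetBucketLifecycleWithInfo(context.Background(), "testbucket")
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("lifecycle config has %d rule(s), last updated %s", len(cfg.Rules), updatedAt)
}
```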

View File

@@ -166,6 +166,7 @@ func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefi
// Prepare urlValues to pass into the request on every loop
urlValues := make(url.Values)
urlValues.Set("ping", "10")
urlValues.Set("prefix", prefix)
urlValues.Set("suffix", suffix)
urlValues["events"] = events
@@ -224,6 +225,12 @@ func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefi
closeResponse(resp)
continue
}
// Empty events pinged from the server
if len(notificationInfo.Records) == 0 && notificationInfo.Err == nil {
continue
}
// Send notificationInfo
select {
case notificationInfoCh <- notificationInfo:
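As a usage sketch of the listener loop after this change (keep-alive pings with empty `Records` no longer reach the channel); the bucket name, prefix, suffix, and event filter below are illustrative:
```go
package examples

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
)

// listenEvents is a sketch only; client initialization is elided.
func listenEvents(client *minio.Client) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	for info := range client.ListenBucketNotification(ctx, "testbucket", "photos/", ".jpg",
		[]string{"s3:ObjectCreated:*"}) {
		if info.Err != nil {
			log.Fatalln(info.Err)
		}
		// Empty keep-alive pings are already filtered out server-side of this channel.
		for _, record := range info.Records {
			log.Printf("event %s on %s", record.EventName, record.S3.Object.Key)
		}
	}
}
```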

View File

@@ -18,7 +18,7 @@ package minio
import (
"context"
"io/ioutil"
"io"
"net/http"
"net/url"
"strings"
@@ -137,7 +137,7 @@ func (c *Client) getBucketPolicy(ctx context.Context, bucketName string) (string
}
}
bucketPolicyBuf, err := ioutil.ReadAll(resp.Body)
bucketPolicyBuf, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}

View File

@@ -22,7 +22,7 @@ import (
"context"
"encoding/json"
"encoding/xml"
"io/ioutil"
"io"
"net/http"
"net/url"
"time"
@@ -180,7 +180,7 @@ func (c *Client) GetBucketReplicationMetrics(ctx context.Context, bucketName str
if resp.StatusCode != http.StatusOK {
return s, httpRespToErrorResponse(resp, bucketName, "")
}
respBytes, err := ioutil.ReadAll(resp.Body)
respBytes, err := io.ReadAll(resp.Body)
if err != nil {
return s, err
}
@@ -219,7 +219,7 @@ func (c *Client) ResetBucketReplicationOnTarget(ctx context.Context, bucketName
// resetBucketReplicationOnTarget kicks off replication of previously replicated objects if ExistingObjectReplication
// is enabled in the replication config
func (c *Client) resetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string, resetID string) (rinfo replication.ResyncTargetsInfo, err error) {
func (c *Client) resetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn, resetID string) (rinfo replication.ResyncTargetsInfo, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return
@@ -289,3 +289,67 @@ func (c *Client) GetBucketReplicationResyncStatus(ctx context.Context, bucketNam
}
return rinfo, nil
}
// GetBucketReplicationMetricsV2 fetches bucket replication status metrics
func (c *Client) GetBucketReplicationMetricsV2(ctx context.Context, bucketName string) (s replication.MetricsV2, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return s, err
}
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
urlValues.Set("replication-metrics", "2")
// Execute GET on bucket to get replication metrics.
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
})
defer closeResponse(resp)
if err != nil {
return s, err
}
if resp.StatusCode != http.StatusOK {
return s, httpRespToErrorResponse(resp, bucketName, "")
}
respBytes, err := io.ReadAll(resp.Body)
if err != nil {
return s, err
}
if err := json.Unmarshal(respBytes, &s); err != nil {
return s, err
}
return s, nil
}
// CheckBucketReplication validates if replication is set up properly for a bucket
func (c *Client) CheckBucketReplication(ctx context.Context, bucketName string) (err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
urlValues.Set("replication-check", "")
// Execute GET on bucket to get replication config.
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
})
defer closeResponse(resp)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp, bucketName, "")
}
return nil
}
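A usage sketch for the two new calls (assuming an initialized `*minio.Client` named `client`; the bucket name is illustrative):
```go
package examples

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
)

// replicationStatus is a sketch only; client initialization is elided.
func replicationStatus(client *minio.Client) {
	ctx := context.Background()

	// Validate the replication setup before reading metrics.
	if err := client.CheckBucketReplication(ctx, "testbucket"); err != nil {
		log.Fatalln(err)
	}

	metrics, err := client.GetBucketReplicationMetricsV2(ctx, "testbucket")
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("replication metrics: %+v", metrics)
}
```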

View File

@@ -22,7 +22,6 @@ import (
"encoding/xml"
"errors"
"io"
"io/ioutil"
"net/http"
"net/url"
@@ -58,7 +57,7 @@ func (c *Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags
return nil, httpRespToErrorResponse(resp, bucketName, "")
}
defer io.Copy(ioutil.Discard, resp.Body)
defer io.Copy(io.Discard, resp.Body)
return tags.ParseBucketXML(resp.Body)
}

View File

@@ -21,7 +21,6 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
@@ -223,6 +222,9 @@ func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuc
if dstOpts.Internal.ReplicationRequest {
headers.Set(minIOBucketReplicationRequest, "true")
}
if dstOpts.Internal.ReplicationValidityCheck {
headers.Set(minIOBucketReplicationCheck, "true")
}
if !dstOpts.Internal.LegalholdTimestamp.IsZero() {
headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
}
@@ -284,8 +286,8 @@ func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuc
return objInfo, nil
}
func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string,
partID int, startOffset int64, length int64, metadata map[string]string,
func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
partID int, startOffset, length int64, metadata map[string]string,
) (p CompletePart, err error) {
headers := make(http.Header)
@@ -516,7 +518,7 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ..
return UploadInfo{}, err
}
if dst.Progress != nil {
io.CopyN(ioutil.Discard, dst.Progress, end-start+1)
io.CopyN(io.Discard, dst.Progress, end-start+1)
}
objParts = append(objParts, complPart)
partIndex++
@@ -525,7 +527,7 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ..
// 4. Make final complete-multipart request.
uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID,
completeMultipartUpload{Parts: objParts}, PutObjectOptions{})
completeMultipartUpload{Parts: objParts}, PutObjectOptions{ServerSideEncryption: dst.Encryption})
if err != nil {
return UploadInfo{}, err
}
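To illustrate the fix above, which now forwards `dst.Encryption` to the final complete-multipart request, a minimal sketch (bucket and object names are hypothetical):
```go
package examples

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/encrypt"
)

// composeEncrypted is a sketch only; client initialization is elided.
func composeEncrypted(client *minio.Client) {
	dst := minio.CopyDestOptions{
		Bucket:     "testbucket",
		Object:     "merged-object",
		Encryption: encrypt.NewSSE(), // SSE-S3; now also applied when completing the multipart upload
	}
	srcs := []minio.CopySrcOptions{
		{Bucket: "testbucket", Object: "part-one"},
		{Bucket: "testbucket", Object: "part-two"},
	}
	info, err := client.ComposeObject(context.Background(), dst, srcs...)
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("composed %s of size %d", info.Key, info.Size)
}
```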

View File

@@ -20,7 +20,6 @@ package minio
import (
"context"
"io"
"io/ioutil"
"net/http"
)
@@ -54,7 +53,7 @@ func (c *Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySr
// Update the progress properly after successful copy.
if dst.Progress != nil {
io.Copy(ioutil.Discard, io.LimitReader(dst.Progress, dst.Size))
io.Copy(io.Discard, io.LimitReader(dst.Progress, dst.Size))
}
cpObjRes := copyObjectResult{}

View File

@@ -21,6 +21,8 @@ import (
"encoding/xml"
"io"
"net/http"
"net/url"
"strings"
"time"
)
@@ -43,14 +45,14 @@ type StringMap map[string]string
// if m is nil it can be initialized, which is often the case if m is
// nested in another xml structure. This is also why the first thing done
// on the first line is initialize it.
func (m *StringMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
func (m *StringMap) UnmarshalXML(d *xml.Decoder, _ xml.StartElement) error {
*m = StringMap{}
type Item struct {
Key string
Value string
}
for {
var e Item
// Format is <key>value</key>
var e struct {
XMLName xml.Name
Value string `xml:",chardata"`
}
err := d.Decode(&e)
if err == io.EOF {
break
@@ -58,11 +60,63 @@ func (m *StringMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
if err != nil {
return err
}
(*m)[e.Key] = e.Value
(*m)[e.XMLName.Local] = e.Value
}
return nil
}
// URLMap represents map with custom UnmarshalXML
type URLMap map[string]string
// UnmarshalXML unmarshals the XML into a map of string to strings,
// creating a key in the map for each tag and setting its value to the
// tag's contents.
//
// The fact this function is on the pointer of Map is important, so that
// if m is nil it can be initialized, which is often the case if m is
// nested in another xml structure. This is also why the first thing done
// on the first line is initialize it.
func (m *URLMap) UnmarshalXML(d *xml.Decoder, se xml.StartElement) error {
*m = URLMap{}
var tgs string
if err := d.DecodeElement(&tgs, &se); err != nil {
if err == io.EOF {
return nil
}
return err
}
for tgs != "" {
var key string
key, tgs, _ = stringsCut(tgs, "&")
if key == "" {
continue
}
key, value, _ := stringsCut(key, "=")
key, err := url.QueryUnescape(key)
if err != nil {
return err
}
value, err = url.QueryUnescape(value)
if err != nil {
return err
}
(*m)[key] = value
}
return nil
}
// stringsCut slices s around the first instance of sep,
// returning the text before and after sep.
// The found result reports whether sep appears in s.
// If sep does not appear in s, cut returns s, "", false.
func stringsCut(s, sep string) (before, after string, found bool) {
if i := strings.Index(s, sep); i >= 0 {
return s[:i], s[i+len(sep):], true
}
return s, "", false
}
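A small worked example of this decoding path; the wrapper struct is hypothetical and exists only to drive `UnmarshalXML` on the exported `URLMap` type:
```go
package main

import (
	"encoding/xml"
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
)

func main() {
	// UserTags arrives as a URL-encoded string inside an XML element.
	var v struct {
		UserTags minio.URLMap `xml:"UserTags"`
	}
	data := []byte(`<Object><UserTags>env=prod&amp;team=storage</UserTags></Object>`)
	if err := xml.Unmarshal(data, &v); err != nil {
		log.Fatalln(err)
	}
	fmt.Println(v.UserTags) // map[env:prod team:storage]
}
```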
// Owner name.
type Owner struct {
XMLName xml.Name `xml:"Owner" json:"owner"`
@@ -87,6 +141,8 @@ type UploadInfo struct {
ExpirationRuleID string
// Verified checksum values, if any.
// Values are base64 (standard) encoded.
// For multipart objects this is a checksum of the checksums of each part.
ChecksumCRC32 string
ChecksumCRC32C string
ChecksumSHA1 string
@@ -119,10 +175,12 @@ type ObjectInfo struct {
Metadata http.Header `json:"metadata" xml:"-"`
// x-amz-meta-* headers stripped "x-amz-meta-" prefix containing the first value.
// Only returned by MinIO servers.
UserMetadata StringMap `json:"userMetadata,omitempty"`
// x-amz-tagging values in their k/v values.
UserTags map[string]string `json:"userTags"`
// Only returned by MinIO servers.
UserTags URLMap `json:"userTags,omitempty" xml:"UserTags"`
// x-amz-tagging-count value
UserTagCount int
@@ -162,6 +220,11 @@ type ObjectInfo struct {
ChecksumSHA1 string
ChecksumSHA256 string
Internal *struct {
K int // Data blocks
M int // Parity blocks
} `xml:"Internal"`
// Error
Err error `json:"-"`
}

View File

@@ -22,8 +22,8 @@ import (
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
)
/* **** SAMPLE ERROR RESPONSE ****
@@ -108,7 +108,7 @@ const (
func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
// read the whole body (up to 1MB)
const maxBodyLength = 1 << 20
body, err := ioutil.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
if err != nil {
return nil, err
}
@@ -189,6 +189,15 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
}
}
code := resp.Header.Get("x-minio-error-code")
if code != "" {
errResp.Code = code
}
desc := resp.Header.Get("x-minio-error-desc")
if desc != "" {
errResp.Message = strings.Trim(desc, `"`)
}
// Save hostID, requestID and region information
// from headers if not available through error XML.
if errResp.RequestID == "" {
@@ -253,26 +262,6 @@ func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string)
}
}
// errInvalidBucketName - Invalid bucket name response.
func errInvalidBucketName(message string) error {
return ErrorResponse{
StatusCode: http.StatusBadRequest,
Code: "InvalidBucketName",
Message: message,
RequestID: "minio",
}
}
// errInvalidObjectName - Invalid object name response.
func errInvalidObjectName(message string) error {
return ErrorResponse{
StatusCode: http.StatusNotFound,
Code: "NoSuchKey",
Message: message,
RequestID: "minio",
}
}
// errInvalidArgument - Invalid argument response.
func errInvalidArgument(message string) error {
return ErrorResponse{

View File

@@ -23,8 +23,6 @@ import (
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"sync"
"github.com/minio/minio-go/v7/pkg/s3utils"
@@ -552,6 +550,8 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
}
}
newOffset := o.currOffset
// Switch through whence.
switch whence {
default:
@@ -560,12 +560,12 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
if o.objectInfo.Size > -1 && offset > o.objectInfo.Size {
return 0, io.EOF
}
o.currOffset = offset
newOffset = offset
case 1:
if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size {
return 0, io.EOF
}
o.currOffset += offset
newOffset += offset
case 2:
// If we don't know the object size return an error for io.SeekEnd
if o.objectInfo.Size < 0 {
@@ -581,7 +581,7 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
if o.objectInfo.Size+offset < 0 {
return 0, errInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence))
}
o.currOffset = o.objectInfo.Size + offset
newOffset = o.objectInfo.Size + offset
}
// Reset the saved error since we successfully seeked, let the Read
// and ReadAt decide.
@@ -589,8 +589,9 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
o.prevErr = nil
}
// Ask lower level to fetch again from source
o.seekData = true
// Ask lower level to fetch again from source when necessary
o.seekData = (newOffset != o.currOffset) || o.seekData
o.currOffset = newOffset
// Return the effective offset.
return o.currOffset, nil
@@ -654,19 +655,11 @@ func (c *Client) getObject(ctx context.Context, bucketName, objectName string, o
return nil, ObjectInfo{}, nil, err
}
urlValues := make(url.Values)
if opts.VersionID != "" {
urlValues.Set("versionId", opts.VersionID)
}
if opts.PartNumber > 0 {
urlValues.Set("partNumber", strconv.Itoa(opts.PartNumber))
}
// Execute GET on objectName.
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
queryValues: opts.toQueryValues(),
customHeader: opts.Header(),
contentSHA256Hex: emptySHA256Hex,
})
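A brief sketch of the adjusted Seek behavior (names are illustrative): a seek that lands on the current offset no longer sets `seekData`, so the next Read keeps streaming from the existing request instead of re-fetching:
```go
package examples

import (
	"context"
	"io"
	"log"

	"github.com/minio/minio-go/v7"
)

// seekNoop is a sketch only; client initialization is elided.
func seekNoop(client *minio.Client) {
	obj, err := client.GetObject(context.Background(), "testbucket", "testdata",
		minio.GetObjectOptions{})
	if err != nil {
		log.Fatalln(err)
	}
	defer obj.Close()

	buf := make([]byte, 1024)
	if _, err := obj.Read(buf); err != nil && err != io.EOF {
		log.Fatalln(err)
	}

	// A no-op seek: the offset is unchanged, so no new range request is issued.
	if _, err := obj.Seek(0, io.SeekCurrent); err != nil {
		log.Fatalln(err)
	}
}
```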

View File

@@ -20,6 +20,8 @@ package minio
import (
"fmt"
"net/http"
"net/url"
"strconv"
"time"
"github.com/minio/minio-go/v7/pkg/encrypt"
@@ -27,14 +29,16 @@ import (
// AdvancedGetOptions for internal use by MinIO server - not intended for client use.
type AdvancedGetOptions struct {
ReplicationDeleteMarker bool
ReplicationProxyRequest string
ReplicationDeleteMarker bool
IsReplicationReadyForDeleteMarker bool
ReplicationProxyRequest string
}
// GetObjectOptions are used to specify additional headers or options
// during GET requests.
type GetObjectOptions struct {
headers map[string]string
reqParams url.Values
ServerSideEncryption encrypt.ServerSide
VersionID string
PartNumber int
@@ -82,6 +86,34 @@ func (o *GetObjectOptions) Set(key, value string) {
o.headers[http.CanonicalHeaderKey(key)] = value
}
// SetReqParam - set request query string parameter
// supported key: see supportedQueryValues and allowedCustomQueryPrefix.
// If an unsupported key is passed in, it will be ignored and nothing will be done.
func (o *GetObjectOptions) SetReqParam(key, value string) {
if !isCustomQueryValue(key) && !isStandardQueryValue(key) {
// do nothing
return
}
if o.reqParams == nil {
o.reqParams = make(url.Values)
}
o.reqParams.Set(key, value)
}
// AddReqParam - add request query string parameter
// supported key: see supportedQueryValues and allowedCustomQueryPrefix.
// If an unsupported key is passed in, it will be ignored and nothing will be done.
func (o *GetObjectOptions) AddReqParam(key, value string) {
if !isCustomQueryValue(key) && !isStandardQueryValue(key) {
// do nothing
return
}
if o.reqParams == nil {
o.reqParams = make(url.Values)
}
o.reqParams.Add(key, value)
}
// SetMatchETag - set match etag.
func (o *GetObjectOptions) SetMatchETag(etag string) error {
if etag == "" {
@@ -148,3 +180,24 @@ func (o *GetObjectOptions) SetRange(start, end int64) error {
}
return nil
}
// toQueryValues - Convert the versionId, partNumber, and reqParams in Options to query string parameters.
func (o *GetObjectOptions) toQueryValues() url.Values {
urlValues := make(url.Values)
if o.VersionID != "" {
urlValues.Set("versionId", o.VersionID)
}
if o.PartNumber > 0 {
urlValues.Set("partNumber", strconv.Itoa(o.PartNumber))
}
if o.reqParams != nil {
for key, values := range o.reqParams {
for _, value := range values {
urlValues.Add(key, value)
}
}
}
return urlValues
}
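A minimal sketch of the new request-parameter setters; the `response-*` keys below are standard S3 response-header overrides, and bucket/object names are illustrative:
```go
package examples

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
)

// getWithParams is a sketch only; client initialization is elided.
func getWithParams(client *minio.Client) {
	opts := minio.GetObjectOptions{}
	// Unsupported keys are silently ignored, per SetReqParam above.
	opts.SetReqParam("response-content-type", "application/json")
	opts.SetReqParam("response-content-disposition", "inline")

	obj, err := client.GetObject(context.Background(), "testbucket", "testdata", opts)
	if err != nil {
		log.Fatalln(err)
	}
	defer obj.Close()
}
```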

View File

@@ -97,7 +97,15 @@ func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts List
// Initiate list objects goroutine here.
go func(objectStatCh chan<- ObjectInfo) {
defer close(objectStatCh)
defer func() {
if contextCanceled(ctx) {
objectStatCh <- ObjectInfo{
Err: ctx.Err(),
}
}
close(objectStatCh)
}()
// Save continuationToken for next request.
var continuationToken string
for {
@@ -168,7 +176,7 @@ func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts List
// ?delimiter - A delimiter is a character you use to group keys.
// ?start-after - Sets a marker to start listing lexically at this key onwards.
// ?max-keys - Sets the maximum number of keys returned in the response body.
func (c *Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter string, startAfter string, maxkeys int, headers http.Header) (ListBucketV2Result, error) {
func (c *Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter, startAfter string, maxkeys int, headers http.Header) (ListBucketV2Result, error) {
// Validate bucket name.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ListBucketV2Result{}, err
@@ -304,7 +312,14 @@ func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListOb
// Initiate list objects goroutine here.
go func(objectStatCh chan<- ObjectInfo) {
defer close(objectStatCh)
defer func() {
if contextCanceled(ctx) {
objectStatCh <- ObjectInfo{
Err: ctx.Err(),
}
}
close(objectStatCh)
}()
marker := opts.StartAfter
for {
@@ -321,6 +336,7 @@ func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListOb
for _, object := range result.Contents {
// Save the marker.
marker = object.Key
object.ETag = trimEtag(object.ETag)
select {
// Send object content.
case objectStatCh <- object:
@@ -393,7 +409,14 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts
// Initiate list objects goroutine here.
go func(resultCh chan<- ObjectInfo) {
defer close(resultCh)
defer func() {
if contextCanceled(ctx) {
resultCh <- ObjectInfo{
Err: ctx.Err(),
}
}
close(resultCh)
}()
var (
keyMarker = ""
@@ -402,7 +425,7 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts
for {
// Get list of objects a maximum of 1000 per request.
result, err := c.listObjectVersionsQuery(ctx, bucketName, opts.Prefix, keyMarker, versionIDMarker, delimiter, opts.MaxKeys, opts.headers)
result, err := c.listObjectVersionsQuery(ctx, bucketName, opts, keyMarker, versionIDMarker, delimiter)
if err != nil {
sendObjectInfo(ObjectInfo{
Err: err,
@@ -422,6 +445,9 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts
IsLatest: version.IsLatest,
VersionID: version.VersionID,
IsDeleteMarker: version.isDeleteMarker,
UserTags: version.UserTags,
UserMetadata: version.UserMetadata,
Internal: version.Internal,
}
select {
// Send object version info.
@@ -474,13 +500,13 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-keys - Sets the maximum number of keys returned in the response body.
func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, keyMarker, versionIDMarker, delimiter string, maxkeys int, headers http.Header) (ListVersionsResult, error) {
func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName string, opts ListObjectsOptions, keyMarker, versionIDMarker, delimiter string) (ListVersionsResult, error) {
// Validate bucket name.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ListVersionsResult{}, err
}
// Validate object prefix.
if err := s3utils.CheckValidObjectNamePrefix(prefix); err != nil {
if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil {
return ListVersionsResult{}, err
}
// Get resources properly escaped and lined up before
@@ -491,7 +517,7 @@ func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix
urlValues.Set("versions", "")
// Set object prefix, prefix value to be set to empty is okay.
urlValues.Set("prefix", prefix)
urlValues.Set("prefix", opts.Prefix)
// Set delimiter, delimiter value to be set to empty is okay.
urlValues.Set("delimiter", delimiter)
@@ -502,8 +528,8 @@ func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix
}
// Set max keys.
if maxkeys > 0 {
urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
if opts.MaxKeys > 0 {
urlValues.Set("max-keys", fmt.Sprintf("%d", opts.MaxKeys))
}
// Set version ID marker
@@ -511,6 +537,10 @@ func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix
urlValues.Set("version-id-marker", versionIDMarker)
}
if opts.WithMetadata {
urlValues.Set("metadata", "true")
}
// Always set encoding-type
urlValues.Set("encoding-type", "url")
@@ -519,7 +549,7 @@ func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix
bucketName: bucketName,
queryValues: urlValues,
contentSHA256Hex: emptySHA256Hex,
customHeader: headers,
customHeader: opts.headers,
})
defer closeResponse(resp)
if err != nil {
@@ -692,6 +722,10 @@ func (o *ListObjectsOptions) Set(key, value string) {
// for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) {
// fmt.Println(object)
// }
//
// If the caller cancels the context, the last entry on the 'chan ObjectInfo' will be the context.Err().
// The caller must drain the channel entirely and wait until the channel is closed before proceeding;
// without draining the channel completely you might leak goroutines.
func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
if opts.WithVersions {
return c.listObjectVersions(ctx, bucketName, opts)
@@ -732,6 +766,16 @@ func (c *Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPr
return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive)
}
// contextCanceled returns whether a context is canceled.
func contextCanceled(ctx context.Context) bool {
select {
case <-ctx.Done():
return true
default:
return false
}
}
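A minimal sketch of the drain-until-closed contract documented above (bucket and prefix are illustrative):
```go
package examples

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
)

// listAll is a sketch only; client initialization is elided.
func listAll(ctx context.Context, client *minio.Client) {
	for object := range client.ListObjects(ctx, "testbucket",
		minio.ListObjectsOptions{Prefix: "photos/", Recursive: true}) {
		if object.Err != nil {
			// On cancellation the final entry carries ctx.Err(); ranging
			// until the channel closes avoids leaking the goroutine.
			log.Println(object.Err)
			continue
		}
		log.Println(object.Key)
	}
}
```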
// listIncompleteUploads lists all incomplete uploads.
func (c *Client) listIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
// Allocate channel for multipart uploads.
@@ -759,7 +803,15 @@ func (c *Client) listIncompleteUploads(ctx context.Context, bucketName, objectPr
return objectMultipartStatCh
}
go func(objectMultipartStatCh chan<- ObjectMultipartInfo) {
defer close(objectMultipartStatCh)
defer func() {
if contextCanceled(ctx) {
objectMultipartStatCh <- ObjectMultipartInfo{
Err: ctx.Err(),
}
}
close(objectMultipartStatCh)
}()
// object and upload ID marker for future requests.
var objectMarker string
var uploadIDMarker string
@@ -897,6 +949,8 @@ func (c *Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyM
}
// listObjectParts lists all object parts recursively.
//
//lint:ignore U1000 Keep this around
func (c *Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) {
// Part number marker for the next batch of request.
var nextPartNumberMarker int

View File

@@ -32,6 +32,12 @@ import (
// to update tag(s) of a specific object version
type PutObjectTaggingOptions struct {
VersionID string
Internal AdvancedObjectTaggingOptions
}
// AdvancedObjectTaggingOptions for internal use by MinIO server - not intended for client use.
type AdvancedObjectTaggingOptions struct {
ReplicationProxyRequest string
}
// PutObjectTagging replaces or creates object tag(s) and can target
@@ -50,7 +56,10 @@ func (c *Client) PutObjectTagging(ctx context.Context, bucketName, objectName st
if opts.VersionID != "" {
urlValues.Set("versionId", opts.VersionID)
}
headers := make(http.Header, 0)
if opts.Internal.ReplicationProxyRequest != "" {
headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest)
}
reqBytes, err := xml.Marshal(otags)
if err != nil {
return err
@@ -63,6 +72,7 @@ func (c *Client) PutObjectTagging(ctx context.Context, bucketName, objectName st
contentBody: bytes.NewReader(reqBytes),
contentLength: int64(len(reqBytes)),
contentMD5Base64: sumMD5Base64(reqBytes),
customHeader: headers,
}
// Execute PUT to set a object tagging.
@@ -83,6 +93,7 @@ func (c *Client) PutObjectTagging(ctx context.Context, bucketName, objectName st
// to fetch the tagging key/value pairs
type GetObjectTaggingOptions struct {
VersionID string
Internal AdvancedObjectTaggingOptions
}
// GetObjectTagging fetches object tag(s) with options to target
@@ -96,12 +107,16 @@ func (c *Client) GetObjectTagging(ctx context.Context, bucketName, objectName st
if opts.VersionID != "" {
urlValues.Set("versionId", opts.VersionID)
}
headers := make(http.Header, 0)
if opts.Internal.ReplicationProxyRequest != "" {
headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest)
}
// Execute GET on object to get object tag(s)
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
customHeader: headers,
})
defer closeResponse(resp)
@@ -121,6 +136,7 @@ func (c *Client) GetObjectTagging(ctx context.Context, bucketName, objectName st
// RemoveObjectTaggingOptions holds the version id of the object to remove
type RemoveObjectTaggingOptions struct {
VersionID string
Internal AdvancedObjectTaggingOptions
}
// RemoveObjectTagging removes object tag(s) with options to control a specific object
@@ -134,12 +150,16 @@ func (c *Client) RemoveObjectTagging(ctx context.Context, bucketName, objectName
if opts.VersionID != "" {
urlValues.Set("versionId", opts.VersionID)
}
headers := make(http.Header, 0)
if opts.Internal.ReplicationProxyRequest != "" {
headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest)
}
// Execute DELETE on object to remove object tag(s)
resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
customHeader: headers,
})
defer closeResponse(resp)
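A usage sketch of `PutObjectTagging` with the expanded options struct (names are illustrative; the new `Internal` field is for MinIO server use and stays at its zero value here):
```go
package examples

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/tags"
)

// tagObject is a sketch only; client initialization is elided.
func tagObject(client *minio.Client) {
	t, err := tags.NewTags(map[string]string{"env": "prod"}, true)
	if err != nil {
		log.Fatalln(err)
	}
	err = client.PutObjectTagging(context.Background(), "testbucket", "testdata",
		t, minio.PutObjectTaggingOptions{})
	if err != nil {
		log.Fatalln(err)
	}
}
```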

View File

@@ -30,7 +30,7 @@ import (
// presignURL - Returns a presigned URL for an input 'method'.
// Expires maximum is 7 days - ie. 604800 seconds - and minimum is 1 second.
func (c *Client) presignURL(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) {
func (c *Client) presignURL(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) {
// Input validation.
if method == "" {
return nil, errInvalidArgument("method cannot be empty.")
@@ -66,7 +66,7 @@ func (c *Client) presignURL(ctx context.Context, method string, bucketName strin
// data without credentials. URL can have a maximum expiry of
// up to 7 days or a minimum of 1 second. Additionally you can override
// a set of response headers using the query parameters.
func (c *Client) PresignedGetObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
func (c *Client) PresignedGetObject(ctx context.Context, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
if err = s3utils.CheckValidObjectName(objectName); err != nil {
return nil, err
}
@@ -77,7 +77,7 @@ func (c *Client) PresignedGetObject(ctx context.Context, bucketName string, obje
// object metadata without credentials. URL can have a maximum expiry
// of up to 7 days or a minimum of 1 second. Additionally, you can override
// a set of response headers using the query parameters.
func (c *Client) PresignedHeadObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
func (c *Client) PresignedHeadObject(ctx context.Context, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
if err = s3utils.CheckValidObjectName(objectName); err != nil {
return nil, err
}
@@ -87,7 +87,7 @@ func (c *Client) PresignedHeadObject(ctx context.Context, bucketName string, obj
// PresignedPutObject - Returns a presigned URL to upload an object
// without credentials. URL can have a maximum expiry of up to 7 days
// or a minimum of 1 second.
func (c *Client) PresignedPutObject(ctx context.Context, bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
func (c *Client) PresignedPutObject(ctx context.Context, bucketName, objectName string, expires time.Duration) (u *url.URL, err error) {
if err = s3utils.CheckValidObjectName(objectName); err != nil {
return nil, err
}
@@ -101,14 +101,14 @@ func (c *Client) PresignedPutObject(ctx context.Context, bucketName string, obje
//
// FIXME: The extra header parameter should be included in Presign() in the next
// major version bump, and this function should then be deprecated.
func (c *Client) PresignHeader(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) {
func (c *Client) PresignHeader(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) {
return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, extraHeaders)
}
// Presign - returns a presigned URL for any http method of your choice along
// with custom request params and extra signed headers. URL can have a maximum
// expiry of up to 7 days or a minimum of 1 second.
func (c *Client) Presign(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
func (c *Client) Presign(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, nil)
}
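
A short sketch of the common case these signatures serve, assuming an initialized *minio.Client; the bucket, object and response-header override are placeholders:

```go
package example

import (
	"context"
	"fmt"
	"net/url"
	"time"

	"github.com/minio/minio-go/v7"
)

// shareObject returns a download URL valid for 15 minutes that also
// forces a file name on the downloading browser.
func shareObject(ctx context.Context, client *minio.Client) (*url.URL, error) {
	reqParams := make(url.Values)
	reqParams.Set("response-content-disposition", `attachment; filename="report.pdf"`)
	u, err := client.PresignedGetObject(ctx, "mybucket", "reports/report.pdf", 15*time.Minute, reqParams)
	if err != nil {
		return nil, err
	}
	fmt.Println("share this:", u.String())
	return u, nil
}
```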


@@ -42,7 +42,7 @@ func (c *Client) makeBucket(ctx context.Context, bucketName string, opts MakeBuc
return err
}
func (c *Client) doMakeBucket(ctx context.Context, bucketName string, location string, objectLockEnabled bool) (err error) {
func (c *Client) doMakeBucket(ctx context.Context, bucketName, location string, objectLockEnabled bool) (err error) {
defer func() {
// Save the location into cache on a successful makeBucket response.
if err == nil {


@@ -68,7 +68,7 @@ func isReadAt(reader io.Reader) (ok bool) {
// maxPartsCount - 10000
// minPartSize - 16MiB
// maxMultipartPutObjectSize - 5TiB
func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize, lastPartSize int64, err error) {
// If the object size is '-1', set it to 5TiB.
var unknownSize bool
if objectSize == -1 {


@@ -0,0 +1,164 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2023 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"context"
"encoding/json"
"errors"
"io"
"mime/multipart"
"net/http"
"strconv"
"strings"
"time"
"github.com/minio/minio-go/v7/pkg/encrypt"
)
// PutObjectFanOutEntry is the per-object fan-out entry metadata
type PutObjectFanOutEntry struct {
Key string `json:"key"`
UserMetadata map[string]string `json:"metadata,omitempty"`
UserTags map[string]string `json:"tags,omitempty"`
ContentType string `json:"contentType,omitempty"`
ContentEncoding string `json:"contentEncoding,omitempty"`
ContentDisposition string `json:"contentDisposition,omitempty"`
ContentLanguage string `json:"contentLanguage,omitempty"`
CacheControl string `json:"cacheControl,omitempty"`
Retention RetentionMode `json:"retention,omitempty"`
RetainUntilDate *time.Time `json:"retainUntil,omitempty"`
}
// PutObjectFanOutRequest is the request structure sent
// to the server to fan out the stream to multiple objects.
type PutObjectFanOutRequest struct {
Entries []PutObjectFanOutEntry
Checksum Checksum
SSE encrypt.ServerSide
}
// PutObjectFanOutResponse is the response structure sent
// by the server upon success or failure for each fan-out
// object key. Additionally, this response carries the ETag,
// VersionID and LastModified for each fan-out object.
type PutObjectFanOutResponse struct {
Key string `json:"key"`
ETag string `json:"etag,omitempty"`
VersionID string `json:"versionId,omitempty"`
LastModified *time.Time `json:"lastModified,omitempty"`
Error string `json:"error,omitempty"`
}
// PutObjectFanOut - is a variant of PutObject: instead of writing a single object from a single
// stream, multiple objects are written, defined via a list of PutObjectFanOutEntry items in a
// PutObjectFanOutRequest. Each entry carries an object keyname and its relevant metadata, if any.
// `Key` is mandatory; the rest of the options in each entry are optional.
func (c *Client) PutObjectFanOut(ctx context.Context, bucket string, fanOutData io.Reader, fanOutReq PutObjectFanOutRequest) ([]PutObjectFanOutResponse, error) {
if len(fanOutReq.Entries) == 0 {
return nil, errInvalidArgument("fan out requests cannot be empty")
}
policy := NewPostPolicy()
policy.SetBucket(bucket)
policy.SetKey(strconv.FormatInt(time.Now().UnixNano(), 16))
// Expires in 15 minutes.
policy.SetExpires(time.Now().UTC().Add(15 * time.Minute))
// Set encryption headers if any.
policy.SetEncryption(fanOutReq.SSE)
// Set checksum headers if any.
policy.SetChecksum(fanOutReq.Checksum)
url, formData, err := c.PresignedPostPolicy(ctx, policy)
if err != nil {
return nil, err
}
r, w := io.Pipe()
req, err := http.NewRequest(http.MethodPost, url.String(), r)
if err != nil {
w.Close()
return nil, err
}
var b strings.Builder
enc := json.NewEncoder(&b)
for _, req := range fanOutReq.Entries {
if req.Key == "" {
w.Close()
return nil, errors.New("PutObjectFanOutRequest.Key is mandatory and cannot be empty")
}
if err = enc.Encode(&req); err != nil {
w.Close()
return nil, err
}
}
mwriter := multipart.NewWriter(w)
req.Header.Add("Content-Type", mwriter.FormDataContentType())
go func() {
defer w.Close()
defer mwriter.Close()
for k, v := range formData {
if err := mwriter.WriteField(k, v); err != nil {
return
}
}
if err := mwriter.WriteField("x-minio-fanout-list", b.String()); err != nil {
return
}
mw, err := mwriter.CreateFormFile("file", "fanout-content")
if err != nil {
return
}
if _, err = io.Copy(mw, fanOutData); err != nil {
return
}
}()
resp, err := c.do(req)
if err != nil {
return nil, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp, bucket, "fanout-content")
}
dec := json.NewDecoder(resp.Body)
fanOutResp := make([]PutObjectFanOutResponse, 0, len(fanOutReq.Entries))
for dec.More() {
var m PutObjectFanOutResponse
if err = dec.Decode(&m); err != nil {
return nil, err
}
fanOutResp = append(fanOutResp, m)
}
return fanOutResp, nil
}
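
A minimal sketch of calling this new API, assuming an initialized *minio.Client against a MinIO server that supports fan-out; the keys and payload are placeholders:

```go
package example

import (
	"context"
	"fmt"
	"strings"

	"github.com/minio/minio-go/v7"
)

// fanOutPayload writes one payload to several keys in a single request.
func fanOutPayload(ctx context.Context, client *minio.Client) error {
	req := minio.PutObjectFanOutRequest{
		Entries: []minio.PutObjectFanOutEntry{
			{Key: "copies/a.txt"},
			{Key: "copies/b.txt", UserTags: map[string]string{"team": "dev"}},
		},
	}
	resps, err := client.PutObjectFanOut(ctx, "mybucket", strings.NewReader("shared payload"), req)
	if err != nil {
		return err
	}
	// Each entry reports success or failure individually.
	for _, r := range resps {
		if r.Error != "" {
			return fmt.Errorf("fan-out of %q failed: %s", r.Key, r.Error)
		}
		fmt.Println(r.Key, r.ETag, r.VersionID)
	}
	return nil
}
```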


@@ -26,7 +26,6 @@ import (
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"net/http"
"net/url"
"sort"
@@ -201,7 +200,9 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
opts = PutObjectOptions{}
opts = PutObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
}
if len(crcBytes) > 0 {
// Add hash of hashes.
crc.Reset()
@@ -386,6 +387,13 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
return UploadInfo{}, err
}
headers := opts.Header()
if s3utils.IsAmazonEndpoint(*c.endpointURL) {
headers.Del(encrypt.SseKmsKeyID) // Remove X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id not supported in CompleteMultipartUpload
headers.Del(encrypt.SseGenericHeader) // Remove X-Amz-Server-Side-Encryption not supported in CompleteMultipartUpload
headers.Del(encrypt.SseEncryptionContext) // Remove X-Amz-Server-Side-Encryption-Context not supported in CompleteMultipartUpload
}
// Instantiate all the complete multipart buffer.
completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
reqMetadata := requestMetadata{
@@ -395,7 +403,7 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
contentBody: completeMultipartUploadBuffer,
contentLength: int64(len(completeMultipartUploadBytes)),
contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
customHeader: opts.Header(),
customHeader: headers,
}
// Execute POST to complete multipart upload for an objectName.
@@ -412,7 +420,7 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
// Read resp.Body into a []bytes to parse for Error response inside the body
var b []byte
b, err = ioutil.ReadAll(resp.Body)
b, err = io.ReadAll(resp.Body)
if err != nil {
return UploadInfo{}, err
}
@@ -448,5 +456,10 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
Location: completeMultipartUploadResult.Location,
Expiration: expTime,
ExpirationRuleID: ruleID,
ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256,
ChecksumSHA1: completeMultipartUploadResult.ChecksumSHA1,
ChecksumCRC32: completeMultipartUploadResult.ChecksumCRC32,
ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C,
}, nil
}


@@ -28,6 +28,7 @@ import (
"net/url"
"sort"
"strings"
"sync"
"github.com/google/uuid"
"github.com/minio/minio-go/v7/pkg/s3utils"
@@ -44,7 +45,9 @@ import (
func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
reader io.Reader, size int64, opts PutObjectOptions,
) (info UploadInfo, err error) {
if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 {
if opts.ConcurrentStreamParts && opts.NumThreads > 1 {
info, err = c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts)
} else if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 {
// If the reader implements ReadAt and is not a *minio.Object, then we will use the parallel uploader.
info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
} else {
@@ -190,7 +193,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
}
sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress)
var trailer = make(http.Header, 1)
trailer := make(http.Header, 1)
if withChecksum {
crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil)))
@@ -200,7 +203,8 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
}
// Proceed to upload the part.
p := uploadPartParams{bucketName: bucketName,
p := uploadPartParams{
bucketName: bucketName,
objectName: objectName,
uploadID: uploadID,
reader: sectionReader,
@@ -241,7 +245,6 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
return UploadInfo{}, ctx.Err()
case uploadRes := <-uploadedPartsCh:
if uploadRes.Error != nil {
return UploadInfo{}, uploadRes.Error
}
@@ -266,6 +269,9 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
opts = PutObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
}
if withChecksum {
// Add hash of hashes.
crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
@@ -278,7 +284,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
}
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{})
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
if err != nil {
return UploadInfo{}, err
}
@@ -425,6 +431,212 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
opts = PutObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
}
if len(crcBytes) > 0 {
// Add hash of hashes.
crc.Reset()
crc.Write(crcBytes)
opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
}
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
if err != nil {
return UploadInfo{}, err
}
uploadInfo.Size = totalUploadedSize
return uploadInfo, nil
}
// putObjectMultipartStreamParallel uploads opts.NumThreads parts in parallel.
// This is expected to take opts.PartSize * opts.NumThreads * (GOGC / 100) bytes of buffer.
func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketName, objectName string,
reader io.Reader, opts PutObjectOptions,
) (info UploadInfo, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return UploadInfo{}, err
}
if err = s3utils.CheckValidObjectName(objectName); err != nil {
return UploadInfo{}, err
}
if !opts.SendContentMd5 {
if opts.UserMetadata == nil {
opts.UserMetadata = make(map[string]string, 1)
}
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
}
// Cancel all when an error occurs.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize)
if err != nil {
return UploadInfo{}, err
}
// Initiates a new multipart request
uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
if err != nil {
return UploadInfo{}, err
}
delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
// Aborts the multipart upload if the function returns
// any error; since we do not resume, we should purge
// the parts which have been uploaded to relinquish
// storage space.
defer func() {
if err != nil {
c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
}
}()
// Create checksums
// CRC32C is ~50% faster on AMD64 @ 30GB/s
var crcBytes []byte
crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
// Total data read and written to server. Should be equal to 'size' at the end of the call.
var totalUploadedSize int64
// Initialize parts uploaded map.
partsInfo := make(map[int]ObjectPart)
// Create a buffer.
nBuffers := int64(opts.NumThreads)
bufs := make(chan []byte, nBuffers)
all := make([]byte, nBuffers*partSize)
for i := int64(0); i < nBuffers; i++ {
bufs <- all[i*partSize : i*partSize+partSize]
}
var wg sync.WaitGroup
var mu sync.Mutex
errCh := make(chan error, opts.NumThreads)
reader = newHook(reader, opts.Progress)
// Part number always starts with '1'.
var partNumber int
for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
// Proceed to upload the part.
var buf []byte
select {
case buf = <-bufs:
case err = <-errCh:
cancel()
wg.Wait()
return UploadInfo{}, err
}
if int64(len(buf)) != partSize {
return UploadInfo{}, fmt.Errorf("read buffer < %d than expected partSize: %d", len(buf), partSize)
}
length, rerr := readFull(reader, buf)
if rerr == io.EOF && partNumber > 1 {
// Done
break
}
if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
cancel()
wg.Wait()
return UploadInfo{}, rerr
}
// Calculate md5sum.
customHeader := make(http.Header)
if !opts.SendContentMd5 {
// Add CRC32C instead.
crc.Reset()
crc.Write(buf[:length])
cSum := crc.Sum(nil)
customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
crcBytes = append(crcBytes, cSum...)
}
wg.Add(1)
go func(partNumber int) {
// Avoid declaring variables in the for loop
var md5Base64 string
if opts.SendContentMd5 {
md5Hash := c.md5Hasher()
md5Hash.Write(buf[:length])
md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil))
md5Hash.Close()
}
defer wg.Done()
p := uploadPartParams{
bucketName: bucketName,
objectName: objectName,
uploadID: uploadID,
reader: bytes.NewReader(buf[:length]),
partNumber: partNumber,
md5Base64: md5Base64,
size: int64(length),
sse: opts.ServerSideEncryption,
streamSha256: !opts.DisableContentSha256,
customHeader: customHeader,
}
objPart, uerr := c.uploadPart(ctx, p)
if uerr != nil {
errCh <- uerr
return
}
// Save successfully uploaded part metadata.
mu.Lock()
partsInfo[partNumber] = objPart
mu.Unlock()
// Send buffer back so it can be reused.
bufs <- buf
}(partNumber)
// Save successfully uploaded size.
totalUploadedSize += int64(length)
}
wg.Wait()
// Collect any error
select {
case err = <-errCh:
return UploadInfo{}, err
default:
}
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
// Loop over total uploaded parts to save them in
// Parts array before completing the multipart request.
for i := 1; i < partNumber; i++ {
part, ok := partsInfo[i]
if !ok {
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
}
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
ChecksumCRC32: part.ChecksumCRC32,
ChecksumCRC32C: part.ChecksumCRC32C,
ChecksumSHA1: part.ChecksumSHA1,
ChecksumSHA256: part.ChecksumSHA256,
})
}
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
opts = PutObjectOptions{}
if len(crcBytes) > 0 {
// Add hash of hashes.
@@ -530,6 +742,17 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string,
// Set headers.
customHeader := opts.Header()
// Add CRC when the client supports it, MD5 is not set, the endpoint is not Google, and we don't add SHA256 to chunks.
addCrc := c.trailingHeaderSupport && md5Base64 == "" && !s3utils.IsGoogleEndpoint(*c.endpointURL) && (opts.DisableContentSha256 || c.secure)
if addCrc {
// If user has added checksums, don't add them ourselves.
for k := range opts.UserMetadata {
if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") {
addCrc = false
}
}
}
// Populate request metadata.
reqMetadata := requestMetadata{
bucketName: bucketName,
@@ -540,6 +763,7 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string,
contentMD5Base64: md5Base64,
contentSHA256Hex: sha256Hex,
streamSha256: !opts.DisableContentSha256,
addCrc: addCrc,
}
if opts.Internal.SourceVersionID != "" {
if opts.Internal.SourceVersionID != nullVersionID {


@@ -56,14 +56,15 @@ func (r ReplicationStatus) Empty() bool {
// AdvancedPutOptions for internal use - to be utilized by the replication and ILM
// transition implementation on the MinIO server
type AdvancedPutOptions struct {
SourceVersionID string
SourceETag string
ReplicationStatus ReplicationStatus
SourceMTime time.Time
ReplicationRequest bool
RetentionTimestamp time.Time
TaggingTimestamp time.Time
LegalholdTimestamp time.Time
SourceVersionID string
SourceETag string
ReplicationStatus ReplicationStatus
SourceMTime time.Time
ReplicationRequest bool
RetentionTimestamp time.Time
TaggingTimestamp time.Time
LegalholdTimestamp time.Time
ReplicationValidityCheck bool
}
// PutObjectOptions represents options specified by user for PutObject call
@@ -76,6 +77,7 @@ type PutObjectOptions struct {
ContentDisposition string
ContentLanguage string
CacheControl string
Expires time.Time
Mode RetentionMode
RetainUntilDate time.Time
ServerSideEncryption encrypt.ServerSide
@@ -87,7 +89,34 @@ type PutObjectOptions struct {
SendContentMd5 bool
DisableContentSha256 bool
DisableMultipart bool
Internal AdvancedPutOptions
// ConcurrentStreamParts will create NumThreads buffers of PartSize bytes,
// fill them serially and upload them in parallel.
// This can be used for faster uploads on non-seekable or slow-to-seek input.
ConcurrentStreamParts bool
Internal AdvancedPutOptions
customHeaders http.Header
}
// SetMatchETag sets an If-Match precondition: the PUT fails with an error
// unless the object's current etag matches. This is a MinIO-specific
// extension to support optimistic locking semantics.
func (opts *PutObjectOptions) SetMatchETag(etag string) {
if opts.customHeaders == nil {
opts.customHeaders = http.Header{}
}
opts.customHeaders.Set("If-Match", "\""+etag+"\"")
}
// SetMatchETagExcept sets an If-None-Match precondition: the PUT fails with
// an error if the object's current etag matches. This is a MinIO-specific
// extension to support optimistic locking semantics.
func (opts *PutObjectOptions) SetMatchETagExcept(etag string) {
if opts.customHeaders == nil {
opts.customHeaders = http.Header{}
}
opts.customHeaders.Set("If-None-Match", "\""+etag+"\"")
}
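
A sketch of the compare-and-swap pattern these setters enable, assuming an initialized *minio.Client against a MinIO server (the If-Match extension is MinIO-specific); bucket and object names are placeholders:

```go
package example

import (
	"bytes"
	"context"

	"github.com/minio/minio-go/v7"
)

// compareAndSwap overwrites state.json only if it still carries the ETag
// we read earlier; a concurrent writer makes the PUT fail instead.
func compareAndSwap(ctx context.Context, client *minio.Client, payload []byte) error {
	stat, err := client.StatObject(ctx, "mybucket", "state.json", minio.StatObjectOptions{})
	if err != nil {
		return err
	}
	opts := minio.PutObjectOptions{ContentType: "application/json"}
	opts.SetMatchETag(stat.ETag) // fail unless the object is unchanged
	_, err = client.PutObject(ctx, "mybucket", "state.json",
		bytes.NewReader(payload), int64(len(payload)), opts)
	return err
}
```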
// getNumThreads - gets the number of threads to be used in the multipart
@@ -125,6 +154,10 @@ func (opts PutObjectOptions) Header() (header http.Header) {
header.Set("Cache-Control", opts.CacheControl)
}
if !opts.Expires.IsZero() {
header.Set("Expires", opts.Expires.UTC().Format(http.TimeFormat))
}
if opts.Mode != "" {
header.Set(amzLockMode, opts.Mode.String())
}
@@ -161,6 +194,9 @@ func (opts PutObjectOptions) Header() (header http.Header) {
if opts.Internal.ReplicationRequest {
header.Set(minIOBucketReplicationRequest, "true")
}
if opts.Internal.ReplicationValidityCheck {
header.Set(minIOBucketReplicationCheck, "true")
}
if !opts.Internal.LegalholdTimestamp.IsZero() {
header.Set(minIOBucketReplicationObjectLegalHoldTimestamp, opts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
}
@@ -182,6 +218,12 @@ func (opts PutObjectOptions) Header() (header http.Header) {
header.Set("x-amz-meta-"+k, v)
}
}
// set any other additional custom headers.
for k, v := range opts.customHeaders {
header[k] = v
}
return
}
@@ -272,6 +314,9 @@ func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName str
if opts.DisableMultipart {
return UploadInfo{}, errors.New("no length provided and multipart disabled")
}
if opts.ConcurrentStreamParts && opts.NumThreads > 1 {
return c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts)
}
return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts)
}
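
A sketch of uploading a non-seekable stream of unknown length through the new parallel path, assuming an initialized *minio.Client; the part size and thread count are illustrative only:

```go
package example

import (
	"context"
	"io"

	"github.com/minio/minio-go/v7"
)

// uploadStream pushes a pipe (size -1, not seekable) using 4 buffers of
// 16 MiB each, filled serially and uploaded concurrently.
func uploadStream(ctx context.Context, client *minio.Client, r io.Reader) (minio.UploadInfo, error) {
	return client.PutObject(ctx, "mybucket", "big.bin", r, -1, minio.PutObjectOptions{
		ConcurrentStreamParts: true,
		NumThreads:            4,
		PartSize:              16 << 20, // expect roughly NumThreads * PartSize bytes of buffer
	})
}
```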


@@ -24,7 +24,7 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"strings"
"sync"
@@ -49,6 +49,10 @@ type SnowballOptions struct {
// Compression will typically reduce memory and network usage;
// compression can safely be enabled with MinIO hosts.
Compress bool
// SkipErrs, if enabled, will skip any errors encountered while
// reading the object content when creating the snowball archive
SkipErrs bool
}
// SnowballObject contains information about a single object to be added to the snowball.
@@ -60,12 +64,21 @@ type SnowballObject struct {
Size int64
// Modtime to apply to the object.
// If ModTime is the zero value, the current time will be used.
ModTime time.Time
// Content of the object.
// Exactly 'Size' number of bytes must be provided.
Content io.Reader
// VersionID of the object; if empty, a new versionID will be generated
VersionID string
// Headers contains more options for this object upload, the same as you
// would include in a regular PutObject operation, such as user metadata
// and content-disposition, expires, etc.
Headers http.Header
// Close will be called when an object has finished processing.
// Note that if PutObjectsSnowball returns because of an error,
// objects not consumed from the input will NOT have been closed.
@@ -107,7 +120,7 @@ func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts
return nopReadSeekCloser{bytes.NewReader(b.Bytes())}, int64(b.Len()), nil
}
} else {
f, err := ioutil.TempFile("", "s3-putsnowballobjects-*")
f, err := os.CreateTemp("", "s3-putsnowballobjects-*")
if err != nil {
return err
}
@@ -173,6 +186,18 @@ objectLoop:
ModTime: obj.ModTime,
Format: tar.FormatPAX,
}
if header.ModTime.IsZero() {
header.ModTime = time.Now().UTC()
}
header.PAXRecords = make(map[string]string)
if obj.VersionID != "" {
header.PAXRecords["minio.versionId"] = obj.VersionID
}
for k, vals := range obj.Headers {
header.PAXRecords["minio.metadata."+k] = strings.Join(vals, ",")
}
if err := t.WriteHeader(&header); err != nil {
closeObj()
return err
@@ -180,10 +205,16 @@ objectLoop:
n, err := io.Copy(t, obj.Content)
if err != nil {
closeObj()
if opts.SkipErrs {
continue
}
return err
}
if n != obj.Size {
closeObj()
if opts.SkipErrs {
continue
}
return io.ErrUnexpectedEOF
}
closeObj()
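
A sketch of batching many small objects into one snowball upload, assuming an initialized *minio.Client; the keys and contents are placeholders:

```go
package example

import (
	"context"
	"strings"
	"time"

	"github.com/minio/minio-go/v7"
)

// uploadBatch streams two tiny objects through a single snowball archive.
// SkipErrs keeps the batch going when a single object fails to read.
func uploadBatch(ctx context.Context, client *minio.Client) error {
	objCh := make(chan minio.SnowballObject, 2)
	go func() {
		defer close(objCh)
		for name, body := range map[string]string{"a.txt": "hello", "b.txt": "world"} {
			objCh <- minio.SnowballObject{
				Key:     name,
				Size:    int64(len(body)),
				ModTime: time.Now(),
				Content: strings.NewReader(body),
			}
		}
	}()
	return client.PutObjectsSnowball(ctx, "mybucket",
		minio.SnowballOptions{Compress: true, SkipErrs: true}, objCh)
}
```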


@@ -112,10 +112,11 @@ func (c *Client) RemoveBucket(ctx context.Context, bucketName string) error {
// AdvancedRemoveOptions intended for internal use by replication
type AdvancedRemoveOptions struct {
ReplicationDeleteMarker bool
ReplicationStatus ReplicationStatus
ReplicationMTime time.Time
ReplicationRequest bool
ReplicationDeleteMarker bool
ReplicationStatus ReplicationStatus
ReplicationMTime time.Time
ReplicationRequest bool
ReplicationValidityCheck bool // check permissions
}
// RemoveObjectOptions represents options specified by user for RemoveObject call
@@ -168,6 +169,9 @@ func (c *Client) removeObject(ctx context.Context, bucketName, objectName string
if opts.Internal.ReplicationRequest {
headers.Set(minIOBucketReplicationRequest, "true")
}
if opts.Internal.ReplicationValidityCheck {
headers.Set(minIOBucketReplicationCheck, "true")
}
if opts.ForceDelete {
headers.Set(minIOForceDelete, "true")
}
@@ -235,7 +239,7 @@ func generateRemoveMultiObjectsRequest(objects []ObjectInfo) []byte {
// processRemoveMultiObjectsResponse - parse the remove multi objects web service
// and return the success/failure result status for each object
func processRemoveMultiObjectsResponse(body io.Reader, objects []ObjectInfo, resultCh chan<- RemoveObjectResult) {
func processRemoveMultiObjectsResponse(body io.Reader, resultCh chan<- RemoveObjectResult) {
// Parse multi delete XML response
rmResult := &deleteMultiObjectsResult{}
err := xmlDecoder(body, rmResult)
@@ -459,7 +463,7 @@ func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh
}
// Process multiobjects remove xml response
processRemoveMultiObjectsResponse(resp.Body, batch, resultCh)
processRemoveMultiObjectsResponse(resp.Body, resultCh)
closeResponse(resp)
}


@@ -85,6 +85,19 @@ type Version struct {
StorageClass string
VersionID string `xml:"VersionId"`
// x-amz-meta-* headers with the "x-amz-meta-" prefix stripped, containing the first value.
// Only returned by MinIO servers.
UserMetadata StringMap `json:"userMetadata,omitempty"`
// x-amz-tagging values in their k/v values.
// Only returned by MinIO servers.
UserTags URLMap `json:"userTags,omitempty" xml:"UserTags"`
Internal *struct {
K int // Data blocks
M int // Parity blocks
} `xml:"Internal"`
isDeleteMarker bool
}
@@ -110,7 +123,7 @@ type ListVersionsResult struct {
// UnmarshalXML is custom unmarshal code for the response of ListObjectVersions; the custom
// code will unmarshal the <Version> and <DeleteMarker> tags and save them in the Versions field to
// preserve the lexical order of the listing.
func (l *ListVersionsResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
func (l *ListVersionsResult) UnmarshalXML(d *xml.Decoder, _ xml.StartElement) (err error) {
for {
// Read tokens from the XML document in a stream.
t, err := d.Token()
@@ -316,8 +329,6 @@ type completeMultipartUploadResult struct {
// CompletePart sub container lists individual part numbers and their
// md5sum, part of completeMultipartUpload.
type CompletePart struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`
// Part number identifies the part.
PartNumber int
ETag string


@@ -41,8 +41,8 @@ type CSVFileHeaderInfo string
// Constants for file header info.
const (
CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE"
CSVFileHeaderInfoIgnore = "IGNORE"
CSVFileHeaderInfoUse = "USE"
CSVFileHeaderInfoIgnore CSVFileHeaderInfo = "IGNORE"
CSVFileHeaderInfoUse CSVFileHeaderInfo = "USE"
)
// SelectCompressionType - is the parameter for what type of compression is
@@ -52,15 +52,15 @@ type SelectCompressionType string
// Constants for compression types under select API.
const (
SelectCompressionNONE SelectCompressionType = "NONE"
SelectCompressionGZIP = "GZIP"
SelectCompressionBZIP = "BZIP2"
SelectCompressionGZIP SelectCompressionType = "GZIP"
SelectCompressionBZIP SelectCompressionType = "BZIP2"
// Non-standard compression schemes, supported by MinIO hosts:
SelectCompressionZSTD = "ZSTD" // Zstandard compression.
SelectCompressionLZ4 = "LZ4" // LZ4 Stream
SelectCompressionS2 = "S2" // S2 Stream
SelectCompressionSNAPPY = "SNAPPY" // Snappy stream
SelectCompressionZSTD SelectCompressionType = "ZSTD" // Zstandard compression.
SelectCompressionLZ4 SelectCompressionType = "LZ4" // LZ4 Stream
SelectCompressionS2 SelectCompressionType = "S2" // S2 Stream
SelectCompressionSNAPPY SelectCompressionType = "SNAPPY" // Snappy stream
)
// CSVQuoteFields - is the parameter for how CSV fields are quoted.
@@ -69,7 +69,7 @@ type CSVQuoteFields string
// Constants for csv quote styles.
const (
CSVQuoteFieldsAlways CSVQuoteFields = "Always"
CSVQuoteFieldsAsNeeded = "AsNeeded"
CSVQuoteFieldsAsNeeded CSVQuoteFields = "AsNeeded"
)
// QueryExpressionType - is of what syntax the expression is, this should only
@@ -87,7 +87,7 @@ type JSONType string
// Constants for JSONTypes.
const (
JSONDocumentType JSONType = "DOCUMENT"
JSONLinesType = "LINES"
JSONLinesType JSONType = "LINES"
)
// ParquetInputOptions parquet input specific options
@@ -378,8 +378,8 @@ type SelectObjectType string
// Constants for input data types.
const (
SelectObjectTypeCSV SelectObjectType = "CSV"
SelectObjectTypeJSON = "JSON"
SelectObjectTypeParquet = "Parquet"
SelectObjectTypeJSON SelectObjectType = "JSON"
SelectObjectTypeParquet SelectObjectType = "Parquet"
)
// preludeInfo is used for keeping track of necessary information from the
@@ -416,7 +416,7 @@ type messageType string
const (
errorMsg messageType = "error"
commonMsg = "event"
commonMsg messageType = "event"
)
// eventType represents the type of event.
@@ -425,9 +425,9 @@ type eventType string
// list of event-types returned by Select API.
const (
endEvent eventType = "End"
recordsEvent = "Records"
progressEvent = "Progress"
statsEvent = "Stats"
recordsEvent eventType = "Records"
progressEvent eventType = "Progress"
statsEvent eventType = "Stats"
)
// contentType represents content type of event.


@@ -20,7 +20,6 @@ package minio
import (
"context"
"net/http"
"net/url"
"github.com/minio/minio-go/v7/pkg/s3utils"
)
@@ -57,7 +56,8 @@ func (c *Client) BucketExists(ctx context.Context, bucketName string) (bool, err
return true, nil
}
// StatObject verifies if object exists and you have permission to access.
// StatObject verifies that the object exists and that you have permission to access it,
// and returns information about the object.
func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
@@ -70,16 +70,15 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string,
if opts.Internal.ReplicationDeleteMarker {
headers.Set(minIOBucketReplicationDeleteMarker, "true")
}
urlValues := make(url.Values)
if opts.VersionID != "" {
urlValues.Set("versionId", opts.VersionID)
if opts.Internal.IsReplicationReadyForDeleteMarker {
headers.Set(isMinioTgtReplicationReady, "true")
}
// Execute HEAD on objectName.
resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
queryValues: opts.toQueryValues(),
contentSHA256Hex: emptySHA256Hex,
customHeader: headers,
})


@@ -1,6 +1,6 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2018 MinIO, Inc.
* Copyright 2015-2023 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -25,11 +25,11 @@ import (
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/http/cookiejar"
"net/http/httptrace"
"net/http/httputil"
"net/url"
"os"
@@ -70,6 +70,7 @@ type Client struct {
// Needs allocation.
httpClient *http.Client
httpTrace *httptrace.ClientTrace
bucketLocCache *bucketLocationCache
// Advanced functionality.
@@ -104,9 +105,16 @@ type Options struct {
Creds *credentials.Credentials
Secure bool
Transport http.RoundTripper
Trace *httptrace.ClientTrace
Region string
BucketLookup BucketLookupType
// Allows setting a custom region lookup based on URL pattern;
// not all URL patterns are covered by this library, so if you
// have custom endpoints with many regions you can use this
// function to perform region lookups appropriately.
CustomRegionViaURL func(u url.URL) string
// TrailingHeaders indicates server support of trailing headers.
// Only supported for v4 signatures.
TrailingHeaders bool
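
These two options are easiest to see together. A minimal sketch, assuming a hypothetical internal endpoint whose host name encodes the region; the host and static credentials are placeholders:

```go
package example

import (
	"net/url"
	"strings"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

// newRegionAwareClient resolves the region from the host name instead of
// relying on the library's built-in URL patterns.
func newRegionAwareClient() (*minio.Client, error) {
	return minio.New("s3.eu-1.internal.example.com", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS", "SECRET", ""),
		Secure: true,
		CustomRegionViaURL: func(u url.URL) string {
			// hypothetical scheme: s3.<region>.internal.example.com
			if parts := strings.Split(u.Hostname(), "."); len(parts) > 1 {
				return parts[1]
			}
			return "us-east-1"
		},
		TrailingHeaders: true, // only if the server supports trailing headers (v4 signatures only)
	})
}
```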
@@ -119,7 +127,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "v7.0.42"
libraryVersion = "v7.0.66"
)
// User Agent should always follow the below style.
@@ -150,10 +158,6 @@ func New(endpoint string, opts *Options) (*Client, error) {
if err != nil {
return nil, err
}
// Google cloud storage should be set to signature V2, force it if not.
if s3utils.IsGoogleEndpoint(*clnt.endpointURL) {
clnt.overrideSignerType = credentials.SignatureV2
}
// If Amazon S3 set to signature v4.
if s3utils.IsAmazonEndpoint(*clnt.endpointURL) {
clnt.overrideSignerType = credentials.SignatureV4
@@ -224,6 +228,8 @@ func privateNew(endpoint string, opts *Options) (*Client, error) {
}
}
clnt.httpTrace = opts.Trace
// Instantiate http client and bucket location cache.
clnt.httpClient = &http.Client{
Jar: jar,
@@ -235,7 +241,11 @@ func privateNew(endpoint string, opts *Options) (*Client, error) {
// Sets custom region; if region is empty the bucket location cache is used automatically.
if opts.Region == "" {
opts.Region = s3utils.GetRegionFromURL(*clnt.endpointURL)
if opts.CustomRegionViaURL != nil {
opts.Region = opts.CustomRegionViaURL(*clnt.endpointURL)
} else {
opts.Region = s3utils.GetRegionFromURL(*clnt.endpointURL)
}
}
clnt.region = opts.Region
@@ -269,7 +279,7 @@ func privateNew(endpoint string, opts *Options) (*Client, error) {
}
// SetAppInfo - add application details to user agent.
func (c *Client) SetAppInfo(appName string, appVersion string) {
func (c *Client) SetAppInfo(appName, appVersion string) {
// if app name and version not set, we do not set a new user agent.
if appName != "" && appVersion != "" {
c.appInfo.appName = appName
@@ -354,7 +364,8 @@ const (
online = 1
)
// IsOnline returns true if healthcheck enabled and client is online
// IsOnline returns true if healthcheck enabled and client is online.
// If HealthCheck function has not been called this will always return true.
func (c *Client) IsOnline() bool {
return !c.IsOffline()
}
@@ -365,22 +376,37 @@ func (c *Client) markOffline() {
}
// IsOffline returns true if healthcheck enabled and client is offline
// If HealthCheck function has not been called this will always return false.
func (c *Client) IsOffline() bool {
return atomic.LoadInt32(&c.healthStatus) == offline
}
// HealthCheck starts a healthcheck to see if endpoint is up. Returns a context cancellation function
// and and error if health check is already started
// HealthCheck starts a healthcheck to see if endpoint is up.
// Returns a context cancellation function, to stop the health check,
// and an error if health check is already started.
func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, error) {
if atomic.LoadInt32(&c.healthStatus) == online {
if atomic.LoadInt32(&c.healthStatus) != unknown {
return nil, fmt.Errorf("health check is running")
}
if hcDuration < 1*time.Second {
return nil, fmt.Errorf("health check duration should be atleast 1 second")
return nil, fmt.Errorf("health check duration should be at least 1 second")
}
ctx, cancelFn := context.WithCancel(context.Background())
atomic.StoreInt32(&c.healthStatus, online)
probeBucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "probe-health-")
ctx, cancelFn := context.WithCancel(context.Background())
atomic.StoreInt32(&c.healthStatus, offline)
{
// Change to online, if we can connect.
gctx, gcancel := context.WithTimeout(ctx, 3*time.Second)
_, err := c.getBucketLocation(gctx, probeBucketName)
gcancel()
if !IsNetworkOrHostDown(err, false) {
switch ToErrorResponse(err).Code {
case "NoSuchBucket", "AccessDenied", "":
atomic.CompareAndSwapInt32(&c.healthStatus, offline, online)
}
}
}
go func(duration time.Duration) {
timer := time.NewTimer(duration)
defer timer.Stop()
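
Note the reworked start-up above: the client now probes the endpoint once, synchronously, before the background loop starts, so IsOnline is meaningful almost immediately. A usage sketch, assuming an initialized *minio.Client:

```go
package example

import (
	"log"
	"time"

	"github.com/minio/minio-go/v7"
)

// watchEndpoint probes the endpoint every 5 seconds until the returned
// stop function is called.
func watchEndpoint(client *minio.Client) (stop func(), err error) {
	stop, err = client.HealthCheck(5 * time.Second)
	if err != nil {
		return nil, err // a health check is already running
	}
	log.Println("endpoint online:", client.IsOnline())
	return stop, nil
}
```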
@@ -635,7 +661,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
}
// Read the body to be saved later.
errBodyBytes, err := ioutil.ReadAll(res.Body)
errBodyBytes, err := io.ReadAll(res.Body)
// res.Body should be closed
closeResponse(res)
if err != nil {
@@ -644,14 +670,14 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
// Save the body.
errBodySeeker := bytes.NewReader(errBodyBytes)
res.Body = ioutil.NopCloser(errBodySeeker)
res.Body = io.NopCloser(errBodySeeker)
// For errors verify if its retryable otherwise fail quickly.
errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
// Save the body back again.
errBodySeeker.Seek(0, 0) // Seek back to starting point.
res.Body = ioutil.NopCloser(errBodySeeker)
res.Body = io.NopCloser(errBodySeeker)
// Bucket region if set in error response and the error
// code dictates invalid region, we can retry the request
@@ -746,6 +772,10 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request
return nil, err
}
if c.httpTrace != nil {
ctx = httptrace.WithClientTrace(ctx, c.httpTrace)
}
// Initialize a new HTTP request for the method.
req, err = http.NewRequestWithContext(ctx, method, targetURL.String(), nil)
if err != nil {
@@ -814,7 +844,7 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request
if metadata.contentLength == 0 {
req.Body = nil
} else {
req.Body = ioutil.NopCloser(metadata.contentBody)
req.Body = io.NopCloser(metadata.contentBody)
}
// Set incoming content-length.
@@ -846,7 +876,7 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request
// Additionally, we also look if the initialized client is secure,
// if yes then we don't need to perform streaming signature.
req = signer.StreamingSignV4(req, accessKeyID,
secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC())
secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC(), c.sha256Hasher())
default:
// Set sha256 sum for signature calculation only with signature version '4'.
shaHeader := unsignedPayload
@@ -910,7 +940,7 @@ func (c *Client) makeTargetURL(bucketName, objectName, bucketLocation string, is
if h, p, err := net.SplitHostPort(host); err == nil {
if scheme == "http" && p == "80" || scheme == "https" && p == "443" {
host = h
if ip := net.ParseIP(h); ip != nil && ip.To16() != nil {
if ip := net.ParseIP(h); ip != nil && ip.To4() == nil {
host = "[" + h + "]"
}
}


@@ -58,7 +58,7 @@ func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool)
}
// Set - Will persist a value into cache.
func (r *bucketLocationCache) Set(bucketName string, location string) {
func (r *bucketLocationCache) Set(bucketName, location string) {
r.Lock()
defer r.Unlock()
r.items[bucketName] = location
@@ -190,12 +190,11 @@ func (c *Client) getBucketLocationRequest(ctx context.Context, bucketName string
}
}
isVirtualHost := s3utils.IsVirtualHostSupported(targetURL, bucketName)
isVirtualStyle := c.isVirtualHostStyleRequest(targetURL, bucketName)
var urlStr string
// only support Aliyun OSS for virtual hosted path, compatible Amazon & Google Endpoint
if isVirtualHost && s3utils.IsAliyunOSSEndpoint(targetURL) {
if isVirtualStyle {
urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + targetURL.Host + "/?location"
} else {
targetURL.Path = path.Join(bucketName, "") + "/"
@@ -241,9 +240,7 @@ func (c *Client) getBucketLocationRequest(ctx context.Context, bucketName string
}
if signerType.IsV2() {
// Get Bucket Location calls should be always path style
isVirtualHost := false
req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost)
req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualStyle)
return req, nil
}

vendor/github.com/minio/minio-go/v7/checksum.go generated vendored Normal file (210 lines)

@@ -0,0 +1,210 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2023 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"hash"
"hash/crc32"
"io"
"math/bits"
)
// ChecksumType contains information about the checksum type.
type ChecksumType uint32
const (
// ChecksumSHA256 indicates a SHA256 checksum.
ChecksumSHA256 ChecksumType = 1 << iota
// ChecksumSHA1 indicates a SHA-1 checksum.
ChecksumSHA1
// ChecksumCRC32 indicates a CRC32 checksum with IEEE table.
ChecksumCRC32
// ChecksumCRC32C indicates a CRC32 checksum with Castagnoli table.
ChecksumCRC32C
// Keep after all valid checksums
checksumLast
// checksumMask is a mask for valid checksum types.
checksumMask = checksumLast - 1
// ChecksumNone indicates no checksum.
ChecksumNone ChecksumType = 0
amzChecksumAlgo = "x-amz-checksum-algorithm"
amzChecksumCRC32 = "x-amz-checksum-crc32"
amzChecksumCRC32C = "x-amz-checksum-crc32c"
amzChecksumSHA1 = "x-amz-checksum-sha1"
amzChecksumSHA256 = "x-amz-checksum-sha256"
)
// Is returns if c is all of t.
func (c ChecksumType) Is(t ChecksumType) bool {
return c&t == t
}
// Key returns the header key.
// returns empty string if invalid or none.
func (c ChecksumType) Key() string {
switch c & checksumMask {
case ChecksumCRC32:
return amzChecksumCRC32
case ChecksumCRC32C:
return amzChecksumCRC32C
case ChecksumSHA1:
return amzChecksumSHA1
case ChecksumSHA256:
return amzChecksumSHA256
}
return ""
}
// RawByteLen returns the size of the un-encoded checksum.
func (c ChecksumType) RawByteLen() int {
switch c & checksumMask {
case ChecksumCRC32, ChecksumCRC32C:
return 4
case ChecksumSHA1:
return sha1.Size
case ChecksumSHA256:
return sha256.Size
}
return 0
}
// Hasher returns a hasher corresponding to the checksum type.
// Returns nil if no checksum.
func (c ChecksumType) Hasher() hash.Hash {
switch c & checksumMask {
case ChecksumCRC32:
return crc32.NewIEEE()
case ChecksumCRC32C:
return crc32.New(crc32.MakeTable(crc32.Castagnoli))
case ChecksumSHA1:
return sha1.New()
case ChecksumSHA256:
return sha256.New()
}
return nil
}
// IsSet returns whether the type is valid and known.
func (c ChecksumType) IsSet() bool {
return bits.OnesCount32(uint32(c)) == 1
}
// String returns the type as a string.
// CRC32, CRC32C, SHA1, and SHA256 for valid values.
// Empty string for unset and "<invalid>" if not valid.
func (c ChecksumType) String() string {
switch c & checksumMask {
case ChecksumCRC32:
return "CRC32"
case ChecksumCRC32C:
return "CRC32C"
case ChecksumSHA1:
return "SHA1"
case ChecksumSHA256:
return "SHA256"
case ChecksumNone:
return ""
}
return "<invalid>"
}
// ChecksumReader reads all of r and returns a checksum of type c.
// Returns any error that may have occurred while reading.
func (c ChecksumType) ChecksumReader(r io.Reader) (Checksum, error) {
h := c.Hasher()
if h == nil {
return Checksum{}, nil
}
_, err := io.Copy(h, r)
if err != nil {
return Checksum{}, err
}
return NewChecksum(c, h.Sum(nil)), nil
}
// ChecksumBytes returns a checksum of the content b with type c.
func (c ChecksumType) ChecksumBytes(b []byte) Checksum {
h := c.Hasher()
if h == nil {
return Checksum{}
}
n, err := h.Write(b)
if err != nil || n != len(b) {
// Shouldn't happen with these checksummers.
return Checksum{}
}
return NewChecksum(c, h.Sum(nil))
}
// Checksum is a type and encoded value.
type Checksum struct {
Type ChecksumType
r []byte
}
// NewChecksum sets the checksum to the value of b,
// which is the raw hash output.
// If the length of b does not match t.RawByteLen,
// a checksum with ChecksumNone is returned.
func NewChecksum(t ChecksumType, b []byte) Checksum {
if t.IsSet() && len(b) == t.RawByteLen() {
return Checksum{Type: t, r: b}
}
return Checksum{}
}
// NewChecksumString sets the checksum to the value of s,
// which is the base 64 encoded raw hash output.
// If the decoded length of s does not match t.RawByteLen, an empty checksum is returned.
func NewChecksumString(t ChecksumType, s string) Checksum {
b, _ := base64.StdEncoding.DecodeString(s)
if t.IsSet() && len(b) == t.RawByteLen() {
return Checksum{Type: t, r: b}
}
return Checksum{}
}
// IsSet returns whether the checksum is valid and known.
func (c Checksum) IsSet() bool {
return c.Type.IsSet() && len(c.r) == c.Type.RawByteLen()
}
// Encoded returns the encoded value.
// Returns the empty string if not set or not valid.
func (c Checksum) Encoded() string {
if !c.IsSet() {
return ""
}
return base64.StdEncoding.EncodeToString(c.r)
}
// Raw returns the raw checksum value if set.
func (c Checksum) Raw() []byte {
if !c.IsSet() {
return nil
}
return c.r
}
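
A short sketch exercising the new helpers; the input bytes are arbitrary:

```go
package example

import (
	"fmt"
	"strings"

	"github.com/minio/minio-go/v7"
)

// checksums shows the two ways of producing a Checksum value.
func checksums() error {
	// From a byte slice.
	c := minio.ChecksumCRC32C.ChecksumBytes([]byte("hello world"))
	fmt.Println(c.Type.String(), c.Encoded()) // "CRC32C" plus base64 of the 4 raw bytes

	// From a reader.
	s, err := minio.ChecksumSHA256.ChecksumReader(strings.NewReader("hello world"))
	if err != nil {
		return err
	}
	fmt.Println(s.Type.Key(), s.Encoded()) // "x-amz-checksum-sha256" header key
	return nil
}
```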


@@ -94,6 +94,8 @@ const (
minIOBucketReplicationDeleteMarker = "X-Minio-Source-DeleteMarker"
minIOBucketReplicationProxyRequest = "X-Minio-Source-Proxy-Request"
minIOBucketReplicationRequest = "X-Minio-Source-Replication-Request"
minIOBucketReplicationCheck = "X-Minio-Source-Replication-Check"
// Header indicates last tag update time on source
minIOBucketReplicationTaggingTimestamp = "X-Minio-Source-Replication-Tagging-Timestamp"
// Header indicates last retention update time on source
@@ -103,4 +105,6 @@ const (
minIOForceDelete = "x-minio-force-delete"
// Header indicates delete marker replication request can be sent by source now.
minioTgtReplicationReady = "X-Minio-Replication-Ready"
// Header asks if delete marker replication request can be sent by source now.
isMinioTgtReplicationReady = "X-Minio-Check-Replication-Ready"
)


@@ -62,7 +62,7 @@ func (c Core) CopyObject(ctx context.Context, sourceBucket, sourceObject, destBu
// CopyObjectPart - creates a part in a multipart upload by copying (a
// part of) an existing object.
func (c Core) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string,
func (c Core) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
partID int, startOffset, length int64, metadata map[string]string,
) (p CompletePart, err error) {
return c.copyObjectPartDo(ctx, srcBucket, srcObject, destBucket, destObject, uploadID,
@@ -86,34 +86,45 @@ func (c Core) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarke
return c.listMultipartUploadsQuery(ctx, bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads)
}
// PutObjectPartOptions contains options for PutObjectPart API
type PutObjectPartOptions struct {
Md5Base64, Sha256Hex string
SSE encrypt.ServerSide
CustomHeader, Trailer http.Header
}
// PutObjectPart - Upload an object part.
func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) {
func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int,
data io.Reader, size int64, opts PutObjectPartOptions,
) (ObjectPart, error) {
p := uploadPartParams{
bucketName: bucket,
objectName: object,
uploadID: uploadID,
reader: data,
partNumber: partID,
md5Base64: md5Base64,
sha256Hex: sha256Hex,
md5Base64: opts.Md5Base64,
sha256Hex: opts.Sha256Hex,
size: size,
sse: sse,
sse: opts.SSE,
streamSha256: true,
customHeader: opts.CustomHeader,
trailer: opts.Trailer,
}
return c.uploadPart(ctx, p)
}
// ListObjectParts - List uploaded parts of an incomplete upload.
func (c Core) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListObjectPartsResult, err error) {
func (c Core) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListObjectPartsResult, err error) {
return c.listObjectPartsQuery(ctx, bucket, object, uploadID, partNumberMarker, maxParts)
}
// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart, opts PutObjectOptions) (string, error) {
func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart, opts PutObjectOptions) (UploadInfo, error) {
res, err := c.completeMultipartUpload(ctx, bucket, object, uploadID, completeMultipartUpload{
Parts: parts,
}, opts)
return res.ETag, err
return res, err
}
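
A sketch of the low-level flow with the new PutObjectPartOptions and the richer CompleteMultipartUpload return value, assuming an initialized minio.Core (via the library's NewCore constructor) and a single part reader; bucket and object names are placeholders:

```go
package example

import (
	"context"
	"io"

	"github.com/minio/minio-go/v7"
)

// uploadOnePart drives a single-part multipart upload through the Core API.
func uploadOnePart(ctx context.Context, core *minio.Core, part io.Reader, size int64) (minio.UploadInfo, error) {
	uploadID, err := core.NewMultipartUpload(ctx, "mybucket", "big.bin", minio.PutObjectOptions{})
	if err != nil {
		return minio.UploadInfo{}, err
	}
	p, err := core.PutObjectPart(ctx, "mybucket", "big.bin", uploadID, 1, part, size,
		minio.PutObjectPartOptions{}) // Md5Base64/Sha256Hex/SSE/headers are all optional
	if err != nil {
		return minio.UploadInfo{}, err
	}
	// Now returns the full UploadInfo instead of just the ETag.
	return core.CompleteMultipartUpload(ctx, "mybucket", "big.bin", uploadID,
		[]minio.CompletePart{{PartNumber: p.PartNumber, ETag: p.ETag}}, minio.PutObjectOptions{})
}
```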
// AbortMultipartUpload - Abort an incomplete upload.

File diff suppressed because it is too large


@@ -24,7 +24,6 @@ import (
"encoding/xml"
"errors"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
@@ -94,7 +93,8 @@ type STSAssumeRoleOptions struct {
AccessKey string
SecretKey string
Policy string // Optional to assign a policy to the assumed role
SessionToken string // Optional if the first request is made with temporary credentials.
Policy string // Optional to assign a policy to the assumed role
Location string // Optional commonly needed with AWS STS.
DurationSeconds int // Optional defaults to 1 hour.
@@ -102,6 +102,7 @@ type STSAssumeRoleOptions struct {
// Optional only valid if using with AWS STS
RoleARN string
RoleSessionName string
ExternalID string
}
// NewSTSAssumeRole returns a pointer to a new
@@ -139,7 +140,7 @@ func closeResponse(resp *http.Response) {
// Without this, closing the connection would disallow re-using
// the same connection for future uses.
// - http://stackoverflow.com/a/17961593/4465767
io.Copy(ioutil.Discard, resp.Body)
io.Copy(io.Discard, resp.Body)
resp.Body.Close()
}
}
@@ -162,6 +163,9 @@ func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssume
if opts.Policy != "" {
v.Set("Policy", opts.Policy)
}
if opts.ExternalID != "" {
v.Set("ExternalId", opts.ExternalID)
}
u, err := url.Parse(endpoint)
if err != nil {
@@ -182,6 +186,9 @@ func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssume
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(hash.Sum(nil)))
if opts.SessionToken != "" {
req.Header.Set("X-Amz-Security-Token", opts.SessionToken)
}
req = signer.SignV4STS(*req, opts.AccessKey, opts.SecretKey, opts.Location)
resp, err := clnt.Do(req)
@@ -191,7 +198,7 @@ func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssume
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
var errResp ErrorResponse
buf, err := ioutil.ReadAll(resp.Body)
buf, err := io.ReadAll(resp.Body)
if err != nil {
return AssumeRoleResponse{}, err
}
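
A sketch of assuming a role that requires an external ID against AWS STS; the role ARN and external ID are placeholders that would have to match the role's trust policy:

```go
package example

import (
	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

// assumeRoleClient builds a client from temporary AssumeRole credentials.
func assumeRoleClient(accessKey, secretKey string) (*minio.Client, error) {
	creds, err := credentials.NewSTSAssumeRole("https://sts.amazonaws.com", credentials.STSAssumeRoleOptions{
		AccessKey:       accessKey,
		SecretKey:       secretKey,
		Location:        "us-east-1",
		RoleARN:         "arn:aws:iam::123456789012:role/third-party", // placeholder
		RoleSessionName: "minio-go",
		ExternalID:      "partner-42", // placeholder external ID
	})
	if err != nil {
		return nil, err
	}
	return minio.New("s3.amazonaws.com", &minio.Options{Creds: creds, Secure: true})
}
```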


@@ -0,0 +1,7 @@
{
"Version": 1,
"SessionToken": "token",
"AccessKeyId": "accessKey",
"SecretAccessKey": "secret",
"Expiration": "9999-04-27T16:02:25.000Z"
}


@@ -10,3 +10,6 @@ aws_secret_access_key = secret
[with_colon]
aws_access_key_id: accessKey
aws_secret_access_key: secret
[with_process]
credential_process = /bin/cat credentials.json
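
With the [with_process] profile above, the provider shells out to the configured command and parses its JSON output (the credentials.json fixture shown earlier). A usage sketch; the credentials file path is a placeholder:

```go
package example

import (
	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

// processBackedClient reads the "with_process" profile, which delegates
// credential retrieval to an external credential_process command.
func processBackedClient() (*minio.Client, error) {
	creds := credentials.NewFileAWSCredentials("/home/me/.aws/credentials", "with_process")
	return minio.New("s3.amazonaws.com", &minio.Options{Creds: creds, Secure: true})
}
```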


@@ -22,7 +22,6 @@ import (
"encoding/xml"
"fmt"
"io"
"io/ioutil"
)
// ErrorResponse - Is the typed error returned.
@@ -88,7 +87,7 @@ func xmlDecoder(body io.Reader, v interface{}) error {
func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
// read the whole body (up to 1MB)
const maxBodyLength = 1 << 20
body, err := ioutil.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
if err != nil {
return nil, err
}


@@ -18,17 +18,33 @@
package credentials
import (
"encoding/json"
"errors"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
ini "gopkg.in/ini.v1"
)
// An externalProcessCredentials stores the output of a credential_process
type externalProcessCredentials struct {
Version int
SessionToken string
AccessKeyID string `json:"AccessKeyId"`
SecretAccessKey string
Expiration time.Time
}
// A FileAWSCredentials retrieves credentials from the current user's home
// directory, and keeps track if those credentials are expired.
//
// Profile ini file example: $HOME/.aws/credentials
type FileAWSCredentials struct {
Expiry
// Path to the shared credentials file.
//
// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
@@ -48,7 +64,7 @@ type FileAWSCredentials struct {
// NewFileAWSCredentials returns a pointer to a new Credentials object
// wrapping the Profile file provider.
func NewFileAWSCredentials(filename string, profile string) *Credentials {
func NewFileAWSCredentials(filename, profile string) *Credentials {
return New(&FileAWSCredentials{
Filename: filename,
Profile: profile,
@@ -89,6 +105,33 @@ func (p *FileAWSCredentials) Retrieve() (Value, error) {
// Default to empty string if not found.
token := iniProfile.Key("aws_session_token")
// If credential_process is defined, obtain credentials by executing
// the external process
credentialProcess := strings.TrimSpace(iniProfile.Key("credential_process").String())
if credentialProcess != "" {
args := strings.Fields(credentialProcess)
if len(args) <= 1 {
return Value{}, errors.New("invalid credential process args")
}
cmd := exec.Command(args[0], args[1:]...)
out, err := cmd.Output()
if err != nil {
return Value{}, err
}
var externalProcessCredentials externalProcessCredentials
err = json.Unmarshal([]byte(out), &externalProcessCredentials)
if err != nil {
return Value{}, err
}
p.retrieved = true
p.SetExpiration(externalProcessCredentials.Expiration, DefaultExpiryWindow)
return Value{
AccessKeyID: externalProcessCredentials.AccessKeyID,
SecretAccessKey: externalProcessCredentials.SecretAccessKey,
SessionToken: externalProcessCredentials.SessionToken,
SignerType: SignatureV4,
}, nil
}
p.retrieved = true
return Value{
AccessKeyID: id.String(),
@@ -98,11 +141,6 @@ func (p *FileAWSCredentials) Retrieve() (Value, error) {
}, nil
}
// IsExpired returns if the shared credentials have expired.
func (p *FileAWSCredentials) IsExpired() bool {
return !p.retrieved
}
// loadProfiles loads from the file pointed to by shared credentials filename for profile.
// The credentials retrieved from the profile will be returned, or an error. An error will be
// returned if it fails to read from the file, or if the data is invalid.


@@ -18,7 +18,6 @@
package credentials
import (
"io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -50,7 +49,7 @@ type FileMinioClient struct {
// NewFileMinioClient returns a pointer to a new Credentials object
// wrapping the Alias file provider.
func NewFileMinioClient(filename string, alias string) *Credentials {
func NewFileMinioClient(filename, alias string) *Credentials {
return New(&FileMinioClient{
Filename: filename,
Alias: alias,
@@ -114,6 +113,7 @@ type hostConfig struct {
type config struct {
Version string `json:"version"`
Hosts map[string]hostConfig `json:"hosts"`
Aliases map[string]hostConfig `json:"aliases"`
}
// loadAlias loads from the file pointed to by the shared credentials filename for the alias.
@@ -123,12 +123,17 @@ func loadAlias(filename, alias string) (hostConfig, error) {
cfg := &config{}
json := jsoniter.ConfigCompatibleWithStandardLibrary
configBytes, err := ioutil.ReadFile(filename)
configBytes, err := os.ReadFile(filename)
if err != nil {
return hostConfig{}, err
}
if err = json.Unmarshal(configBytes, cfg); err != nil {
return hostConfig{}, err
}
if cfg.Version == "10" {
return cfg.Aliases[alias], nil
}
return cfg.Hosts[alias], nil
}

View File

@@ -22,7 +22,7 @@ import (
"context"
"errors"
"fmt"
"io/ioutil"
"io"
"net"
"net/http"
"net/url"
@@ -54,19 +54,36 @@ type IAM struct {
// Custom endpoint to fetch IAM role credentials.
Endpoint string
// Region configurable custom region for STS
Region string
// Support for container authorization token https://docs.aws.amazon.com/sdkref/latest/guide/feature-container-credentials.html
Container struct {
AuthorizationToken string
CredentialsFullURI string
CredentialsRelativeURI string
}
// EKS based k8s RBAC authorization - https://docs.aws.amazon.com/eks/latest/userguide/pod-configuration.html
EKSIdentity struct {
TokenFile string
RoleARN string
RoleSessionName string
}
}
// IAM Roles for Amazon EC2
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
const (
defaultIAMRoleEndpoint = "http://169.254.169.254"
defaultECSRoleEndpoint = "http://169.254.170.2"
defaultSTSRoleEndpoint = "https://sts.amazonaws.com"
defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/"
tokenRequestTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds"
tokenPath = "/latest/api/token"
tokenTTL = "21600"
tokenRequestHeader = "X-aws-ec2-metadata-token"
DefaultIAMRoleEndpoint = "http://169.254.169.254"
DefaultECSRoleEndpoint = "http://169.254.170.2"
DefaultSTSRoleEndpoint = "https://sts.amazonaws.com"
DefaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/"
TokenRequestTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds"
TokenPath = "/latest/api/token"
TokenTTL = "21600"
TokenRequestHeader = "X-aws-ec2-metadata-token"
)
// NewIAM returns a pointer to a new Credentials object wrapping the IAM.
@@ -84,21 +101,55 @@ func NewIAM(endpoint string) *Credentials {
// the desired
func (m *IAM) Retrieve() (Value, error) {
token := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN")
if token == "" {
token = m.Container.AuthorizationToken
}
relativeURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")
if relativeURI == "" {
relativeURI = m.Container.CredentialsRelativeURI
}
fullURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")
if fullURI == "" {
fullURI = m.Container.CredentialsFullURI
}
identityFile := os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")
if identityFile == "" {
identityFile = m.EKSIdentity.TokenFile
}
roleArn := os.Getenv("AWS_ROLE_ARN")
if roleArn == "" {
roleArn = m.EKSIdentity.RoleARN
}
roleSessionName := os.Getenv("AWS_ROLE_SESSION_NAME")
if roleSessionName == "" {
roleSessionName = m.EKSIdentity.RoleSessionName
}
region := os.Getenv("AWS_REGION")
if region == "" {
region = m.Region
}
var roleCreds ec2RoleCredRespBody
var err error
endpoint := m.Endpoint
switch {
case len(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")) > 0:
case identityFile != "":
if len(endpoint) == 0 {
if len(os.Getenv("AWS_REGION")) > 0 {
if strings.HasPrefix(os.Getenv("AWS_REGION"), "cn-") {
endpoint = "https://sts." + os.Getenv("AWS_REGION") + ".amazonaws.com.cn"
if region != "" {
if strings.HasPrefix(region, "cn-") {
endpoint = "https://sts." + region + ".amazonaws.com.cn"
} else {
endpoint = "https://sts." + os.Getenv("AWS_REGION") + ".amazonaws.com"
endpoint = "https://sts." + region + ".amazonaws.com"
}
} else {
endpoint = defaultSTSRoleEndpoint
endpoint = DefaultSTSRoleEndpoint
}
}
@@ -106,15 +157,15 @@ func (m *IAM) Retrieve() (Value, error) {
Client: m.Client,
STSEndpoint: endpoint,
GetWebIDTokenExpiry: func() (*WebIdentityToken, error) {
token, err := ioutil.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE"))
token, err := os.ReadFile(identityFile)
if err != nil {
return nil, err
}
return &WebIdentityToken{Token: string(token)}, nil
},
RoleARN: os.Getenv("AWS_ROLE_ARN"),
roleSessionName: os.Getenv("AWS_ROLE_SESSION_NAME"),
RoleARN: roleArn,
roleSessionName: roleSessionName,
}
stsWebIdentityCreds, err := creds.Retrieve()
@@ -123,17 +174,16 @@ func (m *IAM) Retrieve() (Value, error) {
}
return stsWebIdentityCreds, err
case len(os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")) > 0:
case relativeURI != "":
if len(endpoint) == 0 {
endpoint = fmt.Sprintf("%s%s", defaultECSRoleEndpoint,
os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"))
endpoint = fmt.Sprintf("%s%s", DefaultECSRoleEndpoint, relativeURI)
}
roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
case len(os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")) > 0:
case fullURI != "":
if len(endpoint) == 0 {
endpoint = os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")
endpoint = fullURI
var ok bool
if ok, err = isLoopback(endpoint); !ok {
if err == nil {
@@ -189,7 +239,7 @@ func getIAMRoleURL(endpoint string) (*url.URL, error) {
if err != nil {
return nil, err
}
u.Path = defaultIAMSecurityCredsPath
u.Path = DefaultIAMSecurityCredsPath
return u, nil
}
@@ -203,7 +253,7 @@ func listRoleNames(client *http.Client, u *url.URL, token string) ([]string, err
return nil, err
}
if token != "" {
req.Header.Add(tokenRequestHeader, token)
req.Header.Add(TokenRequestHeader, token)
}
resp, err := client.Do(req)
if err != nil {
@@ -227,7 +277,7 @@ func listRoleNames(client *http.Client, u *url.URL, token string) ([]string, err
return credsList, nil
}
func getEcsTaskCredentials(client *http.Client, endpoint string, token string) (ec2RoleCredRespBody, error) {
func getEcsTaskCredentials(client *http.Client, endpoint, token string) (ec2RoleCredRespBody, error) {
req, err := http.NewRequest(http.MethodGet, endpoint, nil)
if err != nil {
return ec2RoleCredRespBody{}, err
@@ -258,17 +308,17 @@ func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
req, err := http.NewRequestWithContext(ctx, http.MethodPut, endpoint+tokenPath, nil)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, endpoint+TokenPath, nil)
if err != nil {
return "", err
}
req.Header.Add(tokenRequestTTLHeader, tokenTTL)
req.Header.Add(TokenRequestTTLHeader, TokenTTL)
resp, err := client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
data, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
@@ -285,13 +335,19 @@ func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
// reading the response an error will be returned.
func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) {
if endpoint == "" {
endpoint = defaultIAMRoleEndpoint
endpoint = DefaultIAMRoleEndpoint
}
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
token, err := fetchIMDSToken(client, endpoint)
if err != nil {
return ec2RoleCredRespBody{}, err
// Return an error only for genuine failures: if IMDSv2 is not enabled
// we will not be able to get the token, and in that situation we have
// to fall back to IMDSv1 behavior; this check ensures that.
// Refer https://github.com/minio/minio-go/issues/1866
if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) {
return ec2RoleCredRespBody{}, err
}
}
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
@@ -326,7 +382,7 @@ func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody,
return ec2RoleCredRespBody{}, err
}
if token != "" {
req.Header.Add(tokenRequestHeader, token)
req.Header.Add(TokenRequestHeader, token)
}
resp, err := client.Do(req)
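
The Region, Container, and EKSIdentity fields are the new config options referenced in the commit message: every environment variable consulted in Retrieve now has a programmatic fallback. A sketch of wiring the EKS web-identity path by hand, where the token file, role ARN, and session name are placeholder values normally injected via AWS_WEB_IDENTITY_TOKEN_FILE, AWS_ROLE_ARN, and AWS_ROLE_SESSION_NAME:

package example

import (
	"net/http"
	"time"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func eksCredentials() *credentials.Credentials {
	iam := &credentials.IAM{
		Client: &http.Client{Timeout: 10 * time.Second},
		Region: "eu-central-1", // used to build the regional STS endpoint
	}
	// Placeholder values; normally these come from AWS_WEB_IDENTITY_TOKEN_FILE,
	// AWS_ROLE_ARN and AWS_ROLE_SESSION_NAME.
	iam.EKSIdentity.TokenFile = "/var/run/secrets/eks.amazonaws.com/serviceaccount/token"
	iam.EKSIdentity.RoleARN = "arn:aws:iam::123456789012:role/demo"
	iam.EKSIdentity.RoleSessionName = "minio-go"
	return credentials.New(iam)
}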

View File

@@ -22,7 +22,7 @@ import (
"encoding/xml"
"errors"
"fmt"
"io/ioutil"
"io"
"net/http"
"net/url"
"strings"
@@ -138,7 +138,7 @@ func getClientGrantsCredentials(clnt *http.Client, endpoint string,
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
var errResp ErrorResponse
buf, err := ioutil.ReadAll(resp.Body)
buf, err := io.ReadAll(resp.Body)
if err != nil {
return AssumeRoleWithClientGrantsResponse{}, err
}

View File

@@ -21,7 +21,7 @@ import (
"bytes"
"encoding/xml"
"fmt"
"io/ioutil"
"io"
"net/http"
"net/url"
"strings"
@@ -156,7 +156,7 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
var errResp ErrorResponse
buf, err := ioutil.ReadAll(resp.Body)
buf, err := io.ReadAll(resp.Body)
if err != nil {
return value, err
}

View File

@@ -21,7 +21,6 @@ import (
"encoding/xml"
"errors"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
@@ -141,6 +140,9 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) {
if err != nil {
return Value{}, err
}
if req.Form == nil {
req.Form = url.Values{}
}
req.Form.Add("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10))
resp, err := i.Client.Do(req)
@@ -152,7 +154,7 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) {
}
if resp.StatusCode != http.StatusOK {
var errResp ErrorResponse
buf, err := ioutil.ReadAll(resp.Body)
buf, err := io.ReadAll(resp.Body)
if err != nil {
return Value{}, err
}

View File

@@ -22,7 +22,7 @@ import (
"encoding/xml"
"errors"
"fmt"
"io/ioutil"
"io"
"net/http"
"net/url"
"strconv"
@@ -155,7 +155,7 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
var errResp ErrorResponse
buf, err := ioutil.ReadAll(resp.Body)
buf, err := io.ReadAll(resp.Body)
if err != nil {
return AssumeRoleWithWebIdentityResponse{}, err
}

View File

@@ -28,27 +28,27 @@ import (
)
const (
// sseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS.
sseGenericHeader = "X-Amz-Server-Side-Encryption"
// SseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS.
SseGenericHeader = "X-Amz-Server-Side-Encryption"
// sseKmsKeyID is the AWS SSE-KMS key id.
sseKmsKeyID = sseGenericHeader + "-Aws-Kms-Key-Id"
// sseEncryptionContext is the AWS SSE-KMS Encryption Context data.
sseEncryptionContext = sseGenericHeader + "-Context"
// SseKmsKeyID is the AWS SSE-KMS key id.
SseKmsKeyID = SseGenericHeader + "-Aws-Kms-Key-Id"
// SseEncryptionContext is the AWS SSE-KMS Encryption Context data.
SseEncryptionContext = SseGenericHeader + "-Context"
// sseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key.
sseCustomerAlgorithm = sseGenericHeader + "-Customer-Algorithm"
// sseCustomerKey is the AWS SSE-C encryption key HTTP header key.
sseCustomerKey = sseGenericHeader + "-Customer-Key"
// sseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key.
sseCustomerKeyMD5 = sseGenericHeader + "-Customer-Key-MD5"
// SseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key.
SseCustomerAlgorithm = SseGenericHeader + "-Customer-Algorithm"
// SseCustomerKey is the AWS SSE-C encryption key HTTP header key.
SseCustomerKey = SseGenericHeader + "-Customer-Key"
// SseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key.
SseCustomerKeyMD5 = SseGenericHeader + "-Customer-Key-MD5"
// sseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API.
sseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm"
// sseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API.
sseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key"
// sseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API.
sseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5"
// SseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API.
SseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm"
// SseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API.
SseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key"
// SseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API.
SseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5"
)
// PBKDF creates a SSE-C key from the provided password and salt.
@@ -157,9 +157,9 @@ func (s ssec) Type() Type { return SSEC }
func (s ssec) Marshal(h http.Header) {
keyMD5 := md5.Sum(s[:])
h.Set(sseCustomerAlgorithm, "AES256")
h.Set(sseCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
h.Set(sseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
h.Set(SseCustomerAlgorithm, "AES256")
h.Set(SseCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
h.Set(SseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
}
type ssecCopy [32]byte
@@ -168,16 +168,16 @@ func (s ssecCopy) Type() Type { return SSEC }
func (s ssecCopy) Marshal(h http.Header) {
keyMD5 := md5.Sum(s[:])
h.Set(sseCopyCustomerAlgorithm, "AES256")
h.Set(sseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
h.Set(sseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
h.Set(SseCopyCustomerAlgorithm, "AES256")
h.Set(SseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
h.Set(SseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
}
type s3 struct{}
func (s s3) Type() Type { return S3 }
func (s s3) Marshal(h http.Header) { h.Set(sseGenericHeader, "AES256") }
func (s s3) Marshal(h http.Header) { h.Set(SseGenericHeader, "AES256") }
type kms struct {
key string
@@ -188,11 +188,11 @@ type kms struct {
func (s kms) Type() Type { return KMS }
func (s kms) Marshal(h http.Header) {
h.Set(sseGenericHeader, "aws:kms")
h.Set(SseGenericHeader, "aws:kms")
if s.key != "" {
h.Set(sseKmsKeyID, s.key)
h.Set(SseKmsKeyID, s.key)
}
if s.hasContext {
h.Set(sseEncryptionContext, base64.StdEncoding.EncodeToString(s.context))
h.Set(SseEncryptionContext, base64.StdEncoding.EncodeToString(s.context))
}
}
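
Because the SSE header names are exported now, callers can inspect what a ServerSide implementation writes into a request. A small sketch using the package's SSE-S3 constructor:

package main

import (
	"fmt"
	"net/http"

	"github.com/minio/minio-go/v7/pkg/encrypt"
)

func main() {
	h := http.Header{}
	encrypt.NewSSE().Marshal(h) // SSE-S3
	// Prints: X-Amz-Server-Side-Encryption AES256
	fmt.Println(encrypt.SseGenericHeader, h.Get(encrypt.SseGenericHeader))
}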

View File

@@ -54,8 +54,8 @@ func (n AbortIncompleteMultipartUpload) MarshalXML(e *xml.Encoder, start xml.Sta
// specific period in the object's lifetime.
type NoncurrentVersionExpiration struct {
XMLName xml.Name `xml:"NoncurrentVersionExpiration" json:"-"`
NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty"`
NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty"`
NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty" json:"NoncurrentDays,omitempty"`
NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"`
}
// MarshalXML if n is non-empty, i.e has a non-zero NoncurrentDays or NewerNoncurrentVersions.
@@ -211,35 +211,43 @@ func (t Transition) MarshalXML(en *xml.Encoder, startElement xml.StartElement) e
// And - the And rule for LifecycleTag, to be used in LifecycleRuleFilter
type And struct {
XMLName xml.Name `xml:"And" json:"-"`
Prefix string `xml:"Prefix" json:"Prefix,omitempty"`
Tags []Tag `xml:"Tag" json:"Tags,omitempty"`
XMLName xml.Name `xml:"And" json:"-"`
Prefix string `xml:"Prefix" json:"Prefix,omitempty"`
Tags []Tag `xml:"Tag" json:"Tags,omitempty"`
ObjectSizeLessThan int64 `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"`
ObjectSizeGreaterThan int64 `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"`
}
// IsEmpty returns true if the And rule has no tags, prefix, or object size limits
func (a And) IsEmpty() bool {
return len(a.Tags) == 0 && a.Prefix == ""
return len(a.Tags) == 0 && a.Prefix == "" &&
a.ObjectSizeLessThan == 0 && a.ObjectSizeGreaterThan == 0
}
// Filter will be used in selecting rule(s) for lifecycle configuration
type Filter struct {
XMLName xml.Name `xml:"Filter" json:"-"`
And And `xml:"And,omitempty" json:"And,omitempty"`
Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"`
XMLName xml.Name `xml:"Filter" json:"-"`
And And `xml:"And,omitempty" json:"And,omitempty"`
Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"`
ObjectSizeLessThan int64 `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"`
ObjectSizeGreaterThan int64 `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"`
}
// IsNull returns true if all Filter fields are empty.
func (f Filter) IsNull() bool {
return f.Tag.IsEmpty() && f.And.IsEmpty() && f.Prefix == ""
return f.Tag.IsEmpty() && f.And.IsEmpty() && f.Prefix == "" &&
f.ObjectSizeLessThan == 0 && f.ObjectSizeGreaterThan == 0
}
// MarshalJSON customizes json encoding by removing empty values.
func (f Filter) MarshalJSON() ([]byte, error) {
type filter struct {
And *And `json:"And,omitempty"`
Prefix string `json:"Prefix,omitempty"`
Tag *Tag `json:"Tag,omitempty"`
And *And `json:"And,omitempty"`
Prefix string `json:"Prefix,omitempty"`
Tag *Tag `json:"Tag,omitempty"`
ObjectSizeLessThan int64 `json:"ObjectSizeLessThan,omitempty"`
ObjectSizeGreaterThan int64 `json:"ObjectSizeGreaterThan,omitempty"`
}
newf := filter{
@@ -251,6 +259,8 @@ func (f Filter) MarshalJSON() ([]byte, error) {
if !f.And.IsEmpty() {
newf.And = &f.And
}
newf.ObjectSizeLessThan = f.ObjectSizeLessThan
newf.ObjectSizeGreaterThan = f.ObjectSizeGreaterThan
return json.Marshal(newf)
}
@@ -271,7 +281,19 @@ func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
return err
}
default:
// Always print Prefix field when both And & Tag are empty
if f.ObjectSizeLessThan > 0 {
if err := e.EncodeElement(f.ObjectSizeLessThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeLessThan"}}); err != nil {
return err
}
break
}
if f.ObjectSizeGreaterThan > 0 {
if err := e.EncodeElement(f.ObjectSizeGreaterThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeGreaterThan"}}); err != nil {
return err
}
break
}
// Print empty Prefix field only when everything else is empty
if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil {
return err
}
@@ -308,19 +330,27 @@ func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartEle
}
// ExpireDeleteMarker represents value of ExpiredObjectDeleteMarker field in Expiration XML element.
type ExpireDeleteMarker bool
type ExpireDeleteMarker ExpirationBoolean
// IsEnabled returns true if the auto delete-marker expiration is enabled
func (e ExpireDeleteMarker) IsEnabled() bool {
return bool(e)
}
// ExpirationBoolean represents an XML version of 'bool' type
type ExpirationBoolean bool
// MarshalXML encodes delete marker boolean into an XML form.
func (b ExpireDeleteMarker) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
func (b ExpirationBoolean) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
if !b {
return nil
}
type expireDeleteMarkerWrapper ExpireDeleteMarker
return e.EncodeElement(expireDeleteMarkerWrapper(b), startElement)
type booleanWrapper ExpirationBoolean
return e.EncodeElement(booleanWrapper(b), startElement)
}
// IsEnabled returns true if the auto delete-marker expiration is enabled
func (b ExpireDeleteMarker) IsEnabled() bool {
// IsEnabled returns true if the expiration boolean is enabled
func (b ExpirationBoolean) IsEnabled() bool {
return bool(b)
}
@@ -330,6 +360,7 @@ type Expiration struct {
Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"`
Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"`
DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty" json:"ExpiredObjectDeleteMarker,omitempty"`
DeleteAll ExpirationBoolean `xml:"ExpiredObjectAllVersions,omitempty" json:"ExpiredObjectAllVersions,omitempty"`
}
// MarshalJSON customizes json encoding by removing empty day/date specification.
@@ -338,10 +369,12 @@ func (e Expiration) MarshalJSON() ([]byte, error) {
Date *ExpirationDate `json:"Date,omitempty"`
Days *ExpirationDays `json:"Days,omitempty"`
DeleteMarker ExpireDeleteMarker `json:"ExpiredObjectDeleteMarker,omitempty"`
DeleteAll ExpirationBoolean `json:"ExpiredObjectAllVersions,omitempty"`
}
newexp := expiration{
DeleteMarker: e.DeleteMarker,
DeleteAll: e.DeleteAll,
}
if !e.IsDaysNull() {
newexp.Days = &e.Days
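
The new ObjectSizeLessThan/ObjectSizeGreaterThan filter fields and the ExpiredObjectAllVersions boolean compose into size-aware rules. A sketch, assuming an initialized *minio.Client and a hypothetical bucket name:

package example

import (
	"context"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/lifecycle"
)

func setSizeBasedExpiry(ctx context.Context, client *minio.Client) error {
	cfg := lifecycle.NewConfiguration()
	cfg.Rules = []lifecycle.Rule{{
		ID:     "expire-large-objects",
		Status: "Enabled",
		// Only objects larger than 128 MiB match this rule.
		Filter: lifecycle.Filter{ObjectSizeGreaterThan: 128 << 20},
		Expiration: lifecycle.Expiration{
			Days:      lifecycle.ExpirationDays(30),
			DeleteAll: lifecycle.ExpirationBoolean(true), // ExpiredObjectAllVersions
		},
	}}
	return client.SetBucketLifecycle(ctx, "my-bucket", cfg)
}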

View File

@@ -21,6 +21,7 @@ import (
"encoding/xml"
"errors"
"fmt"
"strings"
"github.com/minio/minio-go/v7/pkg/set"
)
@@ -32,20 +33,40 @@ type EventType string
//
// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
const (
ObjectCreatedAll EventType = "s3:ObjectCreated:*"
ObjectCreatedPut = "s3:ObjectCreated:Put"
ObjectCreatedPost = "s3:ObjectCreated:Post"
ObjectCreatedCopy = "s3:ObjectCreated:Copy"
ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload"
ObjectAccessedGet = "s3:ObjectAccessed:Get"
ObjectAccessedHead = "s3:ObjectAccessed:Head"
ObjectAccessedAll = "s3:ObjectAccessed:*"
ObjectRemovedAll = "s3:ObjectRemoved:*"
ObjectRemovedDelete = "s3:ObjectRemoved:Delete"
ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
ObjectReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
BucketCreatedAll = "s3:BucketCreated:*"
BucketRemovedAll = "s3:BucketRemoved:*"
ObjectCreatedAll EventType = "s3:ObjectCreated:*"
ObjectCreatedPut EventType = "s3:ObjectCreated:Put"
ObjectCreatedPost EventType = "s3:ObjectCreated:Post"
ObjectCreatedCopy EventType = "s3:ObjectCreated:Copy"
ObjectCreatedDeleteTagging EventType = "s3:ObjectCreated:DeleteTagging"
ObjectCreatedCompleteMultipartUpload EventType = "s3:ObjectCreated:CompleteMultipartUpload"
ObjectCreatedPutLegalHold EventType = "s3:ObjectCreated:PutLegalHold"
ObjectCreatedPutRetention EventType = "s3:ObjectCreated:PutRetention"
ObjectCreatedPutTagging EventType = "s3:ObjectCreated:PutTagging"
ObjectAccessedGet EventType = "s3:ObjectAccessed:Get"
ObjectAccessedHead EventType = "s3:ObjectAccessed:Head"
ObjectAccessedGetRetention EventType = "s3:ObjectAccessed:GetRetention"
ObjectAccessedGetLegalHold EventType = "s3:ObjectAccessed:GetLegalHold"
ObjectAccessedAll EventType = "s3:ObjectAccessed:*"
ObjectRemovedAll EventType = "s3:ObjectRemoved:*"
ObjectRemovedDelete EventType = "s3:ObjectRemoved:Delete"
ObjectRemovedDeleteMarkerCreated EventType = "s3:ObjectRemoved:DeleteMarkerCreated"
ObjectReducedRedundancyLostObject EventType = "s3:ReducedRedundancyLostObject"
ObjectTransitionAll EventType = "s3:ObjectTransition:*"
ObjectTransitionFailed EventType = "s3:ObjectTransition:Failed"
ObjectTransitionComplete EventType = "s3:ObjectTransition:Complete"
ObjectTransitionPost EventType = "s3:ObjectRestore:Post"
ObjectTransitionCompleted EventType = "s3:ObjectRestore:Completed"
ObjectReplicationAll EventType = "s3:Replication:*"
ObjectReplicationOperationCompletedReplication EventType = "s3:Replication:OperationCompletedReplication"
ObjectReplicationOperationFailedReplication EventType = "s3:Replication:OperationFailedReplication"
ObjectReplicationOperationMissedThreshold EventType = "s3:Replication:OperationMissedThreshold"
ObjectReplicationOperationNotTracked EventType = "s3:Replication:OperationNotTracked"
ObjectReplicationOperationReplicatedAfterThreshold EventType = "s3:Replication:OperationReplicatedAfterThreshold"
ObjectScannerManyVersions EventType = "s3:Scanner:ManyVersions"
ObjectScannerBigPrefix EventType = "s3:Scanner:BigPrefix"
ObjectScannerAll EventType = "s3:Scanner:*"
BucketCreatedAll EventType = "s3:BucketCreated:*"
BucketRemovedAll EventType = "s3:BucketRemoved:*"
)
// FilterRule - child of S3Key, a tag in the notification xml which
@@ -88,6 +109,27 @@ func NewArn(partition, service, region, accountID, resource string) Arn {
}
}
var (
// ErrInvalidArnPrefix is returned when ARN string format does not start with 'arn'
ErrInvalidArnPrefix = errors.New("invalid ARN format, must start with 'arn:'")
// ErrInvalidArnFormat is returned when ARN string format is not valid
ErrInvalidArnFormat = errors.New("invalid ARN format, must be 'arn:<partition>:<service>:<region>:<accountID>:<resource>'")
)
// NewArnFromString parses the string representation of an ARN into an Arn object.
// It returns an error if the string format is incorrect.
func NewArnFromString(arn string) (Arn, error) {
parts := strings.Split(arn, ":")
if len(parts) != 6 {
return Arn{}, ErrInvalidArnFormat
}
if parts[0] != "arn" {
return Arn{}, ErrInvalidArnPrefix
}
return NewArn(parts[1], parts[2], parts[3], parts[4], parts[5]), nil
}
// String returns the string format of the ARN
func (arn Arn) String() string {
return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource

View File

@@ -20,6 +20,7 @@ import (
"bytes"
"encoding/xml"
"fmt"
"math"
"strconv"
"strings"
"time"
@@ -688,35 +689,83 @@ func (e ExistingObjectReplication) Validate() error {
// TargetMetrics represents inline replication metrics
// such as pending, failed and completed bytes in total for a bucket remote target
type TargetMetrics struct {
// Pending size in bytes
PendingSize uint64 `json:"pendingReplicationSize"`
// Completed count
ReplicatedCount uint64 `json:"replicationCount,omitempty"`
// Completed size in bytes
ReplicatedSize uint64 `json:"completedReplicationSize"`
ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"`
// Bandwidth limit in bytes/sec for this target
BandWidthLimitInBytesPerSecond int64 `json:"limitInBits,omitempty"`
// Current bandwidth used in bytes/sec for this target
CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth,omitempty"`
// errors seen in replication in last minute, hour and total
Failed TimedErrStats `json:"failed,omitempty"`
// Deprecated fields
// Pending size in bytes
PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
// Total Replica size in bytes
ReplicaSize uint64 `json:"replicaSize"`
ReplicaSize uint64 `json:"replicaSize,omitempty"`
// Failed size in bytes
FailedSize uint64 `json:"failedReplicationSize"`
FailedSize uint64 `json:"failedReplicationSize,omitempty"`
// Total number of pending operations including metadata updates
PendingCount uint64 `json:"pendingReplicationCount"`
PendingCount uint64 `json:"pendingReplicationCount,omitempty"`
// Total number of failed operations including metadata updates
FailedCount uint64 `json:"failedReplicationCount"`
FailedCount uint64 `json:"failedReplicationCount,omitempty"`
}
// Metrics represents inline replication metrics for a bucket.
type Metrics struct {
Stats map[string]TargetMetrics
// Total Pending size in bytes across targets
PendingSize uint64 `json:"pendingReplicationSize"`
// Completed size in bytes across targets
ReplicatedSize uint64 `json:"completedReplicationSize"`
ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"`
// Total Replica size in bytes across targets
ReplicaSize uint64 `json:"replicaSize"`
ReplicaSize uint64 `json:"replicaSize,omitempty"`
// Total Replica counts
ReplicaCount int64 `json:"replicaCount,omitempty"`
// Total Replicated count
ReplicatedCount int64 `json:"replicationCount,omitempty"`
// errors seen in replication in last minute, hour and total
Errors TimedErrStats `json:"failed,omitempty"`
// Total number of entries that are queued for replication
QStats InQueueMetric `json:"queued"`
// Deprecated fields
// Total Pending size in bytes across targets
PendingSize uint64 `json:"pendingReplicationSize,omitempty"`
// Failed size in bytes across targets
FailedSize uint64 `json:"failedReplicationSize"`
FailedSize uint64 `json:"failedReplicationSize,omitempty"`
// Total number of pending operations including metadata updates across targets
PendingCount uint64 `json:"pendingReplicationCount"`
PendingCount uint64 `json:"pendingReplicationCount,omitempty"`
// Total number of failed operations including metadata updates across targets
FailedCount uint64 `json:"failedReplicationCount"`
FailedCount uint64 `json:"failedReplicationCount,omitempty"`
}
// RStat - has count and bytes for replication metrics
type RStat struct {
Count float64 `json:"count"`
Bytes int64 `json:"bytes"`
}
// Add two RStat
func (r RStat) Add(r1 RStat) RStat {
return RStat{
Count: r.Count + r1.Count,
Bytes: r.Bytes + r1.Bytes,
}
}
// TimedErrStats holds error stats for a time period
type TimedErrStats struct {
LastMinute RStat `json:"lastMinute"`
LastHour RStat `json:"lastHour"`
Totals RStat `json:"totals"`
}
// Add two TimedErrStats
func (te TimedErrStats) Add(o TimedErrStats) TimedErrStats {
return TimedErrStats{
LastMinute: te.LastMinute.Add(o.LastMinute),
LastHour: te.LastHour.Add(o.LastHour),
Totals: te.Totals.Add(o.Totals),
}
}
// ResyncTargetsInfo provides replication target information to resync replicated data.
@@ -738,9 +787,185 @@ type ResyncTarget struct {
FailedSize int64 `json:"failedReplicationSize,omitempty"`
// Total number of failed operations
FailedCount int64 `json:"failedReplicationCount,omitempty"`
// Total number of failed operations
// Total number of completed operations
ReplicatedCount int64 `json:"replicationCount,omitempty"`
// Last bucket/object replicated.
Bucket string `json:"bucket,omitempty"`
Object string `json:"object,omitempty"`
}
// XferStats holds transfer rate info for uploads/sec
type XferStats struct {
AvgRate float64 `json:"avgRate"`
PeakRate float64 `json:"peakRate"`
CurrRate float64 `json:"currRate"`
}
// Merge two XferStats
func (x *XferStats) Merge(x1 XferStats) {
x.AvgRate += x1.AvgRate
x.PeakRate += x1.PeakRate
x.CurrRate += x1.CurrRate
}
// QStat holds count and bytes for objects in replication queue
type QStat struct {
Count float64 `json:"count"`
Bytes float64 `json:"bytes"`
}
// Add 2 QStat entries
func (q *QStat) Add(q1 QStat) {
q.Count += q1.Count
q.Bytes += q1.Bytes
}
// InQueueMetric holds stats for objects in replication queue
type InQueueMetric struct {
Curr QStat `json:"curr" msg:"cq"`
Avg QStat `json:"avg" msg:"aq"`
Max QStat `json:"peak" msg:"pq"`
}
// MetricName name of replication metric
type MetricName string
const (
// Large is a metric name for large objects >=128MiB
Large MetricName = "Large"
// Small is a metric name for objects <128MiB size
Small MetricName = "Small"
// Total is a metric name for total objects
Total MetricName = "Total"
)
// WorkerStat has stats on number of replication workers
type WorkerStat struct {
Curr int32 `json:"curr"`
Avg float32 `json:"avg"`
Max int32 `json:"max"`
}
// ReplMRFStats holds stats of MRF backlog saved to disk in the last 5 minutes
// and number of entries that failed replication after 3 retries
type ReplMRFStats struct {
LastFailedCount uint64 `json:"failedCount_last5min"`
// Count of unreplicated entries that were dropped after MRF retry limit reached since cluster start.
TotalDroppedCount uint64 `json:"droppedCount_since_uptime"`
// Bytes of unreplicated entries that were dropped after MRF retry limit reached since cluster start.
TotalDroppedBytes uint64 `json:"droppedBytes_since_uptime"`
}
// ReplQNodeStats holds stats for a node in replication queue
type ReplQNodeStats struct {
NodeName string `json:"nodeName"`
Uptime int64 `json:"uptime"`
Workers WorkerStat `json:"activeWorkers"`
XferStats map[MetricName]XferStats `json:"transferSummary"`
TgtXferStats map[string]map[MetricName]XferStats `json:"tgtTransferStats"`
QStats InQueueMetric `json:"queueStats"`
MRFStats ReplMRFStats `json:"mrfStats"`
}
// ReplQueueStats holds stats for replication queue across nodes
type ReplQueueStats struct {
Nodes []ReplQNodeStats `json:"nodes"`
}
// Workers returns number of workers across all nodes
func (q ReplQueueStats) Workers() (tot WorkerStat) {
for _, node := range q.Nodes {
tot.Avg += node.Workers.Avg
tot.Curr += node.Workers.Curr
if tot.Max < node.Workers.Max {
tot.Max = node.Workers.Max
}
}
if len(q.Nodes) > 0 {
tot.Avg /= float32(len(q.Nodes))
tot.Curr /= int32(len(q.Nodes))
}
return tot
}
// qStatSummary returns cluster level stats for objects in replication queue
func (q ReplQueueStats) qStatSummary() InQueueMetric {
m := InQueueMetric{}
for _, v := range q.Nodes {
m.Avg.Add(v.QStats.Avg)
m.Curr.Add(v.QStats.Curr)
if m.Max.Count < v.QStats.Max.Count {
m.Max.Add(v.QStats.Max)
}
}
return m
}
// ReplQStats holds stats for objects in replication queue
type ReplQStats struct {
Uptime int64 `json:"uptime"`
Workers WorkerStat `json:"workers"`
XferStats map[MetricName]XferStats `json:"xferStats"`
TgtXferStats map[string]map[MetricName]XferStats `json:"tgtXferStats"`
QStats InQueueMetric `json:"qStats"`
MRFStats ReplMRFStats `json:"mrfStats"`
}
// QStats returns cluster level stats for objects in replication queue
func (q ReplQueueStats) QStats() (r ReplQStats) {
r.QStats = q.qStatSummary()
r.XferStats = make(map[MetricName]XferStats)
r.TgtXferStats = make(map[string]map[MetricName]XferStats)
r.Workers = q.Workers()
for _, node := range q.Nodes {
for arn := range node.TgtXferStats {
xmap, ok := node.TgtXferStats[arn]
if !ok {
xmap = make(map[MetricName]XferStats)
}
for m, v := range xmap {
st, ok := r.XferStats[m]
if !ok {
st = XferStats{}
}
st.AvgRate += v.AvgRate
st.CurrRate += v.CurrRate
st.PeakRate = math.Max(st.PeakRate, v.PeakRate)
if _, ok := r.TgtXferStats[arn]; !ok {
r.TgtXferStats[arn] = make(map[MetricName]XferStats)
}
r.TgtXferStats[arn][m] = st
}
}
for k, v := range node.XferStats {
st, ok := r.XferStats[k]
if !ok {
st = XferStats{}
}
st.AvgRate += v.AvgRate
st.CurrRate += v.CurrRate
st.PeakRate = math.Max(st.PeakRate, v.PeakRate)
r.XferStats[k] = st
}
r.MRFStats.LastFailedCount += node.MRFStats.LastFailedCount
r.MRFStats.TotalDroppedCount += node.MRFStats.TotalDroppedCount
r.MRFStats.TotalDroppedBytes += node.MRFStats.TotalDroppedBytes
r.Uptime += node.Uptime
}
if len(q.Nodes) > 0 {
r.Uptime /= int64(len(q.Nodes)) // average uptime
}
return
}
// MetricsV2 represents replication metrics for a bucket.
type MetricsV2 struct {
Uptime int64 `json:"uptime"`
CurrentStats Metrics `json:"currStats"`
QueueStats ReplQueueStats `json:"queueStats"`
}
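
QStats folds the per-node queue statistics into cluster-level totals: transfer rates are summed, peak rates take the maximum, and uptime is averaged across nodes. An illustrative aggregation over two synthetic nodes:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/replication"
)

func main() {
	qs := replication.ReplQueueStats{Nodes: []replication.ReplQNodeStats{
		{Uptime: 100, XferStats: map[replication.MetricName]replication.XferStats{
			replication.Total: {AvgRate: 10, PeakRate: 40, CurrRate: 5},
		}},
		{Uptime: 300, XferStats: map[replication.MetricName]replication.XferStats{
			replication.Total: {AvgRate: 20, PeakRate: 30, CurrRate: 15},
		}},
	}}
	r := qs.QStats()
	// Rates are summed, peak is the max, uptime is averaged: 200 {30 40 20}
	fmt.Println(r.Uptime, r.XferStats[replication.Total])
}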

View File

@@ -121,49 +121,54 @@ func GetRegionFromURL(endpointURL url.URL) string {
if endpointURL.Host == "s3-external-1.amazonaws.com" {
return ""
}
if IsAmazonGovCloudEndpoint(endpointURL) {
return "us-gov-west-1"
}
// if ELBs are used we cannot determine which region it may be, just return empty.
if elbAmazonRegex.MatchString(endpointURL.Host) || elbAmazonCnRegex.MatchString(endpointURL.Host) {
return ""
}
parts := amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host)
// We check for FIPS dualstack matching first to avoid the non-greedy
// regex for FIPS non-dualstack matching a dualstack URL
parts := amazonS3HostFIPSDualStack.FindStringSubmatch(endpointURL.Host)
if len(parts) > 1 {
return parts[1]
}
if IsAmazonFIPSUSEastWestEndpoint(endpointURL) {
// We check for FIPS dualstack matching first to avoid the non-greedy
// regex for FIPS non-dualstack matching a dualstack URL
parts = amazonS3HostFIPSDualStack.FindStringSubmatch(endpointURL.Host)
if len(parts) > 1 {
return parts[1]
}
parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Host)
if len(parts) > 1 {
return parts[1]
}
parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Host)
if len(parts) > 1 {
return parts[1]
}
parts = amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host)
if len(parts) > 1 {
return parts[1]
}
parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host)
if len(parts) > 1 {
return parts[1]
}
parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host)
if len(parts) > 1 {
return parts[1]
}
parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Host)
if len(parts) > 1 {
return parts[1]
}
parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host)
if len(parts) > 1 {
return parts[1]
}
parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Host)
if len(parts) > 1 {
return parts[1]
}
return ""
}
@@ -186,45 +191,25 @@ func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool {
return false
}
return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" ||
endpointURL.Host == "s3-us-gov-east-1.amazonaws.com" ||
IsAmazonFIPSGovCloudEndpoint(endpointURL))
}
// IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint.
// See https://aws.amazon.com/compliance/fips.
// IsAmazonFIPSGovCloudEndpoint - match if the endpoint is FIPS and GovCloud.
func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool {
if endpointURL == sentinelURL {
return false
}
return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com" ||
endpointURL.Host == "s3-fips.us-gov-west-1.amazonaws.com" ||
endpointURL.Host == "s3-fips.dualstack.us-gov-west-1.amazonaws.com"
}
// IsAmazonFIPSUSEastWestEndpoint - Match if it is exactly Amazon S3 FIPS US East/West endpoint.
// See https://aws.amazon.com/compliance/fips.
func IsAmazonFIPSUSEastWestEndpoint(endpointURL url.URL) bool {
if endpointURL == sentinelURL {
return false
}
switch endpointURL.Host {
case "s3-fips.us-east-2.amazonaws.com":
case "s3-fips.dualstack.us-west-1.amazonaws.com":
case "s3-fips.dualstack.us-west-2.amazonaws.com":
case "s3-fips.dualstack.us-east-2.amazonaws.com":
case "s3-fips.dualstack.us-east-1.amazonaws.com":
case "s3-fips.us-west-1.amazonaws.com":
case "s3-fips.us-west-2.amazonaws.com":
case "s3-fips.us-east-1.amazonaws.com":
default:
return false
}
return true
return IsAmazonFIPSEndpoint(endpointURL) && strings.Contains(endpointURL.Host, "us-gov-")
}
// IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint.
// See https://aws.amazon.com/compliance/fips.
func IsAmazonFIPSEndpoint(endpointURL url.URL) bool {
return IsAmazonFIPSUSEastWestEndpoint(endpointURL) || IsAmazonFIPSGovCloudEndpoint(endpointURL)
if endpointURL == sentinelURL {
return false
}
return strings.HasPrefix(endpointURL.Host, "s3-fips") && strings.HasSuffix(endpointURL.Host, ".amazonaws.com")
}
// IsAmazonPrivateLinkEndpoint - Match if it is exactly Amazon S3 PrivateLink interface endpoint
@@ -339,12 +324,12 @@ func EncodePath(pathName string) string {
encodedPathname.WriteRune(s)
continue
default:
len := utf8.RuneLen(s)
if len < 0 {
l := utf8.RuneLen(s)
if l < 0 {
// if utf8 cannot convert return the same string as is
return pathName
}
u := make([]byte, len)
u := make([]byte, l)
utf8.EncodeRune(u, s)
for _, r := range u {
hex := hex.EncodeToString([]byte{r})
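
The reordered matching above exists because the non-greedy FIPS regex would otherwise swallow FIPS dualstack hosts. A quick sketch of the exported helper with three representative endpoints:

package main

import (
	"fmt"
	"net/url"

	"github.com/minio/minio-go/v7/pkg/s3utils"
)

func main() {
	for _, endpoint := range []string{
		"https://s3-fips.dualstack.us-east-2.amazonaws.com", // matched by the dualstack FIPS regex first
		"https://s3.eu-west-1.amazonaws.com",
		"https://s3-us-gov-west-1.amazonaws.com", // GovCloud short-circuit
	} {
		u, err := url.Parse(endpoint)
		if err != nil {
			continue
		}
		fmt.Println(s3utils.GetRegionFromURL(*u)) // us-east-2, eu-west-1, us-gov-west-1
	}
}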

View File

@@ -21,7 +21,6 @@ import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"strconv"
"strings"
@@ -132,7 +131,7 @@ func StreamingUnsignedV4(req *http.Request, sessionToken string, dataLen int64,
prepareUSStreamingRequest(req, sessionToken, dataLen, reqTime)
if req.Body == nil {
req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
req.Body = io.NopCloser(bytes.NewReader([]byte("")))
}
stReader := &StreamingUSReader{

View File

@@ -22,11 +22,12 @@ import (
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"
md5simd "github.com/minio/md5-simd"
)
// Reference for constants used below -
@@ -91,14 +92,14 @@ func getStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 {
// buildChunkStringToSign - returns the string to sign given the chunk
// checksum and previous signature.
func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string {
func buildChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string {
stringToSignParts := []string{
streamingPayloadHdr,
t.Format(iso8601DateFormat),
getScope(region, t, ServiceTypeS3),
previousSig,
emptySHA256,
hex.EncodeToString(sum256(chunkData)),
chunkChecksum,
}
return strings.Join(stringToSignParts, "\n")
@@ -106,13 +107,13 @@ func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData [
// buildTrailerChunkStringToSign - returns the string to sign given the chunk
// checksum and previous signature.
func buildTrailerChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string {
func buildTrailerChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string {
stringToSignParts := []string{
streamingTrailerHdr,
t.Format(iso8601DateFormat),
getScope(region, t, ServiceTypeS3),
previousSig,
hex.EncodeToString(sum256(chunkData)),
chunkChecksum,
}
return strings.Join(stringToSignParts, "\n")
@@ -149,21 +150,21 @@ func buildChunkHeader(chunkLen int64, signature string) []byte {
}
// buildChunkSignature - returns chunk signature for a given chunk checksum and previous signature.
func buildChunkSignature(chunkData []byte, reqTime time.Time, region,
func buildChunkSignature(chunkCheckSum string, reqTime time.Time, region,
previousSignature, secretAccessKey string,
) string {
chunkStringToSign := buildChunkStringToSign(reqTime, region,
previousSignature, chunkData)
previousSignature, chunkCheckSum)
signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
return getSignature(signingKey, chunkStringToSign)
}
// buildTrailerChunkSignature - returns trailer chunk signature for a given chunk checksum and previous signature.
func buildTrailerChunkSignature(chunkData []byte, reqTime time.Time, region,
func buildTrailerChunkSignature(chunkChecksum string, reqTime time.Time, region,
previousSignature, secretAccessKey string,
) string {
chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region,
previousSignature, chunkData)
previousSignature, chunkChecksum)
signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
return getSignature(signingKey, chunkStringToSign)
}
@@ -203,12 +204,17 @@ type StreamingReader struct {
totalChunks int
lastChunkSize int
trailer http.Header
sh256 md5simd.Hasher
}
// signChunk - signs a chunk read from s.baseReader of chunkLen size.
func (s *StreamingReader) signChunk(chunkLen int, addCrLf bool) {
// Compute chunk signature for next header
signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime,
s.sh256.Reset()
s.sh256.Write(s.chunkBuf[:chunkLen])
chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil))
signature := buildChunkSignature(chunkChecksum, s.reqTime,
s.region, s.prevSignature, s.secretAccessKey)
// For next chunk signature computation
@@ -240,8 +246,11 @@ func (s *StreamingReader) addSignedTrailer(h http.Header) {
s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...)
}
s.sh256.Reset()
s.sh256.Write(s.chunkBuf)
chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil))
// Compute chunk signature
signature := buildTrailerChunkSignature(s.chunkBuf, s.reqTime,
signature := buildTrailerChunkSignature(chunkChecksum, s.reqTime,
s.region, s.prevSignature, s.secretAccessKey)
// For next chunk signature computation
@@ -274,13 +283,13 @@ func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
// StreamingSignV4 - provides chunked upload signatureV4 support by
// implementing io.Reader.
func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken,
region string, dataLen int64, reqTime time.Time,
region string, dataLen int64, reqTime time.Time, sh256 md5simd.Hasher,
) *http.Request {
// Set headers needed for streaming signature.
prepareStreamingRequest(req, sessionToken, dataLen, reqTime)
if req.Body == nil {
req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
req.Body = io.NopCloser(bytes.NewReader([]byte("")))
}
stReader := &StreamingReader{
@@ -295,6 +304,7 @@ func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionTok
chunkNum: 1,
totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
lastChunkSize: int(dataLen % payloadChunkSize),
sh256: sh256,
}
if len(req.Trailer) > 0 {
stReader.trailer = req.Trailer
@@ -385,5 +395,9 @@ func (s *StreamingReader) Read(buf []byte) (int, error) {
// Close - this method makes underlying io.ReadCloser's Close method available.
func (s *StreamingReader) Close() error {
if s.sh256 != nil {
s.sh256.Close()
s.sh256 = nil
}
return s.baseReadCloser.Close()
}
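
StreamingSignV4 now takes the SHA-256 hasher from the caller instead of allocating one per chunk, which lets clients reuse a pooled md5simd hasher across requests. A sketch that satisfies the new parameter with a plain crypto/sha256 wrapper; the stdHasher type here is ours, not part of the library:

package example

import (
	"crypto/sha256"
	"hash"
	"net/http"
	"strings"
	"time"

	md5simd "github.com/minio/md5-simd"
	"github.com/minio/minio-go/v7/pkg/signer"
)

// stdHasher adapts crypto/sha256 to the md5simd.Hasher interface; Close is a no-op.
type stdHasher struct{ hash.Hash }

func (stdHasher) Close() {}

func signStreaming(accessKey, secretKey string) (*http.Request, error) {
	body := strings.NewReader("example payload")
	req, err := http.NewRequest(http.MethodPut, "https://play.min.io/bucket/object", body)
	if err != nil {
		return nil, err
	}
	var h md5simd.Hasher = stdHasher{sha256.New()}
	return signer.StreamingSignV4(req, accessKey, secretKey, "",
		"us-east-1", req.ContentLength, time.Now().UTC(), h), nil
}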

View File

@@ -289,7 +289,7 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, locati
req.Header.Add("X-Amz-Trailer", strings.ToLower(k))
}
req.TransferEncoding = []string{"aws-chunked"}
req.Header.Set("Content-Encoding", "aws-chunked")
req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(req.ContentLength, 10))
}

View File

@@ -35,7 +35,7 @@ func sum256(data []byte) []byte {
}
// sumHMAC calculate hmac between two input byte array.
func sumHMAC(key []byte, data []byte) []byte {
func sumHMAC(key, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)

View File

@@ -203,6 +203,10 @@ func (tags *tagSet) set(key, value string, failOnExist bool) error {
return nil
}
func (tags tagSet) count() int {
return len(tags.tagMap)
}
func (tags tagSet) toMap() map[string]string {
m := make(map[string]string, len(tags.tagMap))
for key, value := range tags.tagMap {
@@ -279,6 +283,11 @@ func (tags *Tags) Set(key, value string) error {
return tags.TagSet.set(key, value, false)
}
// Count - return number of tags accounted for
func (tags Tags) Count() int {
return tags.TagSet.count()
}
// ToMap returns copy of tags.
func (tags Tags) ToMap() map[string]string {
return tags.TagSet.toMap()
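
Count reports the number of tags without building the copy that ToMap returns. A short sketch:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/tags"
)

func main() {
	t, err := tags.Parse("project=alpha&tier=hot", true) // true selects object-tag limits
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Count()) // 2
}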

View File

@@ -1,6 +1,6 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
* Copyright 2015-2023 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,12 +20,15 @@ package minio
import (
"encoding/base64"
"fmt"
"net/http"
"strings"
"time"
"github.com/minio/minio-go/v7/pkg/encrypt"
)
// expirationDateFormat date format for expiration key in json policy.
const expirationDateFormat = "2006-01-02T15:04:05.999Z"
const expirationDateFormat = "2006-01-02T15:04:05.000Z"
// policyCondition explanation:
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
@@ -97,10 +100,8 @@ func (p *PostPolicy) SetKey(key string) error {
// SetKeyStartsWith - Sets an object name that a policy based upload
// can start with.
// Can use an empty value ("") to allow any key.
func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" {
return errInvalidArgument("Object prefix is empty.")
}
policyCond := policyCondition{
matchType: "starts-with",
condition: "$key",
@@ -171,10 +172,8 @@ func (p *PostPolicy) SetContentType(contentType string) error {
// SetContentTypeStartsWith - Sets what content-type an upload under this
// policy can start with.
// Can use an empty value ("") to allow any content-type.
func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) error {
if strings.TrimSpace(contentTypeStartsWith) == "" || contentTypeStartsWith == "" {
return errInvalidArgument("No content type specified.")
}
policyCond := policyCondition{
matchType: "starts-with",
condition: "$Content-Type",
@@ -242,7 +241,7 @@ func (p *PostPolicy) SetSuccessStatusAction(status string) error {
// SetUserMetadata - Set user metadata as a key/value couple.
// Can be retrieved through a HEAD request or an event.
func (p *PostPolicy) SetUserMetadata(key string, value string) error {
func (p *PostPolicy) SetUserMetadata(key, value string) error {
if strings.TrimSpace(key) == "" || key == "" {
return errInvalidArgument("Key is empty")
}
@@ -262,9 +261,29 @@ func (p *PostPolicy) SetUserMetadata(key string, value string) error {
return nil
}
// SetChecksum sets the checksum of the request.
func (p *PostPolicy) SetChecksum(c Checksum) {
if c.IsSet() {
p.formData[amzChecksumAlgo] = c.Type.String()
p.formData[c.Type.Key()] = c.Encoded()
}
}
// SetEncryption - sets encryption headers for POST API
func (p *PostPolicy) SetEncryption(sse encrypt.ServerSide) {
if sse == nil {
return
}
h := http.Header{}
sse.Marshal(h)
for k, v := range h {
p.formData[k] = v[0]
}
}
// SetUserData - Set user data as a key/value couple.
// Can be retrieved through a HEAD request or an event.
func (p *PostPolicy) SetUserData(key string, value string) error {
func (p *PostPolicy) SetUserData(key, value string) error {
if key == "" {
return errInvalidArgument("Key is empty")
}
@@ -285,10 +304,14 @@ func (p *PostPolicy) SetUserData(key string, value string) error {
}
// addNewPolicy - internal helper to validate adding new policies.
// Can use starts-with with an empty value ("") to allow any content within a form field.
func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" {
if policyCond.matchType == "" || policyCond.condition == "" {
return errInvalidArgument("Policy fields are empty.")
}
if policyCond.matchType != "starts-with" && policyCond.value == "" {
return errInvalidArgument("Policy value is empty.")
}
p.conditions = append(p.conditions, policyCond)
return nil
}
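
Together with the relaxed starts-with validation above, the new SetEncryption call lets a presigned browser POST require server-side encryption. A sketch, assuming an initialized *minio.Client and a hypothetical bucket:

package example

import (
	"context"
	"fmt"
	"time"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/encrypt"
)

func presignEncryptedPost(ctx context.Context, client *minio.Client) error {
	policy := minio.NewPostPolicy()
	if err := policy.SetBucket("uploads"); err != nil { // hypothetical bucket
		return err
	}
	if err := policy.SetKeyStartsWith(""); err != nil { // empty prefix is accepted now
		return err
	}
	if err := policy.SetExpires(time.Now().UTC().Add(15 * time.Minute)); err != nil {
		return err
	}
	policy.SetEncryption(encrypt.NewSSE()) // adds the X-Amz-Server-Side-Encryption form field
	u, formData, err := client.PresignedPostPolicy(ctx, policy)
	if err != nil {
		return err
	}
	fmt.Println(u, len(formData))
	return nil
}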

View File

@@ -20,7 +20,7 @@ package minio
import "time"
// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
func (c *Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
func (c *Client) newRetryTimerContinous(unit, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
attemptCh := make(chan int)
// normalize jitter to the range [0, 1.0]

View File

@@ -45,7 +45,7 @@ var DefaultRetryCap = time.Second
// newRetryTimer creates a timer with exponentially increasing
// delays until the maximum retry attempts are reached.
func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, unit time.Duration, cap time.Duration, jitter float64) <-chan int {
func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, unit, cap time.Duration, jitter float64) <-chan int {
attemptCh := make(chan int)
// computes the exponential backoff duration according to

View File

@@ -28,16 +28,20 @@ var awsS3EndpointMap = map[string]string{
"eu-west-2": "s3.dualstack.eu-west-2.amazonaws.com",
"eu-west-3": "s3.dualstack.eu-west-3.amazonaws.com",
"eu-central-1": "s3.dualstack.eu-central-1.amazonaws.com",
"eu-central-2": "s3.dualstack.eu-central-2.amazonaws.com",
"eu-north-1": "s3.dualstack.eu-north-1.amazonaws.com",
"eu-south-1": "s3.dualstack.eu-south-1.amazonaws.com",
"eu-south-2": "s3.dualstack.eu-south-2.amazonaws.com",
"ap-east-1": "s3.dualstack.ap-east-1.amazonaws.com",
"ap-south-1": "s3.dualstack.ap-south-1.amazonaws.com",
"ap-south-2": "s3.dualstack.ap-south-2.amazonaws.com",
"ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "s3.dualstack.ap-southeast-2.amazonaws.com",
"ap-northeast-1": "s3.dualstack.ap-northeast-1.amazonaws.com",
"ap-northeast-2": "s3.dualstack.ap-northeast-2.amazonaws.com",
"ap-northeast-3": "s3.dualstack.ap-northeast-3.amazonaws.com",
"af-south-1": "s3.dualstack.af-south-1.amazonaws.com",
"me-central-1": "s3.dualstack.me-central-1.amazonaws.com",
"me-south-1": "s3.dualstack.me-south-1.amazonaws.com",
"sa-east-1": "s3.dualstack.sa-east-1.amazonaws.com",
"us-gov-west-1": "s3.dualstack.us-gov-west-1.amazonaws.com",
@@ -45,6 +49,8 @@ var awsS3EndpointMap = map[string]string{
"cn-north-1": "s3.dualstack.cn-north-1.amazonaws.com.cn",
"cn-northwest-1": "s3.dualstack.cn-northwest-1.amazonaws.com.cn",
"ap-southeast-3": "s3.dualstack.ap-southeast-3.amazonaws.com",
"ap-southeast-4": "s3.dualstack.ap-southeast-4.amazonaws.com",
"il-central-1": "s3.dualstack.il-central-1.amazonaws.com",
}
// getS3Endpoint get Amazon S3 endpoint based on the bucket location.

View File

@@ -23,7 +23,6 @@ package minio
import (
"crypto/tls"
"crypto/x509"
"io/ioutil"
"net"
"net/http"
"os"
@@ -73,7 +72,7 @@ var DefaultTransport = func(secure bool) (*http.Transport, error) {
}
if f := os.Getenv("SSL_CERT_FILE"); f != "" {
rootCAs := mustGetSystemCertPool()
data, err := ioutil.ReadFile(f)
data, err := os.ReadFile(f)
if err == nil {
rootCAs.AppendCertsFromPEM(data)
}

View File

@@ -28,7 +28,6 @@ import (
"fmt"
"hash"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
@@ -142,7 +141,7 @@ func closeResponse(resp *http.Response) {
// Without this closing connection would disallow re-using
// the same connection for future uses.
// - http://stackoverflow.com/a/17961593/4465767
io.Copy(ioutil.Discard, resp.Body)
io.Copy(io.Discard, resp.Body)
resp.Body.Close()
}
}
@@ -256,7 +255,7 @@ func parseRFC7231Time(lastModified string) (time.Time, error) {
// ToObjectInfo converts http header values into ObjectInfo type,
// extracts metadata and fills in all the necessary fields in ObjectInfo.
func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectInfo, error) {
func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, error) {
var err error
// Trim off the odd double quotes from ETag in the beginning and end.
etag := trimEtag(h.Get("ETag"))
@@ -512,6 +511,31 @@ func isAmzHeader(headerKey string) bool {
return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) || strings.HasPrefix(key, "x-amz-checksum-")
}
// supportedQueryValues is a list of query strings that can be passed in when using GetObject.
var supportedQueryValues = map[string]bool{
"partNumber": true,
"versionId": true,
"response-cache-control": true,
"response-content-disposition": true,
"response-content-encoding": true,
"response-content-language": true,
"response-content-type": true,
"response-expires": true,
}
// isStandardQueryValue will return true when the passed in query string parameter is supported rather than customized.
func isStandardQueryValue(qsKey string) bool {
return supportedQueryValues[qsKey]
}
// Per documentation at https://docs.aws.amazon.com/AmazonS3/latest/userguide/LogFormat.html#LogFormatCustom,
// query params starting with "x-" are ignored by S3.
const allowedCustomQueryPrefix = "x-"
func isCustomQueryValue(qsKey string) bool {
return strings.HasPrefix(qsKey, allowedCustomQueryPrefix)
}
var (
md5Pool = sync.Pool{New: func() interface{} { return md5.New() }}
sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }}

View File

@@ -23,6 +23,11 @@ import (
"github.com/klauspost/cpuid/v2"
)
var (
hasIntelSha = runtime.GOARCH == "amd64" && cpuid.CPU.Supports(cpuid.SHA, cpuid.SSSE3, cpuid.SSE4)
hasAvx512 = cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512DQ, cpuid.AVX512BW, cpuid.AVX512VL)
)
func hasArmSha2() bool {
if cpuid.CPU.Has(cpuid.SHA2) {
return true
@@ -42,5 +47,4 @@ func hasArmSha2() bool {
return false
}
return bytes.Contains(cpuInfo, []byte(sha256Feature))
}

View File

@@ -19,10 +19,8 @@ package sha256
import (
"crypto/sha256"
"encoding/binary"
"errors"
"hash"
"runtime"
"github.com/klauspost/cpuid/v2"
)
// Size - The size of a SHA256 checksum in bytes.
@@ -68,42 +66,34 @@ func (d *digest) Reset() {
type blockfuncType int
const (
blockfuncGeneric blockfuncType = iota
blockfuncSha blockfuncType = iota
blockfuncArm blockfuncType = iota
blockfuncStdlib blockfuncType = iota
blockfuncIntelSha
blockfuncArmSha2
blockfuncForceGeneric = -1
)
var blockfunc blockfuncType
func init() {
blockfunc = blockfuncGeneric
switch {
case hasSHAExtensions():
blockfunc = blockfuncSha
case hasIntelSha:
blockfunc = blockfuncIntelSha
case hasArmSha2():
blockfunc = blockfuncArm
default:
blockfunc = blockfuncGeneric
blockfunc = blockfuncArmSha2
}
}
var avx512 = cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512DQ, cpuid.AVX512BW, cpuid.AVX512VL)
// hasSHAExtensions return whether the cpu supports SHA extensions.
func hasSHAExtensions() bool {
return cpuid.CPU.Supports(cpuid.SHA, cpuid.SSSE3, cpuid.SSE4) && runtime.GOARCH == "amd64"
}
// New returns a new hash.Hash computing the SHA256 checksum.
func New() hash.Hash {
if blockfunc != blockfuncGeneric {
d := new(digest)
d.Reset()
return d
if blockfunc == blockfuncStdlib {
// Fallback to the standard golang implementation
// if no features were found.
return sha256.New()
}
// Fallback to the standard golang implementation
// if no features were found.
return sha256.New()
d := new(digest)
d.Reset()
return d
}
// Sum256 - single caller sha256 helper
@@ -272,11 +262,11 @@ func (d *digest) checkSum() (digest [Size]byte) {
}
func block(dig *digest, p []byte) {
if blockfunc == blockfuncSha {
blockShaGo(dig, p)
} else if blockfunc == blockfuncArm {
blockArmGo(dig, p)
} else if blockfunc == blockfuncGeneric {
if blockfunc == blockfuncIntelSha {
blockIntelShaGo(dig, p)
} else if blockfunc == blockfuncArmSha2 {
blockArmSha2Go(dig, p)
} else {
blockGeneric(dig, p)
}
}
@@ -397,3 +387,82 @@ var _K = []uint32{
0xbef9a3f7,
0xc67178f2,
}
const (
magic256 = "sha\x03"
marshaledSize = len(magic256) + 8*4 + chunk + 8
)
func (d *digest) MarshalBinary() ([]byte, error) {
b := make([]byte, 0, marshaledSize)
b = append(b, magic256...)
b = appendUint32(b, d.h[0])
b = appendUint32(b, d.h[1])
b = appendUint32(b, d.h[2])
b = appendUint32(b, d.h[3])
b = appendUint32(b, d.h[4])
b = appendUint32(b, d.h[5])
b = appendUint32(b, d.h[6])
b = appendUint32(b, d.h[7])
b = append(b, d.x[:d.nx]...)
b = b[:len(b)+len(d.x)-d.nx] // already zero
b = appendUint64(b, d.len)
return b, nil
}
func (d *digest) UnmarshalBinary(b []byte) error {
if len(b) < len(magic256) || string(b[:len(magic256)]) != magic256 {
return errors.New("crypto/sha256: invalid hash state identifier")
}
if len(b) != marshaledSize {
return errors.New("crypto/sha256: invalid hash state size")
}
b = b[len(magic256):]
b, d.h[0] = consumeUint32(b)
b, d.h[1] = consumeUint32(b)
b, d.h[2] = consumeUint32(b)
b, d.h[3] = consumeUint32(b)
b, d.h[4] = consumeUint32(b)
b, d.h[5] = consumeUint32(b)
b, d.h[6] = consumeUint32(b)
b, d.h[7] = consumeUint32(b)
b = b[copy(d.x[:], b):]
b, d.len = consumeUint64(b)
d.nx = int(d.len % chunk)
return nil
}
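
These two methods give the digest the same `"sha\x03"` wire format as the standard library, so hash state can be snapshotted mid-stream and resumed later. A round-trip sketch using stdlib `crypto/sha256`, which has implemented the identical `encoding.BinaryMarshaler` contract since Go 1.9:

```go
package main

import (
	"crypto/sha256"
	"encoding"
	"encoding/hex"
	"fmt"
	"log"
)

func main() {
	// Hash the first half of the input, then snapshot the state...
	h := sha256.New()
	h.Write([]byte("hello "))
	state, err := h.(encoding.BinaryMarshaler).MarshalBinary()
	if err != nil {
		log.Fatal(err)
	}

	// ...restore it into a fresh hasher and finish the input.
	h2 := sha256.New()
	if err := h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(state); err != nil {
		log.Fatal(err)
	}
	h2.Write([]byte("world"))

	// Both digests should match.
	want := sha256.Sum256([]byte("hello world"))
	fmt.Println(hex.EncodeToString(h2.Sum(nil)))
	fmt.Println(hex.EncodeToString(want[:]))
}
```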
func appendUint32(b []byte, v uint32) []byte {
return append(b,
byte(v>>24),
byte(v>>16),
byte(v>>8),
byte(v),
)
}
func appendUint64(b []byte, v uint64) []byte {
return append(b,
byte(v>>56),
byte(v>>48),
byte(v>>40),
byte(v>>32),
byte(v>>24),
byte(v>>16),
byte(v>>8),
byte(v),
)
}
func consumeUint64(b []byte) ([]byte, uint64) {
_ = b[7]
x := uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
return b[8:], x
}
func consumeUint32(b []byte) ([]byte, uint32) {
_ = b[3]
x := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
return b[4:], x
}
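
The helpers above are hand-rolled big-endian codecs; the leading `_ = b[7]` / `_ = b[3]` statements are the usual bounds-check-elimination hint to the compiler. For reference, `encoding/binary` expresses the same operations:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Equivalent of appendUint64: big-endian encode into 8 bytes.
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, 0x0102030405060708)

	// Equivalent of consumeUint64: decode and advance the slice.
	v := binary.BigEndian.Uint64(b)
	rest := b[8:]

	fmt.Printf("% x -> %#x (%d bytes remaining)\n", b, v, len(rest))
}
```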

View File

@@ -1,4 +1,5 @@
//+build !noasm,!appengine,gc
//go:build !noasm && !appengine && gc
// +build !noasm,!appengine,gc
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.

View File

@@ -1,4 +1,4 @@
//+build !noasm,!appengine
//+build !noasm,!appengine,gc
TEXT ·sha256X16Avx512(SB), 7, $0
MOVQ digests+0(FP), DI

View File

@@ -1,6 +0,0 @@
//+build !noasm,!appengine,gc
package sha256
//go:noescape
func blockSha(h *[8]uint32, message []uint8)

View File

@@ -1,4 +1,5 @@
//+build !noasm,!appengine,gc
//go:build !noasm && !appengine && gc
// +build !noasm,!appengine,gc
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
@@ -18,10 +19,13 @@
package sha256
func blockArmGo(dig *digest, p []byte) {
panic("blockArmGo called unexpectedly")
func blockArmSha2Go(dig *digest, p []byte) {
panic("blockArmSha2Go called unexpectedly")
}
func blockShaGo(dig *digest, p []byte) {
blockSha(&dig.h, p)
//go:noescape
func blockIntelSha(h *[8]uint32, message []uint8)
func blockIntelShaGo(dig *digest, p []byte) {
blockIntelSha(&dig.h, p)
}

View File

@@ -1,4 +1,4 @@
//+build !noasm,!appengine
//+build !noasm,!appengine,gc
// SHA intrinsic version of SHA256
@@ -106,7 +106,7 @@ GLOBL SHUF_MASK<>(SB), RODATA|NOPTR, $16
// X13 saved hash state // CDGH
// X15 data shuffle mask (constant)
TEXT ·blockSha(SB), NOSPLIT, $0-32
TEXT ·blockIntelSha(SB), NOSPLIT, $0-32
MOVQ h+0(FP), DX
MOVQ message_base+8(FP), SI
MOVQ message_len+16(FP), DI

View File

@@ -1,4 +1,5 @@
//+build !noasm,!appengine,gc
//go:build !noasm && !appengine && gc
// +build !noasm,!appengine,gc
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
@@ -18,18 +19,18 @@
package sha256
func blockShaGo(dig *digest, p []byte) {
panic("blockShaGoc called unexpectedly")
func blockIntelShaGo(dig *digest, p []byte) {
panic("blockIntelShaGo called unexpectedly")
}
//go:noescape
func blockArm(h []uint32, message []uint8)
func blockArmSha2(h []uint32, message []uint8)
func blockArmGo(dig *digest, p []byte) {
func blockArmSha2Go(dig *digest, p []byte) {
h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]}
blockArm(h[:], p[:])
blockArmSha2(h[:], p[:])
dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4],
h[5], h[6], h[7]

View File

@@ -1,4 +1,4 @@
//+build !noasm,!appengine
//+build !noasm,!appengine,gc
// ARM64 version of SHA256
@@ -25,7 +25,7 @@
// their Plan9 equivalents
//
TEXT ·blockArm(SB), 7, $0
TEXT ·blockArmSha2(SB), 7, $0
MOVD h+0(FP), R0
MOVD message+24(FP), R1
MOVD message_len+32(FP), R2 // length of message

View File

@@ -1,4 +1,5 @@
//+build appengine noasm !amd64,!arm64 !gc
//go:build appengine || noasm || (!amd64 && !arm64) || !gc
// +build appengine noasm !amd64,!arm64 !gc
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
@@ -18,11 +19,11 @@
package sha256
func blockShaGo(dig *digest, p []byte) {
panic("blockShaGo called unexpectedly")
func blockIntelShaGo(dig *digest, p []byte) {
panic("blockIntelShaGo called unexpectedly")
}
func blockArmGo(dig *digest, p []byte) {
panic("blockArmGo called unexpectedly")
func blockArmSha2Go(dig *digest, p []byte) {
panic("blockArmSha2Go called unexpectedly")
}