Bump reva to pull in the latest fixes and improvements

André Duffeck
2026-01-21 08:43:08 +01:00
parent ef3c0da0cb
commit a93769ae9b
88 changed files with 7473 additions and 577 deletions
+9 -8
@@ -34,6 +34,7 @@ require (
github.com/go-micro/plugins/v4/wrapper/monitoring/prometheus v1.2.0
github.com/go-micro/plugins/v4/wrapper/trace/opentelemetry v1.2.0
github.com/go-playground/validator/v10 v10.30.1
github.com/go-resty/resty/v2 v2.7.0
github.com/golang-jwt/jwt/v5 v5.3.0
github.com/golang/protobuf v1.5.4
github.com/google/go-cmp v0.7.0
@@ -64,7 +65,7 @@ require (
github.com/open-policy-agent/opa v1.11.1
github.com/opencloud-eu/icap-client v0.0.0-20250930132611-28a2afe62d89
github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250724122329-41ba6b191e76
github.com/opencloud-eu/reva/v2 v2.41.1-0.20260107152322-93760b632993
github.com/opencloud-eu/reva/v2 v2.41.1-0.20260120144836-2769c3c07a19
github.com/opensearch-project/opensearch-go/v4 v4.6.0
github.com/orcaman/concurrent-map v1.0.0
github.com/pkg/errors v0.9.1
@@ -124,7 +125,7 @@ require (
filippo.io/edwards25519 v1.1.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/BurntSushi/toml v1.5.0 // indirect
github.com/BurntSushi/toml v1.6.0 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.4.0 // indirect
github.com/Masterminds/sprig v2.22.0+incompatible // indirect
@@ -221,7 +222,6 @@ require (
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-redis/redis/v8 v8.11.5 // indirect
github.com/go-resty/resty/v2 v2.7.0 // indirect
github.com/go-sql-driver/mysql v1.9.3 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
@@ -250,6 +250,7 @@ require (
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect
github.com/hashicorp/go-hclog v1.6.3 // indirect
github.com/hashicorp/go-plugin v1.7.0 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hashicorp/yamux v0.1.2 // indirect
github.com/huandu/xstrings v1.5.0 // indirect
github.com/iancoleman/strcase v0.3.0 // indirect
@@ -288,10 +289,10 @@ require (
github.com/mendsley/gojwk v0.0.0-20141217222730-4d5ec6e58103 // indirect
github.com/miekg/dns v1.1.57 // indirect
github.com/mileusna/useragent v1.3.5 // indirect
github.com/minio/crc64nvme v1.1.0 // indirect
github.com/minio/crc64nvme v1.1.1 // indirect
github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/minio/minio-go/v7 v7.0.97 // indirect
github.com/minio/minio-go/v7 v7.0.98 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
@@ -327,7 +328,7 @@ require (
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/pquerna/cachecontrol v0.2.0 // indirect
github.com/prometheus/alertmanager v0.30.0 // indirect
github.com/prometheus/alertmanager v0.30.1 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.67.4 // indirect
github.com/prometheus/procfs v0.17.0 // indirect
@@ -342,7 +343,7 @@ require (
github.com/samber/slog-common v0.19.0 // indirect
github.com/samber/slog-zerolog/v2 v2.9.0 // indirect
github.com/segmentio/asm v1.2.1 // indirect
github.com/segmentio/kafka-go v0.4.49 // indirect
github.com/segmentio/kafka-go v0.4.50 // indirect
github.com/segmentio/ksuid v1.0.4 // indirect
github.com/sercand/kuberesolver/v5 v5.1.1 // indirect
github.com/sergi/go-diff v1.4.0 // indirect
@@ -362,7 +363,7 @@ require (
github.com/tchap/go-patricia/v2 v2.3.3 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
github.com/tinylib/msgp v1.3.0 // indirect
github.com/tinylib/msgp v1.6.1 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.8.0 // indirect
github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208 // indirect
+16 -14
@@ -65,8 +65,8 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk=
github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/CiscoM31/godata v1.0.11 h1:w7y8twuW02LdH6mak3/GJ5i0GrCv2IoZUJVqa/g5Yeo=
github.com/CiscoM31/godata v1.0.11/go.mod h1:ZMiT6JuD3Rm83HEtiTx4JEChsd25YCrxchKGag/sdTc=
@@ -651,6 +651,8 @@ github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
@@ -851,14 +853,14 @@ github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM=
github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk=
github.com/mileusna/useragent v1.3.5 h1:SJM5NzBmh/hO+4LGeATKpaEX9+b4vcGg2qXGLiNGDws=
github.com/mileusna/useragent v1.3.5/go.mod h1:3d8TOmwL/5I8pJjyVDteHtgDGcefrFUX4ccGOMKNYYc=
github.com/minio/crc64nvme v1.1.0 h1:e/tAguZ+4cw32D+IO/8GSf5UVr9y+3eJcxZI2WOO/7Q=
github.com/minio/crc64nvme v1.1.0/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
github.com/minio/crc64nvme v1.1.1 h1:8dwx/Pz49suywbO+auHCBpCtlW1OfpcLN7wYgVR6wAI=
github.com/minio/crc64nvme v1.1.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76 h1:KGuD/pM2JpL9FAYvBrnBBeENKZNh6eNtjqytV6TYjnk=
github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v7 v7.0.97 h1:lqhREPyfgHTB/ciX8k2r8k0D93WaFqxbJX36UZq5occ=
github.com/minio/minio-go/v7 v7.0.97/go.mod h1:re5VXuo0pwEtoNLsNuSr0RrLfT/MBtohwdaSmPPSRSk=
github.com/minio/minio-go/v7 v7.0.98 h1:MeAVKjLVz+XJ28zFcuYyImNSAh8Mq725uNW4beRisi0=
github.com/minio/minio-go/v7 v7.0.98/go.mod h1:cY0Y+W7yozf0mdIclrttzo1Iiu7mEf9y7nk2uXqMOvM=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
@@ -967,8 +969,8 @@ github.com/opencloud-eu/inotifywaitgo v0.0.0-20251111171128-a390bae3c5e9 h1:dIft
github.com/opencloud-eu/inotifywaitgo v0.0.0-20251111171128-a390bae3c5e9/go.mod h1:JWyDC6H+5oZRdUJUgKuaye+8Ph5hEs6HVzVoPKzWSGI=
github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250724122329-41ba6b191e76 h1:vD/EdfDUrv4omSFjrinT8Mvf+8D7f9g4vgQ2oiDrVUI=
github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250724122329-41ba6b191e76/go.mod h1:pzatilMEHZFT3qV7C/X3MqOa3NlRQuYhlRhZTL+hN6Q=
github.com/opencloud-eu/reva/v2 v2.41.1-0.20260107152322-93760b632993 h1:qWU0bKhD1wqQIq6giMTvUUbG1IlaT/lzchLDSjuedi0=
github.com/opencloud-eu/reva/v2 v2.41.1-0.20260107152322-93760b632993/go.mod h1:foXaMxugUi4TTRsK3AAXRAb/kyFd4A9k2+wNv+p+vbU=
github.com/opencloud-eu/reva/v2 v2.41.1-0.20260120144836-2769c3c07a19 h1:8loHHe7FYd7zgIcGTlbHwre+bU/AAwREEYVd4SWM9/s=
github.com/opencloud-eu/reva/v2 v2.41.1-0.20260120144836-2769c3c07a19/go.mod h1:pv+w23JG0/qJweZbTzNNev//YEvlUML1L/2iXgKGkkg=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
@@ -1020,8 +1022,8 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:Om
github.com/pquerna/cachecontrol v0.2.0 h1:vBXSNuE5MYP9IJ5kjsdo8uq+w41jSPgvba2DEnkRx9k=
github.com/pquerna/cachecontrol v0.2.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
github.com/pquerna/otp v1.3.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
github.com/prometheus/alertmanager v0.30.0 h1:E4dnxSFXK8V2Bb8iqudlisTmaIrF3hRJSWnliG08tBM=
github.com/prometheus/alertmanager v0.30.0/go.mod h1:93PBumcTLr/gNtNtM0m7BcCffbvYP5bKuLBWiOnISaA=
github.com/prometheus/alertmanager v0.30.1 h1:427prmCHuy1rMmV7fl/TVQFh5A/78XQ/Mp+TsswZNGM=
github.com/prometheus/alertmanager v0.30.1/go.mod h1:93PBumcTLr/gNtNtM0m7BcCffbvYP5bKuLBWiOnISaA=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
@@ -1111,8 +1113,8 @@ github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210127161313-bd30bebeac4f/
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/segmentio/asm v1.2.1 h1:DTNbBqs57ioxAD4PrArqftgypG4/qNpXoJx8TVXxPR0=
github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/segmentio/kafka-go v0.4.49 h1:GJiNX1d/g+kG6ljyJEoi9++PUMdXGAxb7JGPiDCuNmk=
github.com/segmentio/kafka-go v0.4.49/go.mod h1:Y1gn60kzLEEaW28YshXyk2+VCUKbJ3Qr6DrnT3i4+9E=
github.com/segmentio/kafka-go v0.4.50 h1:mcyC3tT5WeyWzrFbd6O374t+hmcu1NKt2Pu1L3QaXmc=
github.com/segmentio/kafka-go v0.4.50/go.mod h1:Y1gn60kzLEEaW28YshXyk2+VCUKbJ3Qr6DrnT3i4+9E=
github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY=
@@ -1224,8 +1226,8 @@ github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
github.com/tinylib/msgp v1.3.0 h1:ULuf7GPooDaIlbyvgAxBV/FI7ynli6LZ1/nVUNu+0ww=
github.com/tinylib/msgp v1.3.0/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0=
github.com/tinylib/msgp v1.6.1 h1:ESRv8eL3u+DNHUoSAAQRE50Hm162zqAnBoGv9PzScPY=
github.com/tinylib/msgp v1.6.1/go.mod h1:RSp0LW9oSxFut3KzESt5Voq4GVWyS+PSulT77roAqEA=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
+1 -1
@@ -1,7 +1,7 @@
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml` packages.
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
Compatible with TOML version [v1.1.0](https://toml.io/en/v1.1.0).
Documentation: https://pkg.go.dev/github.com/BurntSushi/toml
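As a quick orientation for the reflection interface mentioned above, here is a minimal decoding sketch; the Config struct and the inline TOML document are invented for illustration and are not part of this commit.

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// Config is a hypothetical target struct, used only to illustrate decoding.
type Config struct {
	Title string `toml:"title"`
	Port  int    `toml:"port"`
}

func main() {
	// Decode populates the struct via reflection, much like encoding/json.
	var cfg Config
	if _, err := toml.Decode("title = \"demo\"\nport = 8080\n", &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Title, cfg.Port) // demo 8080
}
```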
+8 -1
@@ -206,6 +206,13 @@ func markDecodedRecursive(md *MetaData, tmap map[string]any) {
markDecodedRecursive(md, tmap)
md.context = md.context[0 : len(md.context)-1]
}
if tarr, ok := tmap[key].([]map[string]any); ok {
for _, elm := range tarr {
md.context = append(md.context, key)
markDecodedRecursive(md, elm)
md.context = md.context[0 : len(md.context)-1]
}
}
}
}
@@ -423,7 +430,7 @@ func (md *MetaData) unifyString(data any, rv reflect.Value) error {
if i, ok := data.(int64); ok {
rv.SetString(strconv.FormatInt(i, 10))
} else if f, ok := data.(float64); ok {
rv.SetString(strconv.FormatFloat(f, 'f', -1, 64))
rv.SetString(strconv.FormatFloat(f, 'g', -1, 64))
} else {
return md.badtype("string", data)
}
+46 -33
@@ -228,9 +228,9 @@ func (enc *Encoder) eElement(rv reflect.Value) {
}
switch v.Location() {
default:
enc.wf(v.Format(format))
enc.write(v.Format(format))
case internal.LocalDatetime, internal.LocalDate, internal.LocalTime:
enc.wf(v.In(time.UTC).Format(format))
enc.write(v.In(time.UTC).Format(format))
}
return
case Marshaler:
@@ -279,40 +279,40 @@ func (enc *Encoder) eElement(rv reflect.Value) {
case reflect.String:
enc.writeQuoted(rv.String())
case reflect.Bool:
enc.wf(strconv.FormatBool(rv.Bool()))
enc.write(strconv.FormatBool(rv.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
enc.wf(strconv.FormatInt(rv.Int(), 10))
enc.write(strconv.FormatInt(rv.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
enc.wf(strconv.FormatUint(rv.Uint(), 10))
enc.write(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
f := rv.Float()
if math.IsNaN(f) {
if math.Signbit(f) {
enc.wf("-")
enc.write("-")
}
enc.wf("nan")
enc.write("nan")
} else if math.IsInf(f, 0) {
if math.Signbit(f) {
enc.wf("-")
enc.write("-")
}
enc.wf("inf")
enc.write("inf")
} else {
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32)))
enc.write(floatAddDecimal(strconv.FormatFloat(f, 'g', -1, 32)))
}
case reflect.Float64:
f := rv.Float()
if math.IsNaN(f) {
if math.Signbit(f) {
enc.wf("-")
enc.write("-")
}
enc.wf("nan")
enc.write("nan")
} else if math.IsInf(f, 0) {
if math.Signbit(f) {
enc.wf("-")
enc.write("-")
}
enc.wf("inf")
enc.write("inf")
} else {
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64)))
enc.write(floatAddDecimal(strconv.FormatFloat(f, 'g', -1, 64)))
}
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
@@ -330,27 +330,32 @@ func (enc *Encoder) eElement(rv reflect.Value) {
// By the TOML spec, all floats must have a decimal with at least one number on
// either side.
func floatAddDecimal(fstr string) string {
if !strings.Contains(fstr, ".") {
return fstr + ".0"
for _, c := range fstr {
if c == 'e' { // Exponent syntax
return fstr
}
if c == '.' {
return fstr
}
}
return fstr
return fstr + ".0"
}
func (enc *Encoder) writeQuoted(s string) {
enc.wf("\"%s\"", dblQuotedReplacer.Replace(s))
enc.write(`"` + dblQuotedReplacer.Replace(s) + `"`)
}
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
length := rv.Len()
enc.wf("[")
enc.write("[")
for i := 0; i < length; i++ {
elem := eindirect(rv.Index(i))
enc.eElement(elem)
if i != length-1 {
enc.wf(", ")
enc.write(", ")
}
}
enc.wf("]")
enc.write("]")
}
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
@@ -363,7 +368,7 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
continue
}
enc.newline()
enc.wf("%s[[%s]]", enc.indentStr(key), key)
enc.writef("%s[[%s]]", enc.indentStr(key), key)
enc.newline()
enc.eMapOrStruct(key, trv, false)
}
@@ -376,7 +381,7 @@ func (enc *Encoder) eTable(key Key, rv reflect.Value) {
enc.newline()
}
if len(key) > 0 {
enc.wf("%s[%s]", enc.indentStr(key), key)
enc.writef("%s[%s]", enc.indentStr(key), key)
enc.newline()
}
enc.eMapOrStruct(key, rv, false)
@@ -422,7 +427,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
if inline {
enc.writeKeyValue(Key{mapKey.String()}, val, true)
if trailC || i != len(mapKeys)-1 {
enc.wf(", ")
enc.write(", ")
}
} else {
enc.encode(key.add(mapKey.String()), val)
@@ -431,12 +436,12 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
}
if inline {
enc.wf("{")
enc.write("{")
}
writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0)
writeMapKeys(mapKeysSub, false)
if inline {
enc.wf("}")
enc.write("}")
}
}
@@ -534,7 +539,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if inline {
enc.writeKeyValue(Key{keyName}, fieldVal, true)
if fieldIndex[0] != totalFields-1 {
enc.wf(", ")
enc.write(", ")
}
} else {
enc.encode(key.add(keyName), fieldVal)
@@ -543,14 +548,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
}
if inline {
enc.wf("{")
enc.write("{")
}
l := len(fieldsDirect) + len(fieldsSub)
writeFields(fieldsDirect, l)
writeFields(fieldsSub, l)
if inline {
enc.wf("}")
enc.write("}")
}
}
@@ -700,7 +705,7 @@ func isEmpty(rv reflect.Value) bool {
func (enc *Encoder) newline() {
if enc.hasWritten {
enc.wf("\n")
enc.write("\n")
}
}
@@ -722,14 +727,22 @@ func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
enc.eElement(val)
return
}
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.writef("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.eElement(val)
if !inline {
enc.newline()
}
}
func (enc *Encoder) wf(format string, v ...any) {
func (enc *Encoder) write(s string) {
_, err := enc.w.WriteString(s)
if err != nil {
encPanic(err)
}
enc.hasWritten = true
}
func (enc *Encoder) writef(format string, v ...any) {
_, err := fmt.Fprintf(enc.w, format, v...)
if err != nil {
encPanic(err)
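A note on the float formatting change in this file: switching strconv.FormatFloat from the 'f' to the 'g' verb means the encoder can emit exponent notation, which is why floatAddDecimal now also skips values containing an 'e'. The sketch below restates that helper in compressed form (using strings.ContainsAny instead of the rune loop) purely to illustrate the behaviour; it is not the vendored code.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// floatAddDecimal, behaviourally: append ".0" only when the formatted float
// has neither a decimal point nor an exponent, so it stays a valid TOML float.
func floatAddDecimal(fstr string) string {
	if strings.ContainsAny(fstr, ".e") {
		return fstr
	}
	return fstr + ".0"
}

func main() {
	fmt.Println(floatAddDecimal(strconv.FormatFloat(3, 'g', -1, 64)))    // 3.0
	fmt.Println(floatAddDecimal(strconv.FormatFloat(1e21, 'g', -1, 64))) // 1e+21
	fmt.Println(floatAddDecimal(strconv.FormatFloat(0.25, 'g', -1, 64))) // 0.25
}
```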
+53 -77
@@ -13,7 +13,6 @@ type itemType int
const (
itemError itemType = iota
itemNIL // used in the parser to indicate no type
itemEOF
itemText
itemString
@@ -47,14 +46,13 @@ func (p Position) String() string {
}
type lexer struct {
input string
start int
pos int
line int
state stateFn
items chan item
tomlNext bool
esc bool
input string
start int
pos int
line int
state stateFn
items chan item
esc bool
// Allow for backing up up to 4 runes. This is necessary because TOML
// contains 3-rune tokens (""" and ''').
@@ -90,14 +88,13 @@ func (lx *lexer) nextItem() item {
}
}
func lex(input string, tomlNext bool) *lexer {
func lex(input string) *lexer {
lx := &lexer{
input: input,
state: lexTop,
items: make(chan item, 10),
stack: make([]stateFn, 0, 10),
line: 1,
tomlNext: tomlNext,
input: input,
state: lexTop,
items: make(chan item, 10),
stack: make([]stateFn, 0, 10),
line: 1,
}
return lx
}
@@ -108,7 +105,7 @@ func (lx *lexer) push(state stateFn) {
func (lx *lexer) pop() stateFn {
if len(lx.stack) == 0 {
return lx.errorf("BUG in lexer: no states to pop")
panic("BUG in lexer: no states to pop")
}
last := lx.stack[len(lx.stack)-1]
lx.stack = lx.stack[0 : len(lx.stack)-1]
@@ -305,6 +302,8 @@ func lexTop(lx *lexer) stateFn {
return lexTableStart
case eof:
if lx.pos > lx.start {
// TODO: never reached? I think this can only occur on a bug in the
// lexer(?)
return lx.errorf("unexpected EOF")
}
lx.emit(itemEOF)
@@ -392,8 +391,6 @@ func lexTableNameStart(lx *lexer) stateFn {
func lexTableNameEnd(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.next(); {
case isWhitespace(r):
return lexTableNameEnd
case r == '.':
lx.ignore()
return lexTableNameStart
@@ -412,7 +409,7 @@ func lexTableNameEnd(lx *lexer) stateFn {
// Lexes only one part, e.g. only 'a' inside 'a.b'.
func lexBareName(lx *lexer) stateFn {
r := lx.next()
if isBareKeyChar(r, lx.tomlNext) {
if isBareKeyChar(r) {
return lexBareName
}
lx.backup()
@@ -420,23 +417,23 @@ func lexBareName(lx *lexer) stateFn {
return lx.pop()
}
// lexBareName lexes one part of a key or table.
//
// It assumes that at least one valid character for the table has already been
// read.
// lexQuotedName lexes one part of a quoted key or table name. It assumes that
// it starts lexing at the quote itself (" or ').
//
// Lexes only one part, e.g. only '"a"' inside '"a".b'.
func lexQuotedName(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexValue)
case r == '"':
lx.ignore() // ignore the '"'
return lexString
case r == '\'':
lx.ignore() // ignore the "'"
return lexRawString
// TODO: I don't think any of the below conditions can ever be reached?
case isWhitespace(r):
return lexSkip(lx, lexValue)
case r == eof:
return lx.errorf("unexpected EOF; expected value")
default:
@@ -464,17 +461,19 @@ func lexKeyStart(lx *lexer) stateFn {
func lexKeyNameStart(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.peek(); {
case r == '=' || r == eof:
return lx.errorf("unexpected '='")
case r == '.':
return lx.errorf("unexpected '.'")
default:
lx.push(lexKeyEnd)
return lexBareName
case r == '"' || r == '\'':
lx.ignore()
lx.push(lexKeyEnd)
return lexQuotedName
default:
lx.push(lexKeyEnd)
return lexBareName
// TODO: I think these can never be reached?
case r == '=' || r == eof:
return lx.errorf("unexpected '='")
case r == '.':
return lx.errorf("unexpected '.'")
}
}
@@ -485,7 +484,7 @@ func lexKeyEnd(lx *lexer) stateFn {
switch r := lx.next(); {
case isWhitespace(r):
return lexSkip(lx, lexKeyEnd)
case r == eof:
case r == eof: // TODO: never reached
return lx.errorf("unexpected EOF; expected key separator '='")
case r == '.':
lx.ignore()
@@ -628,10 +627,7 @@ func lexInlineTableValue(lx *lexer) stateFn {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValue)
case isNL(r):
if lx.tomlNext {
return lexSkip(lx, lexInlineTableValue)
}
return lx.errorPrevLine(errLexInlineTableNL{})
return lexSkip(lx, lexInlineTableValue)
case r == '#':
lx.push(lexInlineTableValue)
return lexCommentStart
@@ -653,10 +649,7 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r):
if lx.tomlNext {
return lexSkip(lx, lexInlineTableValueEnd)
}
return lx.errorPrevLine(errLexInlineTableNL{})
return lexSkip(lx, lexInlineTableValueEnd)
case r == '#':
lx.push(lexInlineTableValueEnd)
return lexCommentStart
@@ -664,10 +657,7 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
lx.ignore()
lx.skip(isWhitespace)
if lx.peek() == '}' {
if lx.tomlNext {
return lexInlineTableValueEnd
}
return lx.errorf("trailing comma not allowed in inline tables")
return lexInlineTableValueEnd
}
return lexInlineTableValue
case r == '}':
@@ -855,9 +845,6 @@ func lexStringEscape(lx *lexer) stateFn {
r := lx.next()
switch r {
case 'e':
if !lx.tomlNext {
return lx.error(errLexEscape{r})
}
fallthrough
case 'b':
fallthrough
@@ -878,9 +865,6 @@ func lexStringEscape(lx *lexer) stateFn {
case '\\':
return lx.pop()
case 'x':
if !lx.tomlNext {
return lx.error(errLexEscape{r})
}
return lexHexEscape
case 'u':
return lexShortUnicodeEscape
@@ -928,19 +912,9 @@ func lexLongUnicodeEscape(lx *lexer) stateFn {
// lexBaseNumberOrDate can differentiate base prefixed integers from other
// types.
func lexNumberOrDateStart(lx *lexer) stateFn {
r := lx.next()
switch r {
case '0':
if lx.next() == '0' {
return lexBaseNumberOrDate
}
if !isDigit(r) {
// The only way to reach this state is if the value starts
// with a digit, so specifically treat anything else as an
// error.
return lx.errorf("expected a digit but got %q", r)
}
return lexNumberOrDate
}
@@ -1196,13 +1170,13 @@ func lexSkip(lx *lexer, nextState stateFn) stateFn {
}
func (s stateFn) String() string {
if s == nil {
return "<nil>"
}
name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name()
if i := strings.LastIndexByte(name, '.'); i > -1 {
name = name[i+1:]
}
if s == nil {
name = "<nil>"
}
return name + "()"
}
@@ -1210,8 +1184,6 @@ func (itype itemType) String() string {
switch itype {
case itemError:
return "Error"
case itemNIL:
return "NIL"
case itemEOF:
return "EOF"
case itemText:
@@ -1226,18 +1198,22 @@ func (itype itemType) String() string {
return "Float"
case itemDatetime:
return "DateTime"
case itemTableStart:
return "TableStart"
case itemTableEnd:
return "TableEnd"
case itemKeyStart:
return "KeyStart"
case itemKeyEnd:
return "KeyEnd"
case itemArray:
return "Array"
case itemArrayEnd:
return "ArrayEnd"
case itemTableStart:
return "TableStart"
case itemTableEnd:
return "TableEnd"
case itemArrayTableStart:
return "ArrayTableStart"
case itemArrayTableEnd:
return "ArrayTableEnd"
case itemKeyStart:
return "KeyStart"
case itemKeyEnd:
return "KeyEnd"
case itemCommentStart:
return "CommentStart"
case itemInlineTableStart:
@@ -1266,7 +1242,7 @@ func isDigit(r rune) bool { return r >= '0' && r <= '9' }
func isBinary(r rune) bool { return r == '0' || r == '1' }
func isOctal(r rune) bool { return r >= '0' && r <= '7' }
func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') }
func isBareKeyChar(r rune, tomlNext bool) bool {
func isBareKeyChar(r rune) bool {
return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') || r == '_' || r == '-'
}
+18 -28
@@ -3,7 +3,6 @@ package toml
import (
"fmt"
"math"
"os"
"strconv"
"strings"
"time"
@@ -17,7 +16,6 @@ type parser struct {
context Key // Full key for the current hash in scope.
currentKey string // Base key name for everything except hashes.
pos Position // Current position in the TOML file.
tomlNext bool
ordered []Key // List of keys in the order that they appear in the TOML data.
@@ -32,8 +30,6 @@ type keyInfo struct {
}
func parse(data string) (p *parser, err error) {
_, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110")
defer func() {
if r := recover(); r != nil {
if pErr, ok := r.(ParseError); ok {
@@ -73,10 +69,9 @@ func parse(data string) (p *parser, err error) {
p = &parser{
keyInfo: make(map[string]keyInfo),
mapping: make(map[string]any),
lx: lex(data, tomlNext),
lx: lex(data),
ordered: make([]Key, 0),
implicits: make(map[string]struct{}),
tomlNext: tomlNext,
}
for {
item := p.next()
@@ -350,17 +345,14 @@ func (p *parser) valueFloat(it item) (any, tomlType) {
var dtTypes = []struct {
fmt string
zone *time.Location
next bool
}{
{time.RFC3339Nano, time.Local, false},
{"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false},
{"2006-01-02", internal.LocalDate, false},
{"15:04:05.999999999", internal.LocalTime, false},
// tomlNext
{"2006-01-02T15:04Z07:00", time.Local, true},
{"2006-01-02T15:04", internal.LocalDatetime, true},
{"15:04", internal.LocalTime, true},
{time.RFC3339Nano, time.Local},
{"2006-01-02T15:04:05.999999999", internal.LocalDatetime},
{"2006-01-02", internal.LocalDate},
{"15:04:05.999999999", internal.LocalTime},
{"2006-01-02T15:04Z07:00", time.Local},
{"2006-01-02T15:04", internal.LocalDatetime},
{"15:04", internal.LocalTime},
}
func (p *parser) valueDatetime(it item) (any, tomlType) {
@@ -371,9 +363,6 @@ func (p *parser) valueDatetime(it item) (any, tomlType) {
err error
)
for _, dt := range dtTypes {
if dt.next && !p.tomlNext {
continue
}
t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
if err == nil {
if missingLeadingZero(it.val, dt.fmt) {
@@ -644,6 +633,11 @@ func (p *parser) setValue(key string, value any) {
// Note that since it has already been defined (as a hash), we don't
// want to overwrite it. So our business is done.
if p.isArray(keyContext) {
if !p.isImplicit(keyContext) {
if _, ok := hash[key]; ok {
p.panicf("Key '%s' has already been defined.", keyContext)
}
}
p.removeImplicit(keyContext)
hash[key] = value
return
@@ -802,10 +796,8 @@ func (p *parser) replaceEscapes(it item, str string) string {
b.WriteByte(0x0d)
skip = 1
case 'e':
if p.tomlNext {
b.WriteByte(0x1b)
skip = 1
}
b.WriteByte(0x1b)
skip = 1
case '"':
b.WriteByte(0x22)
skip = 1
@@ -815,11 +807,9 @@ func (p *parser) replaceEscapes(it item, str string) string {
// The lexer guarantees the correct number of characters are present;
// don't need to check here.
case 'x':
if p.tomlNext {
escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4])
b.WriteRune(escaped)
skip = 3
}
escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4])
b.WriteRune(escaped)
skip = 3
case 'u':
escaped := p.asciiEscapeToUnicode(it, str[i+2:i+6])
b.WriteRune(escaped)
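With the BURNTSUSHI_TOML_110 environment-variable gate removed in this file, TOML 1.1 escapes such as \x and \e are now parsed unconditionally. A small hedged sketch of what that enables; the key names are invented for illustration.

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	// Both escapes below previously required the BURNTSUSHI_TOML_110 env var.
	doc := "bell = \"\\x07\"\nesc = \"\\e\"\n"

	var out map[string]string
	if _, err := toml.Decode(doc, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%q %q\n", out["bell"], out["esc"]) // "\a" "\x1b"
}
```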
+23
@@ -0,0 +1,23 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
+46
@@ -0,0 +1,46 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
linters:
fast: false
disable-all: true
enable:
- revive
- megacheck
- govet
- unconvert
- gas
- gocyclo
- dupl
- misspell
- unparam
- unused
- typecheck
- ineffassign
# - stylecheck
- exportloopref
- gocritic
- nakedret
- gosimple
- prealloc
# golangci-lint configuration file
linters-settings:
revive:
ignore-generated-header: true
severity: warning
rules:
- name: package-comments
severity: warning
disabled: true
- name: exported
severity: warning
disabled: false
arguments: ["checkPrivateReceivers", "disableStutteringCheck"]
issues:
exclude-use-default: false
exclude-rules:
- path: _test\.go
linters:
- dupl
+267
@@ -0,0 +1,267 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package lru
import (
"errors"
"sync"
"github.com/hashicorp/golang-lru/v2/simplelru"
)
const (
// Default2QRecentRatio is the ratio of the 2Q cache dedicated
// to recently added entries that have only been accessed once.
Default2QRecentRatio = 0.25
// Default2QGhostEntries is the default ratio of ghost
// entries kept to track entries recently evicted
Default2QGhostEntries = 0.50
)
// TwoQueueCache is a thread-safe fixed size 2Q cache.
// 2Q is an enhancement over the standard LRU cache
// in that it tracks both frequently and recently used
// entries separately. This avoids a burst in access to new
// entries from evicting frequently used entries. It adds some
// additional tracking overhead to the standard LRU cache, and is
// computationally about 2x the cost, and adds some metadata over
// head. The ARCCache is similar, but does not require setting any
// parameters.
type TwoQueueCache[K comparable, V any] struct {
size int
recentSize int
recentRatio float64
ghostRatio float64
recent simplelru.LRUCache[K, V]
frequent simplelru.LRUCache[K, V]
recentEvict simplelru.LRUCache[K, struct{}]
lock sync.RWMutex
}
// New2Q creates a new TwoQueueCache using the default
// values for the parameters.
func New2Q[K comparable, V any](size int) (*TwoQueueCache[K, V], error) {
return New2QParams[K, V](size, Default2QRecentRatio, Default2QGhostEntries)
}
// New2QParams creates a new TwoQueueCache using the provided
// parameter values.
func New2QParams[K comparable, V any](size int, recentRatio, ghostRatio float64) (*TwoQueueCache[K, V], error) {
if size <= 0 {
return nil, errors.New("invalid size")
}
if recentRatio < 0.0 || recentRatio > 1.0 {
return nil, errors.New("invalid recent ratio")
}
if ghostRatio < 0.0 || ghostRatio > 1.0 {
return nil, errors.New("invalid ghost ratio")
}
// Determine the sub-sizes
recentSize := int(float64(size) * recentRatio)
evictSize := int(float64(size) * ghostRatio)
// Allocate the LRUs
recent, err := simplelru.NewLRU[K, V](size, nil)
if err != nil {
return nil, err
}
frequent, err := simplelru.NewLRU[K, V](size, nil)
if err != nil {
return nil, err
}
recentEvict, err := simplelru.NewLRU[K, struct{}](evictSize, nil)
if err != nil {
return nil, err
}
// Initialize the cache
c := &TwoQueueCache[K, V]{
size: size,
recentSize: recentSize,
recentRatio: recentRatio,
ghostRatio: ghostRatio,
recent: recent,
frequent: frequent,
recentEvict: recentEvict,
}
return c, nil
}
// Get looks up a key's value from the cache.
func (c *TwoQueueCache[K, V]) Get(key K) (value V, ok bool) {
c.lock.Lock()
defer c.lock.Unlock()
// Check if this is a frequent value
if val, ok := c.frequent.Get(key); ok {
return val, ok
}
// If the value is contained in recent, then we
// promote it to frequent
if val, ok := c.recent.Peek(key); ok {
c.recent.Remove(key)
c.frequent.Add(key, val)
return val, ok
}
// No hit
return
}
// Add adds a value to the cache.
func (c *TwoQueueCache[K, V]) Add(key K, value V) {
c.lock.Lock()
defer c.lock.Unlock()
// Check if the value is frequently used already,
// and just update the value
if c.frequent.Contains(key) {
c.frequent.Add(key, value)
return
}
// Check if the value is recently used, and promote
// the value into the frequent list
if c.recent.Contains(key) {
c.recent.Remove(key)
c.frequent.Add(key, value)
return
}
// If the value was recently evicted, add it to the
// frequently used list
if c.recentEvict.Contains(key) {
c.ensureSpace(true)
c.recentEvict.Remove(key)
c.frequent.Add(key, value)
return
}
// Add to the recently seen list
c.ensureSpace(false)
c.recent.Add(key, value)
}
// ensureSpace is used to ensure we have space in the cache
func (c *TwoQueueCache[K, V]) ensureSpace(recentEvict bool) {
// If we have space, nothing to do
recentLen := c.recent.Len()
freqLen := c.frequent.Len()
if recentLen+freqLen < c.size {
return
}
// If the recent buffer is larger than
// the target, evict from there
if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) {
k, _, _ := c.recent.RemoveOldest()
c.recentEvict.Add(k, struct{}{})
return
}
// Remove from the frequent list otherwise
c.frequent.RemoveOldest()
}
// Len returns the number of items in the cache.
func (c *TwoQueueCache[K, V]) Len() int {
c.lock.RLock()
defer c.lock.RUnlock()
return c.recent.Len() + c.frequent.Len()
}
// Resize changes the cache size.
func (c *TwoQueueCache[K, V]) Resize(size int) (evicted int) {
c.lock.Lock()
defer c.lock.Unlock()
// Recalculate the sub-sizes
recentSize := int(float64(size) * c.recentRatio)
evictSize := int(float64(size) * c.ghostRatio)
c.size = size
c.recentSize = recentSize
// ensureSpace
diff := c.recent.Len() + c.frequent.Len() - size
if diff < 0 {
diff = 0
}
for i := 0; i < diff; i++ {
c.ensureSpace(true)
}
// Reallocate the LRUs
c.recent.Resize(size)
c.frequent.Resize(size)
c.recentEvict.Resize(evictSize)
return diff
}
// Keys returns a slice of the keys in the cache.
// The frequently used keys are first in the returned slice.
func (c *TwoQueueCache[K, V]) Keys() []K {
c.lock.RLock()
defer c.lock.RUnlock()
k1 := c.frequent.Keys()
k2 := c.recent.Keys()
return append(k1, k2...)
}
// Values returns a slice of the values in the cache.
// The frequently used values are first in the returned slice.
func (c *TwoQueueCache[K, V]) Values() []V {
c.lock.RLock()
defer c.lock.RUnlock()
v1 := c.frequent.Values()
v2 := c.recent.Values()
return append(v1, v2...)
}
// Remove removes the provided key from the cache.
func (c *TwoQueueCache[K, V]) Remove(key K) {
c.lock.Lock()
defer c.lock.Unlock()
if c.frequent.Remove(key) {
return
}
if c.recent.Remove(key) {
return
}
if c.recentEvict.Remove(key) {
return
}
}
// Purge is used to completely clear the cache.
func (c *TwoQueueCache[K, V]) Purge() {
c.lock.Lock()
defer c.lock.Unlock()
c.recent.Purge()
c.frequent.Purge()
c.recentEvict.Purge()
}
// Contains is used to check if the cache contains a key
// without updating recency or frequency.
func (c *TwoQueueCache[K, V]) Contains(key K) bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.frequent.Contains(key) || c.recent.Contains(key)
}
// Peek is used to inspect the cache value of a key
// without updating recency or frequency.
func (c *TwoQueueCache[K, V]) Peek(key K) (value V, ok bool) {
c.lock.RLock()
defer c.lock.RUnlock()
if val, ok := c.frequent.Peek(key); ok {
return val, ok
}
return c.recent.Peek(key)
}
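For context on the newly vendored 2Q cache above, a minimal usage sketch (sizes and keys chosen arbitrarily):

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// New2Q uses the default recent (0.25) and ghost (0.50) ratios.
	cache, err := lru.New2Q[string, int](128)
	if err != nil {
		panic(err)
	}

	cache.Add("answer", 42)

	// Get promotes the entry from the recent queue into the frequent queue.
	if v, ok := cache.Get("answer"); ok {
		fmt.Println(v) // 42
	}
	fmt.Println(cache.Len()) // 1
}
```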
+364
@@ -0,0 +1,364 @@
Copyright (c) 2014 HashiCorp, Inc.
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.
+79
@@ -0,0 +1,79 @@
golang-lru
==========
This provides the `lru` package which implements a fixed-size
thread safe LRU cache. It is based on the cache in Groupcache.
Documentation
=============
Full docs are available on [Go Packages](https://pkg.go.dev/github.com/hashicorp/golang-lru/v2)
LRU cache example
=================
```go
package main
import (
"fmt"
"github.com/hashicorp/golang-lru/v2"
)
func main() {
l, _ := lru.New[int, any](128)
for i := 0; i < 256; i++ {
l.Add(i, nil)
}
if l.Len() != 128 {
panic(fmt.Sprintf("bad len: %v", l.Len()))
}
}
```
Expirable LRU cache example
===========================
```go
package main
import (
"fmt"
"time"
"github.com/hashicorp/golang-lru/v2/expirable"
)
func main() {
// make cache with 10ms TTL and 5 max keys
cache := expirable.NewLRU[string, string](5, nil, time.Millisecond*10)
// set value under key1.
cache.Add("key1", "val1")
// get value under key1
r, ok := cache.Get("key1")
// check for OK value
if ok {
fmt.Printf("value before expiration is found: %v, value: %q\n", ok, r)
}
// wait for cache to expire
time.Sleep(time.Millisecond * 12)
// get value under key1 after key expiration
r, ok = cache.Get("key1")
fmt.Printf("value after expiration is found: %v, value: %q\n", ok, r)
// set value under key2, would evict old entry because it is already expired.
cache.Add("key2", "val2")
fmt.Printf("Cache len: %d\n", cache.Len())
// Output:
// value before expiration is found: true, value: "val1"
// value after expiration is found: false, value: ""
// Cache len: 1
}
```
+24
@@ -0,0 +1,24 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
// Package lru provides three different LRU caches of varying sophistication.
//
// Cache is a simple LRU cache. It is based on the LRU implementation in
// groupcache: https://github.com/golang/groupcache/tree/master/lru
//
// TwoQueueCache tracks frequently used and recently used entries separately.
// This avoids a burst of accesses from taking out frequently used entries, at
// the cost of about 2x computational overhead and some extra bookkeeping.
//
// ARCCache is an adaptive replacement cache. It tracks recent evictions as well
// as recent usage in both the frequent and recent caches. Its computational
// overhead is comparable to TwoQueueCache, but the memory overhead is linear
// with the size of the cache.
//
// ARC has been patented by IBM, so do not use it if that is problematic for
// your program. For this reason, it is in a separate go module contained within
// this repository.
//
// All caches in this package take locks while operating, and are therefore
// thread-safe for consumers.
package lru
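Complementing the package overview above, a short sketch of the eviction-callback constructor added further down in this commit (key/value types are arbitrary):

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// NewWithEvict registers a callback that fires for every evicted entry.
	cache, err := lru.NewWithEvict[int, string](2, func(k int, v string) {
		fmt.Printf("evicted %d=%q\n", k, v)
	})
	if err != nil {
		panic(err)
	}

	cache.Add(1, "a")
	cache.Add(2, "b")
	cache.Add(3, "c") // size is 2, so key 1 is evicted and the callback runs
}
```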
+142
@@ -0,0 +1,142 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE_list file.
package internal
import "time"
// Entry is an LRU Entry
type Entry[K comparable, V any] struct {
// Next and previous pointers in the doubly-linked list of elements.
// To simplify the implementation, internally a list l is implemented
// as a ring, such that &l.root is both the next element of the last
// list element (l.Back()) and the previous element of the first list
// element (l.Front()).
next, prev *Entry[K, V]
// The list to which this element belongs.
list *LruList[K, V]
// The LRU Key of this element.
Key K
// The Value stored with this element.
Value V
// The time this element would be cleaned up, optional
ExpiresAt time.Time
// The expiry bucket item was put in, optional
ExpireBucket uint8
}
// PrevEntry returns the previous list element or nil.
func (e *Entry[K, V]) PrevEntry() *Entry[K, V] {
if p := e.prev; e.list != nil && p != &e.list.root {
return p
}
return nil
}
// LruList represents a doubly linked list.
// The zero Value for LruList is an empty list ready to use.
type LruList[K comparable, V any] struct {
root Entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used
len int // current list Length excluding (this) sentinel element
}
// Init initializes or clears list l.
func (l *LruList[K, V]) Init() *LruList[K, V] {
l.root.next = &l.root
l.root.prev = &l.root
l.len = 0
return l
}
// NewList returns an initialized list.
func NewList[K comparable, V any]() *LruList[K, V] { return new(LruList[K, V]).Init() }
// Length returns the number of elements of list l.
// The complexity is O(1).
func (l *LruList[K, V]) Length() int { return l.len }
// Back returns the last element of list l or nil if the list is empty.
func (l *LruList[K, V]) Back() *Entry[K, V] {
if l.len == 0 {
return nil
}
return l.root.prev
}
// lazyInit lazily initializes a zero List Value.
func (l *LruList[K, V]) lazyInit() {
if l.root.next == nil {
l.Init()
}
}
// insert inserts e after at, increments l.len, and returns e.
func (l *LruList[K, V]) insert(e, at *Entry[K, V]) *Entry[K, V] {
e.prev = at
e.next = at.next
e.prev.next = e
e.next.prev = e
e.list = l
l.len++
return e
}
// insertValue is a convenience wrapper for insert(&Entry{Value: v, ExpiresAt: ExpiresAt}, at).
func (l *LruList[K, V]) insertValue(k K, v V, expiresAt time.Time, at *Entry[K, V]) *Entry[K, V] {
return l.insert(&Entry[K, V]{Value: v, Key: k, ExpiresAt: expiresAt}, at)
}
// Remove removes e from its list, decrements l.len
func (l *LruList[K, V]) Remove(e *Entry[K, V]) V {
e.prev.next = e.next
e.next.prev = e.prev
e.next = nil // avoid memory leaks
e.prev = nil // avoid memory leaks
e.list = nil
l.len--
return e.Value
}
// move moves e to next to at.
func (l *LruList[K, V]) move(e, at *Entry[K, V]) {
if e == at {
return
}
e.prev.next = e.next
e.next.prev = e.prev
e.prev = at
e.next = at.next
e.prev.next = e
e.next.prev = e
}
// PushFront inserts a new element e with value v at the front of list l and returns e.
func (l *LruList[K, V]) PushFront(k K, v V) *Entry[K, V] {
l.lazyInit()
return l.insertValue(k, v, time.Time{}, &l.root)
}
// PushFrontExpirable inserts a new expirable element e with Value v at the front of list l and returns e.
func (l *LruList[K, V]) PushFrontExpirable(k K, v V, expiresAt time.Time) *Entry[K, V] {
l.lazyInit()
return l.insertValue(k, v, expiresAt, &l.root)
}
// MoveToFront moves element e to the front of list l.
// If e is not an element of l, the list is not modified.
// The element must not be nil.
func (l *LruList[K, V]) MoveToFront(e *Entry[K, V]) {
if e.list != l || l.root.next == e {
return
}
// see comment in List.Remove about initialization of l
l.move(e, &l.root)
}
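The ring/sentinel layout described in the Entry comment means the newest element sits right after the root and the oldest right before it. A small in-package test sketch (hypothetical, since package internal is only importable from within golang-lru itself) illustrating that behaviour:

```go
package internal

import "testing"

// Hypothetical test: PushFront makes an element the most recent,
// Back returns the oldest, and MoveToFront re-promotes an element.
func TestRingOrder(t *testing.T) {
	l := NewList[string, int]()
	l.PushFront("a", 1)
	l.PushFront("b", 2)
	if l.Back().Key != "a" {
		t.Fatalf("oldest element should be %q, got %q", "a", l.Back().Key)
	}
	l.MoveToFront(l.Back())
	if l.Back().Key != "b" {
		t.Fatalf("after MoveToFront the oldest should be %q, got %q", "b", l.Back().Key)
	}
}
```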
+250
View File
@@ -0,0 +1,250 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package lru
import (
"sync"
"github.com/hashicorp/golang-lru/v2/simplelru"
)
const (
// DefaultEvictedBufferSize defines the default buffer size to store evicted key/val
DefaultEvictedBufferSize = 16
)
// Cache is a thread-safe fixed size LRU cache.
type Cache[K comparable, V any] struct {
lru *simplelru.LRU[K, V]
evictedKeys []K
evictedVals []V
onEvictedCB func(k K, v V)
lock sync.RWMutex
}
// New creates an LRU of the given size.
func New[K comparable, V any](size int) (*Cache[K, V], error) {
return NewWithEvict[K, V](size, nil)
}
// NewWithEvict constructs a fixed size cache with the given eviction
// callback.
func NewWithEvict[K comparable, V any](size int, onEvicted func(key K, value V)) (c *Cache[K, V], err error) {
// create a cache with default settings
c = &Cache[K, V]{
onEvictedCB: onEvicted,
}
if onEvicted != nil {
c.initEvictBuffers()
onEvicted = c.onEvicted
}
c.lru, err = simplelru.NewLRU(size, onEvicted)
return
}
func (c *Cache[K, V]) initEvictBuffers() {
c.evictedKeys = make([]K, 0, DefaultEvictedBufferSize)
c.evictedVals = make([]V, 0, DefaultEvictedBufferSize)
}
// onEvicted saves the evicted key/val so they can be passed to the externally
// registered callback outside of the critical section
func (c *Cache[K, V]) onEvicted(k K, v V) {
c.evictedKeys = append(c.evictedKeys, k)
c.evictedVals = append(c.evictedVals, v)
}
// Purge is used to completely clear the cache.
func (c *Cache[K, V]) Purge() {
var ks []K
var vs []V
c.lock.Lock()
c.lru.Purge()
if c.onEvictedCB != nil && len(c.evictedKeys) > 0 {
ks, vs = c.evictedKeys, c.evictedVals
c.initEvictBuffers()
}
c.lock.Unlock()
// invoke callback outside of critical section
if c.onEvictedCB != nil {
for i := 0; i < len(ks); i++ {
c.onEvictedCB(ks[i], vs[i])
}
}
}
// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *Cache[K, V]) Add(key K, value V) (evicted bool) {
var k K
var v V
c.lock.Lock()
evicted = c.lru.Add(key, value)
if c.onEvictedCB != nil && evicted {
k, v = c.evictedKeys[0], c.evictedVals[0]
c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
}
c.lock.Unlock()
if c.onEvictedCB != nil && evicted {
c.onEvictedCB(k, v)
}
return
}
// Get looks up a key's value from the cache.
func (c *Cache[K, V]) Get(key K) (value V, ok bool) {
c.lock.Lock()
value, ok = c.lru.Get(key)
c.lock.Unlock()
return value, ok
}
// Contains checks if a key is in the cache, without updating the
// recent-ness or deleting it for being stale.
func (c *Cache[K, V]) Contains(key K) bool {
c.lock.RLock()
containKey := c.lru.Contains(key)
c.lock.RUnlock()
return containKey
}
// Peek returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *Cache[K, V]) Peek(key K) (value V, ok bool) {
c.lock.RLock()
value, ok = c.lru.Peek(key)
c.lock.RUnlock()
return value, ok
}
// ContainsOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether an eviction occurred.
func (c *Cache[K, V]) ContainsOrAdd(key K, value V) (ok, evicted bool) {
var k K
var v V
c.lock.Lock()
if c.lru.Contains(key) {
c.lock.Unlock()
return true, false
}
evicted = c.lru.Add(key, value)
if c.onEvictedCB != nil && evicted {
k, v = c.evictedKeys[0], c.evictedVals[0]
c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
}
c.lock.Unlock()
if c.onEvictedCB != nil && evicted {
c.onEvictedCB(k, v)
}
return false, evicted
}
// PeekOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether an eviction occurred.
func (c *Cache[K, V]) PeekOrAdd(key K, value V) (previous V, ok, evicted bool) {
var k K
var v V
c.lock.Lock()
previous, ok = c.lru.Peek(key)
if ok {
c.lock.Unlock()
return previous, true, false
}
evicted = c.lru.Add(key, value)
if c.onEvictedCB != nil && evicted {
k, v = c.evictedKeys[0], c.evictedVals[0]
c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
}
c.lock.Unlock()
if c.onEvictedCB != nil && evicted {
c.onEvictedCB(k, v)
}
return
}
// Remove removes the provided key from the cache.
func (c *Cache[K, V]) Remove(key K) (present bool) {
var k K
var v V
c.lock.Lock()
present = c.lru.Remove(key)
if c.onEvictedCB != nil && present {
k, v = c.evictedKeys[0], c.evictedVals[0]
c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
}
c.lock.Unlock()
if c.onEvictedCB != nil && present {
c.onEvictedCB(k, v)
}
return
}
// Resize changes the cache size.
func (c *Cache[K, V]) Resize(size int) (evicted int) {
var ks []K
var vs []V
c.lock.Lock()
evicted = c.lru.Resize(size)
if c.onEvictedCB != nil && evicted > 0 {
ks, vs = c.evictedKeys, c.evictedVals
c.initEvictBuffers()
}
c.lock.Unlock()
if c.onEvictedCB != nil && evicted > 0 {
for i := 0; i < len(ks); i++ {
c.onEvictedCB(ks[i], vs[i])
}
}
return evicted
}
// RemoveOldest removes the oldest item from the cache.
func (c *Cache[K, V]) RemoveOldest() (key K, value V, ok bool) {
var k K
var v V
c.lock.Lock()
key, value, ok = c.lru.RemoveOldest()
if c.onEvictedCB != nil && ok {
k, v = c.evictedKeys[0], c.evictedVals[0]
c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
}
c.lock.Unlock()
if c.onEvictedCB != nil && ok {
c.onEvictedCB(k, v)
}
return
}
// GetOldest returns the oldest entry
func (c *Cache[K, V]) GetOldest() (key K, value V, ok bool) {
c.lock.RLock()
key, value, ok = c.lru.GetOldest()
c.lock.RUnlock()
return
}
// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *Cache[K, V]) Keys() []K {
c.lock.RLock()
keys := c.lru.Keys()
c.lock.RUnlock()
return keys
}
// Values returns a slice of the values in the cache, from oldest to newest.
func (c *Cache[K, V]) Values() []V {
c.lock.RLock()
values := c.lru.Values()
c.lock.RUnlock()
return values
}
// Len returns the number of items in the cache.
func (c *Cache[K, V]) Len() int {
c.lock.RLock()
length := c.lru.Len()
c.lock.RUnlock()
return length
}
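The evicted key/value buffering above exists so the user-supplied callback runs after the cache lock is released, which makes it safe for the callback to call back into the cache. A minimal sketch (size and values are arbitrary):

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	cache, err := lru.NewWithEvict[string, int](2, func(k string, v int) {
		// Invoked outside the cache's critical section.
		fmt.Printf("evicted %s=%d\n", k, v)
	})
	if err != nil {
		panic(err)
	}
	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3) // evicts "a"; the callback fires after the lock is released
}
```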
+29
View File
@@ -0,0 +1,29 @@
This license applies to simplelru/list.go
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+177
View File
@@ -0,0 +1,177 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package simplelru
import (
"errors"
"github.com/hashicorp/golang-lru/v2/internal"
)
// EvictCallback is used to get a callback when a cache entry is evicted
type EvictCallback[K comparable, V any] func(key K, value V)
// LRU implements a non-thread safe fixed size LRU cache
type LRU[K comparable, V any] struct {
size int
evictList *internal.LruList[K, V]
items map[K]*internal.Entry[K, V]
onEvict EvictCallback[K, V]
}
// NewLRU constructs an LRU of the given size
func NewLRU[K comparable, V any](size int, onEvict EvictCallback[K, V]) (*LRU[K, V], error) {
if size <= 0 {
return nil, errors.New("must provide a positive size")
}
c := &LRU[K, V]{
size: size,
evictList: internal.NewList[K, V](),
items: make(map[K]*internal.Entry[K, V]),
onEvict: onEvict,
}
return c, nil
}
// Purge is used to completely clear the cache.
func (c *LRU[K, V]) Purge() {
for k, v := range c.items {
if c.onEvict != nil {
c.onEvict(k, v.Value)
}
delete(c.items, k)
}
c.evictList.Init()
}
// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *LRU[K, V]) Add(key K, value V) (evicted bool) {
// Check for existing item
if ent, ok := c.items[key]; ok {
c.evictList.MoveToFront(ent)
ent.Value = value
return false
}
// Add new item
ent := c.evictList.PushFront(key, value)
c.items[key] = ent
evict := c.evictList.Length() > c.size
// Verify size not exceeded
if evict {
c.removeOldest()
}
return evict
}
// Get looks up a key's value from the cache.
func (c *LRU[K, V]) Get(key K) (value V, ok bool) {
if ent, ok := c.items[key]; ok {
c.evictList.MoveToFront(ent)
return ent.Value, true
}
return
}
// Contains checks if a key is in the cache, without updating the recent-ness
// or deleting it for being stale.
func (c *LRU[K, V]) Contains(key K) (ok bool) {
_, ok = c.items[key]
return ok
}
// Peek returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *LRU[K, V]) Peek(key K) (value V, ok bool) {
var ent *internal.Entry[K, V]
if ent, ok = c.items[key]; ok {
return ent.Value, true
}
return
}
// Remove removes the provided key from the cache, returning if the
// key was contained.
func (c *LRU[K, V]) Remove(key K) (present bool) {
if ent, ok := c.items[key]; ok {
c.removeElement(ent)
return true
}
return false
}
// RemoveOldest removes the oldest item from the cache.
func (c *LRU[K, V]) RemoveOldest() (key K, value V, ok bool) {
if ent := c.evictList.Back(); ent != nil {
c.removeElement(ent)
return ent.Key, ent.Value, true
}
return
}
// GetOldest returns the oldest entry
func (c *LRU[K, V]) GetOldest() (key K, value V, ok bool) {
if ent := c.evictList.Back(); ent != nil {
return ent.Key, ent.Value, true
}
return
}
// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *LRU[K, V]) Keys() []K {
keys := make([]K, c.evictList.Length())
i := 0
for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() {
keys[i] = ent.Key
i++
}
return keys
}
// Values returns a slice of the values in the cache, from oldest to newest.
func (c *LRU[K, V]) Values() []V {
values := make([]V, len(c.items))
i := 0
for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() {
values[i] = ent.Value
i++
}
return values
}
// Len returns the number of items in the cache.
func (c *LRU[K, V]) Len() int {
return c.evictList.Length()
}
// Resize changes the cache size.
func (c *LRU[K, V]) Resize(size int) (evicted int) {
diff := c.Len() - size
if diff < 0 {
diff = 0
}
for i := 0; i < diff; i++ {
c.removeOldest()
}
c.size = size
return diff
}
// removeOldest removes the oldest item from the cache.
func (c *LRU[K, V]) removeOldest() {
if ent := c.evictList.Back(); ent != nil {
c.removeElement(ent)
}
}
// removeElement is used to remove a given list element from the cache
func (c *LRU[K, V]) removeElement(e *internal.Entry[K, V]) {
c.evictList.Remove(e)
delete(c.items, e.Key)
if c.onEvict != nil {
c.onEvict(e.Key, e.Value)
}
}
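Unlike the wrapper in the previous file, this LRU takes no locks; the caller is expected to serialize access. A minimal sketch, assuming a single-goroutine consumer:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/v2/simplelru"
)

func main() {
	// Not thread-safe: wrap with your own mutex if shared across goroutines.
	l, err := simplelru.NewLRU[string, int](64, nil)
	if err != nil {
		panic(err)
	}
	l.Add("a", 1)
	if v, ok := l.Get("a"); ok {
		fmt.Println("a =", v)
	}
	fmt.Println(l.Len()) // 1
}
```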
+46
View File
@@ -0,0 +1,46 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
// Package simplelru provides a simple LRU implementation based on the built-in container/list.
package simplelru
// LRUCache is the interface for simple LRU cache.
type LRUCache[K comparable, V any] interface {
// Adds a value to the cache, returns true if an eviction occurred and
// updates the "recently used"-ness of the key.
Add(key K, value V) bool
// Returns key's value from the cache and
// updates the "recently used"-ness of the key. #value, isFound
Get(key K) (value V, ok bool)
// Checks if a key exists in cache without updating the recent-ness.
Contains(key K) (ok bool)
// Returns key's value without updating the "recently used"-ness of the key.
Peek(key K) (value V, ok bool)
// Removes a key from the cache.
Remove(key K) bool
// Removes the oldest entry from cache.
RemoveOldest() (K, V, bool)
// Returns the oldest entry from the cache. #key, value, isFound
GetOldest() (K, V, bool)
// Returns a slice of the keys in the cache, from oldest to newest.
Keys() []K
// Values returns a slice of the values in the cache, from oldest to newest.
Values() []V
// Returns the number of items in the cache.
Len() int
// Clears all cache entries.
Purge()
// Resizes cache, returning number evicted
Resize(int) int
}
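The concrete *LRU type from the previous file satisfies this interface; a compile-time assertion makes that relationship explicit (illustrative only):

```go
package main

import "github.com/hashicorp/golang-lru/v2/simplelru"

// Compile-time check that *simplelru.LRU implements simplelru.LRUCache.
var _ simplelru.LRUCache[string, int] = (*simplelru.LRU[string, int])(nil)

func main() {}
```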
+1 -1
View File
@@ -128,7 +128,7 @@ func update(crc uint64, p []byte) uint64 {
if hasAsm512 && runs >= 8 {
// Use 512-bit wide instructions for >= 1KB.
crc = updateAsm512(crc, p[:128*runs])
} else {
} else if runs > 0 {
crc = updateAsm(crc, p[:128*runs])
}
return update(crc, p[128*runs:])
+25 -23
View File
@@ -15,18 +15,18 @@ TEXT ·updateAsm(SB), $0-40
CMPQ CX, $1
JLT skip128
VMOVDQA 0x00(SI), X0
VMOVDQA 0x10(SI), X1
VMOVDQA 0x20(SI), X2
VMOVDQA 0x30(SI), X3
VMOVDQA 0x40(SI), X4
VMOVDQA 0x50(SI), X5
VMOVDQA 0x60(SI), X6
VMOVDQA 0x70(SI), X7
MOVQ AX, X8
PXOR X8, X0
CMPQ CX, $1
JE tail128
MOVOA 0x00(SI), X0
MOVOA 0x10(SI), X1
MOVOA 0x20(SI), X2
MOVOA 0x30(SI), X3
MOVOA 0x40(SI), X4
MOVOA 0x50(SI), X5
MOVOA 0x60(SI), X6
MOVOA 0x70(SI), X7
MOVQ AX, X8
PXOR X8, X0
CMPQ CX, $1
JE tail128
MOVQ $0xa1ca681e733f9c40, AX
MOVQ AX, X8
@@ -36,42 +36,42 @@ TEXT ·updateAsm(SB), $0-40
loop128:
ADDQ $128, SI
SUBQ $1, CX
VMOVDQA X0, X10
MOVOA X0, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X0
PXOR X10, X0
PXOR 0(SI), X0
VMOVDQA X1, X10
MOVOA X1, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X1
PXOR X10, X1
PXOR 0x10(SI), X1
VMOVDQA X2, X10
MOVOA X2, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X2
PXOR X10, X2
PXOR 0x20(SI), X2
VMOVDQA X3, X10
MOVOA X3, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X3
PXOR X10, X3
PXOR 0x30(SI), X3
VMOVDQA X4, X10
MOVOA X4, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X4
PXOR X10, X4
PXOR 0x40(SI), X4
VMOVDQA X5, X10
MOVOA X5, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X5
PXOR X10, X5
PXOR 0x50(SI), X5
VMOVDQA X6, X10
MOVOA X6, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X6
PXOR X10, X6
PXOR 0x60(SI), X6
VMOVDQA X7, X10
MOVOA X7, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X7
PXOR X10, X7
@@ -202,15 +202,17 @@ TEXT ·updateAsm512(SB), $0-40
PCALIGN $16
loop128:
VMOVDQU64 0x80(SI), Z1
VMOVDQU64 0xc0(SI), Z5
ADDQ $128, SI
PREFETCHT0 512(SI)
VMOVDQU64 0x80(SI), Z1
VMOVDQU64 0xc0(SI), Z5
ADDQ $128, SI
SUBQ $1, CX
VPCLMULQDQ $0x00, Z8, Z0, Z10
VPCLMULQDQ $0x11, Z9, Z0, Z0
VPTERNLOGD $0x96, Z1, Z10, Z0 // Combine results with xor into Z0
PREFETCHT0 512-64(SI)
VPCLMULQDQ $0x00, Z8, Z4, Z10
VPCLMULQDQ $0x11, Z9, Z4, Z4
VPTERNLOGD $0x96, Z5, Z10, Z4 // Combine results with xor into Z4
+5 -6
View File
@@ -29,17 +29,16 @@ SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABL
### Linting and Code Quality
```bash
# Run all checks (lint, vet, test, examples, functional tests)
# Run all checks (lint, test, examples, functional tests)
make checks
# Run linter only
# Run linter only (includes govet, staticcheck, and other linters)
make lint
# Run vet and staticcheck
make vet
# Alternative: run golangci-lint directly
# Run golangci-lint directly
golangci-lint run --timeout=5m --config ./.golangci.yml
# Note: 'make vet' is now an alias for 'make lint' for backwards compatibility
```
### Building Examples
+2 -5
View File
@@ -5,7 +5,7 @@ all: checks
.PHONY: examples docs
checks: lint vet test examples functional-test
checks: lint test examples functional-test
lint:
@mkdir -p ${GOPATH}/bin
@@ -14,10 +14,7 @@ lint:
@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml
vet:
@GO111MODULE=on go vet ./...
@echo "Installing staticcheck" && go install honnef.co/go/tools/cmd/staticcheck@latest
${GOPATH}/bin/staticcheck -tests=false -checks="all,-ST1000,-ST1003,-ST1016,-ST1020,-ST1021,-ST1022,-ST1023,-ST1005"
vet: lint
test:
@GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
+9
View File
@@ -45,6 +45,7 @@ type AppendObjectOptions struct {
customHeaders http.Header
checksumType ChecksumType
offset int64
}
// Header returns the custom header for AppendObject API
@@ -61,6 +62,7 @@ func (opts *AppendObjectOptions) setWriteOffset(offset int64) {
opts.customHeaders = make(http.Header)
}
opts.customHeaders["x-amz-write-offset-bytes"] = []string{strconv.FormatInt(offset, 10)}
opts.offset = offset
}
func (opts *AppendObjectOptions) setChecksumParams(info ObjectInfo) {
@@ -149,10 +151,16 @@ func (c *Client) appendObjectDo(ctx context.Context, bucketName, objectName stri
// When AppendObject() is used, S3 Express will return final object size as x-amz-object-size
if amzSize := h.Get("x-amz-object-size"); amzSize != "" {
oSize := size
size, err = strconv.ParseInt(amzSize, 10, 64)
if err != nil {
return UploadInfo{}, err
}
if size != opts.offset+oSize {
return UploadInfo{}, errors.New("server returned incorrect object size")
}
} else {
return UploadInfo{}, errors.New("server does not support appends. Object has been overwritten")
}
return UploadInfo{
@@ -172,6 +180,7 @@ func (c *Client) appendObjectDo(ctx context.Context, bucketName, objectName stri
}
// AppendObject - S3 Express Zone https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-append.html
// Note that appending on a server without append support may overwrite the object.
func (c *Client) AppendObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
opts AppendObjectOptions,
) (info UploadInfo, err error) {
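A minimal sketch of calling the API above against an append-capable endpoint; the endpoint, credentials, bucket and object names are placeholders, and the final object-size check added in this change happens inside appendObjectDo:

```go
package main

import (
	"context"
	"log"
	"strings"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("s3express.example.com", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS", "SECRET", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	chunk := strings.NewReader("appended chunk")
	// The server must support appends (S3 Express); otherwise the object
	// may simply be overwritten, as the comment above warns.
	info, err := client.AppendObject(context.Background(), "my-bucket", "my-object",
		chunk, chunk.Size(), minio.AppendObjectOptions{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("object size after append: %d", info.Size)
}
```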
+1 -1
View File
@@ -25,7 +25,7 @@ import (
"strings"
"github.com/minio/minio-go/v7/pkg/s3utils"
"gopkg.in/yaml.v3"
"go.yaml.in/yaml/v3"
)
// QOSConfigVersionCurrent is the current version of the QoS configuration.
+18 -13
View File
@@ -82,6 +82,9 @@ type CopyDestOptions struct {
Size int64 // Needs to be specified if progress bar is specified.
// Progress of the entire copy operation will be sent here.
Progress io.Reader
// PartSize specifies the part size for multipart copy operations.
// If not specified, defaults to maxPartSize (5 GiB).
PartSize uint64
}
// Process custom-metadata to remove a `x-amz-meta-` prefix if
@@ -460,15 +463,15 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ..
// Is data to copy too large?
totalSize += srcCopySize
if totalSize > maxMultipartPutObjectSize {
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize))
if totalSize > maxObjectSize {
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5GiB * 10000)", totalSize))
}
// record source size
srcObjectSizes[i] = srcCopySize
// calculate parts needed for current source
totalParts += partsRequired(srcCopySize)
totalParts += partsRequired(srcCopySize, int64(dst.PartSize))
// Do we need more parts than we are allowed?
if totalParts > maxPartsCount {
return UploadInfo{}, errInvalidArgument(fmt.Sprintf(
@@ -534,7 +537,7 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ..
// calculate start/end indices of parts after
// splitting.
startIdx, endIdx := calculateEvenSplits(srcObjectSizes[i], src)
startIdx, endIdx := calculateEvenSplits(srcObjectSizes[i], src, int64(dst.PartSize))
for j, start := range startIdx {
end := endIdx[j]
@@ -568,12 +571,14 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ..
return uploadInfo, nil
}
// partsRequired is maximum parts possible with
// max part size of ceiling(maxMultipartPutObjectSize / (maxPartsCount - 1))
func partsRequired(size int64) int64 {
maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1)
r := size / int64(maxPartSize)
if size%int64(maxPartSize) > 0 {
// partsRequired calculates the number of parts needed for a given size
// using the specified part size. If partSize is 0, defaults to maxPartSize (5 GiB).
func partsRequired(size int64, partSize int64) int64 {
if partSize == 0 {
partSize = maxPartSize
}
r := size / partSize
if size%partSize > 0 {
r++
}
return r
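As a quick check of the ceiling division above (a standalone sketch, since the real function is unexported and uses the package's maxPartSize constant): composing a 12 GiB source with the default 5 GiB part size needs ceil(12/5) = 3 parts.

```go
package main

import "fmt"

// Mirrors the partsRequired logic shown above: ceiling division of size by
// partSize, with partSize falling back to 5 GiB when zero.
func partsRequired(size, partSize int64) int64 {
	if partSize == 0 {
		partSize = 5 * 1024 * 1024 * 1024 // default maxPartSize (5 GiB)
	}
	r := size / partSize
	if size%partSize > 0 {
		r++
	}
	return r
}

func main() {
	twelveGiB := int64(12) << 30
	fmt.Println(partsRequired(twelveGiB, 0))     // 3
	fmt.Println(partsRequired(twelveGiB, 1<<30)) // 12, with an explicit 1 GiB part size
}
```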
@@ -582,13 +587,13 @@ func partsRequired(size int64) int64 {
// calculateEvenSplits - computes splits for a source and returns
// start and end index slices. Splits happen evenly to be sure that no
// part is less than 5MiB, as that could fail the multipart request if
// it is not the last part.
func calculateEvenSplits(size int64, src CopySrcOptions) (startIndex, endIndex []int64) {
// it is not the last part. If partSize is 0, defaults to maxPartSize (5 GiB).
func calculateEvenSplits(size int64, src CopySrcOptions, partSize int64) (startIndex, endIndex []int64) {
if size == 0 {
return startIndex, endIndex
}
reqParts := partsRequired(size)
reqParts := partsRequired(size, partSize)
startIndex = make([]int64, reqParts)
endIndex = make([]int64, reqParts)
// Compute number of required parts `k`, as:
+1 -1
View File
@@ -226,7 +226,7 @@ type ObjectInfo struct {
ChecksumSHA1 string
ChecksumSHA256 string
ChecksumCRC64NVME string
ChecksumMode string
ChecksumMode string `xml:"ChecksumType"`
Internal *struct {
K int // Data blocks
+1 -1
View File
@@ -129,7 +129,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
Server: resp.Header.Get("Server"),
}
_, success := successStatus[resp.StatusCode]
success := successStatus.Contains(resp.StatusCode)
errBody, err := xmlDecodeAndBody(resp.Body, &errResp)
// Xml decoding failed with no body, fall back to HTTP headers.
+6 -4
View File
@@ -67,9 +67,11 @@ func isReadAt(reader io.Reader) (ok bool) {
//
// maxPartsCount - 10000
// minPartSize - 16MiB
// maxMultipartPutObjectSize - 5TiB
// maxObjectSize - ~48.83TiB (maxPartSize * maxPartsCount)
func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize, lastPartSize int64, err error) {
// object size is '-1' set it to 5TiB.
// When object size is unknown (-1), default to 5TiB to limit memory usage.
// This results in ~537MiB part sizes. For larger objects (up to ~48.83TiB),
// callers should set configuredPartSize explicitly to control memory usage.
var unknownSize bool
if objectSize == -1 {
unknownSize = true
@@ -77,8 +79,8 @@ func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCou
}
// object size is larger than supported maximum.
if objectSize > maxMultipartPutObjectSize {
err = errEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "")
if objectSize > maxObjectSize {
err = errEntityTooLarge(objectSize, maxObjectSize, "", "")
return totalPartsCount, partSize, lastPartSize, err
}
+5 -3
View File
@@ -311,7 +311,9 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
//
// - For size input as -1 PutObject does a multipart Put operation
// until input stream reaches EOF. Maximum object size that can
// be uploaded through this operation will be 5TiB.
// be uploaded through this operation will be 5TiB by default.
// For larger objects (up to ~48.83TiB), set PutObjectOptions.PartSize
// to control memory usage and enable uploads beyond 5TiB.
//
// WARNING: Passing down '-1' will use memory and these cannot
// be reused for best outcomes for PutObject(), pass the size always.
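A minimal sketch of the unknown-size path described above; the bucket and object names are placeholders and stdin stands in for an arbitrary stream. With size -1 the client buffers one part at a time in memory, so PartSize bounds memory use and PartSize * 10000 bounds the total object size:

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS", "SECRET", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Size -1: stream until EOF. A 64 MiB PartSize caps the object at ~625 GiB
	// (64 MiB * 10000) while keeping per-part buffers small.
	info, err := client.PutObject(context.Background(), "my-bucket", "my-object",
		os.Stdin, -1, minio.PutObjectOptions{PartSize: 64 * 1024 * 1024})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("uploaded %d bytes", info.Size)
}
```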
@@ -330,8 +332,8 @@ func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, r
}
// Check for largest object size allowed.
if size > int64(maxMultipartPutObjectSize) {
return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
if size > int64(maxObjectSize) {
return UploadInfo{}, errEntityTooLarge(size, maxObjectSize, bucketName, objectName)
}
if opts.Checksum.IsSet() {
+80 -3
View File
@@ -30,6 +30,14 @@ import (
"github.com/minio/minio-go/v7/pkg/s3utils"
)
// useMultiDeleteForBulkDelete returns true if the client should use
// multi-object delete API for bulk delete operations. Returns false
// for endpoints that do not support multi-object delete (e.g., GCS).
func (c *Client) useMultiDeleteForBulkDelete() bool {
// NOTE: GCS does not support multi-object delete API.
return !s3utils.IsGoogleEndpoint(*c.endpointURL)
}
//revive:disable
// Deprecated: BucketOptions will be renamed to RemoveBucketOptions in future versions.
@@ -411,6 +419,12 @@ func hasInvalidXMLChar(str string) bool {
// Generate and call MultiDelete S3 requests based on entries received from the iterator.
func (c *Client) removeObjectsIter(ctx context.Context, bucketName string, objectsIter iter.Seq[ObjectInfo], yield func(RemoveObjectResult) bool, opts RemoveObjectsOptions) {
// NOTE: GCS does not support multi-object delete, use single DELETE requests.
if !c.useMultiDeleteForBulkDelete() {
c.removeObjectsSingleIter(ctx, bucketName, objectsIter, yield, opts)
return
}
maxEntries := 1000
urlValues := make(url.Values)
urlValues.Set("delete", "")
@@ -549,14 +563,20 @@ func (c *Client) removeObjectsIter(ctx context.Context, bucketName string, objec
// Generate and call MultiDelete S3 requests based on entries received from objectsCh
func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, resultCh chan<- RemoveObjectResult, opts RemoveObjectsOptions) {
// Close result channel when delete finishes.
defer close(resultCh)
// NOTE: GCS does not support multi-object delete, use single DELETE requests.
if !c.useMultiDeleteForBulkDelete() {
c.removeObjectsSingle(ctx, bucketName, objectsCh, resultCh, opts)
return
}
maxEntries := 1000
finish := false
urlValues := make(url.Values)
urlValues.Set("delete", "")
// Close result channel when Multi delete finishes.
defer close(resultCh)
// Loop over entries by 1000 and call MultiDelete requests
for !finish {
count := 0
@@ -640,6 +660,63 @@ func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh
}
}
// removeObjectsSingle deletes objects one by one using single DELETE requests.
// This is used for endpoints that do not support multi-object delete (e.g., GCS).
func (c *Client) removeObjectsSingle(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, resultCh chan<- RemoveObjectResult, opts RemoveObjectsOptions) {
for {
select {
case <-ctx.Done():
return
case object, ok := <-objectsCh:
if !ok {
return
}
removeResult := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{
VersionID: object.VersionID,
GovernanceBypass: opts.GovernanceBypass,
})
if err := removeResult.Err; err != nil {
// Version/object does not exist is not an error, ignore and continue.
switch ToErrorResponse(err).Code {
case NoSuchVersion, NoSuchKey:
continue
}
}
select {
case <-ctx.Done():
return
case resultCh <- removeResult:
}
}
}
}
// removeObjectsSingleIter deletes objects one by one using single DELETE requests.
// This is used for endpoints that do not support multi-object delete (e.g., GCS).
func (c *Client) removeObjectsSingleIter(ctx context.Context, bucketName string, objectsIter iter.Seq[ObjectInfo], yield func(RemoveObjectResult) bool, opts RemoveObjectsOptions) {
for object := range objectsIter {
select {
case <-ctx.Done():
return
default:
}
removeResult := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{
VersionID: object.VersionID,
GovernanceBypass: opts.GovernanceBypass,
})
if err := removeResult.Err; err != nil {
// Version/object does not exist is not an error, ignore and continue.
switch ToErrorResponse(err).Code {
case NoSuchVersion, NoSuchKey:
continue
}
}
if !yield(removeResult) {
return
}
}
}
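From the caller's perspective nothing changes: the single-DELETE fallback is chosen internally based on the endpoint. A minimal bulk-delete sketch (endpoint, credentials and bucket name are placeholders):

```go
package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS", "SECRET", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()
	objectsCh := make(chan minio.ObjectInfo)
	go func() {
		defer close(objectsCh)
		for obj := range client.ListObjects(ctx, "my-bucket", minio.ListObjectsOptions{Recursive: true}) {
			objectsCh <- obj
		}
	}()

	// On GCS endpoints this now issues one DELETE per object; elsewhere it
	// batches up to 1000 keys per MultiDelete request.
	for rErr := range client.RemoveObjects(ctx, "my-bucket", objectsCh, minio.RemoveObjectsOptions{}) {
		log.Printf("failed to remove %s: %v", rErr.ObjectName, rErr.Err)
	}
}
```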
// RemoveIncompleteUpload aborts a partially uploaded object.
func (c *Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectName string) error {
// Input validation.
+8 -7
View File
@@ -43,6 +43,7 @@ import (
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/kvcache"
"github.com/minio/minio-go/v7/pkg/s3utils"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/signer"
"github.com/minio/minio-go/v7/pkg/singleflight"
"golang.org/x/net/publicsuffix"
@@ -160,7 +161,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "v7.0.96"
libraryVersion = "v7.0.98"
)
// User Agent should always following the below style.
@@ -636,11 +637,11 @@ func (c *Client) do(req *http.Request) (resp *http.Response, err error) {
}
// List of success status.
var successStatus = map[int]struct{}{
http.StatusOK: {},
http.StatusNoContent: {},
http.StatusPartialContent: {},
}
var successStatus = set.CreateIntSet(
http.StatusOK,
http.StatusNoContent,
http.StatusPartialContent,
)
// executeMethod - instantiates a given method, and retries the
// request upon any error up to maxRetries attempts in a binomially
@@ -722,7 +723,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
return nil, err
}
_, success := successStatus[res.StatusCode]
success := successStatus.Contains(res.StatusCode)
if success && !metadata.expect200OKWithError {
// We do not expect 2xx to return an error return.
return res, nil
+4
View File
@@ -42,6 +42,10 @@ const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5
// Multipart operation.
const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
// maxObjectSize - maximum size of an object calculated from
// maxPartSize * maxPartsCount = 5GiB * 10000 = ~48.83TiB
const maxObjectSize = maxPartSize * maxPartsCount
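The ~48.83 TiB figure follows directly from the two factors: 5 GiB * 10,000 = 53,687,091,200,000 bytes, and dividing by 1024^4 gives 48.828125 TiB. A tiny sketch verifying the arithmetic:

```go
package main

import "fmt"

func main() {
	const maxPartSize = int64(5) << 30 // 5 GiB
	const maxPartsCount = 10000
	const maxObjectSize = maxPartSize * maxPartsCount
	fmt.Printf("%d bytes = %.6f TiB\n", maxObjectSize, float64(maxObjectSize)/(1<<40))
	// Output: 53687091200000 bytes = 48.828125 TiB
}
```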
// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
// we don't want to sign the request payload
const unsignedPayload = "UNSIGNED-PAYLOAD"
+96 -1
View File
@@ -1,5 +1,4 @@
//go:build mint
// +build mint
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
@@ -1965,6 +1964,101 @@ func testObjectTaggingWithVersioning() {
logSuccess(testName, function, args, startTime)
}
func testPutObjectWithAutoChecksums() {
// initialize logging params
startTime := time.Now()
testName := getFuncName()
function := "PutObject(bucketName, objectName, reader, size, opts)"
args := map[string]interface{}{
"bucketName": "",
"objectName": "",
"opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
}
if !isFullMode() {
logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs")
return
}
c, err := NewClient(ClientConfig{TrailingHeaders: true})
if err != nil {
logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
return
}
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
args["bucketName"] = bucketName
// Make a new bucket.
err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
if err != nil {
logError(testName, function, args, startTime, "", "Make bucket failed", err)
return
}
defer cleanupBucket(bucketName, c)
const testfile = "datafile-1.03-MB"
bufSize := dataFileMap[testfile]
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName
c.TraceOn(os.Stdout)
cmpChecksum := func(got, want string) {
if want != got {
logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
return
}
}
meta := map[string]string{}
reader := getDataReader(testfile)
b, err := io.ReadAll(reader)
if err != nil {
logError(testName, function, args, startTime, "", "Read failed", err)
return
}
h := minio.ChecksumCRC64NVME.Hasher()
h.Reset()
h.Write(b)
// Upload the data without explicit checksum.
resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
DisableMultipart: true,
DisableContentSha256: false,
UserMetadata: meta,
AutoChecksum: minio.ChecksumNone,
Checksum: minio.ChecksumNone,
})
_ = resp
if err != nil {
logError(testName, function, args, startTime, "", "PutObject failed", err)
return
}
// Read the metadata back
gopts := minio.GetObjectOptions{Checksum: true}
st, err := c.StatObject(context.Background(), bucketName, objectName, gopts)
if err != nil {
logError(testName, function, args, startTime, "", "GetObject failed", err)
return
}
if st.ChecksumCRC64NVME != "" {
meta[minio.ChecksumCRC64NVME.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil))
cmpChecksum(st.ChecksumCRC64NVME, meta["x-amz-checksum-crc64nvme"])
if st.ChecksumMode != minio.ChecksumFullObjectMode.String() {
logError(testName, function, args, startTime, "", "Checksum mode is not full object", fmt.Errorf("got %s, want %s", st.ChecksumMode, minio.ChecksumFullObjectMode.String()))
}
}
if st.Size != int64(bufSize) {
logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err)
return
}
logSuccess(testName, function, args, startTime)
}
// Test PutObject with custom checksums.
func testPutObjectWithChecksums() {
// initialize logging params
@@ -14686,6 +14780,7 @@ func main() {
testPutObjectMetadataNonUSASCIIV2()
testPutObjectNoLengthV2()
testPutObjectsUnknownV2()
testPutObjectWithAutoChecksums()
testGetObjectContextV2()
testFPutObjectContextV2()
testFGetObjectContextV2()
-1
View File
@@ -1,5 +1,4 @@
//go:build !fips
// +build !fips
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
-1
View File
@@ -1,5 +1,4 @@
//go:build fips
// +build fips
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
+4
View File
@@ -267,6 +267,10 @@ func (f Filter) MarshalJSON() ([]byte, error) {
// MarshalXML - produces the xml representation of the Filter struct
// only one of Prefix, And and Tag should be present in the output.
func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if f.IsNull() {
return nil
}
if err := e.EncodeToken(start); err != nil {
return err
}
+127
View File
@@ -0,0 +1,127 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2026 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package set
import (
"encoding/json"
"fmt"
)
// IntSet - uses map as set of ints.
// This is now implemented using the generic Set[int] type.
type IntSet Set[int]
// ToSlice - returns IntSet as int slice.
func (set IntSet) ToSlice() []int {
return ToSliceOrdered(Set[int](set))
}
// IsEmpty - returns whether the set is empty or not.
func (set IntSet) IsEmpty() bool {
return Set[int](set).IsEmpty()
}
// Add - adds int to the set.
func (set IntSet) Add(i int) {
Set[int](set).Add(i)
}
// Remove - removes int in the set. It does nothing if int does not exist in the set.
func (set IntSet) Remove(i int) {
Set[int](set).Remove(i)
}
// Contains - checks if int is in the set.
func (set IntSet) Contains(i int) bool {
return Set[int](set).Contains(i)
}
// FuncMatch - returns a new set containing each value that passes the match function.
// A 'matchFn' should accept an element of the set as its first argument and
// 'matchInt' as its second argument. The function can apply any logic to
// compare the two arguments and should return true to include the element
// in the output set; otherwise the element is ignored.
func (set IntSet) FuncMatch(matchFn func(int, int) bool, matchInt int) IntSet {
return IntSet(Set[int](set).FuncMatch(matchFn, matchInt))
}
// ApplyFunc - returns a new set containing each value processed by 'applyFn'.
// An 'applyFn' should accept an element of the set as its argument and return
// a processed int. The function can apply any logic to produce the processed
// int.
func (set IntSet) ApplyFunc(applyFn func(int) int) IntSet {
return IntSet(Set[int](set).ApplyFunc(applyFn))
}
// Equals - checks whether given set is equal to current set or not.
func (set IntSet) Equals(iset IntSet) bool {
return Set[int](set).Equals(Set[int](iset))
}
// Intersection - returns the intersection with given set as new set.
func (set IntSet) Intersection(iset IntSet) IntSet {
return IntSet(Set[int](set).Intersection(Set[int](iset)))
}
// Difference - returns the difference with given set as new set.
func (set IntSet) Difference(iset IntSet) IntSet {
return IntSet(Set[int](set).Difference(Set[int](iset)))
}
// Union - returns the union with given set as new set.
func (set IntSet) Union(iset IntSet) IntSet {
return IntSet(Set[int](set).Union(Set[int](iset)))
}
// MarshalJSON - converts to JSON data.
func (set IntSet) MarshalJSON() ([]byte, error) {
return json.Marshal(set.ToSlice())
}
// UnmarshalJSON - parses JSON data and creates new set with it.
func (set *IntSet) UnmarshalJSON(data []byte) error {
sl := []int{}
var err error
if err = json.Unmarshal(data, &sl); err == nil {
*set = make(IntSet)
for _, i := range sl {
set.Add(i)
}
}
return err
}
// String - returns printable string of the set.
func (set IntSet) String() string {
return fmt.Sprintf("%v", set.ToSlice())
}
// NewIntSet - creates new int set.
func NewIntSet() IntSet {
return IntSet(New[int]())
}
// CreateIntSet - creates new int set with given int values.
func CreateIntSet(il ...int) IntSet {
return IntSet(Create(il...))
}
// CopyIntSet - returns copy of given set.
func CopyIntSet(set IntSet) IntSet {
return IntSet(Copy(Set[int](set)))
}
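A minimal sketch of the new IntSet in use (the values are arbitrary); ToSlice returns the elements sorted ascending via ToSliceOrdered:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/set"
)

func main() {
	codes := set.CreateIntSet(200, 204, 206)
	fmt.Println(codes.Contains(200)) // true
	fmt.Println(codes.Contains(404)) // false

	more := set.CreateIntSet(206, 304)
	fmt.Println(codes.Union(more).ToSlice()) // [200 204 206 304]
}
```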
+68 -86
View File
@@ -1,6 +1,6 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2025 MinIO, Inc.
* Copyright 2015-2026 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,119 +17,47 @@
package set
import "github.com/tinylib/msgp/msgp"
import (
"github.com/tinylib/msgp/msgp"
"github.com/tinylib/msgp/msgp/setof"
)
// EncodeMsg encodes the message to the writer.
// Values are stored as a slice of strings or nil.
func (s StringSet) EncodeMsg(writer *msgp.Writer) error {
if s == nil {
return writer.WriteNil()
}
err := writer.WriteArrayHeader(uint32(len(s)))
if err != nil {
return err
}
sorted := s.ToByteSlices()
for _, k := range sorted {
err = writer.WriteStringFromBytes(k)
if err != nil {
return err
}
}
return nil
return setof.StringSorted(s).EncodeMsg(writer)
}
// MarshalMsg encodes the message to the bytes.
// Values are stored as a slice of strings or nil.
func (s StringSet) MarshalMsg(bytes []byte) ([]byte, error) {
if s == nil {
return msgp.AppendNil(bytes), nil
}
if len(s) == 0 {
return msgp.AppendArrayHeader(bytes, 0), nil
}
bytes = msgp.AppendArrayHeader(bytes, uint32(len(s)))
sorted := s.ToByteSlices()
for _, k := range sorted {
bytes = msgp.AppendStringFromBytes(bytes, k)
}
return bytes, nil
return setof.StringSorted(s).MarshalMsg(bytes)
}
// DecodeMsg decodes the message from the reader.
func (s *StringSet) DecodeMsg(reader *msgp.Reader) error {
if reader.IsNil() {
*s = nil
return reader.Skip()
}
sz, err := reader.ReadArrayHeader()
if err != nil {
var ss setof.String
if err := ss.DecodeMsg(reader); err != nil {
return err
}
dst := *s
if dst == nil {
dst = make(StringSet, sz)
} else {
for k := range dst {
delete(dst, k)
}
}
for i := uint32(0); i < sz; i++ {
var k string
k, err = reader.ReadString()
if err != nil {
return err
}
dst[k] = struct{}{}
}
*s = dst
*s = StringSet(ss)
return nil
}
// UnmarshalMsg decodes the message from the bytes.
func (s *StringSet) UnmarshalMsg(bytes []byte) ([]byte, error) {
if msgp.IsNil(bytes) {
*s = nil
return bytes[msgp.NilSize:], nil
}
// Read the array header
sz, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
var ss setof.String
bytes, err := ss.UnmarshalMsg(bytes)
if err != nil {
return nil, err
}
dst := *s
if dst == nil {
dst = make(StringSet, sz)
} else {
for k := range dst {
delete(dst, k)
}
}
for i := uint32(0); i < sz; i++ {
var k string
k, bytes, err = msgp.ReadStringBytes(bytes)
if err != nil {
return nil, err
}
dst[k] = struct{}{}
}
*s = dst
*s = StringSet(ss)
return bytes, nil
}
// Msgsize returns the maximum size of the message.
func (s StringSet) Msgsize() int {
if s == nil {
return msgp.NilSize
}
if len(s) == 0 {
return msgp.ArrayHeaderSize
}
size := msgp.ArrayHeaderSize
for key := range s {
size += msgp.StringPrefixSize + len(key)
}
return size
return setof.String(s).Msgsize()
}
// MarshalBinary encodes the receiver into a binary form and returns the result.
@@ -147,3 +75,57 @@ func (s *StringSet) UnmarshalBinary(b []byte) error {
_, err := s.UnmarshalMsg(b)
return err
}
// EncodeMsg encodes the message to the writer.
// Values are stored as a slice of ints or nil.
func (s IntSet) EncodeMsg(writer *msgp.Writer) error {
return setof.IntSorted(s).EncodeMsg(writer)
}
// MarshalMsg encodes the message to the bytes.
// Values are stored as a slice of ints or nil.
func (s IntSet) MarshalMsg(bytes []byte) ([]byte, error) {
return setof.IntSorted(s).MarshalMsg(bytes)
}
// DecodeMsg decodes the message from the reader.
func (s *IntSet) DecodeMsg(reader *msgp.Reader) error {
var is setof.Int
if err := is.DecodeMsg(reader); err != nil {
return err
}
*s = IntSet(is)
return nil
}
// UnmarshalMsg decodes the message from the bytes.
func (s *IntSet) UnmarshalMsg(bytes []byte) ([]byte, error) {
var is setof.Int
bytes, err := is.UnmarshalMsg(bytes)
if err != nil {
return nil, err
}
*s = IntSet(is)
return bytes, nil
}
// Msgsize returns the maximum size of the message.
func (s IntSet) Msgsize() int {
return setof.Int(s).Msgsize()
}
// MarshalBinary encodes the receiver into a binary form and returns the result.
func (s IntSet) MarshalBinary() ([]byte, error) {
return s.MarshalMsg(nil)
}
// AppendBinary appends the binary representation of itself to the end of b
func (s IntSet) AppendBinary(b []byte) ([]byte, error) {
return s.MarshalMsg(b)
}
// UnmarshalBinary decodes the binary representation of itself from b
func (s *IntSet) UnmarshalBinary(b []byte) error {
_, err := s.UnmarshalMsg(b)
return err
}
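The delegation to setof keeps the wire format a (possibly nil) sorted slice, so a binary round-trip still reproduces the original set. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/set"
)

func main() {
	s := set.CreateStringSet("b", "a")

	b, err := s.MarshalBinary() // msgp-encoded sorted slice of strings
	if err != nil {
		panic(err)
	}

	var out set.StringSet
	if err := out.UnmarshalBinary(b); err != nil {
		panic(err)
	}
	fmt.Println(out.Equals(s)) // true
}
```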
+190
View File
@@ -0,0 +1,190 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2026 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package set
import (
"cmp"
"slices"
)
// Set - uses map as a set of comparable elements.
//
// Important Caveats:
// - Sets are unordered by nature. Map iteration order is non-deterministic in Go.
// - When converting to slices, use ToSlice() with a comparison function or
// ToSliceOrdered() for ordered types to get deterministic, sorted results.
// - Comparison functions must provide total ordering: if your comparison returns 0
// for different elements, their relative order in the result is undefined.
// - For deterministic ordering when elements may compare equal, use secondary
// sort criteria (e.g., sort by length first, then alphabetically for ties).
type Set[T comparable] map[T]struct{}
// ToSlice - returns Set as a slice sorted using the provided comparison function.
// If cmpFn is nil, the slice order is undefined (non-deterministic).
//
// Important: The comparison function should provide total ordering. If it returns 0
// for elements that are not identical, their relative order in the result is undefined.
// For deterministic results, use secondary sort criteria for tie-breaking.
func (set Set[T]) ToSlice(cmpFn func(a, b T) int) []T {
keys := make([]T, 0, len(set))
for k := range set {
keys = append(keys, k)
}
if cmpFn != nil {
slices.SortFunc(keys, cmpFn)
}
return keys
}
// ToSliceOrdered - returns Set as a sorted slice for ordered types.
// This is a convenience method for types that implement cmp.Ordered.
// The result is deterministic and always sorted in ascending order.
func ToSliceOrdered[T cmp.Ordered](set Set[T]) []T {
keys := make([]T, 0, len(set))
for k := range set {
keys = append(keys, k)
}
slices.Sort(keys)
return keys
}
// IsEmpty - returns whether the set is empty or not.
func (set Set[T]) IsEmpty() bool {
return len(set) == 0
}
// Add - adds element to the set.
func (set Set[T]) Add(s T) {
set[s] = struct{}{}
}
// Remove - removes element from the set. It does nothing if element does not exist in the set.
func (set Set[T]) Remove(s T) {
delete(set, s)
}
// Contains - checks if element is in the set.
func (set Set[T]) Contains(s T) bool {
_, ok := set[s]
return ok
}
// FuncMatch - returns new set containing each value that passes match function.
// A 'matchFn' should accept an element of the set as its first argument and
// 'matchValue' as its second argument. The function can apply any logic to
// compare the two arguments and should return true to include the element
// in the output set; otherwise the element is ignored.
func (set Set[T]) FuncMatch(matchFn func(T, T) bool, matchValue T) Set[T] {
nset := New[T]()
for k := range set {
if matchFn(k, matchValue) {
nset.Add(k)
}
}
return nset
}
// ApplyFunc - returns a new set containing each value processed by 'applyFn'.
// An 'applyFn' should accept an element of the set as its argument and return
// a processed value. The function can apply any logic to produce the processed value.
func (set Set[T]) ApplyFunc(applyFn func(T) T) Set[T] {
nset := New[T]()
for k := range set {
nset.Add(applyFn(k))
}
return nset
}
// Equals - checks whether given set is equal to current set or not.
func (set Set[T]) Equals(sset Set[T]) bool {
// If length of set is not equal to length of given set, the
// set is not equal to given set.
if len(set) != len(sset) {
return false
}
// As both sets are equal in length, check each elements are equal.
for k := range set {
if _, ok := sset[k]; !ok {
return false
}
}
return true
}
// Intersection - returns the intersection with given set as new set.
func (set Set[T]) Intersection(sset Set[T]) Set[T] {
nset := New[T]()
for k := range set {
if _, ok := sset[k]; ok {
nset.Add(k)
}
}
return nset
}
// Difference - returns the difference with given set as new set.
func (set Set[T]) Difference(sset Set[T]) Set[T] {
nset := New[T]()
for k := range set {
if _, ok := sset[k]; !ok {
nset.Add(k)
}
}
return nset
}
// Union - returns the union with given set as new set.
func (set Set[T]) Union(sset Set[T]) Set[T] {
nset := New[T]()
for k := range set {
nset.Add(k)
}
for k := range sset {
nset.Add(k)
}
return nset
}
// New - creates new set.
func New[T comparable]() Set[T] {
return make(Set[T])
}
// Create - creates new set with given values.
func Create[T comparable](sl ...T) Set[T] {
set := make(Set[T], len(sl))
for _, k := range sl {
set.Add(k)
}
return set
}
// Copy - returns copy of given set.
func Copy[T comparable](set Set[T]) Set[T] {
nset := make(Set[T], len(set))
for k, v := range set {
nset[k] = v
}
return nset
}
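A minimal sketch of the ordering caveats above: ordered element types can use ToSliceOrdered directly, while other comparable types should pass a comparison function with a tie-breaker to get deterministic output (the point type is an arbitrary example):

```go
package main

import (
	"cmp"
	"fmt"

	"github.com/minio/minio-go/v7/pkg/set"
)

type point struct{ x, y int }

func main() {
	nums := set.Create(3, 1, 2)
	fmt.Println(set.ToSliceOrdered(nums)) // [1 2 3]

	pts := set.Create(point{1, 2}, point{0, 5})
	sorted := pts.ToSlice(func(a, b point) int {
		if c := cmp.Compare(a.x, b.x); c != 0 {
			return c
		}
		return cmp.Compare(a.y, b.y) // secondary criterion for a total order
	})
	fmt.Println(sorted) // [{0 5} {1 2}]
}
```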
+17 -76
View File
@@ -1,6 +1,6 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
* Copyright 2015-2026 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,16 +24,12 @@ import (
)
// StringSet - uses map as set of strings.
type StringSet map[string]struct{}
// This is now implemented using the generic Set[string] type.
type StringSet Set[string]
// ToSlice - returns StringSet as string slice.
func (set StringSet) ToSlice() []string {
keys := make([]string, 0, len(set))
for k := range set {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
return ToSliceOrdered(Set[string](set))
}
// ToByteSlices - returns StringSet as a sorted
@@ -62,23 +58,22 @@ func (set StringSet) ToByteSlices() [][]byte {
// IsEmpty - returns whether the set is empty or not.
func (set StringSet) IsEmpty() bool {
return len(set) == 0
return Set[string](set).IsEmpty()
}
// Add - adds string to the set.
func (set StringSet) Add(s string) {
set[s] = struct{}{}
Set[string](set).Add(s)
}
// Remove - removes string in the set. It does nothing if string does not exist in the set.
func (set StringSet) Remove(s string) {
delete(set, s)
Set[string](set).Remove(s)
}
// Contains - checks if string is in the set.
func (set StringSet) Contains(s string) bool {
_, ok := set[s]
return ok
return Set[string](set).Contains(s)
}
// FuncMatch - returns a new set containing each value that passes the match function.
@@ -87,13 +82,7 @@ func (set StringSet) Contains(s string) bool {
// compare both the arguments and should return true to accept element in
// a set to include in output set else the element is ignored.
func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet {
nset := NewStringSet()
for k := range set {
if matchFn(k, matchString) {
nset.Add(k)
}
}
return nset
return StringSet(Set[string](set).FuncMatch(matchFn, matchString))
}
// ApplyFunc - returns new set containing each value processed by 'applyFn'.
@@ -101,67 +90,27 @@ func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString st
// a processed string. The function can do any logic to return a processed
// string.
func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet {
nset := NewStringSet()
for k := range set {
nset.Add(applyFn(k))
}
return nset
return StringSet(Set[string](set).ApplyFunc(applyFn))
}
// Equals - checks whether given set is equal to current set or not.
func (set StringSet) Equals(sset StringSet) bool {
// If length of set is not equal to length of given set, the
// set is not equal to given set.
if len(set) != len(sset) {
return false
}
// As both sets are equal in length, check each elements are equal.
for k := range set {
if _, ok := sset[k]; !ok {
return false
}
}
return true
return Set[string](set).Equals(Set[string](sset))
}
// Intersection - returns the intersection with given set as new set.
func (set StringSet) Intersection(sset StringSet) StringSet {
nset := NewStringSet()
for k := range set {
if _, ok := sset[k]; ok {
nset.Add(k)
}
}
return nset
return StringSet(Set[string](set).Intersection(Set[string](sset)))
}
// Difference - returns the difference with given set as new set.
func (set StringSet) Difference(sset StringSet) StringSet {
nset := NewStringSet()
for k := range set {
if _, ok := sset[k]; !ok {
nset.Add(k)
}
}
return nset
return StringSet(Set[string](set).Difference(Set[string](sset)))
}
// Union - returns the union with given set as new set.
func (set StringSet) Union(sset StringSet) StringSet {
nset := NewStringSet()
for k := range set {
nset.Add(k)
}
for k := range sset {
nset.Add(k)
}
return nset
return StringSet(Set[string](set).Union(Set[string](sset)))
}
// MarshalJSON - converts to JSON data.
@@ -196,23 +145,15 @@ func (set StringSet) String() string {
// NewStringSet - creates new string set.
func NewStringSet() StringSet {
return make(StringSet)
return StringSet(New[string]())
}
// CreateStringSet - creates new string set with given string values.
func CreateStringSet(sl ...string) StringSet {
set := make(StringSet, len(sl))
for _, k := range sl {
set.Add(k)
}
return set
return StringSet(Create(sl...))
}
// CopyStringSet - returns copy of given set.
func CopyStringSet(set StringSet) StringSet {
nset := make(StringSet, len(set))
for k, v := range set {
nset[k] = v
}
return nset
return StringSet(Copy(Set[string](set)))
}
+30 -30
View File
@@ -26,6 +26,8 @@ import (
"net/http"
"net/url"
"time"
"github.com/minio/minio-go/v7/pkg/set"
)
// MaxRetry is the maximum number of retries before stopping.
@@ -93,45 +95,43 @@ func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, baseSleep, max
}
// List of AWS S3 error codes which are retryable.
var retryableS3Codes = map[string]struct{}{
"RequestError": {},
"RequestTimeout": {},
"Throttling": {},
"ThrottlingException": {},
"RequestLimitExceeded": {},
"RequestThrottled": {},
"InternalError": {},
"ExpiredToken": {},
"ExpiredTokenException": {},
"SlowDown": {},
"SlowDownWrite": {},
"SlowDownRead": {},
var retryableS3Codes = set.CreateStringSet(
"RequestError",
"RequestTimeout",
"Throttling",
"ThrottlingException",
"RequestLimitExceeded",
"RequestThrottled",
"InternalError",
"ExpiredToken",
"ExpiredTokenException",
"SlowDown",
"SlowDownWrite",
"SlowDownRead",
// Add more AWS S3 codes here.
}
)
// isS3CodeRetryable - is s3 error code retryable.
func isS3CodeRetryable(s3Code string) (ok bool) {
_, ok = retryableS3Codes[s3Code]
return ok
func isS3CodeRetryable(s3Code string) bool {
return retryableS3Codes.Contains(s3Code)
}
// List of HTTP status codes which are retryable.
var retryableHTTPStatusCodes = map[int]struct{}{
http.StatusRequestTimeout: {},
429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
499: {}, // client closed request, retry. A non-standard status code introduced by nginx.
http.StatusInternalServerError: {},
http.StatusBadGateway: {},
http.StatusServiceUnavailable: {},
http.StatusGatewayTimeout: {},
520: {}, // It is used by Cloudflare as a catch-all response for when the origin server sends something unexpected.
var retryableHTTPStatusCodes = set.CreateIntSet(
http.StatusRequestTimeout,
429, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
499, // client closed request, retry. A non-standard status code introduced by nginx.
http.StatusInternalServerError,
http.StatusBadGateway,
http.StatusServiceUnavailable,
http.StatusGatewayTimeout,
520, // It is used by Cloudflare as a catch-all response for when the origin server sends something unexpected.
// Add more HTTP status codes here.
}
)
// isHTTPStatusRetryable - is HTTP error code retryable.
func isHTTPStatusRetryable(httpStatusCode int) (ok bool) {
_, ok = retryableHTTPStatusCodes[httpStatusCode]
return ok
func isHTTPStatusRetryable(httpStatusCode int) bool {
return retryableHTTPStatusCodes.Contains(httpStatusCode)
}
// For now, all http Do() requests are retriable except some well defined errors
-1
View File
@@ -1,5 +1,4 @@
//go:build go1.7 || go1.8
// +build go1.7 go1.8
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
+44 -10
View File
@@ -30,6 +30,7 @@ import (
user "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
"github.com/google/uuid"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/opencloud-eu/reva/v2/pkg/appctx"
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options"
@@ -74,6 +75,7 @@ type Lookup struct {
IDCache IDCache
IDHistoryCache IDCache
spaceRootCache *lru.Cache[string, string]
metadataBackend metadata.Backend
userMapper usermapper.Mapper
tm node.TimeManager
@@ -85,11 +87,14 @@ func New(b metadata.Backend, um usermapper.Mapper, o *options.Options, tm node.T
idHistoryConf.Database = o.Options.IDCache.Table + "_history"
idHistoryConf.TTL = 1 * time.Minute
spaceRootCache, _ := lru.New[string, string](1000)
lu := &Lookup{
Options: o,
metadataBackend: b,
IDCache: NewStoreIDCache(o.Options.IDCache),
IDHistoryCache: NewStoreIDCache(idHistoryConf),
spaceRootCache: spaceRootCache,
userMapper: um,
tm: tm,
}
@@ -99,11 +104,17 @@ func New(b metadata.Backend, um usermapper.Mapper, o *options.Options, tm node.T
// CacheID caches the path for the given space and node id
func (lu *Lookup) CacheID(ctx context.Context, spaceID, nodeID, val string) error {
if spaceID == nodeID {
lu.spaceRootCache.Add(spaceID, val)
}
return lu.IDCache.Set(ctx, spaceID, nodeID, val)
}
// GetCachedID returns the cached path for the given space and node id
func (lu *Lookup) GetCachedID(ctx context.Context, spaceID, nodeID string) (string, bool) {
if spaceID == nodeID {
return lu.getSpaceRootPathWithStatus(ctx, spaceID)
}
return lu.IDCache.Get(ctx, spaceID, nodeID)
}
@@ -186,7 +197,7 @@ func (lu *Lookup) NodeFromID(ctx context.Context, id *provider.ResourceId) (n *n
// The Resource references the root of a space
return lu.NodeFromSpaceID(ctx, id.SpaceId)
}
return node.ReadNode(ctx, lu, id.SpaceId, id.OpaqueId, false, nil, false)
return node.ReadNode(ctx, lu, id.SpaceId, id.OpaqueId, "", false, nil, false)
}
// Pathify segments the beginning of a string into depth segments of width length
@@ -207,7 +218,7 @@ func Pathify(id string, depth, width int) string {
// NodeFromSpaceID converts a resource id into a Node
func (lu *Lookup) NodeFromSpaceID(ctx context.Context, spaceID string) (n *node.Node, err error) {
node, err := node.ReadNode(ctx, lu, spaceID, spaceID, false, nil, false)
node, err := node.ReadNode(ctx, lu, spaceID, spaceID, "", false, nil, false)
if err != nil {
return nil, err
}
@@ -283,35 +294,55 @@ func (lu *Lookup) InternalRoot() string {
return lu.Options.Root
}
func (lu *Lookup) getSpaceRootPathWithStatus(ctx context.Context, spaceID string) (string, bool) {
if val, ok := lu.spaceRootCache.Get(spaceID); ok {
return val, true
}
val, ok := lu.IDCache.Get(ctx, spaceID, spaceID)
if ok {
lu.spaceRootCache.Add(spaceID, val)
}
return val, ok
}
func (lu *Lookup) getSpaceRootPath(ctx context.Context, spaceID string) string {
val, _ := lu.getSpaceRootPathWithStatus(ctx, spaceID)
return val
}
// InternalSpaceRoot returns the internal path for a space
func (lu *Lookup) InternalSpaceRoot(spaceID string) string {
return lu.InternalPath(spaceID, spaceID)
return lu.getSpaceRootPath(context.Background(), spaceID)
}
// InternalPath returns the internal path for a given ID
func (lu *Lookup) InternalPath(spaceID, nodeID string) string {
if strings.Contains(nodeID, node.RevisionIDDelimiter) || strings.HasSuffix(nodeID, node.CurrentIDDelimiter) {
spaceRoot, _ := lu.IDCache.Get(context.Background(), spaceID, spaceID)
spaceRoot := lu.getSpaceRootPath(context.Background(), spaceID)
if len(spaceRoot) == 0 {
return ""
}
return filepath.Join(spaceRoot, MetadataDir, Pathify(nodeID, 4, 2))
}
if spaceID == nodeID {
return lu.getSpaceRootPath(context.Background(), spaceID)
}
path, _ := lu.IDCache.Get(context.Background(), spaceID, nodeID)
return path
}
// LockfilePaths returns the path(s) to the lockfile of the node
func (lu *Lookup) LockfilePaths(spaceID, nodeID string) []string {
spaceRoot, _ := lu.IDCache.Get(context.Background(), spaceID, spaceID)
func (lu *Lookup) LockfilePaths(n *node.Node) []string {
spaceRoot := lu.getSpaceRootPath(context.Background(), n.SpaceID)
if len(spaceRoot) == 0 {
return nil
}
paths := []string{filepath.Join(spaceRoot, MetadataDir, Pathify(nodeID, 4, 2)+".lock")}
paths := []string{filepath.Join(spaceRoot, MetadataDir, Pathify(n.ID, 4, 2)+".lock")}
nodepath := lu.InternalPath(spaceID, nodeID)
nodepath := n.InternalPath()
if len(nodepath) > 0 {
paths = append(paths, nodepath+".lock")
}
@@ -321,7 +352,7 @@ func (lu *Lookup) LockfilePaths(spaceID, nodeID string) []string {
// VersionPath returns the path to the version of the node
func (lu *Lookup) VersionPath(spaceID, nodeID, version string) string {
spaceRoot, _ := lu.IDCache.Get(context.Background(), spaceID, spaceID)
spaceRoot := lu.getSpaceRootPath(context.Background(), spaceID)
if len(spaceRoot) == 0 {
return ""
}
@@ -331,7 +362,7 @@ func (lu *Lookup) VersionPath(spaceID, nodeID, version string) string {
// CurrentPath returns the "current" path of the node
func (lu *Lookup) CurrentPath(spaceID, nodeID string) string {
spaceRoot, _ := lu.IDCache.Get(context.Background(), spaceID, spaceID)
spaceRoot := lu.getSpaceRootPath(context.Background(), spaceID)
if len(spaceRoot) == 0 {
return ""
}
@@ -446,6 +477,9 @@ func (lu *Lookup) PurgeNode(n *node.Node) error {
if cerr := lu.IDCache.Delete(context.Background(), n.SpaceID, n.ID); cerr != nil {
return cerr
}
if n.ID == n.SpaceID {
lu.spaceRootCache.Remove(n.SpaceID)
}
return rerr
}
@@ -360,7 +360,7 @@ func (t *Tree) getNodeForPath(path string) (*node.Node, error) {
return nil, err
}
return node.ReadNode(context.Background(), t.lookup, spaceID, nodeID, false, nil, false)
return node.ReadNode(context.Background(), t.lookup, spaceID, nodeID, path, false, nil, false)
}
func (t *Tree) findSpaceId(path string) (string, error) {
@@ -909,17 +909,24 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error {
}
if id != "" {
// Check if the item on the previous still exists. In this case it might have been a copy with extended attributes -> set new ID
// Check if the item on the previous path still exists. In this case it might have been a copy with extended attributes -> set new ID
isCopy := false
previousPath, ok := t.lookup.GetCachedID(context.Background(), spaceID, id)
if ok && previousPath != path {
// this id clashes with an existing id -> re-assimilate
_, err := os.Stat(previousPath)
if err == nil {
_ = t.assimilate(scanItem{Path: path})
// previous path (using the same id) still exists -> this is a copy
isCopy = true
}
}
if err := t.lookup.CacheID(context.Background(), spaceID, id, path); err != nil {
t.log.Error().Err(err).Str("spaceID", spaceID).Str("id", id).Str("path", path).Msg("could not cache id")
if isCopy {
// copy detected -> re-assimilate
_ = t.assimilate(scanItem{Path: path})
} else {
// update cached id with new path
if err := t.lookup.CacheID(context.Background(), spaceID, id, path); err != nil {
t.log.Error().Err(err).Str("spaceID", spaceID).Str("id", id).Str("path", path).Msg("could not cache id")
}
}
}
} else if assimilate {
@@ -943,7 +950,7 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error {
t.log.Error().Err(err).Str("path", dir).Msg("could not get ids for path")
continue
}
n, err := node.ReadNode(context.Background(), t.lookup, spaceID, id, true, nil, false)
n, err := node.ReadNode(context.Background(), t.lookup, spaceID, id, dir, true, nil, false)
if err != nil {
t.log.Error().Err(err).Str("path", dir).Msg("could not read directory node")
continue
@@ -216,7 +216,7 @@ func (tp *Tree) DownloadRevision(ctx context.Context, ref *provider.Reference, r
spaceID := ref.ResourceId.SpaceId
// check if the node is available and has not been deleted
n, err := node.ReadNode(ctx, tp.lookup, spaceID, kp[0], false, nil, false)
n, err := node.ReadNode(ctx, tp.lookup, spaceID, kp[0], "", false, nil, false)
if err != nil {
return nil, nil, err
}
+1 -1
View File
@@ -529,7 +529,7 @@ func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, erro
}
}
child, err := node.ReadNode(ctx, t.lookup, n.SpaceID, nodeID, false, n.SpaceRoot, true)
child, err := node.ReadNode(ctx, t.lookup, n.SpaceID, nodeID, path, false, n.SpaceRoot, true)
if err != nil {
t.log.Error().Err(err).Str("path", path).Msg("failed to read node")
continue
@@ -28,7 +28,6 @@ import (
"github.com/opencloud-eu/reva/v2/pkg/appctx"
ctxpkg "github.com/opencloud-eu/reva/v2/pkg/ctx"
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/opencloud-eu/reva/v2/pkg/sharedconf"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
@@ -119,7 +118,7 @@ func (fs *Decomposedfs) AddGrant(ctx context.Context, ref *provider.Reference, g
}
}
if sharedconf.MultiTenantEnabled() {
if fs.o.MultiTenantEnabled {
spaceTenant, err := grantNode.SpaceRoot.XattrString(ctx, prefixes.SpaceTenantIDAttr)
if err != nil {
log.Error().Err(err).Msg("failed to read tenant id of space")
@@ -151,7 +151,7 @@ func (lu *Lookup) NodeFromID(ctx context.Context, id *provider.ResourceId) (n *n
// The Resource references the root of a space
return lu.NodeFromSpaceID(ctx, id.SpaceId)
}
return node.ReadNode(ctx, lu, id.SpaceId, id.OpaqueId, false, nil, false)
return node.ReadNode(ctx, lu, id.SpaceId, id.OpaqueId, "", false, nil, false)
}
// Pathify segments the beginning of a string into depth segments of width length
@@ -172,7 +172,7 @@ func Pathify(id string, depth, width int) string {
// NodeFromSpaceID converts a resource id into a Node
func (lu *Lookup) NodeFromSpaceID(ctx context.Context, spaceID string) (n *node.Node, err error) {
node, err := node.ReadNode(ctx, lu, spaceID, spaceID, false, nil, false)
node, err := node.ReadNode(ctx, lu, spaceID, spaceID, "", false, nil, false)
if err != nil {
return nil, err
}
@@ -274,8 +274,8 @@ func (lu *Lookup) InternalPath(spaceID, nodeID string) string {
}
// LockfilePaths returns the path(s) to the lockfile of the node
func (lu *Lookup) LockfilePaths(spaceID, nodeID string) []string {
return []string{lu.InternalPath(spaceID, nodeID) + ".lock"}
func (lu *Lookup) LockfilePaths(n *node.Node) []string {
return []string{n.InternalPath() + ".lock"}
}
// VersionPath returns the internal path for a version of a node
@@ -29,6 +29,7 @@ import (
"hash/adler32"
"io"
"os"
"path/filepath"
"strconv"
"strings"
"time"
@@ -158,7 +159,7 @@ type PathLookup interface {
InternalRoot() string
InternalSpaceRoot(spaceID string) string
InternalPath(spaceID, nodeID string) string
LockfilePaths(spaceID, nodeID string) []string
LockfilePaths(n *Node) []string
VersionPath(spaceID, nodeID, version string) string
Path(ctx context.Context, n *Node, hasPermission PermissionFunc) (path string, err error)
MetadataBackend() metadata.Backend
@@ -350,7 +351,7 @@ func (n *Node) SpaceOwnerOrManager(ctx context.Context) *userpb.UserId {
}
// ReadNode creates a new instance from an id and checks if it exists
func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canListDisabledSpace bool, spaceRoot *Node, skipParentCheck bool) (*Node, error) {
func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID, internalPath string, canListDisabledSpace bool, spaceRoot *Node, skipParentCheck bool) (*Node, error) {
ctx, span := tracer.Start(ctx, "ReadNode")
defer span.End()
var err error
@@ -417,6 +418,9 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis
},
SpaceRoot: spaceRoot,
}
if internalPath != "" {
n.internalPath = internalPath
}
// append back revision to nodeid, even when returning a not existing node
defer func() {
@@ -506,7 +510,7 @@ func (n *Node) Child(ctx context.Context, name string) (*Node, error) {
return nil, err
}
readNode, err := ReadNode(ctx, n.lu, spaceID, nodeID, false, n.SpaceRoot, true)
readNode, err := ReadNode(ctx, n.lu, spaceID, nodeID, filepath.Join(n.internalPath, name), false, n.SpaceRoot, true)
if err != nil {
return nil, errors.Wrap(err, "could not read child node")
}
@@ -653,7 +657,7 @@ func (n *Node) ParentPath() string {
// path to use for new locks.
// In the future only one path should remain at which point the function can return a single string.
func (n *Node) LockFilePaths() []string {
return n.lu.LockfilePaths(n.SpaceID, n.ID)
return n.lu.LockfilePaths(n)
}
// CalculateEtag returns a hash of fileid + tmtime (or mtime)
@@ -94,6 +94,8 @@ type Options struct {
DisableVersioning bool `mapstructure:"disable_versioning"`
MountID string `mapstructure:"mount_id"`
MultiTenantEnabled bool `mapstructure:"multi_tenant_enabled"`
}
// AsyncPropagatorOptions holds the configuration for the async propagator
@@ -69,7 +69,7 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer
spaceID := ref.ResourceId.SpaceId
// check if the node is available and has not been deleted
n, err := node.ReadNode(ctx, fs.lu, spaceID, kp[0], false, nil, false)
n, err := node.ReadNode(ctx, fs.lu, spaceID, kp[0], "", false, nil, false)
if err != nil {
return err
}
@@ -185,7 +185,7 @@ func (fs *Decomposedfs) getRevisionNode(ctx context.Context, ref *provider.Refer
spaceID := ref.ResourceId.SpaceId
// check if the node is available and has not been deleted
n, err := node.ReadNode(ctx, fs.lu, spaceID, kp[0], false, nil, false)
n, err := node.ReadNode(ctx, fs.lu, spaceID, kp[0], "", false, nil, false)
if err != nil {
return nil, err
}
@@ -107,7 +107,7 @@ func (fs *Decomposedfs) CreateStorageSpace(ctx context.Context, req *provider.Cr
alias = templates.WithSpacePropertiesAndUser(u, req.Type, req.Name, spaceID, fs.o.PersonalSpaceAliasTemplate)
}
root, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID, true, nil, false) // will fall into `Exists` case below
root, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID, "", true, nil, false) // will fall into `Exists` case below
switch {
case err != nil:
return nil, err
@@ -312,7 +312,7 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide
if spaceID != spaceIDAny && entry != spaceIDAny {
// try directly reading the node
n, err := node.ReadNode(ctx, fs.lu, spaceID, entry, true, nil, false) // permission to read disabled space is checked later
n, err := node.ReadNode(ctx, fs.lu, spaceID, entry, "", true, nil, false) // permission to read disabled space is checked later
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("id", entry).Msg("could not read node")
return nil, err
@@ -449,7 +449,7 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide
continue
}
n, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID, true, nil, true)
n, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID, "", true, nil, true)
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("id", spaceID).Msg("could not read node, skipping")
continue
@@ -519,7 +519,7 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide
// if there are no matches (or they happened to be spaces for the owner) and the node is a child return a space
if int64(len(matches)) <= numShares.Load() && entry != spaceID {
// try node id
n, err := node.ReadNode(ctx, fs.lu, spaceID, entry, true, nil, false) // permission to read disabled space is checked in storageSpaceFromNode
n, err := node.ReadNode(ctx, fs.lu, spaceID, entry, "", true, nil, false) // permission to read disabled space is checked in storageSpaceFromNode
if err != nil {
return nil, err
}
@@ -631,7 +631,7 @@ func (fs *Decomposedfs) UpdateStorageSpace(ctx context.Context, req *provider.Up
}
// check which permissions are needed
spaceNode, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID, true, nil, false)
spaceNode, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID, "", true, nil, false)
if err != nil {
return nil, err
}
@@ -733,7 +733,7 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De
return err
}
n, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID, true, nil, false) // permission to read disabled space is checked later
n, err := node.ReadNode(ctx, fs.lu, spaceID, spaceID, "", true, nil, false) // permission to read disabled space is checked later
if err != nil {
return err
}
@@ -295,7 +295,7 @@ func (p AsyncPropagator) propagate(ctx context.Context, pn PropagationNode, reca
defer func() { _ = unlock() }()
_, subspan = tracer.Start(ctx, "node.ReadNode")
n, err := node.ReadNode(ctx, p.lookup, pn.GetSpaceID(), pn.GetID(), false, nil, false)
n, err := node.ReadNode(ctx, p.lookup, pn.GetSpaceID(), pn.GetID(), "", false, nil, false)
if err != nil {
log.Error().Err(err).
Msg("Propagation failed. Could not read node.")
@@ -214,7 +214,7 @@ func (tp *Tree) DownloadRevision(ctx context.Context, ref *provider.Reference, r
spaceID := ref.ResourceId.SpaceId
// check if the node is available and has not been deleted
n, err := node.ReadNode(ctx, tp.lookup, spaceID, kp[0], false, nil, false)
n, err := node.ReadNode(ctx, tp.lookup, spaceID, kp[0], "", false, nil, false)
if err != nil {
return nil, nil, err
}
@@ -307,7 +307,7 @@ func (tp *Tree) getRevisionNode(ctx context.Context, ref *provider.Reference, re
spaceID := ref.ResourceId.SpaceId
// check if the node is available and has not been deleted
n, err := node.ReadNode(ctx, tp.lookup, spaceID, kp[0], false, nil, false)
n, err := node.ReadNode(ctx, tp.lookup, spaceID, kp[0], "", false, nil, false)
if err != nil {
return nil, err
}
@@ -383,7 +383,7 @@ func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, erro
}
}
child, err := node.ReadNode(ctx, t.lookup, n.SpaceID, nodeID, false, n.SpaceRoot, true)
child, err := node.ReadNode(ctx, t.lookup, n.SpaceID, nodeID, "", false, n.SpaceRoot, true)
if err != nil {
return err
}
@@ -889,7 +889,7 @@ func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) (
nodeID = strings.ReplaceAll(nodeID, "/", "")
recycleNode = node.New(spaceID, nodeID, "", "", 0, "", provider.ResourceType_RESOURCE_TYPE_INVALID, nil, t.lookup)
recycleNode.SpaceRoot, err = node.ReadNode(ctx, t.lookup, spaceID, spaceID, false, nil, false)
recycleNode.SpaceRoot, err = node.ReadNode(ctx, t.lookup, spaceID, spaceID, "", false, nil, false)
if err != nil {
return
}
@@ -170,7 +170,7 @@ func (session *DecomposedFsSession) HeaderIfUnmodifiedSince() string {
// Node returns the node for the session
func (session *DecomposedFsSession) Node(ctx context.Context) (*node.Node, error) {
return node.ReadNode(ctx, session.store.lu, session.SpaceID(), session.info.Storage["NodeId"], false, nil, true)
return node.ReadNode(ctx, session.store.lu, session.SpaceID(), session.info.Storage["NodeId"], "", false, nil, true)
}
// ID returns the upload session id
@@ -213,7 +213,7 @@ func (store DecomposedFsStore) CreateNodeForUpload(ctx context.Context, session
store.lu,
)
var err error
n.SpaceRoot, err = node.ReadNode(ctx, store.lu, session.SpaceID(), session.SpaceID(), false, nil, false)
n.SpaceRoot, err = node.ReadNode(ctx, store.lu, session.SpaceID(), session.SpaceID(), "", false, nil, false)
if err != nil {
return nil, err
}
@@ -316,7 +316,7 @@ func (store DecomposedFsStore) updateExistingNode(ctx context.Context, session *
return f.Close()
}
old, _ := node.ReadNode(ctx, store.lu, spaceID, n.ID, false, nil, false)
old, _ := node.ReadNode(ctx, store.lu, spaceID, n.ID, "", false, nil, false)
if _, err := node.CheckQuota(ctx, n.SpaceRoot, true, uint64(old.Blobsize), fsize); err != nil {
return unlock, err
}
@@ -326,7 +326,7 @@ func (session *DecomposedFsSession) Finalize(ctx context.Context) (err error) {
if !isProcessing || procssingID != session.ID() {
versionID := revisionNode.ID + node.RevisionIDDelimiter + session.MTime().UTC().Format(time.RFC3339Nano)
// There should be a revision node (created by the other upload that finished before us), read it and upload our blob there.
existingRevisionNode, err := node.ReadNode(ctx, session.store.lu, session.SpaceID(), versionID, false, spaceRoot, false)
existingRevisionNode, err := node.ReadNode(ctx, session.store.lu, session.SpaceID(), versionID, "", false, spaceRoot, false)
if err != nil || !existingRevisionNode.Exists {
// The revision node has not been created. Likely because the file on disk was modified externally and re-assimilated (watchfs == true)
// Let's create the revision node now and upload the blob to it.
@@ -379,7 +379,7 @@ func (session *DecomposedFsSession) createRevisionNodeForUpload(ctx context.Cont
prefixes.ChecksumPrefix + "md5": md5h.Sum(nil),
prefixes.ChecksumPrefix + "adler32": adler32h.Sum(nil),
}
revisionNode, err := node.ReadNode(ctx, session.store.lu, session.SpaceID(), versionID, false, baseNode.SpaceRoot, false)
revisionNode, err := node.ReadNode(ctx, session.store.lu, session.SpaceID(), versionID, "", false, baseNode.SpaceRoot, false)
if err == nil {
mtime := session.MTime()
attrs.SetString(prefixes.BlobIDAttr, session.ID())
@@ -432,7 +432,7 @@ func (session *DecomposedFsSession) Cleanup(revertNodeMetadata, cleanBin, cleanI
if session.NodeExists() && session.info.MetaData["versionID"] != "" {
versionID := session.info.MetaData["versionID"]
sublog.Debug().Str("nodepath", n.InternalPath()).Str("versionID", versionID).Msg("restoring revision")
revisionNode, err := node.ReadNode(ctx, session.store.lu, session.SpaceID(), versionID, false, n.SpaceRoot, false)
revisionNode, err := node.ReadNode(ctx, session.store.lu, session.SpaceID(), versionID, "", false, n.SpaceRoot, false)
if err != nil {
sublog.Error().Err(err).Str("versionID", versionID).Msg("reading revision node failed")
}
+7 -5
View File
@@ -78,13 +78,15 @@ func (r *revaWalker) walkRecursively(ctx context.Context, wd string, info *provi
return fn(wd, info, nil)
}
list, err := r.readDir(ctx, info.Id)
errFn := fn(wd, info, err)
if err != nil || errFn != nil {
return errFn
err := fn(wd, info, nil)
if err != nil {
return err
}
list, err := r.readDir(ctx, info.Id)
if err != nil {
return err
}
for _, file := range list {
err = r.walkRecursively(ctx, filepath.Join(wd, info.Path), file, fn)
if err != nil && (file.Type != provider.ResourceType_RESOURCE_TYPE_CONTAINER || err != filepath.SkipDir) {
@@ -10,8 +10,11 @@ func init() {
// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_DescribeGroups
type Request struct {
Groups []string `kafka:"min=v0,max=v4"`
IncludeAuthorizedOperations bool `kafka:"min=v3,max=v4"`
// We need at least one tagged field to indicate that this is a "flexible" message
// type.
_ struct{} `kafka:"min=v5,max=v5,tag"`
Groups []string `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
IncludeAuthorizedOperations bool `kafka:"min=v3,max=v5"`
}
func (r *Request) ApiKey() protocol.ApiKey { return protocol.DescribeGroups }
@@ -42,27 +45,36 @@ func (r *Request) Split(cluster protocol.Cluster) (
}
type Response struct {
ThrottleTimeMs int32 `kafka:"min=v1,max=v4"`
Groups []ResponseGroup `kafka:"min=v0,max=v4"`
// We need at least one tagged field to indicate that this is a "flexible" message
// type.
_ struct{} `kafka:"min=v5,max=v5,tag"`
ThrottleTimeMs int32 `kafka:"min=v1,max=v5"`
Groups []ResponseGroup `kafka:"min=v0,max=v5"`
}
type ResponseGroup struct {
ErrorCode int16 `kafka:"min=v0,max=v4"`
GroupID string `kafka:"min=v0,max=v4"`
GroupState string `kafka:"min=v0,max=v4"`
ProtocolType string `kafka:"min=v0,max=v4"`
ProtocolData string `kafka:"min=v0,max=v4"`
Members []ResponseGroupMember `kafka:"min=v0,max=v4"`
AuthorizedOperations int32 `kafka:"min=v3,max=v4"`
// We need at least one tagged field to indicate that this is a "flexible" message
// type.
_ struct{} `kafka:"min=v5,max=v5,tag"`
ErrorCode int16 `kafka:"min=v0,max=v5"`
GroupID string `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
GroupState string `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
ProtocolType string `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
ProtocolData string `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
Members []ResponseGroupMember `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
AuthorizedOperations int32 `kafka:"min=v3,max=v5"`
}
type ResponseGroupMember struct {
MemberID string `kafka:"min=v0,max=v4"`
GroupInstanceID string `kafka:"min=v4,max=v4,nullable"`
ClientID string `kafka:"min=v0,max=v4"`
ClientHost string `kafka:"min=v0,max=v4"`
MemberMetadata []byte `kafka:"min=v0,max=v4"`
MemberAssignment []byte `kafka:"min=v0,max=v4"`
// We need at least one tagged field to indicate that this is a "flexible" message
// type.
_ struct{} `kafka:"min=v5,max=v5,tag"`
MemberID string `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
GroupInstanceID string `kafka:"min=v4,max=v4,nullable|min=v5,max=v5,compact,nullable"`
ClientID string `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
ClientHost string `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
MemberMetadata []byte `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
MemberAssignment []byte `kafka:"min=v0,max=v4|min=v5,max=v5,compact"`
}
func (r *Response) ApiKey() protocol.ApiKey { return protocol.DescribeGroups }
-1
View File
@@ -1,5 +1,4 @@
//go:build linux && !appengine && !tinygo
// +build linux,!appengine,!tinygo
package msgp
-1
View File
@@ -1,5 +1,4 @@
//go:build (!linux && !tinygo && !windows) || appengine
// +build !linux,!tinygo,!windows appengine
package msgp
+159
View File
@@ -0,0 +1,159 @@
package msgp
import "strconv"
// AutoShim provides helper functions for converting between string and
// numeric types.
type AutoShim struct{}
// ParseUint converts a string to a uint.
func (a AutoShim) ParseUint(s string) (uint, error) {
v, err := strconv.ParseUint(s, 10, strconv.IntSize)
return uint(v), err
}
// ParseUint8 converts a string to a uint8.
func (a AutoShim) ParseUint8(s string) (uint8, error) {
v, err := strconv.ParseUint(s, 10, 8)
return uint8(v), err
}
// ParseUint16 converts a string to a uint16.
func (a AutoShim) ParseUint16(s string) (uint16, error) {
v, err := strconv.ParseUint(s, 10, 16)
return uint16(v), err
}
// ParseUint32 converts a string to a uint32.
func (a AutoShim) ParseUint32(s string) (uint32, error) {
v, err := strconv.ParseUint(s, 10, 32)
return uint32(v), err
}
// ParseUint64 converts a string to a uint64.
func (a AutoShim) ParseUint64(s string) (uint64, error) {
v, err := strconv.ParseUint(s, 10, 64)
return v, err
}
// ParseInt converts a string to an int.
func (a AutoShim) ParseInt(s string) (int, error) {
v, err := strconv.ParseInt(s, 10, strconv.IntSize)
return int(v), err
}
// ParseInt8 converts a string to an int8.
func (a AutoShim) ParseInt8(s string) (int8, error) {
v, err := strconv.ParseInt(s, 10, 8)
return int8(v), err
}
// ParseInt16 converts a string to an int16.
func (a AutoShim) ParseInt16(s string) (int16, error) {
v, err := strconv.ParseInt(s, 10, 16)
return int16(v), err
}
// ParseInt32 converts a string to an int32.
func (a AutoShim) ParseInt32(s string) (int32, error) {
v, err := strconv.ParseInt(s, 10, 32)
return int32(v), err
}
// ParseInt64 converts a string to an int64.
func (a AutoShim) ParseInt64(s string) (int64, error) {
v, err := strconv.ParseInt(s, 10, 64)
return v, err
}
// ParseBool converts a string to a bool.
func (a AutoShim) ParseBool(s string) (bool, error) {
return strconv.ParseBool(s)
}
// ParseFloat64 converts a string to a float64.
func (a AutoShim) ParseFloat64(s string) (float64, error) {
return strconv.ParseFloat(s, 64)
}
// ParseFloat32 converts a string to a float32.
func (a AutoShim) ParseFloat32(s string) (float32, error) {
v, err := strconv.ParseFloat(s, 32)
return float32(v), err
}
// ParseByte converts a string to a byte.
func (a AutoShim) ParseByte(s string) (byte, error) {
v, err := strconv.ParseUint(s, 10, 8)
return byte(v), err
}
// Uint8String returns the string representation of a uint8.
func (a AutoShim) Uint8String(v uint8) string {
return strconv.FormatUint(uint64(v), 10)
}
// UintString returns the string representation of a uint.
func (a AutoShim) UintString(v uint) string {
return strconv.FormatUint(uint64(v), 10)
}
// Uint16String returns the string representation of a uint16.
func (a AutoShim) Uint16String(v uint16) string {
return strconv.FormatUint(uint64(v), 10)
}
// Uint32String returns the string representation of a uint32.
func (a AutoShim) Uint32String(v uint32) string {
return strconv.FormatUint(uint64(v), 10)
}
// Uint64String returns the string representation of a uint64.
func (a AutoShim) Uint64String(v uint64) string {
return strconv.FormatUint(v, 10)
}
// IntString returns the string representation of an int.
func (a AutoShim) IntString(v int) string {
return strconv.FormatInt(int64(v), 10)
}
// Int8String returns the string representation of an int8.
func (a AutoShim) Int8String(v int8) string {
return strconv.FormatInt(int64(v), 10)
}
// Int16String returns the string representation of an int16.
func (a AutoShim) Int16String(v int16) string {
return strconv.FormatInt(int64(v), 10)
}
// Int32String returns the string representation of an int32.
func (a AutoShim) Int32String(v int32) string {
return strconv.FormatInt(int64(v), 10)
}
// Int64String returns the string representation of an int64.
func (a AutoShim) Int64String(v int64) string {
return strconv.FormatInt(v, 10)
}
// BoolString returns the string representation of a bool.
func (a AutoShim) BoolString(v bool) string {
return strconv.FormatBool(v)
}
// Float64String returns the string representation of a float64.
func (a AutoShim) Float64String(v float64) string {
return strconv.FormatFloat(v, 'g', -1, 64)
}
// Float32String returns the string representation of a float32.
func (a AutoShim) Float32String(v float32) string {
return strconv.FormatFloat(float64(v), 'g', -1, 32)
}
// ByteString returns the string representation of a byte.
func (a AutoShim) ByteString(v byte) string {
return strconv.FormatUint(uint64(v), 10)
}
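A brief usage sketch of these helpers (direct calls from outside the package; assumes the fmt import, and autoShimExample is a made-up function name):
func autoShimExample() error {
	var shim msgp.AutoShim
	s := shim.Int64String(-42)   // "-42"
	v, err := shim.ParseInt64(s) // -42, nil
	if err != nil {
		return err
	}
	fmt.Println(v, shim.BoolString(true), shim.Float32String(1.5))
	return nil
}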
+21
View File
@@ -26,6 +26,27 @@
// the wiki at http://github.com/tinylib/msgp
package msgp
// RT is the runtime interface for all types that can be encoded and decoded.
type RT interface {
Decodable
Encodable
Sizer
Unmarshaler
Marshaler
}
// PtrTo is a constraint that matches pointer types *T.
type PtrTo[T any] interface {
~*T
}
// RTFor constrains a pointer type *T that satisfies the full runtime interface RT.
// Use it as a type constraint in generic code.
type RTFor[T any] interface {
PtrTo[T]
RT
}
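A sketch of RTFor as a generic constraint; roundTrip is a hypothetical helper (not part of msgp) that works for any T whose pointer implements the generated msgp methods:
// roundTrip marshals v and unmarshals the bytes into a fresh T.
func roundTrip[T any, PT msgp.RTFor[T]](v T) (T, error) {
	var out T
	buf, err := PT(&v).MarshalMsg(nil)
	if err != nil {
		return out, err
	}
	_, err = PT(&out).UnmarshalMsg(buf)
	return out, err
}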
const (
last4 = 0x0f
first4 = 0xf0
+1 -1
View File
@@ -58,7 +58,7 @@ func HasKey(key string, raw []byte) bool {
return false
}
var field []byte
for i := uint32(0); i < sz; i++ {
for range sz {
field, bts, err = ReadStringZC(bts)
if err != nil {
return false
+1 -2
View File
@@ -1,5 +1,4 @@
//go:build !tinygo
// +build !tinygo
package msgp
@@ -10,7 +9,7 @@ package msgp
var sizes [256]bytespec
func init() {
for i := 0; i < 256; i++ {
for i := range 256 {
sizes[i] = calcBytespec(byte(i))
}
}
-1
View File
@@ -1,5 +1,4 @@
//go:build tinygo
// +build tinygo
package msgp
+12 -3
View File
@@ -17,6 +17,10 @@ var (
// This should only realistically be seen on adversarial data trying to exhaust the stack.
ErrRecursion error = errRecursion{}
// ErrLimitExceeded is returned when a set limit is exceeded.
// Limits can be set on the Reader to prevent excessive memory usage by adversarial data.
ErrLimitExceeded error = errLimitExceeded{}
// this error is only returned
// if we reach code that should
// be unreachable
@@ -73,7 +77,7 @@ func Resumable(e error) bool {
//
// ErrShortBytes is not wrapped with any context due to backward compatibility
// issues with the public API.
func WrapError(err error, ctx ...interface{}) error {
func WrapError(err error, ctx ...any) error {
switch e := err.(type) {
case errShort:
return e
@@ -143,6 +147,11 @@ type errRecursion struct{}
func (e errRecursion) Error() string { return "msgp: recursion limit reached" }
func (e errRecursion) Resumable() bool { return false }
type errLimitExceeded struct{}
func (e errLimitExceeded) Error() string { return "msgp: configured reader limit exceeded" }
func (e errLimitExceeded) Resumable() bool { return false }
// ArrayError is an error returned
// when decoding a fix-sized array
// of the wrong size
@@ -382,8 +391,8 @@ l: // loop through string bytes (not UTF-8 characters)
}
// anything else is \x
sb = append(sb, `\x`...)
sb = append(sb, lowerhex[byte(b)>>4])
sb = append(sb, lowerhex[byte(b)&0xF])
sb = append(sb, lowerhex[b>>4])
sb = append(sb, lowerhex[b&0xF])
continue l
}
}
+1 -2
View File
@@ -1,5 +1,4 @@
//go:build !tinygo
// +build !tinygo
package msgp
@@ -9,7 +8,7 @@ import (
)
// ctxString converts the incoming interface{} slice into a single string.
func ctxString(ctx []interface{}) string {
func ctxString(ctx []any) string {
out := ""
for idx, cv := range ctx {
if idx > 0 {
-1
View File
@@ -1,5 +1,4 @@
//go:build tinygo
// +build tinygo
package msgp
+10 -4
View File
@@ -181,7 +181,7 @@ func (mw *Writer) writeExtensionHeader(length int, extType int8) error {
return err
}
mw.buf[o] = mext8
mw.buf[o+1] = byte(uint8(length))
mw.buf[o+1] = byte(length)
mw.buf[o+2] = byte(extType)
case length < math.MaxUint16:
o, err := mw.require(4)
@@ -342,7 +342,7 @@ func (m *Reader) peekExtensionHeader() (offset int, length int, extType int8, er
}
offset = 3
extType = int8(p[2])
length = int(uint8(p[1]))
length = int(p[1])
case mext16:
p, err = m.R.Peek(4)
@@ -383,6 +383,9 @@ func (m *Reader) ReadExtension(e Extension) error {
if expectedType := e.ExtensionType(); extType != expectedType {
return errExt(extType, expectedType)
}
if uint32(length) > m.GetMaxElements() {
return ErrLimitExceeded
}
p, err := m.R.Peek(offset + length)
if err != nil {
@@ -404,6 +407,9 @@ func (m *Reader) ReadExtensionRaw() (int8, []byte, error) {
if err != nil {
return 0, nil, err
}
if uint32(length) > m.GetMaxElements() {
return 0, nil, ErrLimitExceeded
}
payload, err := m.R.Next(offset + length)
if err != nil {
@@ -455,7 +461,7 @@ func AppendExtension(b []byte, e Extension) ([]byte, error) {
case l < math.MaxUint8:
o, n = ensure(b, l+3)
o[n] = mext8
o[n+1] = byte(uint8(l))
o[n+1] = byte(l)
o[n+2] = byte(e.ExtensionType())
n += 3
case l < math.MaxUint16:
@@ -528,7 +534,7 @@ func readExt(b []byte) (typ int8, remain []byte, data []byte, err error) {
sz = 16
off = 2
case mext8:
sz = int(uint8(b[1]))
sz = int(b[1])
typ = int8(b[2])
off = 3
if sz == 0 {
-3
View File
@@ -1,7 +1,4 @@
//go:build (linux || darwin || dragonfly || freebsd || illumos || netbsd || openbsd) && !appengine && !tinygo
// +build linux darwin dragonfly freebsd illumos netbsd openbsd
// +build !appengine
// +build !tinygo
package msgp
-1
View File
@@ -1,5 +1,4 @@
//go:build windows || appengine || tinygo
// +build windows appengine tinygo
package msgp
+3 -3
View File
@@ -133,11 +133,11 @@ func putMuint8(b []byte, u uint8) {
_ = b[1] // bounds check elimination
b[0] = muint8
b[1] = byte(u)
b[1] = u
}
func getMuint8(b []byte) uint8 {
return uint8(b[1])
return b[1]
}
func getUnix(b []byte) (sec int64, nsec int32) {
@@ -161,7 +161,7 @@ func prefixu8(b []byte, pre byte, sz uint8) {
_ = b[1] // bounds check elimination
b[0] = pre
b[1] = byte(sz)
b[1] = sz
}
// write prefix and big-endian uint16
+386
View File
@@ -0,0 +1,386 @@
package msgp
import (
"cmp"
"fmt"
"iter"
"maps"
"math"
"slices"
)
// ReadArray returns an iterator that can be used to iterate over the elements
// of an array in the MessagePack data while being read by the provided Reader.
// The type parameter T specifies the type of the elements in the array.
// The returned iterator is an iter.Seq2[T, error], yielding each element
// together with any error encountered while reading it.
// The iterator will always stop after one error has been encountered.
func ReadArray[T any](m *Reader, readFn func() (T, error)) iter.Seq2[T, error] {
return func(yield func(T, error) bool) {
// Check if nil
if m.IsNil() {
m.ReadNil()
return
}
// Regular array.
var empty T
length, err := m.ReadArrayHeader()
if err != nil {
yield(empty, fmt.Errorf("cannot read array header: %w", err))
return
}
for range length {
var v T
v, err = readFn()
if !yield(v, err) || err != nil {
return
}
}
}
}
// WriteArray writes an array to the provided Writer.
// The writeFn parameter specifies the function to use to write each element of the array.
func WriteArray[T any](w *Writer, a []T, writeFn func(T) error) error {
// Check if nil
if a == nil {
return w.WriteNil()
}
if uint64(len(a)) > math.MaxUint32 {
return fmt.Errorf("array too large to encode: %d elements", len(a))
}
// Write array header
err := w.WriteArrayHeader(uint32(len(a)))
if err != nil {
return err
}
// Write elements
for _, v := range a {
err = writeFn(v)
if err != nil {
return err
}
}
return nil
}
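A minimal round-trip sketch for WriteArray/ReadArray (assumes the bytes and fmt imports; arrayExample is a made-up name and error handling is kept short):
func arrayExample() error {
	var buf bytes.Buffer
	w := msgp.NewWriter(&buf)
	if err := msgp.WriteArray(w, []int32{1, 2, 3}, w.WriteInt32); err != nil {
		return err
	}
	if err := w.Flush(); err != nil {
		return err
	}

	r := msgp.NewReader(&buf)
	for v, err := range msgp.ReadArray(r, r.ReadInt32) {
		if err != nil {
			return err
		}
		fmt.Println(v) // 1, 2, 3
	}
	return nil
}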
// ReadMap returns an iterator that can be used to iterate over the elements
// of a map in the MessagePack data while being read by the provided Reader.
// The type parameters K and V specify the types of the keys and values in the map.
// The returned iterator implements the iter.Seq2[K, V] interface,
// allowing for sequential access to the map elements.
// The returned function can be used to read any error that
// occurred during iteration when iteration is done.
func ReadMap[K, V any](m *Reader, readKey func() (K, error), readVal func() (V, error)) (iter.Seq2[K, V], func() error) {
var err error
return func(yield func(K, V) bool) {
var sz uint32
if m.IsNil() {
err = m.ReadNil()
return
}
sz, err = m.ReadMapHeader()
if err != nil {
err = fmt.Errorf("cannot read map header: %w", err)
return
}
for range sz {
var k K
k, err = readKey()
if err != nil {
err = fmt.Errorf("cannot read key: %w", err)
return
}
var v V
v, err = readVal()
if err != nil {
err = fmt.Errorf("cannot read value: %w", err)
return
}
if !yield(k, v) {
return
}
}
}, func() error { return err }
}
// WriteMap writes a map to the provided Writer.
// The writeKey and writeVal parameters specify the functions
// to use to write each key and value of the map.
func WriteMap[K comparable, V any](w *Writer, m map[K]V, writeKey func(K) error, writeVal func(V) error) error {
if m == nil {
return w.WriteNil()
}
if uint64(len(m)) > math.MaxUint32 {
return fmt.Errorf("map too large to encode: %d elements", len(m))
}
// Write map header
err := w.WriteMapHeader(uint32(len(m)))
if err != nil {
return err
}
// Write elements
for k, v := range m {
err = writeKey(k)
if err != nil {
return err
}
err = writeVal(v)
if err != nil {
return err
}
}
return nil
}
// WriteMapSorted writes a map to the provided Writer.
// The keys of the map are sorted before writing.
// This provides deterministic output, but will allocate to sort the keys.
// The writeKey and writeVal parameters specify the functions
// to use to write each key and value of the map.
func WriteMapSorted[K cmp.Ordered, V any](w *Writer, m map[K]V, writeKey func(K) error, writeVal func(V) error) error {
if m == nil {
return w.WriteNil()
}
if uint64(len(m)) > math.MaxUint32 {
return fmt.Errorf("map too large to encode: %d elements", len(m))
}
// Write map header
err := w.WriteMapHeader(uint32(len(m)))
if err != nil {
return err
}
// Write elements
for _, k := range slices.Sorted(maps.Keys(m)) {
err = writeKey(k)
if err != nil {
return err
}
err = writeVal(m[k])
if err != nil {
return err
}
}
return nil
}
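Analogous usage for the map helpers, including the deferred error check that ReadMap requires (mapExample is a made-up name; assumes bytes and fmt):
func mapExample() error {
	var buf bytes.Buffer
	w := msgp.NewWriter(&buf)
	if err := msgp.WriteMapSorted(w, map[string]int64{"a": 1, "b": 2}, w.WriteString, w.WriteInt64); err != nil {
		return err
	}
	if err := w.Flush(); err != nil {
		return err
	}

	r := msgp.NewReader(&buf)
	seq, finish := msgp.ReadMap(r, r.ReadString, r.ReadInt64)
	for k, v := range seq {
		fmt.Println(k, v)
	}
	return finish() // reports any error hit during iteration
}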
// ReadArrayBytes returns an iterator that can be used to iterate over the elements
// of an array encoded in the MessagePack byte slice b.
// The type parameter T specifies the type of the elements in the array.
// After the iterator is exhausted, the remaining bytes in the buffer
// and any error can be read by calling the returned function.
func ReadArrayBytes[T any](b []byte, readFn func([]byte) (T, []byte, error)) (iter.Seq[T], func() (remain []byte, err error)) {
if IsNil(b) {
b, err := ReadNilBytes(b)
return func(yield func(T) bool) {}, func() ([]byte, error) { return b, err }
}
sz, b, err := ReadArrayHeaderBytes(b)
if err != nil || sz == 0 {
return func(yield func(T) bool) {}, func() ([]byte, error) { return b, err }
}
return func(yield func(T) bool) {
for range sz {
var v T
v, b, err = readFn(b)
if err != nil || !yield(v) {
return
}
}
}, func() ([]byte, error) {
return b, err
}
}
// AppendArray writes an array to the provided buffer.
// The writeFn parameter specifies the function to use to write each element of the array.
// The returned buffer contains the encoded array.
// The function panics if the array is larger than math.MaxUint32 elements.
func AppendArray[T any](b []byte, a []T, writeFn func(b []byte, v T) []byte) []byte {
if a == nil {
return AppendNil(b)
}
if uint64(len(a)) > math.MaxUint32 {
panic(fmt.Sprintf("array too large to encode: %d elements", len(a)))
}
b = AppendArrayHeader(b, uint32(len(a)))
for _, v := range a {
b = writeFn(b, v)
}
return b
}
// ReadMapBytes returns an iterator over key/value
// pairs from a MessagePack map encoded in b.
// The iterator yields K,V pairs, and this function also returns
// a closure to get the remaining bytes and any error.
func ReadMapBytes[K any, V any](b []byte,
readK func([]byte) (K, []byte, error),
readV func([]byte) (V, []byte, error)) (iter.Seq2[K, V], func() (remain []byte, err error)) {
var err error
var sz uint32
if IsNil(b) {
b, err = ReadNilBytes(b)
return func(yield func(K, V) bool) {}, func() ([]byte, error) { return b, err }
}
sz, b, err = ReadMapHeaderBytes(b)
if err != nil || sz == 0 {
return func(yield func(K, V) bool) {}, func() ([]byte, error) { return b, err }
}
return func(yield func(K, V) bool) {
for range sz {
var k K
k, b, err = readK(b)
if err != nil {
err = fmt.Errorf("cannot read map key: %w", err)
return
}
var v V
v, b, err = readV(b)
if err != nil {
err = fmt.Errorf("cannot read map value: %w", err)
return
}
if !yield(k, v) {
return
}
}
}, func() ([]byte, error) { return b, err }
}
// AppendMap writes a map to the provided buffer.
// The writeK and writeV parameters specify the functions to use to write each key and value of the map.
// The returned buffer contains the encoded map.
// The function panics if the map is larger than math.MaxUint32 elements.
func AppendMap[K comparable, V any](b []byte, m map[K]V,
writeK func(b []byte, k K) []byte,
writeV func(b []byte, v V) []byte) []byte {
if m == nil {
return AppendNil(b)
}
if uint64(len(m)) > math.MaxUint32 {
panic(fmt.Sprintf("map too large to encode: %d elements", len(m)))
}
b = AppendMapHeader(b, uint32(len(m)))
for k, v := range m {
b = writeK(b, k)
b = writeV(b, v)
}
return b
}
// AppendMapSorted writes a map to the provided buffer.
// Keys are sorted before writing.
// This provides deterministic output, but will allocate to sort the keys.
// The writeK and writeV parameters specify the functions to use to write each key and value of the map.
// The returned buffer contains the encoded map.
// The function panics if the map is larger than math.MaxUint32 elements.
func AppendMapSorted[K cmp.Ordered, V any](b []byte, m map[K]V,
writeK func(b []byte, k K) []byte,
writeV func(b []byte, v V) []byte) []byte {
if m == nil {
return AppendNil(b)
}
if uint64(len(m)) > math.MaxUint32 {
panic(fmt.Sprintf("map too large to encode: %d elements", len(m)))
}
b = AppendMapHeader(b, uint32(len(m)))
for _, k := range slices.Sorted(maps.Keys(m)) {
b = writeK(b, k)
b = writeV(b, m[k])
}
return b
}
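The Append*/ *Bytes variants operate on raw buffers instead of a Writer/Reader; a short sketch (mapBytesExample is a made-up name; assumes fmt):
func mapBytesExample() error {
	b := msgp.AppendMapSorted(nil, map[string]int64{"a": 1, "b": 2},
		msgp.AppendString, msgp.AppendInt64)

	seq, finish := msgp.ReadMapBytes(b, msgp.ReadStringBytes, msgp.ReadInt64Bytes)
	for k, v := range seq {
		fmt.Println(k, v)
	}
	_, err := finish() // returns the remaining bytes and any iteration error
	return err
}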
// DecodePtr is a convenience type for decoding into a pointer.
type DecodePtr[T any] interface {
*T
Decodable
}
// DecoderFrom allows augmenting any type with a DecodeMsg method into a method
// that reads from Reader and returns a T.
// Provide an instance of T. This value isn't used.
// See ReadArray/ReadMap "struct" examples for usage.
func DecoderFrom[T any, PT DecodePtr[T]](r *Reader, _ T) func() (T, error) {
return func() (T, error) {
var t T
tPtr := PT(&t)
err := tPtr.DecodeMsg(r)
return t, err
}
}
// FlexibleEncoder is a constraint for types where either T or *T implements Encodable
type FlexibleEncoder[T any] interface {
Encodable
*T
}
// EncoderTo allows augmenting any type with an EncodeMsg
// method into a method that writes to Writer on each call.
// Provide an instance of T. This value isn't used.
// See ReadArray or ReadMap "struct" examples for usage.
func EncoderTo[T any, _ FlexibleEncoder[T]](w *Writer, _ T) func(T) error {
return func(t T) error {
// Check if T implements Encodable
if marshaler, ok := any(t).(Encodable); ok {
return marshaler.EncodeMsg(w)
}
// Check if *T implements Encodable
if ptrMarshaler, ok := any(&t).(Encodable); ok {
return ptrMarshaler.EncodeMsg(w)
}
// The compiler should have asserted this.
panic("type does not implement Marshaler")
}
}
// UnmarshalPtr is a convenience type for unmarshaling into a pointer.
type UnmarshalPtr[T any] interface {
*T
Unmarshaler
}
// DecoderFromBytes allows augmenting any type with an UnmarshalMsg
// method into a method that reads from []byte and returns a T.
// Provide an instance of T. This value isn't used.
// See ReadArrayBytes or ReadMapBytes "struct" examples for usage.
func DecoderFromBytes[T any, PT UnmarshalPtr[T]](_ T) func([]byte) (T, []byte, error) {
return func(b []byte) (T, []byte, error) {
var t T
tPtr := PT(&t)
b, err := tPtr.UnmarshalMsg(b)
return t, b, err
}
}
// FlexibleMarshaler is a constraint for types where either T or *T implements Marshaler
type FlexibleMarshaler[T any] interface {
Marshaler
*T // Include *T in the interface
}
// EncoderToBytes allows augmenting any type with a MarshalMsg method into a method
// that reads from T and returns a []byte.
// Provide an instance of T. This value isn't used.
// See ReadArrayBytes or ReadMapBytes "struct" examples for usage.
func EncoderToBytes[T any, _ FlexibleMarshaler[T]](_ T) func([]byte, T) []byte {
return func(b []byte, t T) []byte {
// Check if T implements Marshaler
if marshaler, ok := any(t).(Marshaler); ok {
b, _ = marshaler.MarshalMsg(b)
return b
}
// Check if *T implements Marshaler
if ptrMarshaler, ok := any(&t).(Marshaler); ok {
b, _ = ptrMarshaler.MarshalMsg(b)
return b
}
// The compiler should have asserted this.
panic("type does not implement Marshaler")
}
}
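For struct types with msgp-generated methods, these adapters plug straight into the array helpers above. Item is a hypothetical generated type (its pointer implements EncodeMsg/DecodeMsg); writeItems and readItems are made-up names:
func writeItems(w *msgp.Writer, items []Item) error {
	return msgp.WriteArray(w, items, msgp.EncoderTo(w, Item{}))
}

func readItems(r *msgp.Reader) ([]Item, error) {
	var items []Item
	for it, err := range msgp.ReadArray(r, msgp.DecoderFrom(r, Item{})) {
		if err != nil {
			return nil, err
		}
		items = append(items, it)
	}
	return items, nil
}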
+7 -3
View File
@@ -60,7 +60,7 @@ func CopyToJSON(dst io.Writer, src io.Reader) (n int64, err error) {
// WriteToJSON translates MessagePack from 'r' and writes it as
// JSON to 'w' until the underlying reader returns io.EOF. It returns
// the number of bytes written, and an error if it stopped before EOF.
func (r *Reader) WriteToJSON(w io.Writer) (n int64, err error) {
func (m *Reader) WriteToJSON(w io.Writer) (n int64, err error) {
var j jsWriter
var bf *bufio.Writer
if jsw, ok := w.(jsWriter); ok {
@@ -71,7 +71,7 @@ func (r *Reader) WriteToJSON(w io.Writer) (n int64, err error) {
}
var nn int
for err == nil {
nn, err = rwNext(j, r)
nn, err = rwNext(j, m)
n += int64(nn)
}
if err != io.EOF {
@@ -364,7 +364,7 @@ func rwString(dst jsWriter, src *Reader) (n int, err error) {
if err != nil {
return
}
read = int(uint8(p[1]))
read = int(p[1])
case mstr16:
p, err = src.R.Next(3)
if err != nil {
@@ -382,6 +382,10 @@ func rwString(dst jsWriter, src *Reader) (n int, err error) {
return
}
write:
if uint64(read) > src.GetMaxStringLength() {
err = ErrLimitExceeded
return
}
p, err = src.R.Next(read)
if err != nil {
return
+2 -2
View File
@@ -91,7 +91,7 @@ func rwArrayBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []
if err != nil {
return msg, scratch, err
}
for i := uint32(0); i < sz; i++ {
for i := range sz {
if i != 0 {
err = w.WriteByte(',')
if err != nil {
@@ -119,7 +119,7 @@ func rwMapBytes(w jsWriter, msg []byte, scratch []byte, depth int) ([]byte, []by
if err != nil {
return msg, scratch, err
}
for i := uint32(0); i < sz; i++ {
for i := range sz {
if i != 0 {
err = w.WriteByte(',')
if err != nil {
+126 -2
View File
@@ -2,6 +2,7 @@ package msgp
import (
"math"
"math/bits"
"strconv"
)
@@ -77,7 +78,7 @@ func (n *Number) Uint() (uint64, bool) {
}
// Float casts the number to a float64, and
// returns whether or not that was the underlying
// returns whether that was the underlying
// type (either a float64 or a float32).
func (n *Number) Float() (float64, bool) {
switch n.typ {
@@ -182,7 +183,7 @@ func (n *Number) MarshalMsg(b []byte) ([]byte, error) {
case IntType:
return AppendInt64(b, int64(n.bits)), nil
case UintType:
return AppendUint64(b, uint64(n.bits)), nil
return AppendUint64(b, n.bits), nil
case Float64Type:
return AppendFloat64(b, math.Float64frombits(n.bits)), nil
case Float32Type:
@@ -208,6 +209,129 @@ func (n *Number) EncodeMsg(w *Writer) error {
}
}
// CoerceInt attempts to coerce the value of
// the number into a signed integer and returns
// whether it was successful.
// "Success" implies that no precision in the value of
// the number was lost, which means that the number was an integer or
// a floating point that mapped exactly to an integer without rounding.
func (n *Number) CoerceInt() (int64, bool) {
switch n.typ {
case InvalidType, IntType:
// InvalidType just means un-initialized.
return int64(n.bits), true
case UintType:
return int64(n.bits), n.bits <= math.MaxInt64
case Float32Type:
f := math.Float32frombits(uint32(n.bits))
if n.isExactInt() && f <= math.MaxInt64 && f >= math.MinInt64 {
return int64(f), true
}
if n.bits == 0 || n.bits == 1<<31 {
return 0, true
}
case Float64Type:
f := math.Float64frombits(n.bits)
if n.isExactInt() && f <= math.MaxInt64 && f >= math.MinInt64 {
return int64(f), true
}
return 0, n.bits == 0 || n.bits == 1<<63
}
return 0, false
}
// CoerceUInt attempts to coerce the value of
// the number into an unsigned integer and returns
// whether it was successful.
// "Success" implies that no precision in the value of
// the number was lost, which means that the number was an integer or
// a floating point that mapped exactly to an integer without rounding.
func (n *Number) CoerceUInt() (uint64, bool) {
switch n.typ {
case InvalidType, IntType:
// InvalidType just means un-initialized.
if int64(n.bits) >= 0 {
return n.bits, true
}
case UintType:
return n.bits, true
case Float32Type:
f := math.Float32frombits(uint32(n.bits))
if f >= 0 && f <= math.MaxUint64 && n.isExactInt() {
return uint64(f), true
}
if n.bits == 0 || n.bits == 1<<31 {
return 0, true
}
case Float64Type:
f := math.Float64frombits(n.bits)
if f >= 0 && f <= math.MaxUint64 && n.isExactInt() {
return uint64(f), true
}
return 0, n.bits == 0 || n.bits == 1<<63
}
return 0, false
}
// isExactInt will return true if the number represents an integer value.
// NaN and Inf return false.
func (n *Number) isExactInt() bool {
var eBits int // Exponent bits
var mBits int // Mantissa bits
switch n.typ {
case InvalidType, IntType, UintType:
return true
case Float32Type:
eBits = 8
mBits = 23
case Float64Type:
eBits = 11
mBits = 52
default:
return false
}
// Calculate float parts
exp := int(n.bits>>mBits) & ((1 << eBits) - 1)
mant := n.bits & ((1 << mBits) - 1)
if exp == 0 && mant == 0 {
// Handle zero value.
return true
}
exp -= (1 << (eBits - 1)) - 1
if exp < 0 || exp == 1<<(eBits-1) {
// Negative exponent is never integer (except zero handled above)
// Handles NaN (exp all 1s)
return false
}
if exp >= mBits {
// If we have more exponent than mantissa bits it is always an integer.
return true
}
// Check if all bits below the exponent are zero.
return bits.TrailingZeros64(mant) >= mBits-exp
}
// CoerceFloat returns the number as a float64.
// If the number is an integer, it will be
// converted to a float64 with the closest representation.
func (n *Number) CoerceFloat() float64 {
switch n.typ {
case IntType:
return float64(int64(n.bits))
case UintType:
return float64(n.bits)
case Float32Type:
return float64(math.Float32frombits(uint32(n.bits)))
case Float64Type:
return math.Float64frombits(n.bits)
default:
return 0.0
}
}
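The coercion rules in practice (numberExample is a made-up name; AsFloat64 is an existing Number setter; assumes fmt):
func numberExample() {
	var n msgp.Number
	n.AsFloat64(3.0)
	i, ok := n.CoerceInt()
	fmt.Println(i, ok) // 3 true: 3.0 maps exactly onto an integer
	n.AsFloat64(3.5)
	_, ok = n.CoerceInt()
	fmt.Println(ok) // false: coercion would lose precision
	fmt.Println(n.CoerceFloat()) // 3.5
}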
// Msgsize implements msgp.Sizer
func (n *Number) Msgsize() int {
switch n.typ {
-1
View File
@@ -1,5 +1,4 @@
//go:build (purego && !unsafe) || appengine
// +build purego,!unsafe appengine
package msgp
+141 -16
View File
@@ -1,8 +1,10 @@
package msgp
import (
"encoding"
"encoding/binary"
"encoding/json"
"fmt"
"io"
"math"
"strconv"
@@ -13,7 +15,7 @@ import (
)
// where we keep old *Readers
var readerPool = sync.Pool{New: func() interface{} { return &Reader{} }}
var readerPool = sync.Pool{New: func() any { return &Reader{} }}
// Type is a MessagePack wire type,
// including this package's built-in
@@ -152,6 +154,10 @@ type Reader struct {
R *fwd.Reader
scratch []byte
recursionDepth int
maxRecursionDepth int // maximum recursion depth
maxElements uint32 // maximum number of elements in arrays and maps
maxStrLen uint64 // maximum number of bytes in any string
}
// Read implements `io.Reader`
@@ -171,7 +177,7 @@ func (m *Reader) CopyNext(w io.Writer) (int64, error) {
// Opportunistic optimization: if we can fit the whole thing in the m.R
// buffer, then just get a pointer to that, and pass it to w.Write,
// avoiding an allocation.
if int(sz) <= m.R.BufferSize() {
if int(sz) >= 0 && int(sz) <= m.R.BufferSize() {
var nn int
var buf []byte
buf, err = m.R.Next(int(sz))
@@ -203,7 +209,7 @@ func (m *Reader) CopyNext(w io.Writer) (int64, error) {
defer done()
}
// for maps and slices, read elements
for x := uintptr(0); x < o; x++ {
for range o {
var n2 int64
n2, err = m.CopyNext(w)
if err != nil {
@@ -214,10 +220,53 @@ func (m *Reader) CopyNext(w io.Writer) (int64, error) {
return n, nil
}
// SetMaxRecursionDepth sets the maximum recursion depth.
func (m *Reader) SetMaxRecursionDepth(d int) {
m.maxRecursionDepth = d
}
// GetMaxRecursionDepth returns the maximum recursion depth.
// Set to 0 to use the default value of 100000.
func (m *Reader) GetMaxRecursionDepth() int {
if m.maxRecursionDepth <= 0 {
return recursionLimit
}
return m.maxRecursionDepth
}
// SetMaxElements sets the maximum number of elements to allow in map, bin, array or extension payload.
// Setting this to 0 will allow any number of elements - math.MaxUint32.
// This does currently apply to generated code.
func (m *Reader) SetMaxElements(d uint32) {
m.maxElements = d
}
// GetMaxElements will return the maximum number of elements in a map, bin, array or extension payload.
func (m *Reader) GetMaxElements() uint32 {
if m.maxElements <= 0 {
return math.MaxUint32
}
return m.maxElements
}
// SetMaxStringLength sets the maximum number of bytes to allow in strings.
// Setting this to 0 allows strings of any length (up to math.MaxUint64 bytes).
func (m *Reader) SetMaxStringLength(d uint64) {
m.maxStrLen = d
}
// GetMaxStringLength will return the current string length limit.
func (m *Reader) GetMaxStringLength() uint64 {
if m.maxStrLen <= 0 {
return math.MaxUint64
}
return min(m.maxStrLen, math.MaxUint64)
}
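Wiring the new limits onto a Reader that consumes untrusted input might look like this (limitedReader is a made-up name, the numbers are arbitrary; assumes io):
func limitedReader(src io.Reader) *msgp.Reader {
	r := msgp.NewReader(src)
	r.SetMaxStringLength(1 << 20) // strings larger than 1 MiB fail with ErrLimitExceeded
	r.SetMaxElements(100000)      // caps map/array/bin/extension element counts
	r.SetMaxRecursionDepth(1000)  // tightens the default nesting limit
	return r
}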
// recursiveCall will increment the recursion depth and return an error if it is exceeded.
// If a nil error is returned, done must be called to decrement the counter.
func (m *Reader) recursiveCall() (done func(), err error) {
if m.recursionDepth >= recursionLimit {
if m.recursionDepth >= m.GetMaxRecursionDepth() {
return func() {}, ErrRecursion
}
m.recursionDepth++
@@ -415,7 +464,11 @@ func (m *Reader) ReadMapKey(scratch []byte) ([]byte, error) {
out, err := m.ReadStringAsBytes(scratch)
if err != nil {
if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType {
return m.ReadBytes(scratch)
key, err := m.ReadBytes(scratch)
if uint64(len(key)) > m.GetMaxStringLength() {
return nil, ErrLimitExceeded
}
return key, err
}
return nil, err
}
@@ -468,6 +521,9 @@ fill:
if read == 0 {
return nil, ErrShortBytes
}
if uint64(read) > m.GetMaxStringLength() {
return nil, ErrLimitExceeded
}
return m.R.Next(read)
}
@@ -528,7 +584,7 @@ func (m *Reader) ReadFloat64() (f float64, err error) {
var p []byte
p, err = m.R.Peek(9)
if err != nil {
// we'll allow a coversion from float32 to float64,
// we'll allow a conversion from float32 to float64,
// since we don't lose any precision
if err == io.EOF && len(p) > 0 && p[0] == mfloat32 {
ef, err := m.ReadFloat32()
@@ -816,7 +872,7 @@ func (m *Reader) ReadUint64() (u uint64, err error) {
if err != nil {
return
}
v := int64(getMint64(p))
v := getMint64(p)
if v < 0 {
err = UintBelowZero{Value: v}
return
@@ -941,6 +997,10 @@ func (m *Reader) ReadBytes(scratch []byte) (b []byte, err error) {
return
}
if int64(cap(scratch)) < read {
if read > int64(m.GetMaxElements()) {
err = ErrLimitExceeded
return
}
b = make([]byte, read)
} else {
b = scratch[0:read]
@@ -980,10 +1040,10 @@ func (m *Reader) ReadBytesHeader() (sz uint32, err error) {
if err != nil {
return
}
sz = uint32(big.Uint32(p[1:]))
sz = big.Uint32(p[1:])
return
default:
err = badPrefix(BinType, p[0])
err = badPrefix(BinType, lead)
return
}
}
@@ -1052,7 +1112,7 @@ func (m *Reader) ReadStringAsBytes(scratch []byte) (b []byte, err error) {
if err != nil {
return
}
read = int64(uint8(p[1]))
read = int64(p[1])
case mstr16:
p, err = m.R.Next(3)
if err != nil {
@@ -1070,6 +1130,10 @@ func (m *Reader) ReadStringAsBytes(scratch []byte) (b []byte, err error) {
return
}
fill:
if uint64(read) > m.GetMaxStringLength() {
err = ErrLimitExceeded
return
}
if int64(cap(scratch)) < read {
b = make([]byte, read)
} else {
@@ -1143,7 +1207,7 @@ func (m *Reader) ReadString() (s string, err error) {
if err != nil {
return
}
read = int64(uint8(p[1]))
read = int64(p[1])
case mstr16:
p, err = m.R.Next(3)
if err != nil {
@@ -1165,6 +1229,11 @@ fill:
s, err = "", nil
return
}
if uint64(read) > m.GetMaxStringLength() {
err = ErrLimitExceeded
return
}
// reading into the memory
// that will become the string
// itself has vastly superior
@@ -1235,7 +1304,7 @@ func (m *Reader) ReadComplex128() (f complex128, err error) {
// ReadMapStrIntf reads a MessagePack map into a map[string]interface{}.
// (You must pass a non-nil map into the function.)
func (m *Reader) ReadMapStrIntf(mp map[string]interface{}) (err error) {
func (m *Reader) ReadMapStrIntf(mp map[string]any) (err error) {
var sz uint32
sz, err = m.ReadMapHeader()
if err != nil {
@@ -1244,9 +1313,13 @@ func (m *Reader) ReadMapStrIntf(mp map[string]interface{}) (err error) {
for key := range mp {
delete(mp, key)
}
if sz > m.GetMaxElements() {
err = ErrLimitExceeded
return
}
for i := uint32(0); i < sz; i++ {
var key string
var val interface{}
var val any
key, err = m.ReadString()
if err != nil {
return
@@ -1376,7 +1449,7 @@ func (m *Reader) ReadJSONNumber() (n json.Number, err error) {
// Arrays are decoded as []interface{}, and maps are decoded
// as map[string]interface{}. Integers are decoded as int64
// and unsigned integers are decoded as uint64.
func (m *Reader) ReadIntf() (i interface{}, err error) {
func (m *Reader) ReadIntf() (i any, err error) {
var t Type
t, err = m.NextType()
if err != nil {
@@ -1446,7 +1519,7 @@ func (m *Reader) ReadIntf() (i interface{}, err error) {
defer done()
}
mp := make(map[string]interface{})
mp := make(map[string]any)
err = m.ReadMapStrIntf(mp)
i = mp
return
@@ -1477,8 +1550,12 @@ func (m *Reader) ReadIntf() (i interface{}, err error) {
} else {
defer done()
}
if sz > m.GetMaxElements() {
err = ErrLimitExceeded
return
}
out := make([]interface{}, int(sz))
out := make([]any, int(sz))
for j := range out {
out[j], err = m.ReadIntf()
if err != nil {
@@ -1492,3 +1569,51 @@ func (m *Reader) ReadIntf() (i interface{}, err error) {
return nil, fatal // unreachable
}
}
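A small usage sketch for ReadIntf (not part of the vendored diff): maps come back as map[string]any, arrays as []any, integers as int64, and oversized collections now fail with ErrLimitExceeded instead of allocating unbounded memory.

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Encode a small map, then decode it back as a generic value.
	buf, err := msgp.AppendIntf(nil, map[string]any{"answer": int64(42)})
	if err != nil {
		panic(err)
	}

	r := msgp.NewReader(bytes.NewReader(buf))
	v, err := r.ReadIntf()
	if err != nil {
		panic(err)
	}
	m := v.(map[string]any)
	fmt.Println(m["answer"]) // integers are decoded as int64
}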
// ReadBinaryUnmarshal reads a binary-encoded object from the reader and unmarshals it into dst.
func (m *Reader) ReadBinaryUnmarshal(dst encoding.BinaryUnmarshaler) (err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("msgp: panic during UnmarshalBinary: %v", r)
}
}()
tmp := bytesPool.Get().([]byte)
defer bytesPool.Put(tmp) //nolint:staticcheck
tmp, err = m.ReadBytes(tmp[:0])
if err != nil {
return
}
return dst.UnmarshalBinary(tmp)
}
// ReadTextUnmarshal reads a text-encoded bin array from the reader and unmarshals it into dst.
func (m *Reader) ReadTextUnmarshal(dst encoding.TextUnmarshaler) (err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("msgp: panic during UnmarshalText: %v", r)
}
}()
tmp := bytesPool.Get().([]byte)
defer bytesPool.Put(tmp) //nolint:staticcheck
tmp, err = m.ReadBytes(tmp[:0])
if err != nil {
return
}
return dst.UnmarshalText(tmp)
}
// ReadTextUnmarshalString reads a text-encoded string from the reader and unmarshals it into dst.
func (m *Reader) ReadTextUnmarshalString(dst encoding.TextUnmarshaler) (err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("msgp: panic during UnmarshalText: %v", r)
}
}()
tmp := bytesPool.Get().([]byte)
defer bytesPool.Put(tmp) //nolint:staticcheck
tmp, err = m.ReadStringAsBytes(tmp[:0])
if err != nil {
return
}
return dst.UnmarshalText(tmp)
}
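A round-trip sketch for the new ReadBinaryUnmarshal helper (illustrative, not part of the diff), using time.Time only because it is a convenient standard-library encoding.BinaryMarshaler/BinaryUnmarshaler:

package main

import (
	"bytes"
	"fmt"
	"time"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	var buf bytes.Buffer
	w := msgp.NewWriter(&buf)

	// Write the time as a MessagePack 'bin' object...
	data, err := time.Now().MarshalBinary()
	if err != nil {
		panic(err)
	}
	if err := w.WriteBytes(data); err != nil {
		panic(err)
	}
	if err := w.Flush(); err != nil {
		panic(err)
	}

	// ...and read it straight back into a time.Time via UnmarshalBinary.
	var t time.Time
	r := msgp.NewReader(&buf)
	if err := r.ReadBinaryUnmarshal(&t); err != nil {
		panic(err)
	}
	fmt.Println(t)
}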
+19 -10
View File
@@ -1130,7 +1130,7 @@ func ReadTimeBytes(b []byte) (t time.Time, o []byte, err error) {
return
}
default:
err = errExt(int8(b[2]), TimeExtension)
err = errExt(typ, TimeExtension)
return
}
}
@@ -1138,11 +1138,11 @@ func ReadTimeBytes(b []byte) (t time.Time, o []byte, err error) {
// ReadMapStrIntfBytes reads a map[string]interface{}
// out of 'b' and returns the map and remaining bytes.
// If 'old' is non-nil, the values will be read into that map.
func ReadMapStrIntfBytes(b []byte, old map[string]interface{}) (v map[string]interface{}, o []byte, err error) {
func ReadMapStrIntfBytes(b []byte, old map[string]any) (v map[string]any, o []byte, err error) {
return readMapStrIntfBytesDepth(b, old, 0)
}
func readMapStrIntfBytesDepth(b []byte, old map[string]interface{}, depth int) (v map[string]interface{}, o []byte, err error) {
func readMapStrIntfBytesDepth(b []byte, old map[string]any, depth int) (v map[string]any, o []byte, err error) {
if depth >= recursionLimit {
err = ErrRecursion
return
@@ -1155,14 +1155,18 @@ func readMapStrIntfBytesDepth(b []byte, old map[string]interface{}, depth int) (
if err != nil {
return
}
// Map key, min size is 2 bytes. Value min 1 byte.
if int64(len(b)) < int64(sz)*3 {
err = ErrShortBytes
return
}
if old != nil {
for key := range old {
delete(old, key)
}
v = old
} else {
v = make(map[string]interface{}, int(sz))
v = make(map[string]any, int(sz))
}
for z := uint32(0); z < sz; z++ {
@@ -1175,7 +1179,7 @@ func readMapStrIntfBytesDepth(b []byte, old map[string]interface{}, depth int) (
if err != nil {
return
}
var val interface{}
var val any
val, o, err = readIntfBytesDepth(o, depth)
if err != nil {
return
@@ -1188,11 +1192,11 @@ func readMapStrIntfBytesDepth(b []byte, old map[string]interface{}, depth int) (
// ReadIntfBytes attempts to read
// the next object out of 'b' as a raw interface{} and
// return the remaining bytes.
func ReadIntfBytes(b []byte) (i interface{}, o []byte, err error) {
func ReadIntfBytes(b []byte) (i any, o []byte, err error) {
return readIntfBytesDepth(b, 0)
}
func readIntfBytesDepth(b []byte, depth int) (i interface{}, o []byte, err error) {
func readIntfBytesDepth(b []byte, depth int) (i any, o []byte, err error) {
if depth >= recursionLimit {
err = ErrRecursion
return
@@ -1215,7 +1219,12 @@ func readIntfBytesDepth(b []byte, depth int) (i interface{}, o []byte, err error
if err != nil {
return
}
j := make([]interface{}, int(sz))
// Each element will at least be 1 byte.
if uint32(len(o)) < sz {
err = ErrShortBytes
return
}
j := make([]any, int(sz))
i = j
for d := range j {
j[d], o, err = readIntfBytesDepth(o, depth+1)
@@ -1274,7 +1283,7 @@ func readIntfBytesDepth(b []byte, depth int) (i interface{}, o []byte, err error
}
// last resort is a raw extension
e := RawExtension{}
e.Type = int8(t)
e.Type = t
o, err = ReadExtensionBytes(b, &e)
i = &e
return
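The new length sanity checks are easiest to see on the bytes-oriented API. In this illustrative sketch (not part of the diff), an array header that promises far more elements than the remaining input now fails fast with ErrShortBytes before any allocation:

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// A header claiming 1,000,000 array elements, followed by nothing.
	crafted := msgp.AppendArrayHeader(nil, 1000000)

	_, _, err := msgp.ReadIntfBytes(crafted)
	fmt.Println(err == msgp.ErrShortBytes) // true: rejected before allocating
}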
File diff suppressed because it is too large
+19
View File
@@ -0,0 +1,19 @@
// Package setof allows serializing sets map[T]struct{} as arrays.
//
// Nil maps are preserved as a nil value on stream.
//
// A deterministic, sorted version is available, with slightly lower performance.
package setof
// ensure 'sz' extra bytes in 'b' can be appended without reallocating
func ensure(b []byte, sz int) []byte {
l := len(b)
c := cap(b)
if c-l < sz {
o := make([]byte, l, l+sz)
copy(o, b)
return o
}
return b
}
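The exported setof API is not shown in this diff; as a rough illustration of the encoding it describes, here is a hand-rolled sketch that writes a map[string]struct{} set as a MessagePack array using the plain msgp appenders, with keys sorted for the deterministic variant the package comment mentions. The helper name is illustrative, not the setof API.

package main

import (
	"fmt"
	"sort"

	"github.com/tinylib/msgp/msgp"
)

// appendStringSet writes the set as an array of its keys, in sorted
// order so the output is deterministic.
func appendStringSet(b []byte, set map[string]struct{}) []byte {
	keys := make([]string, 0, len(set))
	for k := range set {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	b = msgp.AppendArrayHeader(b, uint32(len(keys)))
	for _, k := range keys {
		b = msgp.AppendString(b, k)
	}
	return b
}

func main() {
	b := appendStringSet(nil, map[string]struct{}{"b": {}, "a": {}})
	fmt.Printf("% x\n", b)
}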
+9
View File
@@ -37,4 +37,13 @@ const (
BytesPrefixSize = 5
StringPrefixSize = 5
ExtensionPrefixSize = 6
// We cannot determine the exact size of the marshalled bytes,
// so we assume 32 bytes
BinaryMarshalerSize = BytesPrefixSize + 32
BinaryAppenderSize
TextMarshalerBinSize
TextAppenderBinSize
TextMarshalerStringSize = StringPrefixSize + 32
TextAppenderStringSize
)
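These constants feed Msgsize-style estimates. A sketch (not part of the diff, struct and field names hypothetical) of how such an estimate would be assembled for a two-field struct encoded as a map, where one field is a string and the other is stored via MarshalBinary:

package example

import "github.com/tinylib/msgp/msgp"

// itemMsgsize estimates the encoded size of a hypothetical struct with a
// string field "Name" and a BinaryMarshaler field "Key", written as a map.
func itemMsgsize(name string) int {
	return msgp.MapHeaderSize +
		msgp.StringPrefixSize + len("Name") + msgp.StringPrefixSize + len(name) +
		msgp.StringPrefixSize + len("Key") + msgp.BinaryMarshalerSize
}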
-1
View File
@@ -1,5 +1,4 @@
//go:build (!purego && !appengine) || (!appengine && purego && unsafe)
// +build !purego,!appengine !appengine,purego,unsafe
package msgp
+74 -8
View File
@@ -1,9 +1,11 @@
package msgp
import (
"encoding"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"math"
"reflect"
@@ -33,7 +35,7 @@ var (
btsType = reflect.TypeOf(([]byte)(nil))
writerPool = sync.Pool{
New: func() interface{} {
New: func() any {
return &Writer{buf: make([]byte, 2048)}
},
}
@@ -430,7 +432,7 @@ func (mw *Writer) WriteUint64(u uint64) error {
}
// WriteByte is analogous to WriteUint8
func (mw *Writer) WriteByte(u byte) error { return mw.WriteUint8(uint8(u)) }
func (mw *Writer) WriteByte(u byte) error { return mw.WriteUint8(u) }
// WriteUint8 writes a uint8 to the writer
func (mw *Writer) WriteUint8(u uint8) error { return mw.WriteUint64(uint64(u)) }
@@ -446,6 +448,9 @@ func (mw *Writer) WriteUint(u uint) error { return mw.WriteUint64(uint64(u)) }
// WriteBytes writes binary as 'bin' to the writer
func (mw *Writer) WriteBytes(b []byte) error {
if uint64(len(b)) > math.MaxUint32 {
return ErrLimitExceeded
}
sz := uint32(len(b))
var err error
switch {
@@ -488,6 +493,10 @@ func (mw *Writer) WriteBool(b bool) error {
// WriteString writes a messagepack string to the writer.
// (This is NOT an implementation of io.StringWriter)
func (mw *Writer) WriteString(s string) error {
if uint64(len(s)) > math.MaxUint32 {
return ErrLimitExceeded
}
sz := uint32(len(s))
var err error
switch {
@@ -526,6 +535,9 @@ func (mw *Writer) WriteStringHeader(sz uint32) error {
// WriteStringFromBytes writes a 'str' object
// from a []byte.
func (mw *Writer) WriteStringFromBytes(str []byte) error {
if uint64(len(str)) > math.MaxUint32 {
return ErrLimitExceeded
}
sz := uint32(len(str))
var err error
switch {
@@ -591,7 +603,7 @@ func (mw *Writer) WriteMapStrStr(mp map[string]string) (err error) {
}
// WriteMapStrIntf writes a map[string]interface to the writer
func (mw *Writer) WriteMapStrIntf(mp map[string]interface{}) (err error) {
func (mw *Writer) WriteMapStrIntf(mp map[string]any) (err error) {
err = mw.WriteMapHeader(uint32(len(mp)))
if err != nil {
return
@@ -703,7 +715,7 @@ func (mw *Writer) WriteJSONNumber(n json.Number) error {
// - A pointer to a supported type
// - A type that satisfies the msgp.Encodable interface
// - A type that satisfies the msgp.Extension interface
func (mw *Writer) WriteIntf(v interface{}) error {
func (mw *Writer) WriteIntf(v any) error {
if v == nil {
return mw.WriteNil()
}
@@ -754,7 +766,7 @@ func (mw *Writer) WriteIntf(v interface{}) error {
return mw.WriteBytes(v)
case map[string]string:
return mw.WriteMapStrStr(v)
case map[string]interface{}:
case map[string]any:
return mw.WriteMapStrIntf(v)
case time.Time:
return mw.WriteTime(v)
@@ -817,7 +829,7 @@ func (mw *Writer) writeSlice(v reflect.Value) (err error) {
if err != nil {
return
}
for i := uint32(0); i < sz; i++ {
for i := range sz {
err = mw.WriteIntf(v.Index(int(i)).Interface())
if err != nil {
return
@@ -840,7 +852,7 @@ func isSupported(k reflect.Kind) bool {
// value of 'i'. If the underlying value is not
// a simple builtin (or []byte), GuessSize defaults
// to 512.
func GuessSize(i interface{}) int {
func GuessSize(i any) int {
if i == nil {
return NilSize
}
@@ -868,7 +880,7 @@ func GuessSize(i interface{}) int {
return Complex128Size
case bool:
return BoolSize
case map[string]interface{}:
case map[string]any:
s := MapHeaderSize
for key, val := range i {
s += StringPrefixSize + len(key) + GuessSize(val)
@@ -884,3 +896,57 @@ func GuessSize(i interface{}) int {
return 512
}
}
// Temporary buffer for reading/writing binary data.
var bytesPool = sync.Pool{New: func() any { return make([]byte, 0, 1024) }}
// WriteBinaryAppender will write the bytes from the given
// encoding.BinaryAppender as a bin array.
func (mw *Writer) WriteBinaryAppender(b encoding.BinaryAppender) (err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("msgp: panic during AppendBinary: %v", r)
}
}()
dst := bytesPool.Get().([]byte)
defer bytesPool.Put(dst) //nolint:staticcheck
dst, err = b.AppendBinary(dst[:0])
if err != nil {
return err
}
return mw.WriteBytes(dst)
}
// WriteTextAppender will write the bytes from the given
// encoding.TextAppender as a bin array.
func (mw *Writer) WriteTextAppender(b encoding.TextAppender) (err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("msgp: panic during AppendText: %v", r)
}
}()
dst := bytesPool.Get().([]byte)
defer bytesPool.Put(dst) //nolint:staticcheck
dst, err = b.AppendText(dst[:0])
if err != nil {
return err
}
return mw.WriteBytes(dst)
}
// WriteTextAppenderString will write the bytes from the given
// encoding.TextAppender as a string.
func (mw *Writer) WriteTextAppenderString(b encoding.TextAppender) (err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("msgp: panic during AppendText: %v", r)
}
}()
dst := bytesPool.Get().([]byte)
defer bytesPool.Put(dst) //nolint:staticcheck
dst, err = b.AppendText(dst[:0])
if err != nil {
return err
}
return mw.WriteStringFromBytes(dst)
}
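A round-trip sketch for the appender-based writers (not part of the diff). It assumes a type implementing encoding.TextAppender; netip.Addr does on the Go 1.24 toolchain this module targets.

package main

import (
	"bytes"
	"fmt"
	"net/netip"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	addr := netip.MustParseAddr("192.0.2.1")

	var buf bytes.Buffer
	w := msgp.NewWriter(&buf)
	// WriteTextAppenderString stores the AppendText output as a 'str' object.
	// (netip.Addr implements encoding.TextAppender as of Go 1.24.)
	if err := w.WriteTextAppenderString(addr); err != nil {
		panic(err)
	}
	if err := w.Flush(); err != nil {
		panic(err)
	}

	var back netip.Addr
	r := msgp.NewReader(&buf)
	if err := r.ReadTextUnmarshalString(&back); err != nil {
		panic(err)
	}
	fmt.Println(back) // 192.0.2.1
}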
+52 -6
View File
@@ -181,7 +181,7 @@ func AppendUint(b []byte, u uint) []byte { return AppendUint64(b, uint64(u)) }
func AppendUint8(b []byte, u uint8) []byte { return AppendUint64(b, uint64(u)) }
// AppendByte is analogous to AppendUint8
func AppendByte(b []byte, u byte) []byte { return AppendUint8(b, uint8(u)) }
func AppendByte(b []byte, u byte) []byte { return AppendUint8(b, u) }
// AppendUint16 appends a uint16 to the slice
func AppendUint16(b []byte, u uint16) []byte { return AppendUint64(b, uint64(u)) }
@@ -371,7 +371,7 @@ func AppendMapStrStr(b []byte, m map[string]string) []byte {
// AppendMapStrIntf appends a map[string]interface{} to the slice
// as a MessagePack map with 'str'-type keys.
func AppendMapStrIntf(b []byte, m map[string]interface{}) ([]byte, error) {
func AppendMapStrIntf(b []byte, m map[string]any) ([]byte, error) {
sz := uint32(len(m))
b = AppendMapHeader(b, sz)
var err error
@@ -394,7 +394,7 @@ func AppendMapStrIntf(b []byte, m map[string]interface{}) ([]byte, error) {
// - A *T, where T is another supported type
// - A type that satisfies the msgp.Marshaler interface
// - A type that satisfies the msgp.Extension interface
func AppendIntf(b []byte, i interface{}) ([]byte, error) {
func AppendIntf(b []byte, i any) ([]byte, error) {
if i == nil {
return AppendNil(b), nil
}
@@ -444,13 +444,13 @@ func AppendIntf(b []byte, i interface{}) ([]byte, error) {
return AppendTime(b, i), nil
case time.Duration:
return AppendDuration(b, i), nil
case map[string]interface{}:
case map[string]any:
return AppendMapStrIntf(b, i)
case map[string]string:
return AppendMapStrStr(b, i), nil
case json.Number:
return AppendJSONNumber(b, i)
case []interface{}:
case []any:
b = AppendArrayHeader(b, uint32(len(i)))
var err error
for _, k := range i {
@@ -483,7 +483,7 @@ func AppendIntf(b []byte, i interface{}) ([]byte, error) {
case reflect.Array, reflect.Slice:
l := v.Len()
b = AppendArrayHeader(b, uint32(l))
for i := 0; i < l; i++ {
for i := range l {
b, err = AppendIntf(b, v.Index(i).Interface())
if err != nil {
return b, err
@@ -518,3 +518,49 @@ func AppendJSONNumber(b []byte, n json.Number) ([]byte, error) {
}
return b, err
}
// AppendBytesTwoPrefixed will add the length to a bin section written with
// 2 bytes of space saved for a bin8 header.
// If the sz cannot fit inside a bin8, the data will be moved to make space for the header.
func AppendBytesTwoPrefixed(b []byte, sz int) []byte {
off := len(b) - sz - 2
switch {
case sz <= math.MaxUint8:
// Just write header...
prefixu8(b[off:], mbin8, uint8(sz))
case sz <= math.MaxUint16:
// Scoot one
b = append(b, 0)
copy(b[off+1:], b[off:])
prefixu16(b[off:], mbin16, uint16(sz))
default:
// Scoot three
b = append(b, 0, 0, 0)
copy(b[off+3:], b[off:])
prefixu32(b[off:], mbin32, uint32(sz))
}
return b
}
// AppendBytesStringTwoPrefixed will add the length to a string section written with
// 2 bytes of space saved for a str8 header.
// If the sz cannot fit inside a str8, the data will be moved to make space for the header.
func AppendBytesStringTwoPrefixed(b []byte, sz int) []byte {
off := len(b) - sz - 2
switch {
case sz <= math.MaxUint8:
// Just write header...
prefixu8(b[off:], mstr8, uint8(sz))
case sz <= math.MaxUint16:
// Scoot one
b = append(b, 0)
copy(b[off+1:], b[off:])
prefixu16(b[off:], mstr16, uint16(sz))
default:
// Scoot three
b = append(b, 0, 0, 0)
copy(b[off+3:], b[off:])
prefixu32(b[off:], mstr32, uint32(sz))
}
return b
}
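As far as the doc comments describe it, these helpers expect a reserve-then-backfill pattern: append two placeholder bytes, append a payload whose length isn't known up front, then write (or widen) the header afterwards. A hedged sketch, assuming v implements encoding.BinaryAppender; the wrapper name is illustrative.

package example

import (
	"encoding"

	"github.com/tinylib/msgp/msgp"
)

// appendBinaryTwoPrefixed reserves two bytes for a bin8 header, lets the
// value append its payload, then backfills the header; if the payload
// outgrew bin8, AppendBytesTwoPrefixed shifts it to fit bin16/bin32.
func appendBinaryTwoPrefixed(b []byte, v encoding.BinaryAppender) ([]byte, error) {
	b = append(b, 0, 0) // placeholder for the bin8 header
	start := len(b)
	var err error
	b, err = v.AppendBinary(b)
	if err != nil {
		return b, err
	}
	return msgp.AppendBytesTwoPrefixed(b, len(b)-start), nil
}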
+15 -9
View File
@@ -15,7 +15,7 @@ github.com/Azure/go-ansiterm/winterm
# github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358
## explicit
github.com/Azure/go-ntlmssp
# github.com/BurntSushi/toml v1.5.0
# github.com/BurntSushi/toml v1.6.0
## explicit; go 1.18
github.com/BurntSushi/toml
github.com/BurntSushi/toml/internal
@@ -821,6 +821,11 @@ github.com/hashicorp/go-plugin/internal/cmdrunner
github.com/hashicorp/go-plugin/internal/grpcmux
github.com/hashicorp/go-plugin/internal/plugin
github.com/hashicorp/go-plugin/runner
# github.com/hashicorp/golang-lru/v2 v2.0.7
## explicit; go 1.18
github.com/hashicorp/golang-lru/v2
github.com/hashicorp/golang-lru/v2/internal
github.com/hashicorp/golang-lru/v2/simplelru
# github.com/hashicorp/yamux v0.1.2
## explicit; go 1.20
github.com/hashicorp/yamux
@@ -1064,7 +1069,7 @@ github.com/miekg/dns
# github.com/mileusna/useragent v1.3.5
## explicit; go 1.14
github.com/mileusna/useragent
# github.com/minio/crc64nvme v1.1.0
# github.com/minio/crc64nvme v1.1.1
## explicit; go 1.22
github.com/minio/crc64nvme
# github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76
@@ -1073,8 +1078,8 @@ github.com/minio/highwayhash
# github.com/minio/md5-simd v1.1.2
## explicit; go 1.14
github.com/minio/md5-simd
# github.com/minio/minio-go/v7 v7.0.97
## explicit; go 1.23.0
# github.com/minio/minio-go/v7 v7.0.98
## explicit; go 1.24.0
github.com/minio/minio-go/v7
github.com/minio/minio-go/v7/pkg/cors
github.com/minio/minio-go/v7/pkg/credentials
@@ -1370,7 +1375,7 @@ github.com/opencloud-eu/icap-client
# github.com/opencloud-eu/libre-graph-api-go v1.0.8-0.20250724122329-41ba6b191e76
## explicit; go 1.18
github.com/opencloud-eu/libre-graph-api-go
# github.com/opencloud-eu/reva/v2 v2.41.1-0.20260107152322-93760b632993
# github.com/opencloud-eu/reva/v2 v2.41.1-0.20260120144836-2769c3c07a19
## explicit; go 1.24.1
github.com/opencloud-eu/reva/v2/cmd/revad/internal/grace
github.com/opencloud-eu/reva/v2/cmd/revad/runtime
@@ -1835,7 +1840,7 @@ github.com/power-devops/perfstat
## explicit; go 1.16
github.com/pquerna/cachecontrol
github.com/pquerna/cachecontrol/cacheobject
# github.com/prometheus/alertmanager v0.30.0
# github.com/prometheus/alertmanager v0.30.1
## explicit; go 1.24.0
github.com/prometheus/alertmanager/asset
github.com/prometheus/alertmanager/featurecontrol
@@ -1938,7 +1943,7 @@ github.com/segmentio/asm/cpu/arm64
github.com/segmentio/asm/cpu/cpuid
github.com/segmentio/asm/cpu/x86
github.com/segmentio/asm/internal/unsafebytes
# github.com/segmentio/kafka-go v0.4.49
# github.com/segmentio/kafka-go v0.4.50
## explicit; go 1.23
github.com/segmentio/kafka-go
github.com/segmentio/kafka-go/compress
@@ -2121,9 +2126,10 @@ github.com/tidwall/pretty
# github.com/tidwall/sjson v1.2.5
## explicit; go 1.14
github.com/tidwall/sjson
# github.com/tinylib/msgp v1.3.0
## explicit; go 1.20
# github.com/tinylib/msgp v1.6.1
## explicit; go 1.24
github.com/tinylib/msgp/msgp
github.com/tinylib/msgp/msgp/setof
# github.com/tklauser/go-sysconf v0.3.14
## explicit; go 1.18
github.com/tklauser/go-sysconf