Mirror of https://github.com/opencloud-eu/opencloud.git, synced 2025-12-30 17:00:57 -06:00
Merge pull request #249 from rhafer/bump-reva
Bump reva for latest fixes
go.mod (16 changed lines)

@@ -58,19 +58,19 @@ require (
github.com/mna/pigeon v1.3.0
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826
github.com/nats-io/nats-server/v2 v2.10.25
github.com/nats-io/nats.go v1.38.0
github.com/nats-io/nats.go v1.39.1
github.com/oklog/run v1.1.0
github.com/olekukonko/tablewriter v0.0.5
github.com/onsi/ginkgo v1.16.5
github.com/onsi/ginkgo/v2 v2.22.2
github.com/onsi/gomega v1.36.2
github.com/open-policy-agent/opa v1.1.0
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250220153133-2081521720f4
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250225150735-7d4559bbf520
github.com/orcaman/concurrent-map v1.0.0
github.com/owncloud/libre-graph-api-go v1.0.5-0.20240829135935-80dc00d6f5ea
github.com/pkg/errors v0.9.1
github.com/pkg/xattr v0.4.10
github.com/prometheus/client_golang v1.20.5
github.com/prometheus/client_golang v1.21.0
github.com/r3labs/sse/v2 v2.10.0
github.com/riandyrn/otelchi v0.12.0
github.com/rogpeppe/go-internal v1.13.1
@@ -177,7 +177,7 @@ require (
github.com/emirpasic/gods v1.18.1 // indirect
github.com/emvi/iso-639-1 v1.1.0 // indirect
github.com/evanphx/json-patch/v5 v5.5.0 // indirect
github.com/fatih/color v1.14.1 // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.8.0 // indirect
github.com/gdexlab/go-render v1.0.1 // indirect
@@ -188,7 +188,7 @@ require (
github.com/go-git/go-git/v5 v5.13.2 // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/go-jose/go-jose/v3 v3.0.3 // indirect
github.com/go-jose/go-jose/v4 v4.0.2 // indirect
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/go-logr/logr v1.4.2 // indirect
@@ -245,9 +245,9 @@ require (
github.com/longsleep/go-metrics v1.0.0 // indirect
github.com/longsleep/rndm v1.2.0 // indirect
github.com/mattermost/xml-roundtrip-validator v0.1.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mattn/go-sqlite3 v1.14.24 // indirect
github.com/maxymania/go-system v0.0.0-20170110133659-647cc364bf0b // indirect
github.com/mendsley/gojwk v0.0.0-20141217222730-4d5ec6e58103 // indirect
@@ -277,7 +277,7 @@ require (
github.com/pquerna/cachecontrol v0.2.0 // indirect
github.com/prometheus/alertmanager v0.27.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/prometheus/statsd_exporter v0.22.8 // indirect
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect

go.sum (31 changed lines)

@@ -305,8 +305,8 @@ github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2Vvl
github.com/exoscale/egoscale v0.46.0/go.mod h1:mpEXBpROAa/2i5GC0r33rfxG+TxSEka11g1PIXt9+zc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
@@ -359,8 +359,8 @@ github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk=
github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
@@ -748,8 +748,9 @@ github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
@@ -763,8 +764,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mattn/go-tty v0.0.0-20180219170247-931426f7535a/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
@@ -824,8 +825,8 @@ github.com/nats-io/jwt/v2 v2.7.3 h1:6bNPK+FXgBeAqdj4cYQ0F8ViHRbi7woQLq4W29nUAzE=
github.com/nats-io/jwt/v2 v2.7.3/go.mod h1:GvkcbHhKquj3pkioy5put1wvPxs78UlZ7D/pY+BgZk4=
github.com/nats-io/nats-server/v2 v2.10.25 h1:J0GWLDDXo5HId7ti/lTmBfs+lzhmu8RPkoKl0eSCqwc=
github.com/nats-io/nats-server/v2 v2.10.25/go.mod h1:/YYYQO7cuoOBt+A7/8cVjuhWTaTUEAlZbJT+3sMAfFU=
github.com/nats-io/nats.go v1.38.0 h1:A7P+g7Wjp4/NWqDOOP/K6hfhr54DvdDQUznt5JFg9XA=
github.com/nats-io/nats.go v1.38.0/go.mod h1:IGUM++TwokGnXPs82/wCuiHS02/aKrdYUQkU8If6yjw=
github.com/nats-io/nats.go v1.39.1 h1:oTkfKBmz7W047vRxV762M67ZdXeOtUgvbBaNoQ+3PPk=
github.com/nats-io/nats.go v1.39.1/go.mod h1:MgRb8oOdigA6cYpEPhXJuRVH6UE/V4jblJ2jQ27IXYM=
github.com/nats-io/nkeys v0.4.9 h1:qe9Faq2Gxwi6RZnZMXfmGMZkg3afLLOtrU+gDZJ35b0=
github.com/nats-io/nkeys v0.4.9/go.mod h1:jcMqs+FLG+W5YO36OX6wFIFcmpdAns+w1Wm6D3I/evE=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
@@ -860,8 +861,8 @@ github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/open-policy-agent/opa v1.1.0 h1:HMz2evdEMTyNqtdLjmu3Vyx06BmhNYAx67Yz3Ll9q2s=
github.com/open-policy-agent/opa v1.1.0/go.mod h1:T1pASQ1/vwfTa+e2fYcfpLCvWgYtqtiUv+IuA/dLPQs=
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250220153133-2081521720f4 h1:uV5++OW0uDBe+n8Bnh8P0scNiZQisnKWoFZYLyYzhvU=
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250220153133-2081521720f4/go.mod h1:GJE98Psk3SKJqQm+YHsrPB+b52aIaJCr4rCZplPZrJI=
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250225150735-7d4559bbf520 h1:JCFtRPS6l0zouBBMBewWDXOBiKBhmAzsw6liG83b+Xw=
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250225150735-7d4559bbf520/go.mod h1:ZERf8ae/ppN23rL0TwUH+oxxGY2DZ0x3o31ho3w7GeY=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
@@ -919,8 +920,8 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_golang v1.21.0 h1:DIsaGmiaBkSangBgMtWdNfxbMNdku5IK6iNhrEqWvdA=
github.com/prometheus/client_golang v1.21.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
@@ -940,8 +941,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.0.0-20170703101242-e645f4e5aaa8/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=

vendor/github.com/fatih/color/README.md (generated, vendored; 23 changed lines)

@@ -9,7 +9,7 @@ suits you.

## Install

```bash
```
go get github.com/fatih/color
```

@@ -30,6 +30,18 @@ color.Magenta("And many others ..")

```

### RGB colors

If your terminal supports 24-bit colors, you can use RGB color codes.

```go
color.RGB(255, 128, 0).Println("foreground orange")
color.RGB(230, 42, 42).Println("foreground red")

color.BgRGB(255, 128, 0).Println("background orange")
color.BgRGB(230, 42, 42).Println("background red")
```

### Mix and reuse colors

```go
@@ -49,6 +61,11 @@ boldRed.Println("This will print text in bold red.")

whiteBackground := red.Add(color.BgWhite)
whiteBackground.Println("Red text with white background.")

// Mix with RGB color codes
color.RGB(255, 128, 0).AddBgRGB(0, 0, 0).Println("orange with black background")

color.BgRGB(255, 128, 0).AddRGB(255, 255, 255).Println("orange background with white foreground")
```

### Use your own output (io.Writer)

@@ -161,10 +178,6 @@ c.Println("This prints again cyan...")

To output color in GitHub Actions (or other CI systems that support ANSI colors), make sure to set `color.NoColor = false` so that it bypasses the check for non-tty output streams.

## Todo

* Save/Return previous values
* Evaluate fmt.Formatter interface

## Credits

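The GitHub Actions note in the README hunk above is easier to see as a full program. A minimal sketch, assuming an application that imports the vendored fatih/color package; the messages are placeholders, not taken from this repository:

```go
package main

import "github.com/fatih/color"

func main() {
	// CI log streams are not ttys, so the package disables color by default.
	// Overriding NoColor forces ANSI output for systems such as GitHub Actions
	// that render escape sequences anyway.
	color.NoColor = false

	color.Green("deploy step succeeded")
	color.Red("deploy step failed")
}
```
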
vendor/github.com/fatih/color/color.go (generated, vendored; 91 changed lines)

@@ -65,6 +65,29 @@ const (
CrossedOut
)

const (
ResetBold Attribute = iota + 22
ResetItalic
ResetUnderline
ResetBlinking
_
ResetReversed
ResetConcealed
ResetCrossedOut
)

var mapResetAttributes map[Attribute]Attribute = map[Attribute]Attribute{
Bold: ResetBold,
Faint: ResetBold,
Italic: ResetItalic,
Underline: ResetUnderline,
BlinkSlow: ResetBlinking,
BlinkRapid: ResetBlinking,
ReverseVideo: ResetReversed,
Concealed: ResetConcealed,
CrossedOut: ResetCrossedOut,
}

// Foreground text colors
const (
FgBlack Attribute = iota + 30
@@ -75,6 +98,9 @@ const (
FgMagenta
FgCyan
FgWhite

// used internally for 256 and 24-bit coloring
foreground
)

// Foreground Hi-Intensity text colors
@@ -99,6 +125,9 @@ const (
BgMagenta
BgCyan
BgWhite

// used internally for 256 and 24-bit coloring
background
)

// Background Hi-Intensity text colors
@@ -127,6 +156,30 @@ func New(value ...Attribute) *Color {
return c
}

// RGB returns a new foreground color in 24-bit RGB.
func RGB(r, g, b int) *Color {
return New(foreground, 2, Attribute(r), Attribute(g), Attribute(b))
}

// BgRGB returns a new background color in 24-bit RGB.
func BgRGB(r, g, b int) *Color {
return New(background, 2, Attribute(r), Attribute(g), Attribute(b))
}

// AddRGB is used to chain foreground RGB SGR parameters. Use as many as parameters to combine
// and create custom color objects. Example: .Add(34, 0, 12).Add(255, 128, 0).
func (c *Color) AddRGB(r, g, b int) *Color {
c.params = append(c.params, foreground, 2, Attribute(r), Attribute(g), Attribute(b))
return c
}

// AddRGB is used to chain background RGB SGR parameters. Use as many as parameters to combine
// and create custom color objects. Example: .Add(34, 0, 12).Add(255, 128, 0).
func (c *Color) AddBgRGB(r, g, b int) *Color {
c.params = append(c.params, background, 2, Attribute(r), Attribute(g), Attribute(b))
return c
}

// Set sets the given parameters immediately. It will change the color of
// output with the given SGR parameters until color.Unset() is called.
func Set(p ...Attribute) *Color {
@@ -246,10 +299,7 @@ func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
// On Windows, users should wrap w with colorable.NewColorable() if w is of
// type *os.File.
func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
c.SetWriter(w)
defer c.UnsetWriter(w)

return fmt.Fprintln(w, a...)
return fmt.Fprintln(w, c.wrap(sprintln(a...)))
}

// Println formats using the default formats for its operands and writes to
@@ -258,10 +308,7 @@ func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
// encountered. This is the standard fmt.Print() method wrapped with the given
// color.
func (c *Color) Println(a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()

return fmt.Fprintln(Output, a...)
return fmt.Fprintln(Output, c.wrap(sprintln(a...)))
}

// Sprint is just like Print, but returns a string instead of printing it.
@@ -271,7 +318,7 @@ func (c *Color) Sprint(a ...interface{}) string {

// Sprintln is just like Println, but returns a string instead of printing it.
func (c *Color) Sprintln(a ...interface{}) string {
return c.wrap(fmt.Sprintln(a...))
return c.wrap(sprintln(a...)) + "\n"
}

// Sprintf is just like Printf, but returns a string instead of printing it.
@@ -353,7 +400,7 @@ func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
// string. Windows users should use this in conjunction with color.Output.
func (c *Color) SprintlnFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return c.wrap(fmt.Sprintln(a...))
return c.wrap(sprintln(a...)) + "\n"
}
}

@@ -383,7 +430,18 @@ func (c *Color) format() string {
}

func (c *Color) unformat() string {
return fmt.Sprintf("%s[%dm", escape, Reset)
//return fmt.Sprintf("%s[%dm", escape, Reset)
//for each element in sequence let's use the specific reset escape, or the generic one if not found
format := make([]string, len(c.params))
for i, v := range c.params {
format[i] = strconv.Itoa(int(Reset))
ra, ok := mapResetAttributes[v]
if ok {
format[i] = strconv.Itoa(int(ra))
}
}

return fmt.Sprintf("%s[%sm", escape, strings.Join(format, ";"))
}

// DisableColor disables the color output. Useful to not change any existing
@@ -411,6 +469,12 @@ func (c *Color) isNoColorSet() bool {

// Equals returns a boolean value indicating whether two colors are equal.
func (c *Color) Equals(c2 *Color) bool {
if c == nil && c2 == nil {
return true
}
if c == nil || c2 == nil {
return false
}
if len(c.params) != len(c2.params) {
return false
}
@@ -614,3 +678,8 @@ func HiCyanString(format string, a ...interface{}) string { return colorString(f
func HiWhiteString(format string, a ...interface{}) string {
return colorString(format, FgHiWhite, a...)
}

// sprintln is a helper function to format a string with fmt.Sprintln and trim the trailing newline.
func sprintln(a ...interface{}) string {
return strings.TrimSuffix(fmt.Sprintln(a...), "\n")
}

vendor/github.com/fatih/color/color_windows.go (generated, vendored; new file, 19 lines)

@@ -0,0 +1,19 @@
package color

import (
"os"

"golang.org/x/sys/windows"
)

func init() {
// Opt-in for ansi color support for current process.
// https://learn.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences#output-sequences
var outMode uint32
out := windows.Handle(os.Stdout.Fd())
if err := windows.GetConsoleMode(out, &outMode); err != nil {
return
}
outMode |= windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
_ = windows.SetConsoleMode(out, outMode)
}

vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md (generated, vendored; 24 changed lines)

@@ -1,3 +1,27 @@
# v4.0.4

## Fixed

- Reverted "Allow unmarshalling JSONWebKeySets with unsupported key types" as a
  breaking change. See #136 / #137.

# v4.0.3

## Changed

- Allow unmarshalling JSONWebKeySets with unsupported key types (#130)
- Document that OpaqueKeyEncrypter can't be implemented (for now) (#129)
- Dependency updates

# v4.0.2

## Changed

- Improved documentation of Verify() to note that JSONWebKeySet is a supported
  argument type (#104)
- Defined exported error values for missing x5c header and unsupported elliptic
  curves error cases (#117)

# v4.0.1

## Fixed

vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md (generated, vendored; 6 changed lines)

@@ -7,9 +7,3 @@ When submitting code, please make every effort to follow existing conventions
and style in order to keep the code as readable as possible. Please also make
sure all tests pass by running `go test`, and format your code with `go fmt`.
We also recommend using `golint` and `errcheck`.

Before your code can be accepted into the project you must also sign the
Individual Contributor License Agreement. We use [cla-assistant.io][1] and you
will be prompted to sign once a pull request is opened.

[1]: https://cla-assistant.io/

vendor/github.com/go-jose/go-jose/v4/README.md (generated, vendored; 10 changed lines)

@@ -9,14 +9,6 @@ Package jose aims to provide an implementation of the Javascript Object Signing
and Encryption set of standards. This includes support for JSON Web Encryption,
JSON Web Signature, and JSON Web Token standards.

**Disclaimer**: This library contains encryption software that is subject to
the U.S. Export Administration Regulations. You may not export, re-export,
transfer or download this code or any part of it in violation of any United
States law, directive or regulation. In particular this software may not be
exported or re-exported in any form or on any media to Iran, North Sudan,
Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any
US maintained blocked list.

## Overview

The implementation follows the
@@ -109,6 +101,6 @@ allows attaching a key id.

Examples can be found in the Godoc
reference for this package. The
[`jose-util`](https://github.com/go-jose/go-jose/tree/v4/jose-util)
[`jose-util`](https://github.com/go-jose/go-jose/tree/main/jose-util)
subdirectory also contains a small command-line utility which might be useful
as an example as well.

vendor/github.com/go-jose/go-jose/v4/crypter.go (generated, vendored; 10 changed lines)

@@ -459,7 +459,10 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error)
return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
}

key := tryJWKS(decryptionKey, obj.Header)
key, err := tryJWKS(decryptionKey, obj.Header)
if err != nil {
return nil, err
}
decrypter, err := newDecrypter(key)
if err != nil {
return nil, err
@@ -529,7 +532,10 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
}

key := tryJWKS(decryptionKey, obj.Header)
key, err := tryJWKS(decryptionKey, obj.Header)
if err != nil {
return -1, Header{}, nil, err
}
decrypter, err := newDecrypter(key)
if err != nil {
return -1, Header{}, nil, err

vendor/github.com/go-jose/go-jose/v4/jwe.go (generated, vendored; 5 changed lines)

@@ -288,10 +288,11 @@ func ParseEncryptedCompact(
keyAlgorithms []KeyAlgorithm,
contentEncryption []ContentEncryption,
) (*JSONWebEncryption, error) {
parts := strings.Split(input, ".")
if len(parts) != 5 {
// Five parts is four separators
if strings.Count(input, ".") != 4 {
return nil, fmt.Errorf("go-jose/go-jose: compact JWE format must have five parts")
}
parts := strings.SplitN(input, ".", 5)

rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0])
if err != nil {

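For context on the stricter compact parsing above, here is a hedged sketch of how a caller of the vendored go-jose/v4 package parses and decrypts a compact JWE. The explicit algorithm allow-lists are part of the v4 API shown in the diff; the key pair, plaintext, and algorithm choices below are illustrative assumptions, not taken from this repository:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

func main() {
	// Hypothetical RSA key pair; real callers load an existing key.
	priv, _ := rsa.GenerateKey(rand.Reader, 2048)

	// Produce a compact JWE to have something to parse.
	enc, _ := jose.NewEncrypter(
		jose.A128GCM,
		jose.Recipient{Algorithm: jose.RSA_OAEP_256, Key: &priv.PublicKey},
		nil,
	)
	obj, _ := enc.Encrypt([]byte("hello"))
	token, _ := obj.CompactSerialize()

	// ParseEncryptedCompact requires the caller to allow-list key and
	// content-encryption algorithms; the five-part check shown in the diff
	// runs before any base64 decoding.
	parsed, err := jose.ParseEncryptedCompact(
		token,
		[]jose.KeyAlgorithm{jose.RSA_OAEP_256},
		[]jose.ContentEncryption{jose.A128GCM},
	)
	if err != nil {
		panic(err)
	}
	plaintext, err := parsed.Decrypt(priv)
	fmt.Println(string(plaintext), err)
}
```
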
vendor/github.com/go-jose/go-jose/v4/jwk.go (generated, vendored; 25 changed lines)

@@ -239,10 +239,10 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
keyPub = key
}
} else {
err = fmt.Errorf("go-jose/go-jose: unknown curve %s'", raw.Crv)
return fmt.Errorf("go-jose/go-jose: unknown curve %s'", raw.Crv)
}
default:
err = fmt.Errorf("go-jose/go-jose: unknown json web key type '%s'", raw.Kty)
return fmt.Errorf("go-jose/go-jose: unknown json web key type '%s'", raw.Kty)
}

if err != nil {
@@ -779,7 +779,13 @@ func (key rawJSONWebKey) symmetricKey() ([]byte, error) {
return key.K.bytes(), nil
}

func tryJWKS(key interface{}, headers ...Header) interface{} {
var (
// ErrJWKSKidNotFound is returned when a JWKS does not contain a JWK with a
// key ID which matches one in the provided tokens headers.
ErrJWKSKidNotFound = errors.New("go-jose/go-jose: JWK with matching kid not found in JWK Set")
)

func tryJWKS(key interface{}, headers ...Header) (interface{}, error) {
var jwks JSONWebKeySet

switch jwksType := key.(type) {
@@ -788,9 +794,11 @@ func tryJWKS(key interface{}, headers ...Header) interface{} {
case JSONWebKeySet:
jwks = jwksType
default:
return key
// If the specified key is not a JWKS, return as is.
return key, nil
}

// Determine the KID to search for from the headers.
var kid string
for _, header := range headers {
if header.KeyID != "" {
@@ -799,14 +807,17 @@ func tryJWKS(key interface{}, headers ...Header) interface{} {
}
}

// If no KID is specified in the headers, reject.
if kid == "" {
return key
return nil, ErrJWKSKidNotFound
}

// Find the JWK with the matching KID. If no JWK with the specified KID is
// found, reject.
keys := jwks.Key(kid)
if len(keys) == 0 {
return key
return nil, ErrJWKSKidNotFound
}

return keys[0].Key
return keys[0].Key, nil
}

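The tryJWKS rewrite above means that verifying against a JSONWebKeySet now fails loudly with ErrJWKSKidNotFound when no key ID matches, instead of silently falling through with the whole set as the key. A minimal sketch of the caller-visible behaviour, under the assumption that signing with a JSONWebKey embeds its kid in the header; the key, kid, and payload below are hypothetical:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"errors"
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

func main() {
	priv, _ := rsa.GenerateKey(rand.Reader, 2048)

	// Sign with an explicit kid so the JWS protected header carries it.
	signer, _ := jose.NewSigner(
		jose.SigningKey{Algorithm: jose.RS256, Key: jose.JSONWebKey{Key: priv, KeyID: "key-1"}},
		nil,
	)
	obj, _ := signer.Sign([]byte(`{"sub":"demo"}`))
	token, _ := obj.CompactSerialize()

	jwks := jose.JSONWebKeySet{Keys: []jose.JSONWebKey{
		{Key: &priv.PublicKey, KeyID: "key-1", Algorithm: "RS256", Use: "sig"},
	}}

	parsed, _ := jose.ParseSigned(token, []jose.SignatureAlgorithm{jose.RS256})

	// With a matching kid in the set, Verify succeeds.
	if payload, err := parsed.Verify(jwks); err == nil {
		fmt.Println("verified:", string(payload))
	}

	// With no matching kid, the new code path surfaces ErrJWKSKidNotFound.
	if _, err := parsed.Verify(jose.JSONWebKeySet{}); errors.Is(err, jose.ErrJWKSKidNotFound) {
		fmt.Println("rejected: no key with matching kid in the set")
	}
}
```
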
vendor/github.com/go-jose/go-jose/v4/jws.go (generated, vendored; 5 changed lines)

@@ -327,10 +327,11 @@ func parseSignedCompact(
payload []byte,
signatureAlgorithms []SignatureAlgorithm,
) (*JSONWebSignature, error) {
parts := strings.Split(input, ".")
if len(parts) != 3 {
// Three parts is two separators
if strings.Count(input, ".") != 2 {
return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts")
}
parts := strings.SplitN(input, ".", 3)

if parts[1] != "" && payload != nil {
return nil, fmt.Errorf("go-jose/go-jose: payload is not detached")

vendor/github.com/go-jose/go-jose/v4/opaque.go (generated, vendored; 3 changed lines)

@@ -83,6 +83,9 @@ func (o *opaqueVerifier) verifyPayload(payload []byte, signature []byte, alg Sig
}

// OpaqueKeyEncrypter is an interface that supports encrypting keys with an opaque key.
//
// Note: this cannot currently be implemented outside this package because of its
// unexported method.
type OpaqueKeyEncrypter interface {
// KeyID returns the kid
KeyID() string

vendor/github.com/go-jose/go-jose/v4/signing.go (generated, vendored; 10 changed lines)

@@ -390,7 +390,10 @@ func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte {
// The verificationKey argument must have one of the types allowed for the
// verificationKey argument of JSONWebSignature.Verify().
func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error {
key := tryJWKS(verificationKey, obj.headers()...)
key, err := tryJWKS(verificationKey, obj.headers()...)
if err != nil {
return err
}
verifier, err := newVerifier(key)
if err != nil {
return err
@@ -455,7 +458,10 @@ func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signa
// The verificationKey argument must have one of the types allowed for the
// verificationKey argument of JSONWebSignature.Verify().
func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) {
key := tryJWKS(verificationKey, obj.headers()...)
key, err := tryJWKS(verificationKey, obj.headers()...)
if err != nil {
return -1, Signature{}, err
}
verifier, err := newVerifier(key)
if err != nil {
return -1, Signature{}, err

vendor/github.com/mattn/go-colorable/colorable_appengine.go (generated, vendored; 38 lines)

@@ -1,38 +0,0 @@
//go:build appengine
// +build appengine

package colorable

import (
"io"
"os"

_ "github.com/mattn/go-isatty"
)

// NewColorable returns new instance of Writer which handles escape sequence.
func NewColorable(file *os.File) io.Writer {
if file == nil {
panic("nil passed instead of *os.File to NewColorable()")
}

return file
}

// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout.
func NewColorableStdout() io.Writer {
return os.Stdout
}

// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr.
func NewColorableStderr() io.Writer {
return os.Stderr
}

// EnableColorsStdout enable colors if possible.
func EnableColorsStdout(enabled *bool) func() {
if enabled != nil {
*enabled = true
}
return func() {}
}

vendor/github.com/mattn/go-colorable/colorable_others.go (generated, vendored; 4 changed lines)

@@ -1,5 +1,5 @@
//go:build !windows && !appengine
// +build !windows,!appengine
//go:build !windows || appengine
// +build !windows appengine

package colorable

vendor/github.com/mattn/go-colorable/colorable_windows.go (generated, vendored; 22 changed lines)

@@ -11,7 +11,7 @@ import (
"strconv"
"strings"
"sync"
"syscall"
syscall "golang.org/x/sys/windows"
"unsafe"

"github.com/mattn/go-isatty"
@@ -73,7 +73,7 @@ type consoleCursorInfo struct {
}

var (
kernel32 = syscall.NewLazyDLL("kernel32.dll")
kernel32 = syscall.NewLazySystemDLL("kernel32.dll")
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
@@ -87,8 +87,8 @@ var (
procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer")
)

// Writer provides colorable Writer to the console
type Writer struct {
// writer provides colorable Writer to the console
type writer struct {
out io.Writer
handle syscall.Handle
althandle syscall.Handle
@@ -98,7 +98,7 @@ type Writer struct {
mutex sync.Mutex
}

// NewColorable returns new instance of Writer which handles escape sequence from File.
// NewColorable returns new instance of writer which handles escape sequence from File.
func NewColorable(file *os.File) io.Writer {
if file == nil {
panic("nil passed instead of *os.File to NewColorable()")
@@ -112,17 +112,17 @@ func NewColorable(file *os.File) io.Writer {
var csbi consoleScreenBufferInfo
handle := syscall.Handle(file.Fd())
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}}
return &writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}}
}
return file
}

// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout.
// NewColorableStdout returns new instance of writer which handles escape sequence for stdout.
func NewColorableStdout() io.Writer {
return NewColorable(os.Stdout)
}

// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr.
// NewColorableStderr returns new instance of writer which handles escape sequence for stderr.
func NewColorableStderr() io.Writer {
return NewColorable(os.Stderr)
}
@@ -434,7 +434,7 @@ func atoiWithDefault(s string, def int) (int, error) {
}

// Write writes data on console
func (w *Writer) Write(data []byte) (n int, err error) {
func (w *writer) Write(data []byte) (n int, err error) {
w.mutex.Lock()
defer w.mutex.Unlock()
var csbi consoleScreenBufferInfo
@@ -560,7 +560,7 @@ loop:
}
procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
case 'E':
n, err = strconv.Atoi(buf.String())
n, err = atoiWithDefault(buf.String(), 1)
if err != nil {
continue
}
@@ -569,7 +569,7 @@ loop:
csbi.cursorPosition.y += short(n)
procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
case 'F':
n, err = strconv.Atoi(buf.String())
n, err = atoiWithDefault(buf.String(), 1)
if err != nil {
continue
}

vendor/github.com/mattn/go-runewidth/runewidth_table.go (generated, vendored; 323 changed lines)

@@ -4,20 +4,21 @@ package runewidth
|
||||
|
||||
var combining = table{
|
||||
{0x0300, 0x036F}, {0x0483, 0x0489}, {0x07EB, 0x07F3},
|
||||
{0x0C00, 0x0C00}, {0x0C04, 0x0C04}, {0x0D00, 0x0D01},
|
||||
{0x135D, 0x135F}, {0x1A7F, 0x1A7F}, {0x1AB0, 0x1AC0},
|
||||
{0x1B6B, 0x1B73}, {0x1DC0, 0x1DF9}, {0x1DFB, 0x1DFF},
|
||||
{0x0C00, 0x0C00}, {0x0C04, 0x0C04}, {0x0CF3, 0x0CF3},
|
||||
{0x0D00, 0x0D01}, {0x135D, 0x135F}, {0x1A7F, 0x1A7F},
|
||||
{0x1AB0, 0x1ACE}, {0x1B6B, 0x1B73}, {0x1DC0, 0x1DFF},
|
||||
{0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2DE0, 0x2DFF},
|
||||
{0x3099, 0x309A}, {0xA66F, 0xA672}, {0xA674, 0xA67D},
|
||||
{0xA69E, 0xA69F}, {0xA6F0, 0xA6F1}, {0xA8E0, 0xA8F1},
|
||||
{0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, {0x10376, 0x1037A},
|
||||
{0x10EAB, 0x10EAC}, {0x10F46, 0x10F50}, {0x11300, 0x11301},
|
||||
{0x1133B, 0x1133C}, {0x11366, 0x1136C}, {0x11370, 0x11374},
|
||||
{0x16AF0, 0x16AF4}, {0x1D165, 0x1D169}, {0x1D16D, 0x1D172},
|
||||
{0x10EAB, 0x10EAC}, {0x10F46, 0x10F50}, {0x10F82, 0x10F85},
|
||||
{0x11300, 0x11301}, {0x1133B, 0x1133C}, {0x11366, 0x1136C},
|
||||
{0x11370, 0x11374}, {0x16AF0, 0x16AF4}, {0x1CF00, 0x1CF2D},
|
||||
{0x1CF30, 0x1CF46}, {0x1D165, 0x1D169}, {0x1D16D, 0x1D172},
|
||||
{0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD},
|
||||
{0x1D242, 0x1D244}, {0x1E000, 0x1E006}, {0x1E008, 0x1E018},
|
||||
{0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, {0x1E026, 0x1E02A},
|
||||
{0x1E8D0, 0x1E8D6},
|
||||
{0x1E08F, 0x1E08F}, {0x1E8D0, 0x1E8D6},
|
||||
}
|
||||
|
||||
var doublewidth = table{
|
||||
@@ -33,33 +34,34 @@ var doublewidth = table{
|
||||
{0x2753, 0x2755}, {0x2757, 0x2757}, {0x2795, 0x2797},
|
||||
{0x27B0, 0x27B0}, {0x27BF, 0x27BF}, {0x2B1B, 0x2B1C},
|
||||
{0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x2E80, 0x2E99},
|
||||
{0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x2FFB},
|
||||
{0x3000, 0x303E}, {0x3041, 0x3096}, {0x3099, 0x30FF},
|
||||
{0x3105, 0x312F}, {0x3131, 0x318E}, {0x3190, 0x31E3},
|
||||
{0x31F0, 0x321E}, {0x3220, 0x3247}, {0x3250, 0x4DBF},
|
||||
{0x4E00, 0xA48C}, {0xA490, 0xA4C6}, {0xA960, 0xA97C},
|
||||
{0xAC00, 0xD7A3}, {0xF900, 0xFAFF}, {0xFE10, 0xFE19},
|
||||
{0xFE30, 0xFE52}, {0xFE54, 0xFE66}, {0xFE68, 0xFE6B},
|
||||
{0xFF01, 0xFF60}, {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE4},
|
||||
{0x16FF0, 0x16FF1}, {0x17000, 0x187F7}, {0x18800, 0x18CD5},
|
||||
{0x18D00, 0x18D08}, {0x1B000, 0x1B11E}, {0x1B150, 0x1B152},
|
||||
{0x1B164, 0x1B167}, {0x1B170, 0x1B2FB}, {0x1F004, 0x1F004},
|
||||
{0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A},
|
||||
{0x1F200, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248},
|
||||
{0x1F250, 0x1F251}, {0x1F260, 0x1F265}, {0x1F300, 0x1F320},
|
||||
{0x1F32D, 0x1F335}, {0x1F337, 0x1F37C}, {0x1F37E, 0x1F393},
|
||||
{0x1F3A0, 0x1F3CA}, {0x1F3CF, 0x1F3D3}, {0x1F3E0, 0x1F3F0},
|
||||
{0x1F3F4, 0x1F3F4}, {0x1F3F8, 0x1F43E}, {0x1F440, 0x1F440},
|
||||
{0x1F442, 0x1F4FC}, {0x1F4FF, 0x1F53D}, {0x1F54B, 0x1F54E},
|
||||
{0x1F550, 0x1F567}, {0x1F57A, 0x1F57A}, {0x1F595, 0x1F596},
|
||||
{0x1F5A4, 0x1F5A4}, {0x1F5FB, 0x1F64F}, {0x1F680, 0x1F6C5},
|
||||
{0x1F6CC, 0x1F6CC}, {0x1F6D0, 0x1F6D2}, {0x1F6D5, 0x1F6D7},
|
||||
{0x1F6EB, 0x1F6EC}, {0x1F6F4, 0x1F6FC}, {0x1F7E0, 0x1F7EB},
|
||||
{0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1F978},
|
||||
{0x1F97A, 0x1F9CB}, {0x1F9CD, 0x1F9FF}, {0x1FA70, 0x1FA74},
|
||||
{0x1FA78, 0x1FA7A}, {0x1FA80, 0x1FA86}, {0x1FA90, 0x1FAA8},
|
||||
{0x1FAB0, 0x1FAB6}, {0x1FAC0, 0x1FAC2}, {0x1FAD0, 0x1FAD6},
|
||||
{0x20000, 0x2FFFD}, {0x30000, 0x3FFFD},
|
||||
{0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x303E},
|
||||
{0x3041, 0x3096}, {0x3099, 0x30FF}, {0x3105, 0x312F},
|
||||
{0x3131, 0x318E}, {0x3190, 0x31E3}, {0x31EF, 0x321E},
|
||||
{0x3220, 0x3247}, {0x3250, 0x4DBF}, {0x4E00, 0xA48C},
|
||||
{0xA490, 0xA4C6}, {0xA960, 0xA97C}, {0xAC00, 0xD7A3},
|
||||
{0xF900, 0xFAFF}, {0xFE10, 0xFE19}, {0xFE30, 0xFE52},
|
||||
{0xFE54, 0xFE66}, {0xFE68, 0xFE6B}, {0xFF01, 0xFF60},
|
||||
{0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE4}, {0x16FF0, 0x16FF1},
|
||||
{0x17000, 0x187F7}, {0x18800, 0x18CD5}, {0x18D00, 0x18D08},
|
||||
{0x1AFF0, 0x1AFF3}, {0x1AFF5, 0x1AFFB}, {0x1AFFD, 0x1AFFE},
|
||||
{0x1B000, 0x1B122}, {0x1B132, 0x1B132}, {0x1B150, 0x1B152},
|
||||
{0x1B155, 0x1B155}, {0x1B164, 0x1B167}, {0x1B170, 0x1B2FB},
|
||||
{0x1F004, 0x1F004}, {0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E},
|
||||
{0x1F191, 0x1F19A}, {0x1F200, 0x1F202}, {0x1F210, 0x1F23B},
|
||||
{0x1F240, 0x1F248}, {0x1F250, 0x1F251}, {0x1F260, 0x1F265},
|
||||
{0x1F300, 0x1F320}, {0x1F32D, 0x1F335}, {0x1F337, 0x1F37C},
|
||||
{0x1F37E, 0x1F393}, {0x1F3A0, 0x1F3CA}, {0x1F3CF, 0x1F3D3},
|
||||
{0x1F3E0, 0x1F3F0}, {0x1F3F4, 0x1F3F4}, {0x1F3F8, 0x1F43E},
|
||||
{0x1F440, 0x1F440}, {0x1F442, 0x1F4FC}, {0x1F4FF, 0x1F53D},
|
||||
{0x1F54B, 0x1F54E}, {0x1F550, 0x1F567}, {0x1F57A, 0x1F57A},
|
||||
{0x1F595, 0x1F596}, {0x1F5A4, 0x1F5A4}, {0x1F5FB, 0x1F64F},
|
||||
{0x1F680, 0x1F6C5}, {0x1F6CC, 0x1F6CC}, {0x1F6D0, 0x1F6D2},
|
||||
{0x1F6D5, 0x1F6D7}, {0x1F6DC, 0x1F6DF}, {0x1F6EB, 0x1F6EC},
|
||||
{0x1F6F4, 0x1F6FC}, {0x1F7E0, 0x1F7EB}, {0x1F7F0, 0x1F7F0},
|
||||
{0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1F9FF},
|
||||
{0x1FA70, 0x1FA7C}, {0x1FA80, 0x1FA88}, {0x1FA90, 0x1FABD},
|
||||
{0x1FABF, 0x1FAC5}, {0x1FACE, 0x1FADB}, {0x1FAE0, 0x1FAE8},
|
||||
{0x1FAF0, 0x1FAF8}, {0x20000, 0x2FFFD}, {0x30000, 0x3FFFD},
|
||||
}
|
||||
|
||||
var ambiguous = table{
|
||||
@@ -154,43 +156,43 @@ var neutral = table{
|
||||
{0x0402, 0x040F}, {0x0450, 0x0450}, {0x0452, 0x052F},
|
||||
{0x0531, 0x0556}, {0x0559, 0x058A}, {0x058D, 0x058F},
|
||||
{0x0591, 0x05C7}, {0x05D0, 0x05EA}, {0x05EF, 0x05F4},
|
||||
{0x0600, 0x061C}, {0x061E, 0x070D}, {0x070F, 0x074A},
|
||||
{0x074D, 0x07B1}, {0x07C0, 0x07FA}, {0x07FD, 0x082D},
|
||||
{0x0830, 0x083E}, {0x0840, 0x085B}, {0x085E, 0x085E},
|
||||
{0x0860, 0x086A}, {0x08A0, 0x08B4}, {0x08B6, 0x08C7},
|
||||
{0x08D3, 0x0983}, {0x0985, 0x098C}, {0x098F, 0x0990},
|
||||
{0x0993, 0x09A8}, {0x09AA, 0x09B0}, {0x09B2, 0x09B2},
|
||||
{0x09B6, 0x09B9}, {0x09BC, 0x09C4}, {0x09C7, 0x09C8},
|
||||
{0x09CB, 0x09CE}, {0x09D7, 0x09D7}, {0x09DC, 0x09DD},
|
||||
{0x09DF, 0x09E3}, {0x09E6, 0x09FE}, {0x0A01, 0x0A03},
|
||||
{0x0A05, 0x0A0A}, {0x0A0F, 0x0A10}, {0x0A13, 0x0A28},
|
||||
{0x0A2A, 0x0A30}, {0x0A32, 0x0A33}, {0x0A35, 0x0A36},
|
||||
{0x0A38, 0x0A39}, {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42},
|
||||
{0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51},
|
||||
{0x0A59, 0x0A5C}, {0x0A5E, 0x0A5E}, {0x0A66, 0x0A76},
|
||||
{0x0A81, 0x0A83}, {0x0A85, 0x0A8D}, {0x0A8F, 0x0A91},
|
||||
{0x0A93, 0x0AA8}, {0x0AAA, 0x0AB0}, {0x0AB2, 0x0AB3},
|
||||
{0x0AB5, 0x0AB9}, {0x0ABC, 0x0AC5}, {0x0AC7, 0x0AC9},
|
||||
{0x0ACB, 0x0ACD}, {0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE3},
|
||||
{0x0AE6, 0x0AF1}, {0x0AF9, 0x0AFF}, {0x0B01, 0x0B03},
|
||||
{0x0B05, 0x0B0C}, {0x0B0F, 0x0B10}, {0x0B13, 0x0B28},
|
||||
{0x0B2A, 0x0B30}, {0x0B32, 0x0B33}, {0x0B35, 0x0B39},
|
||||
{0x0B3C, 0x0B44}, {0x0B47, 0x0B48}, {0x0B4B, 0x0B4D},
|
||||
{0x0B55, 0x0B57}, {0x0B5C, 0x0B5D}, {0x0B5F, 0x0B63},
|
||||
{0x0B66, 0x0B77}, {0x0B82, 0x0B83}, {0x0B85, 0x0B8A},
|
||||
{0x0B8E, 0x0B90}, {0x0B92, 0x0B95}, {0x0B99, 0x0B9A},
|
||||
{0x0B9C, 0x0B9C}, {0x0B9E, 0x0B9F}, {0x0BA3, 0x0BA4},
|
||||
{0x0BA8, 0x0BAA}, {0x0BAE, 0x0BB9}, {0x0BBE, 0x0BC2},
|
||||
{0x0BC6, 0x0BC8}, {0x0BCA, 0x0BCD}, {0x0BD0, 0x0BD0},
|
||||
{0x0BD7, 0x0BD7}, {0x0BE6, 0x0BFA}, {0x0C00, 0x0C0C},
|
||||
{0x0C0E, 0x0C10}, {0x0C12, 0x0C28}, {0x0C2A, 0x0C39},
|
||||
{0x0C3D, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D},
|
||||
{0x0C55, 0x0C56}, {0x0C58, 0x0C5A}, {0x0C60, 0x0C63},
|
||||
{0x0600, 0x070D}, {0x070F, 0x074A}, {0x074D, 0x07B1},
|
||||
{0x07C0, 0x07FA}, {0x07FD, 0x082D}, {0x0830, 0x083E},
|
||||
{0x0840, 0x085B}, {0x085E, 0x085E}, {0x0860, 0x086A},
|
||||
{0x0870, 0x088E}, {0x0890, 0x0891}, {0x0898, 0x0983},
|
||||
{0x0985, 0x098C}, {0x098F, 0x0990}, {0x0993, 0x09A8},
|
||||
{0x09AA, 0x09B0}, {0x09B2, 0x09B2}, {0x09B6, 0x09B9},
|
||||
{0x09BC, 0x09C4}, {0x09C7, 0x09C8}, {0x09CB, 0x09CE},
|
||||
{0x09D7, 0x09D7}, {0x09DC, 0x09DD}, {0x09DF, 0x09E3},
|
||||
{0x09E6, 0x09FE}, {0x0A01, 0x0A03}, {0x0A05, 0x0A0A},
|
||||
{0x0A0F, 0x0A10}, {0x0A13, 0x0A28}, {0x0A2A, 0x0A30},
|
||||
{0x0A32, 0x0A33}, {0x0A35, 0x0A36}, {0x0A38, 0x0A39},
|
||||
{0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42}, {0x0A47, 0x0A48},
|
||||
{0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, {0x0A59, 0x0A5C},
|
||||
{0x0A5E, 0x0A5E}, {0x0A66, 0x0A76}, {0x0A81, 0x0A83},
|
||||
{0x0A85, 0x0A8D}, {0x0A8F, 0x0A91}, {0x0A93, 0x0AA8},
|
||||
{0x0AAA, 0x0AB0}, {0x0AB2, 0x0AB3}, {0x0AB5, 0x0AB9},
|
||||
{0x0ABC, 0x0AC5}, {0x0AC7, 0x0AC9}, {0x0ACB, 0x0ACD},
|
||||
{0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE3}, {0x0AE6, 0x0AF1},
|
||||
{0x0AF9, 0x0AFF}, {0x0B01, 0x0B03}, {0x0B05, 0x0B0C},
|
||||
{0x0B0F, 0x0B10}, {0x0B13, 0x0B28}, {0x0B2A, 0x0B30},
|
||||
{0x0B32, 0x0B33}, {0x0B35, 0x0B39}, {0x0B3C, 0x0B44},
|
||||
{0x0B47, 0x0B48}, {0x0B4B, 0x0B4D}, {0x0B55, 0x0B57},
|
||||
{0x0B5C, 0x0B5D}, {0x0B5F, 0x0B63}, {0x0B66, 0x0B77},
|
||||
{0x0B82, 0x0B83}, {0x0B85, 0x0B8A}, {0x0B8E, 0x0B90},
|
||||
{0x0B92, 0x0B95}, {0x0B99, 0x0B9A}, {0x0B9C, 0x0B9C},
|
||||
{0x0B9E, 0x0B9F}, {0x0BA3, 0x0BA4}, {0x0BA8, 0x0BAA},
|
||||
{0x0BAE, 0x0BB9}, {0x0BBE, 0x0BC2}, {0x0BC6, 0x0BC8},
|
||||
{0x0BCA, 0x0BCD}, {0x0BD0, 0x0BD0}, {0x0BD7, 0x0BD7},
|
||||
{0x0BE6, 0x0BFA}, {0x0C00, 0x0C0C}, {0x0C0E, 0x0C10},
|
||||
{0x0C12, 0x0C28}, {0x0C2A, 0x0C39}, {0x0C3C, 0x0C44},
|
||||
{0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, {0x0C55, 0x0C56},
|
||||
{0x0C58, 0x0C5A}, {0x0C5D, 0x0C5D}, {0x0C60, 0x0C63},
|
||||
{0x0C66, 0x0C6F}, {0x0C77, 0x0C8C}, {0x0C8E, 0x0C90},
|
||||
{0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9},
|
||||
{0x0CBC, 0x0CC4}, {0x0CC6, 0x0CC8}, {0x0CCA, 0x0CCD},
|
||||
{0x0CD5, 0x0CD6}, {0x0CDE, 0x0CDE}, {0x0CE0, 0x0CE3},
|
||||
{0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF2}, {0x0D00, 0x0D0C},
|
||||
{0x0CD5, 0x0CD6}, {0x0CDD, 0x0CDE}, {0x0CE0, 0x0CE3},
|
||||
{0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF3}, {0x0D00, 0x0D0C},
|
||||
{0x0D0E, 0x0D10}, {0x0D12, 0x0D44}, {0x0D46, 0x0D48},
|
||||
{0x0D4A, 0x0D4F}, {0x0D54, 0x0D63}, {0x0D66, 0x0D7F},
|
||||
{0x0D81, 0x0D83}, {0x0D85, 0x0D96}, {0x0D9A, 0x0DB1},
|
||||
@@ -200,7 +202,7 @@ var neutral = table{
|
||||
{0x0E01, 0x0E3A}, {0x0E3F, 0x0E5B}, {0x0E81, 0x0E82},
|
||||
{0x0E84, 0x0E84}, {0x0E86, 0x0E8A}, {0x0E8C, 0x0EA3},
|
||||
{0x0EA5, 0x0EA5}, {0x0EA7, 0x0EBD}, {0x0EC0, 0x0EC4},
|
||||
{0x0EC6, 0x0EC6}, {0x0EC8, 0x0ECD}, {0x0ED0, 0x0ED9},
|
||||
{0x0EC6, 0x0EC6}, {0x0EC8, 0x0ECE}, {0x0ED0, 0x0ED9},
|
||||
{0x0EDC, 0x0EDF}, {0x0F00, 0x0F47}, {0x0F49, 0x0F6C},
|
||||
{0x0F71, 0x0F97}, {0x0F99, 0x0FBC}, {0x0FBE, 0x0FCC},
|
||||
{0x0FCE, 0x0FDA}, {0x1000, 0x10C5}, {0x10C7, 0x10C7},
|
||||
@@ -212,20 +214,19 @@ var neutral = table{
|
||||
{0x12D8, 0x1310}, {0x1312, 0x1315}, {0x1318, 0x135A},
|
||||
{0x135D, 0x137C}, {0x1380, 0x1399}, {0x13A0, 0x13F5},
|
||||
{0x13F8, 0x13FD}, {0x1400, 0x169C}, {0x16A0, 0x16F8},
|
||||
{0x1700, 0x170C}, {0x170E, 0x1714}, {0x1720, 0x1736},
|
||||
{0x1740, 0x1753}, {0x1760, 0x176C}, {0x176E, 0x1770},
|
||||
{0x1772, 0x1773}, {0x1780, 0x17DD}, {0x17E0, 0x17E9},
|
||||
{0x17F0, 0x17F9}, {0x1800, 0x180E}, {0x1810, 0x1819},
|
||||
{0x1820, 0x1878}, {0x1880, 0x18AA}, {0x18B0, 0x18F5},
|
||||
{0x1900, 0x191E}, {0x1920, 0x192B}, {0x1930, 0x193B},
|
||||
{0x1940, 0x1940}, {0x1944, 0x196D}, {0x1970, 0x1974},
|
||||
{0x1980, 0x19AB}, {0x19B0, 0x19C9}, {0x19D0, 0x19DA},
|
||||
{0x19DE, 0x1A1B}, {0x1A1E, 0x1A5E}, {0x1A60, 0x1A7C},
|
||||
{0x1A7F, 0x1A89}, {0x1A90, 0x1A99}, {0x1AA0, 0x1AAD},
|
||||
{0x1AB0, 0x1AC0}, {0x1B00, 0x1B4B}, {0x1B50, 0x1B7C},
|
||||
{0x1B80, 0x1BF3}, {0x1BFC, 0x1C37}, {0x1C3B, 0x1C49},
|
||||
{0x1C4D, 0x1C88}, {0x1C90, 0x1CBA}, {0x1CBD, 0x1CC7},
|
||||
{0x1CD0, 0x1CFA}, {0x1D00, 0x1DF9}, {0x1DFB, 0x1F15},
|
||||
{0x1700, 0x1715}, {0x171F, 0x1736}, {0x1740, 0x1753},
|
||||
{0x1760, 0x176C}, {0x176E, 0x1770}, {0x1772, 0x1773},
|
||||
{0x1780, 0x17DD}, {0x17E0, 0x17E9}, {0x17F0, 0x17F9},
|
||||
{0x1800, 0x1819}, {0x1820, 0x1878}, {0x1880, 0x18AA},
|
||||
{0x18B0, 0x18F5}, {0x1900, 0x191E}, {0x1920, 0x192B},
|
||||
{0x1930, 0x193B}, {0x1940, 0x1940}, {0x1944, 0x196D},
|
||||
{0x1970, 0x1974}, {0x1980, 0x19AB}, {0x19B0, 0x19C9},
|
||||
{0x19D0, 0x19DA}, {0x19DE, 0x1A1B}, {0x1A1E, 0x1A5E},
|
||||
{0x1A60, 0x1A7C}, {0x1A7F, 0x1A89}, {0x1A90, 0x1A99},
|
||||
{0x1AA0, 0x1AAD}, {0x1AB0, 0x1ACE}, {0x1B00, 0x1B4C},
|
||||
{0x1B50, 0x1B7E}, {0x1B80, 0x1BF3}, {0x1BFC, 0x1C37},
|
||||
{0x1C3B, 0x1C49}, {0x1C4D, 0x1C88}, {0x1C90, 0x1CBA},
|
||||
{0x1CBD, 0x1CC7}, {0x1CD0, 0x1CFA}, {0x1D00, 0x1F15},
|
||||
{0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, {0x1F48, 0x1F4D},
|
||||
{0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B},
|
||||
{0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4},
|
||||
@@ -237,7 +238,7 @@ var neutral = table{
|
||||
{0x2036, 0x203A}, {0x203C, 0x203D}, {0x203F, 0x2064},
|
||||
{0x2066, 0x2071}, {0x2075, 0x207E}, {0x2080, 0x2080},
|
||||
{0x2085, 0x208E}, {0x2090, 0x209C}, {0x20A0, 0x20A8},
|
||||
{0x20AA, 0x20AB}, {0x20AD, 0x20BF}, {0x20D0, 0x20F0},
|
||||
{0x20AA, 0x20AB}, {0x20AD, 0x20C0}, {0x20D0, 0x20F0},
|
||||
{0x2100, 0x2102}, {0x2104, 0x2104}, {0x2106, 0x2108},
|
||||
{0x210A, 0x2112}, {0x2114, 0x2115}, {0x2117, 0x2120},
|
||||
{0x2123, 0x2125}, {0x2127, 0x212A}, {0x212C, 0x2152},
|
||||
@@ -275,15 +276,15 @@ var neutral = table{
|
||||
{0x2780, 0x2794}, {0x2798, 0x27AF}, {0x27B1, 0x27BE},
|
||||
{0x27C0, 0x27E5}, {0x27EE, 0x2984}, {0x2987, 0x2B1A},
|
||||
{0x2B1D, 0x2B4F}, {0x2B51, 0x2B54}, {0x2B5A, 0x2B73},
|
||||
{0x2B76, 0x2B95}, {0x2B97, 0x2C2E}, {0x2C30, 0x2C5E},
|
||||
{0x2C60, 0x2CF3}, {0x2CF9, 0x2D25}, {0x2D27, 0x2D27},
|
||||
{0x2D2D, 0x2D2D}, {0x2D30, 0x2D67}, {0x2D6F, 0x2D70},
|
||||
{0x2D7F, 0x2D96}, {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE},
|
||||
{0x2DB0, 0x2DB6}, {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6},
|
||||
{0x2DC8, 0x2DCE}, {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE},
|
||||
{0x2DE0, 0x2E52}, {0x303F, 0x303F}, {0x4DC0, 0x4DFF},
|
||||
{0xA4D0, 0xA62B}, {0xA640, 0xA6F7}, {0xA700, 0xA7BF},
|
||||
{0xA7C2, 0xA7CA}, {0xA7F5, 0xA82C}, {0xA830, 0xA839},
|
||||
{0x2B76, 0x2B95}, {0x2B97, 0x2CF3}, {0x2CF9, 0x2D25},
|
||||
{0x2D27, 0x2D27}, {0x2D2D, 0x2D2D}, {0x2D30, 0x2D67},
|
||||
{0x2D6F, 0x2D70}, {0x2D7F, 0x2D96}, {0x2DA0, 0x2DA6},
|
||||
{0x2DA8, 0x2DAE}, {0x2DB0, 0x2DB6}, {0x2DB8, 0x2DBE},
|
||||
{0x2DC0, 0x2DC6}, {0x2DC8, 0x2DCE}, {0x2DD0, 0x2DD6},
|
||||
{0x2DD8, 0x2DDE}, {0x2DE0, 0x2E5D}, {0x303F, 0x303F},
|
||||
{0x4DC0, 0x4DFF}, {0xA4D0, 0xA62B}, {0xA640, 0xA6F7},
|
||||
{0xA700, 0xA7CA}, {0xA7D0, 0xA7D1}, {0xA7D3, 0xA7D3},
|
||||
{0xA7D5, 0xA7D9}, {0xA7F2, 0xA82C}, {0xA830, 0xA839},
|
||||
{0xA840, 0xA877}, {0xA880, 0xA8C5}, {0xA8CE, 0xA8D9},
|
||||
{0xA8E0, 0xA953}, {0xA95F, 0xA95F}, {0xA980, 0xA9CD},
|
||||
{0xA9CF, 0xA9D9}, {0xA9DE, 0xA9FE}, {0xAA00, 0xAA36},
|
||||
@@ -294,8 +295,8 @@ var neutral = table{
|
||||
{0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xD800, 0xDFFF},
|
||||
{0xFB00, 0xFB06}, {0xFB13, 0xFB17}, {0xFB1D, 0xFB36},
|
||||
{0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41},
|
||||
{0xFB43, 0xFB44}, {0xFB46, 0xFBC1}, {0xFBD3, 0xFD3F},
|
||||
{0xFD50, 0xFD8F}, {0xFD92, 0xFDC7}, {0xFDF0, 0xFDFD},
|
||||
{0xFB43, 0xFB44}, {0xFB46, 0xFBC2}, {0xFBD3, 0xFD8F},
|
||||
{0xFD92, 0xFDC7}, {0xFDCF, 0xFDCF}, {0xFDF0, 0xFDFF},
|
||||
{0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC},
|
||||
{0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFC}, {0x10000, 0x1000B},
|
||||
{0x1000D, 0x10026}, {0x10028, 0x1003A}, {0x1003C, 0x1003D},
|
||||
@@ -307,44 +308,48 @@ var neutral = table{
|
||||
{0x10380, 0x1039D}, {0x1039F, 0x103C3}, {0x103C8, 0x103D5},
|
||||
{0x10400, 0x1049D}, {0x104A0, 0x104A9}, {0x104B0, 0x104D3},
|
||||
{0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563},
|
||||
{0x1056F, 0x1056F}, {0x10600, 0x10736}, {0x10740, 0x10755},
|
||||
{0x10760, 0x10767}, {0x10800, 0x10805}, {0x10808, 0x10808},
|
||||
{0x1080A, 0x10835}, {0x10837, 0x10838}, {0x1083C, 0x1083C},
|
||||
{0x1083F, 0x10855}, {0x10857, 0x1089E}, {0x108A7, 0x108AF},
|
||||
{0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x108FB, 0x1091B},
|
||||
{0x1091F, 0x10939}, {0x1093F, 0x1093F}, {0x10980, 0x109B7},
|
||||
{0x109BC, 0x109CF}, {0x109D2, 0x10A03}, {0x10A05, 0x10A06},
|
||||
{0x10A0C, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A35},
|
||||
{0x10A38, 0x10A3A}, {0x10A3F, 0x10A48}, {0x10A50, 0x10A58},
|
||||
{0x10A60, 0x10A9F}, {0x10AC0, 0x10AE6}, {0x10AEB, 0x10AF6},
|
||||
{0x10B00, 0x10B35}, {0x10B39, 0x10B55}, {0x10B58, 0x10B72},
|
||||
{0x10B78, 0x10B91}, {0x10B99, 0x10B9C}, {0x10BA9, 0x10BAF},
|
||||
{0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2},
|
||||
{0x10CFA, 0x10D27}, {0x10D30, 0x10D39}, {0x10E60, 0x10E7E},
|
||||
{0x10E80, 0x10EA9}, {0x10EAB, 0x10EAD}, {0x10EB0, 0x10EB1},
|
||||
{0x10F00, 0x10F27}, {0x10F30, 0x10F59}, {0x10FB0, 0x10FCB},
|
||||
{0x10FE0, 0x10FF6}, {0x11000, 0x1104D}, {0x11052, 0x1106F},
|
||||
{0x1107F, 0x110C1}, {0x110CD, 0x110CD}, {0x110D0, 0x110E8},
|
||||
{0x110F0, 0x110F9}, {0x11100, 0x11134}, {0x11136, 0x11147},
|
||||
{0x11150, 0x11176}, {0x11180, 0x111DF}, {0x111E1, 0x111F4},
|
||||
{0x11200, 0x11211}, {0x11213, 0x1123E}, {0x11280, 0x11286},
|
||||
{0x11288, 0x11288}, {0x1128A, 0x1128D}, {0x1128F, 0x1129D},
|
||||
{0x1129F, 0x112A9}, {0x112B0, 0x112EA}, {0x112F0, 0x112F9},
|
||||
{0x11300, 0x11303}, {0x11305, 0x1130C}, {0x1130F, 0x11310},
|
||||
{0x11313, 0x11328}, {0x1132A, 0x11330}, {0x11332, 0x11333},
|
||||
{0x11335, 0x11339}, {0x1133B, 0x11344}, {0x11347, 0x11348},
|
||||
{0x1134B, 0x1134D}, {0x11350, 0x11350}, {0x11357, 0x11357},
|
||||
{0x1135D, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374},
|
||||
{0x11400, 0x1145B}, {0x1145D, 0x11461}, {0x11480, 0x114C7},
|
||||
{0x114D0, 0x114D9}, {0x11580, 0x115B5}, {0x115B8, 0x115DD},
|
||||
{0x11600, 0x11644}, {0x11650, 0x11659}, {0x11660, 0x1166C},
|
||||
{0x11680, 0x116B8}, {0x116C0, 0x116C9}, {0x11700, 0x1171A},
|
||||
{0x1171D, 0x1172B}, {0x11730, 0x1173F}, {0x11800, 0x1183B},
|
||||
{0x118A0, 0x118F2}, {0x118FF, 0x11906}, {0x11909, 0x11909},
|
||||
{0x1190C, 0x11913}, {0x11915, 0x11916}, {0x11918, 0x11935},
|
||||
{0x11937, 0x11938}, {0x1193B, 0x11946}, {0x11950, 0x11959},
|
||||
{0x119A0, 0x119A7}, {0x119AA, 0x119D7}, {0x119DA, 0x119E4},
|
||||
{0x11A00, 0x11A47}, {0x11A50, 0x11AA2}, {0x11AC0, 0x11AF8},
|
||||
{0x1056F, 0x1057A}, {0x1057C, 0x1058A}, {0x1058C, 0x10592},
|
||||
{0x10594, 0x10595}, {0x10597, 0x105A1}, {0x105A3, 0x105B1},
|
||||
{0x105B3, 0x105B9}, {0x105BB, 0x105BC}, {0x10600, 0x10736},
|
||||
{0x10740, 0x10755}, {0x10760, 0x10767}, {0x10780, 0x10785},
|
||||
{0x10787, 0x107B0}, {0x107B2, 0x107BA}, {0x10800, 0x10805},
|
||||
{0x10808, 0x10808}, {0x1080A, 0x10835}, {0x10837, 0x10838},
|
||||
{0x1083C, 0x1083C}, {0x1083F, 0x10855}, {0x10857, 0x1089E},
|
||||
{0x108A7, 0x108AF}, {0x108E0, 0x108F2}, {0x108F4, 0x108F5},
|
||||
{0x108FB, 0x1091B}, {0x1091F, 0x10939}, {0x1093F, 0x1093F},
|
||||
{0x10980, 0x109B7}, {0x109BC, 0x109CF}, {0x109D2, 0x10A03},
|
||||
{0x10A05, 0x10A06}, {0x10A0C, 0x10A13}, {0x10A15, 0x10A17},
|
||||
{0x10A19, 0x10A35}, {0x10A38, 0x10A3A}, {0x10A3F, 0x10A48},
|
||||
{0x10A50, 0x10A58}, {0x10A60, 0x10A9F}, {0x10AC0, 0x10AE6},
|
||||
{0x10AEB, 0x10AF6}, {0x10B00, 0x10B35}, {0x10B39, 0x10B55},
|
||||
{0x10B58, 0x10B72}, {0x10B78, 0x10B91}, {0x10B99, 0x10B9C},
|
||||
{0x10BA9, 0x10BAF}, {0x10C00, 0x10C48}, {0x10C80, 0x10CB2},
|
||||
{0x10CC0, 0x10CF2}, {0x10CFA, 0x10D27}, {0x10D30, 0x10D39},
|
||||
{0x10E60, 0x10E7E}, {0x10E80, 0x10EA9}, {0x10EAB, 0x10EAD},
|
||||
{0x10EB0, 0x10EB1}, {0x10EFD, 0x10F27}, {0x10F30, 0x10F59},
|
||||
{0x10F70, 0x10F89}, {0x10FB0, 0x10FCB}, {0x10FE0, 0x10FF6},
|
||||
{0x11000, 0x1104D}, {0x11052, 0x11075}, {0x1107F, 0x110C2},
|
||||
{0x110CD, 0x110CD}, {0x110D0, 0x110E8}, {0x110F0, 0x110F9},
|
||||
{0x11100, 0x11134}, {0x11136, 0x11147}, {0x11150, 0x11176},
|
||||
{0x11180, 0x111DF}, {0x111E1, 0x111F4}, {0x11200, 0x11211},
|
||||
{0x11213, 0x11241}, {0x11280, 0x11286}, {0x11288, 0x11288},
|
||||
{0x1128A, 0x1128D}, {0x1128F, 0x1129D}, {0x1129F, 0x112A9},
|
||||
{0x112B0, 0x112EA}, {0x112F0, 0x112F9}, {0x11300, 0x11303},
|
||||
{0x11305, 0x1130C}, {0x1130F, 0x11310}, {0x11313, 0x11328},
|
||||
{0x1132A, 0x11330}, {0x11332, 0x11333}, {0x11335, 0x11339},
|
||||
{0x1133B, 0x11344}, {0x11347, 0x11348}, {0x1134B, 0x1134D},
|
||||
{0x11350, 0x11350}, {0x11357, 0x11357}, {0x1135D, 0x11363},
|
||||
{0x11366, 0x1136C}, {0x11370, 0x11374}, {0x11400, 0x1145B},
|
||||
{0x1145D, 0x11461}, {0x11480, 0x114C7}, {0x114D0, 0x114D9},
|
||||
{0x11580, 0x115B5}, {0x115B8, 0x115DD}, {0x11600, 0x11644},
|
||||
{0x11650, 0x11659}, {0x11660, 0x1166C}, {0x11680, 0x116B9},
|
||||
{0x116C0, 0x116C9}, {0x11700, 0x1171A}, {0x1171D, 0x1172B},
|
||||
{0x11730, 0x11746}, {0x11800, 0x1183B}, {0x118A0, 0x118F2},
|
||||
{0x118FF, 0x11906}, {0x11909, 0x11909}, {0x1190C, 0x11913},
|
||||
{0x11915, 0x11916}, {0x11918, 0x11935}, {0x11937, 0x11938},
|
||||
{0x1193B, 0x11946}, {0x11950, 0x11959}, {0x119A0, 0x119A7},
|
||||
{0x119AA, 0x119D7}, {0x119DA, 0x119E4}, {0x11A00, 0x11A47},
|
||||
{0x11A50, 0x11AA2}, {0x11AB0, 0x11AF8}, {0x11B00, 0x11B09},
|
||||
{0x11C00, 0x11C08}, {0x11C0A, 0x11C36}, {0x11C38, 0x11C45},
|
||||
{0x11C50, 0x11C6C}, {0x11C70, 0x11C8F}, {0x11C92, 0x11CA7},
|
||||
{0x11CA9, 0x11CB6}, {0x11D00, 0x11D06}, {0x11D08, 0x11D09},
|
||||
@@ -352,30 +357,36 @@ var neutral = table{
|
||||
{0x11D3F, 0x11D47}, {0x11D50, 0x11D59}, {0x11D60, 0x11D65},
|
||||
{0x11D67, 0x11D68}, {0x11D6A, 0x11D8E}, {0x11D90, 0x11D91},
|
||||
{0x11D93, 0x11D98}, {0x11DA0, 0x11DA9}, {0x11EE0, 0x11EF8},
|
||||
{0x11F00, 0x11F10}, {0x11F12, 0x11F3A}, {0x11F3E, 0x11F59},
|
||||
{0x11FB0, 0x11FB0}, {0x11FC0, 0x11FF1}, {0x11FFF, 0x12399},
|
||||
{0x12400, 0x1246E}, {0x12470, 0x12474}, {0x12480, 0x12543},
|
||||
{0x13000, 0x1342E}, {0x13430, 0x13438}, {0x14400, 0x14646},
|
||||
{0x12F90, 0x12FF2}, {0x13000, 0x13455}, {0x14400, 0x14646},
|
||||
{0x16800, 0x16A38}, {0x16A40, 0x16A5E}, {0x16A60, 0x16A69},
|
||||
{0x16A6E, 0x16A6F}, {0x16AD0, 0x16AED}, {0x16AF0, 0x16AF5},
|
||||
{0x16B00, 0x16B45}, {0x16B50, 0x16B59}, {0x16B5B, 0x16B61},
|
||||
{0x16B63, 0x16B77}, {0x16B7D, 0x16B8F}, {0x16E40, 0x16E9A},
|
||||
{0x16F00, 0x16F4A}, {0x16F4F, 0x16F87}, {0x16F8F, 0x16F9F},
|
||||
{0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, {0x1BC80, 0x1BC88},
|
||||
{0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BCA3}, {0x1D000, 0x1D0F5},
|
||||
{0x1D100, 0x1D126}, {0x1D129, 0x1D1E8}, {0x1D200, 0x1D245},
|
||||
{0x1D2E0, 0x1D2F3}, {0x1D300, 0x1D356}, {0x1D360, 0x1D378},
|
||||
{0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, {0x1D49E, 0x1D49F},
|
||||
{0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC},
|
||||
{0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3},
|
||||
{0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, {0x1D50D, 0x1D514},
|
||||
{0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E},
|
||||
{0x1D540, 0x1D544}, {0x1D546, 0x1D546}, {0x1D54A, 0x1D550},
|
||||
{0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D7CB}, {0x1D7CE, 0x1DA8B},
|
||||
{0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006},
|
||||
{0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024},
|
||||
{0x1E026, 0x1E02A}, {0x1E100, 0x1E12C}, {0x1E130, 0x1E13D},
|
||||
{0x1E140, 0x1E149}, {0x1E14E, 0x1E14F}, {0x1E2C0, 0x1E2F9},
|
||||
{0x1E2FF, 0x1E2FF}, {0x1E800, 0x1E8C4}, {0x1E8C7, 0x1E8D6},
|
||||
{0x16A6E, 0x16ABE}, {0x16AC0, 0x16AC9}, {0x16AD0, 0x16AED},
|
||||
{0x16AF0, 0x16AF5}, {0x16B00, 0x16B45}, {0x16B50, 0x16B59},
|
||||
{0x16B5B, 0x16B61}, {0x16B63, 0x16B77}, {0x16B7D, 0x16B8F},
|
||||
{0x16E40, 0x16E9A}, {0x16F00, 0x16F4A}, {0x16F4F, 0x16F87},
|
||||
{0x16F8F, 0x16F9F}, {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C},
|
||||
{0x1BC80, 0x1BC88}, {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BCA3},
|
||||
{0x1CF00, 0x1CF2D}, {0x1CF30, 0x1CF46}, {0x1CF50, 0x1CFC3},
|
||||
{0x1D000, 0x1D0F5}, {0x1D100, 0x1D126}, {0x1D129, 0x1D1EA},
|
||||
{0x1D200, 0x1D245}, {0x1D2C0, 0x1D2D3}, {0x1D2E0, 0x1D2F3},
|
||||
{0x1D300, 0x1D356}, {0x1D360, 0x1D378}, {0x1D400, 0x1D454},
|
||||
{0x1D456, 0x1D49C}, {0x1D49E, 0x1D49F}, {0x1D4A2, 0x1D4A2},
|
||||
{0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC}, {0x1D4AE, 0x1D4B9},
|
||||
{0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3}, {0x1D4C5, 0x1D505},
|
||||
{0x1D507, 0x1D50A}, {0x1D50D, 0x1D514}, {0x1D516, 0x1D51C},
|
||||
{0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E}, {0x1D540, 0x1D544},
|
||||
{0x1D546, 0x1D546}, {0x1D54A, 0x1D550}, {0x1D552, 0x1D6A5},
|
||||
{0x1D6A8, 0x1D7CB}, {0x1D7CE, 0x1DA8B}, {0x1DA9B, 0x1DA9F},
|
||||
{0x1DAA1, 0x1DAAF}, {0x1DF00, 0x1DF1E}, {0x1DF25, 0x1DF2A},
|
||||
{0x1E000, 0x1E006}, {0x1E008, 0x1E018}, {0x1E01B, 0x1E021},
|
||||
{0x1E023, 0x1E024}, {0x1E026, 0x1E02A}, {0x1E030, 0x1E06D},
|
||||
{0x1E08F, 0x1E08F}, {0x1E100, 0x1E12C}, {0x1E130, 0x1E13D},
|
||||
{0x1E140, 0x1E149}, {0x1E14E, 0x1E14F}, {0x1E290, 0x1E2AE},
|
||||
{0x1E2C0, 0x1E2F9}, {0x1E2FF, 0x1E2FF}, {0x1E4D0, 0x1E4F9},
|
||||
{0x1E7E0, 0x1E7E6}, {0x1E7E8, 0x1E7EB}, {0x1E7ED, 0x1E7EE},
|
||||
{0x1E7F0, 0x1E7FE}, {0x1E800, 0x1E8C4}, {0x1E8C7, 0x1E8D6},
|
||||
{0x1E900, 0x1E94B}, {0x1E950, 0x1E959}, {0x1E95E, 0x1E95F},
|
||||
{0x1EC71, 0x1ECB4}, {0x1ED01, 0x1ED3D}, {0x1EE00, 0x1EE03},
|
||||
{0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22}, {0x1EE24, 0x1EE24},
|
||||
@@ -400,8 +411,8 @@ var neutral = table{
|
||||
{0x1F54F, 0x1F54F}, {0x1F568, 0x1F579}, {0x1F57B, 0x1F594},
|
||||
{0x1F597, 0x1F5A3}, {0x1F5A5, 0x1F5FA}, {0x1F650, 0x1F67F},
|
||||
{0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, {0x1F6D3, 0x1F6D4},
|
||||
{0x1F6E0, 0x1F6EA}, {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F773},
|
||||
{0x1F780, 0x1F7D8}, {0x1F800, 0x1F80B}, {0x1F810, 0x1F847},
|
||||
{0x1F6E0, 0x1F6EA}, {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F776},
|
||||
{0x1F77B, 0x1F7D9}, {0x1F800, 0x1F80B}, {0x1F810, 0x1F847},
|
||||
{0x1F850, 0x1F859}, {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD},
|
||||
{0x1F8B0, 0x1F8B1}, {0x1F900, 0x1F90B}, {0x1F93B, 0x1F93B},
|
||||
{0x1F946, 0x1F946}, {0x1FA00, 0x1FA53}, {0x1FA60, 0x1FA6D},
|
||||
|
||||
2
vendor/github.com/nats-io/nats.go/README.md
generated
vendored
@@ -23,7 +23,7 @@ A [Go](http://golang.org) client for the [NATS messaging system](https://nats.io
go get github.com/nats-io/nats.go@latest

# To get a specific version:
go get github.com/nats-io/nats.go@v1.38.0
go get github.com/nats-io/nats.go@v1.39.1

# Note that the latest major version for NATS Server is v2:
go get github.com/nats-io/nats-server/v2@latest
3
vendor/github.com/nats-io/nats.go/dependencies.tpl
generated
vendored
@@ -2,7 +2,8 @@

This file lists the dependencies used in this repository.

{{/* compress has actually a BSD 3-Clause license, but the License file in the repo confuses go-license tooling, hence the manual exception */}}
| Dependency | License |
|--------------------------------------------------|-----------------------------------------|
{{ range . }}| {{.Name}} | {{.LicenseName}} |
{{ range . }}| {{ .Name }} | {{ if eq .Name "github.com/klauspost/compress/flate" }}BSD 3-Clause{{ else }}{{ .LicenseName }}{{ end }} |
{{ end }}
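For context, the template above is a plain Go text/template; the change special-cases the compress/flate module whose in-repo license file confuses the go-license tooling. Below is a minimal, hypothetical sketch of how such a template renders — the `Dep` struct and the sample module entries are assumptions for illustration, not taken from the nats.go tooling:

```go
package main

import (
	"os"
	"text/template"
)

// Dep mirrors the fields the template references ({{ .Name }}, {{ .LicenseName }});
// the struct name and sample data are assumptions for this sketch.
type Dep struct {
	Name        string
	LicenseName string
}

func main() {
	const tpl = `{{ range . }}| {{ .Name }} | {{ if eq .Name "github.com/klauspost/compress/flate" }}BSD 3-Clause{{ else }}{{ .LicenseName }}{{ end }} |
{{ end }}`
	t := template.Must(template.New("deps").Parse(tpl))
	deps := []Dep{
		{Name: "github.com/klauspost/compress/flate", LicenseName: "Unknown"},
		{Name: "github.com/nats-io/nkeys", LicenseName: "Apache-2.0"},
	}
	// Renders one markdown table row per dependency, with the manual
	// license override applied to compress/flate.
	_ = t.Execute(os.Stdout, deps)
}
```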
14
vendor/github.com/nats-io/nats.go/go_test.mod
generated
vendored
@@ -1,14 +1,12 @@
|
||||
module github.com/nats-io/nats.go
|
||||
|
||||
go 1.21
|
||||
|
||||
toolchain go1.22.5
|
||||
go 1.22.0
|
||||
|
||||
require (
|
||||
github.com/golang/protobuf v1.4.2
|
||||
github.com/klauspost/compress v1.17.9
|
||||
github.com/klauspost/compress v1.17.11
|
||||
github.com/nats-io/jwt v1.2.2
|
||||
github.com/nats-io/nats-server/v2 v2.10.17
|
||||
github.com/nats-io/nats-server/v2 v2.10.24
|
||||
github.com/nats-io/nkeys v0.4.9
|
||||
github.com/nats-io/nuid v1.0.1
|
||||
go.uber.org/goleak v1.3.0
|
||||
@@ -17,9 +15,9 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/minio/highwayhash v1.0.2 // indirect
|
||||
github.com/nats-io/jwt/v2 v2.5.7 // indirect
|
||||
github.com/minio/highwayhash v1.0.3 // indirect
|
||||
github.com/nats-io/jwt/v2 v2.7.3 // indirect
|
||||
golang.org/x/crypto v0.31.0 // indirect
|
||||
golang.org/x/sys v0.28.0 // indirect
|
||||
golang.org/x/time v0.5.0 // indirect
|
||||
golang.org/x/time v0.8.0 // indirect
|
||||
)
|
||||
|
||||
22
vendor/github.com/nats-io/nats.go/go_test.sum
generated
vendored
@@ -11,16 +11,16 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
||||
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
|
||||
github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD6Q=
|
||||
github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ=
|
||||
github.com/nats-io/jwt v1.2.2 h1:w3GMTO969dFg+UOKTmmyuu7IGdusK+7Ytlt//OYH/uU=
|
||||
github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q=
|
||||
github.com/nats-io/jwt/v2 v2.5.7 h1:j5lH1fUXCnJnY8SsQeB/a/z9Azgu2bYIDvtPVNdxe2c=
|
||||
github.com/nats-io/jwt/v2 v2.5.7/go.mod h1:ZdWS1nZa6WMZfFwwgpEaqBV8EPGVgOTDHN/wTbz0Y5A=
|
||||
github.com/nats-io/nats-server/v2 v2.10.17 h1:PTVObNBD3TZSNUDgzFb1qQsQX4mOgFmOuG9vhT+KBUY=
|
||||
github.com/nats-io/nats-server/v2 v2.10.17/go.mod h1:5OUyc4zg42s/p2i92zbbqXvUNsbF0ivdTLKshVMn2YQ=
|
||||
github.com/nats-io/jwt/v2 v2.7.3 h1:6bNPK+FXgBeAqdj4cYQ0F8ViHRbi7woQLq4W29nUAzE=
|
||||
github.com/nats-io/jwt/v2 v2.7.3/go.mod h1:GvkcbHhKquj3pkioy5put1wvPxs78UlZ7D/pY+BgZk4=
|
||||
github.com/nats-io/nats-server/v2 v2.10.24 h1:KcqqQAD0ZZcG4yLxtvSFJY7CYKVYlnlWoAiVZ6i/IY4=
|
||||
github.com/nats-io/nats-server/v2 v2.10.24/go.mod h1:olvKt8E5ZlnjyqBGbAXtxvSQKsPodISK5Eo/euIta4s=
|
||||
github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
|
||||
github.com/nats-io/nkeys v0.4.9 h1:qe9Faq2Gxwi6RZnZMXfmGMZkg3afLLOtrU+gDZJ35b0=
|
||||
github.com/nats-io/nkeys v0.4.9/go.mod h1:jcMqs+FLG+W5YO36OX6wFIFcmpdAns+w1Wm6D3I/evE=
|
||||
@@ -37,16 +37,16 @@ golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
|
||||
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
|
||||
1
vendor/github.com/nats-io/nats.go/js.go
generated
vendored
@@ -120,6 +120,7 @@ type JetStream interface {
	// PullSubscribe creates a Subscription that can fetch messages.
	// See important note in Subscribe(). Additionally, for an ephemeral pull consumer, the "durable" value must be
	// set to an empty string.
	// When using PullSubscribe, the messages are fetched using Fetch() and FetchBatch() methods.
	PullSubscribe(subj, durable string, opts ...SubOpt) (*Subscription, error)
}
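The doc comment above now spells out that pull consumers retrieve messages explicitly via Fetch/FetchBatch and that an empty durable name creates an ephemeral consumer. A minimal usage sketch against the documented nats.go API; the server URL, subject and batch size are illustrative assumptions:

```go
package main

import (
	"fmt"
	"time"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		panic(err)
	}
	defer nc.Drain()

	js, err := nc.JetStream()
	if err != nil {
		panic(err)
	}

	// Empty durable name -> ephemeral pull consumer, as described above.
	// "ORDERS.new" is an assumed subject bound to an existing stream.
	sub, err := js.PullSubscribe("ORDERS.new", "")
	if err != nil {
		panic(err)
	}

	// Messages are fetched explicitly rather than pushed to a handler.
	msgs, err := sub.Fetch(10, nats.MaxWait(2*time.Second))
	if err != nil {
		panic(err)
	}
	for _, m := range msgs {
		fmt.Println(string(m.Data))
		_ = m.Ack()
	}
}
```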
8
vendor/github.com/nats-io/nats.go/nats.go
generated
vendored
@@ -47,7 +47,7 @@ import (

// Default Constants
const (
	Version = "1.38.0"
	Version = "1.39.1"
	DefaultURL = "nats://127.0.0.1:4222"
	DefaultPort = 4222
	DefaultMaxReconnect = 60
@@ -4685,14 +4685,14 @@ func (s *Subscription) Unsubscribe() error {
// checkDrained will watch for a subscription to be fully drained
// and then remove it.
func (nc *Conn) checkDrained(sub *Subscription) {
	if nc == nil || sub == nil {
		return
	}
	defer func() {
		sub.mu.Lock()
		defer sub.mu.Unlock()
		sub.draining = false
	}()
	if nc == nil || sub == nil {
		return
	}

	// This allows us to know that whatever we have in the client pending
	// is correct and the server will not send additional information.
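The checkDrained hunk above moves the nil guard ahead of the deferred closure that locks and dereferences sub; with the old ordering, the defer was registered first, so a nil subscription would still panic once the function returned and the defer fired. A distilled sketch of that bug class (plain Go, not the vendored code itself):

```go
package main

import "fmt"

type sub struct{ name string }

// drainBuggy registers a defer that dereferences s before checking for nil,
// so drainBuggy(nil) returns early but still panics when the defer runs.
func drainBuggy(s *sub) {
	defer func() { fmt.Println("drained", s.name) }()
	if s == nil {
		return
	}
	fmt.Println("draining", s.name)
}

// drainFixed checks first; for a nil s the defer is never registered.
func drainFixed(s *sub) {
	if s == nil {
		return
	}
	defer func() { fmt.Println("drained", s.name) }()
	fmt.Println("draining", s.name)
}

func main() {
	drainFixed(nil) // safe
	drainFixed(&sub{name: "orders"})
	// drainBuggy(nil) would panic with a nil pointer dereference in the defer.
}
```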
1
vendor/github.com/nats-io/nats.go/nats_iter.go
generated
vendored
@@ -12,7 +12,6 @@
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.23
|
||||
// +build go1.23
|
||||
|
||||
package nats
|
||||
|
||||
|
||||
1
vendor/github.com/nats-io/nats.go/testing_internal.go
generated
vendored
@@ -12,7 +12,6 @@
|
||||
// limitations under the License.
|
||||
|
||||
//go:build internal_testing
|
||||
// +build internal_testing
|
||||
|
||||
// Functions in this file are only available when building nats.go with the
|
||||
// internal_testing build tag. They are used by the nats.go test suite.
|
||||
|
||||
3
vendor/github.com/nats-io/nats.go/util/tls.go
generated
vendored
@@ -11,9 +11,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.8
|
||||
// +build go1.8
|
||||
|
||||
package util
|
||||
|
||||
import "crypto/tls"
|
||||
|
||||
50
vendor/github.com/nats-io/nats.go/util/tls_go17.go
generated
vendored
@@ -1,50 +0,0 @@
|
||||
// Copyright 2016-2022 The NATS Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.7 && !go1.8
|
||||
// +build go1.7,!go1.8
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
// CloneTLSConfig returns a copy of c. Only the exported fields are copied.
|
||||
// This is temporary, until this is provided by the language.
|
||||
// https://go-review.googlesource.com/#/c/28075/
|
||||
func CloneTLSConfig(c *tls.Config) *tls.Config {
|
||||
return &tls.Config{
|
||||
Rand: c.Rand,
|
||||
Time: c.Time,
|
||||
Certificates: c.Certificates,
|
||||
NameToCertificate: c.NameToCertificate,
|
||||
GetCertificate: c.GetCertificate,
|
||||
RootCAs: c.RootCAs,
|
||||
NextProtos: c.NextProtos,
|
||||
ServerName: c.ServerName,
|
||||
ClientAuth: c.ClientAuth,
|
||||
ClientCAs: c.ClientCAs,
|
||||
InsecureSkipVerify: c.InsecureSkipVerify,
|
||||
CipherSuites: c.CipherSuites,
|
||||
PreferServerCipherSuites: c.PreferServerCipherSuites,
|
||||
SessionTicketsDisabled: c.SessionTicketsDisabled,
|
||||
SessionTicketKey: c.SessionTicketKey,
|
||||
ClientSessionCache: c.ClientSessionCache,
|
||||
MinVersion: c.MinVersion,
|
||||
MaxVersion: c.MaxVersion,
|
||||
CurvePreferences: c.CurvePreferences,
|
||||
DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
|
||||
Renegotiation: c.Renegotiation,
|
||||
}
|
||||
}
|
||||
20
vendor/github.com/nats-io/nats.go/ws.go
generated
vendored
@@ -76,14 +76,15 @@ var wsGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
|
||||
var compressFinalBlock = []byte{0x00, 0x00, 0xff, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff}
|
||||
|
||||
type websocketReader struct {
|
||||
r io.Reader
|
||||
pending [][]byte
|
||||
ib []byte
|
||||
ff bool
|
||||
fc bool
|
||||
nl bool
|
||||
dc *wsDecompressor
|
||||
nc *Conn
|
||||
r io.Reader
|
||||
pending [][]byte
|
||||
compress bool
|
||||
ib []byte
|
||||
ff bool
|
||||
fc bool
|
||||
nl bool
|
||||
dc *wsDecompressor
|
||||
nc *Conn
|
||||
}
|
||||
|
||||
type wsDecompressor struct {
|
||||
@@ -312,6 +313,8 @@ func (r *websocketReader) Read(p []byte) (int, error) {
|
||||
}
|
||||
r.fc = false
|
||||
}
|
||||
} else if r.compress {
|
||||
b = bytes.Clone(b)
|
||||
}
|
||||
// Add to the pending list if dealing with uncompressed frames or
|
||||
// after we have received the full compressed message and decompressed it.
|
||||
@@ -647,6 +650,7 @@ func (nc *Conn) wsInitHandshake(u *url.URL) error {
|
||||
|
||||
wsr := wsNewReader(nc.br.r)
|
||||
wsr.nc = nc
|
||||
wsr.compress = compress
|
||||
// We have to slurp whatever is in the bufio reader and copy to br.r
|
||||
if n := br.Buffered(); n != 0 {
|
||||
wsr.ib, _ = br.Peek(n)
|
||||
|
||||
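The websocketReader change above adds a compress flag and clones uncompressed frame bytes before they are kept on the pending list; without the clone, the retained slice aliases a read buffer that the next read overwrites. A distilled sketch of that aliasing problem, independent of nats.go:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	readBuf := make([]byte, 8) // stands in for a connection's reused read buffer
	var pending [][]byte

	copy(readBuf, "frame-1!")
	pending = append(pending, readBuf[:8])              // aliases readBuf
	pending = append(pending, bytes.Clone(readBuf[:8])) // independent copy

	copy(readBuf, "frame-2!") // the next read reuses the same buffer

	fmt.Println(string(pending[0])) // "frame-2!" – silently corrupted by reuse
	fmt.Println(string(pending[1])) // "frame-1!" – safe, cloned copy
}
```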
5
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options/options.go
generated
vendored
@@ -54,6 +54,11 @@ func New(m map[string]interface{}) (*Options, error) {
		return nil, err
	}

	// default to hybrid metadatabackend for posixfs
	if _, ok := m["metadata_backend"]; !ok {
		m["metadata_backend"] = "hybrid"
	}

	do, err := decomposedoptions.New(m)
	if err != nil {
		return nil, err
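With the hunk above, posixfs defaults to the hybrid metadata backend whenever the raw config map does not set metadata_backend explicitly. A hedged sketch of how a caller would observe that default; the "root" key and its value are assumptions for illustration, and a real deployment may need additional options:

```go
package main

import (
	"fmt"

	"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options"
)

func main() {
	// No "metadata_backend" key: options.New is expected to default it to "hybrid".
	m := map[string]interface{}{
		"root": "/var/lib/opencloud/storage", // assumed key/value for this sketch
	}
	o, err := options.New(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(o.MetadataBackend) // expected: "hybrid"
}
```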
16
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/posix.go
generated
vendored
@@ -25,6 +25,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
@@ -78,10 +79,19 @@ func New(m map[string]interface{}, stream events.Stream, log *zerolog.Logger) (s
|
||||
switch o.MetadataBackend {
|
||||
case "xattrs":
|
||||
lu = lookup.New(metadata.NewXattrsBackend(o.Root, o.FileMetadataCache), um, o, &timemanager.Manager{})
|
||||
case "messagepack":
|
||||
lu = lookup.New(metadata.NewMessagePackBackend(o.Root, o.FileMetadataCache), um, o, &timemanager.Manager{})
|
||||
case "hybrid":
|
||||
lu = lookup.New(metadata.NewHybridBackend(1024, // start offloading grants after 1KB
|
||||
func(n metadata.MetadataNode) string {
|
||||
spaceRoot, _ := lu.IDCache.Get(context.Background(), n.GetSpaceID(), n.GetSpaceID())
|
||||
if len(spaceRoot) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
return filepath.Join(spaceRoot, lookup.RevisionsDir, lookup.Pathify(n.GetID(), 4, 2)+".mpk")
|
||||
},
|
||||
o.FileMetadataCache), um, o, &timemanager.Manager{})
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown metadata backend %s, only 'messagepack' or 'xattrs' (default) supported", o.MetadataBackend)
|
||||
return nil, fmt.Errorf("unknown metadata backend %s, only 'xattrs' or 'hybrid' (default) supported", o.MetadataBackend)
|
||||
}
|
||||
|
||||
trashbin, err := trashbin.New(o, lu, log)
|
||||
|
||||
44
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/assimilation.go
generated
vendored
@@ -39,7 +39,6 @@ import (
|
||||
userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
|
||||
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
|
||||
"github.com/opencloud-eu/reva/v2/pkg/events"
|
||||
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup"
|
||||
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata"
|
||||
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
|
||||
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
|
||||
@@ -273,27 +272,32 @@ func (t *Tree) Scan(path string, action EventAction, isDir bool) error {
|
||||
}
|
||||
|
||||
func (t *Tree) HandleFileDelete(path string) error {
|
||||
n, err := t.getNodeForPath(filepath.Dir(path))
|
||||
spaceID, id, err := t.lookup.IDsForPath(context.Background(), path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n := node.NewBaseNode(spaceID, id, t.lookup)
|
||||
|
||||
// purge metadata
|
||||
if err := t.lookup.(*lookup.Lookup).IDCache.DeleteByPath(context.Background(), path); err != nil {
|
||||
if err := t.lookup.IDCache.DeleteByPath(context.Background(), path); err != nil {
|
||||
t.log.Error().Err(err).Str("path", path).Msg("could not delete id cache entry by path")
|
||||
}
|
||||
if err := t.lookup.MetadataBackend().Purge(context.Background(), n); err != nil {
|
||||
t.log.Error().Err(err).Str("path", path).Msg("could not purge metadata")
|
||||
}
|
||||
|
||||
parentNode, err := t.getNodeForPath(filepath.Dir(path))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t.PublishEvent(events.ItemTrashed{
|
||||
Owner: n.Owner(),
|
||||
Executant: n.Owner(),
|
||||
Owner: parentNode.Owner(),
|
||||
Executant: parentNode.Owner(),
|
||||
Ref: &provider.Reference{
|
||||
ResourceId: &provider.ResourceId{
|
||||
StorageId: t.options.MountID,
|
||||
SpaceId: n.SpaceID,
|
||||
OpaqueId: n.ParentID,
|
||||
OpaqueId: parentNode.ID,
|
||||
},
|
||||
Path: filepath.Base(path),
|
||||
},
|
||||
@@ -309,14 +313,12 @@ func (t *Tree) HandleFileDelete(path string) error {
|
||||
}
|
||||
|
||||
func (t *Tree) getNodeForPath(path string) (*node.Node, error) {
|
||||
lu := t.lookup.(*lookup.Lookup)
|
||||
|
||||
spaceID, nodeID, err := lu.IDsForPath(context.Background(), path)
|
||||
spaceID, nodeID, err := t.lookup.IDsForPath(context.Background(), path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return node.ReadNode(context.Background(), lu, spaceID, nodeID, false, nil, false)
|
||||
return node.ReadNode(context.Background(), t.lookup, spaceID, nodeID, false, nil, false)
|
||||
}
|
||||
|
||||
func (t *Tree) findSpaceId(path string) (string, node.Attributes, error) {
|
||||
@@ -324,7 +326,7 @@ func (t *Tree) findSpaceId(path string) (string, node.Attributes, error) {
|
||||
spaceCandidate := path
|
||||
spaceAttrs := node.Attributes{}
|
||||
for strings.HasPrefix(spaceCandidate, t.options.Root) {
|
||||
spaceID, _, err := t.lookup.(*lookup.Lookup).IDsForPath(context.Background(), spaceCandidate)
|
||||
spaceID, _, err := t.lookup.IDsForPath(context.Background(), spaceCandidate)
|
||||
if err == nil && len(spaceID) > 0 {
|
||||
if t.options.UseSpaceGroups {
|
||||
// set the uid and gid for the space
|
||||
@@ -385,7 +387,7 @@ func (t *Tree) assimilate(item scanItem) error {
|
||||
// the file has an id set, we already know it from the past
|
||||
n := node.NewBaseNode(spaceID, id, t.lookup)
|
||||
|
||||
previousPath, ok := t.lookup.(*lookup.Lookup).GetCachedID(context.Background(), spaceID, string(id))
|
||||
previousPath, ok := t.lookup.GetCachedID(context.Background(), spaceID, string(id))
|
||||
previousParentID, _ := t.lookup.MetadataBackend().Get(context.Background(), n, prefixes.ParentidAttr)
|
||||
|
||||
// compare metadata mtime with actual mtime. if it matches AND the path hasn't changed (move operation)
|
||||
@@ -416,7 +418,7 @@ func (t *Tree) assimilate(item scanItem) error {
|
||||
// this is a move
|
||||
t.log.Debug().Str("path", item.Path).Msg("move detected")
|
||||
|
||||
if err := t.lookup.(*lookup.Lookup).CacheID(context.Background(), spaceID, string(id), item.Path); err != nil {
|
||||
if err := t.lookup.CacheID(context.Background(), spaceID, string(id), item.Path); err != nil {
|
||||
t.log.Error().Err(err).Str("spaceID", spaceID).Str("id", string(id)).Str("path", item.Path).Msg("could not cache id")
|
||||
}
|
||||
_, attrs, err := t.updateFile(item.Path, string(id), spaceID)
|
||||
@@ -425,7 +427,7 @@ func (t *Tree) assimilate(item scanItem) error {
|
||||
}
|
||||
|
||||
// Delete the path entry using DeletePath(reverse lookup), not the whole entry pair.
|
||||
if err := t.lookup.(*lookup.Lookup).IDCache.DeletePath(context.Background(), previousPath); err != nil {
|
||||
if err := t.lookup.IDCache.DeletePath(context.Background(), previousPath); err != nil {
|
||||
t.log.Error().Err(err).Str("path", previousPath).Msg("could not delete id cache entry by path")
|
||||
}
|
||||
|
||||
@@ -469,7 +471,7 @@ func (t *Tree) assimilate(item scanItem) error {
|
||||
} else {
|
||||
// This item had already been assimilated in the past. Update the path
|
||||
t.log.Debug().Str("path", item.Path).Msg("updating cached path")
|
||||
if err := t.lookup.(*lookup.Lookup).CacheID(context.Background(), spaceID, string(id), item.Path); err != nil {
|
||||
if err := t.lookup.CacheID(context.Background(), spaceID, string(id), item.Path); err != nil {
|
||||
t.log.Error().Err(err).Str("spaceID", spaceID).Str("id", string(id)).Str("path", item.Path).Msg("could not cache id")
|
||||
}
|
||||
|
||||
@@ -530,7 +532,7 @@ assimilate:
|
||||
if id != spaceID {
|
||||
// read parent
|
||||
var err error
|
||||
_, parentID, err = t.lookup.(*lookup.Lookup).IDsForPath(context.Background(), filepath.Dir(path))
|
||||
_, parentID, err = t.lookup.IDsForPath(context.Background(), filepath.Dir(path))
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to read parent id")
|
||||
}
|
||||
@@ -668,7 +670,7 @@ assimilate:
|
||||
return nil, nil, errors.Wrap(err, "failed to set attributes")
|
||||
}
|
||||
|
||||
if err := t.lookup.(*lookup.Lookup).CacheID(context.Background(), spaceID, id, path); err != nil {
|
||||
if err := t.lookup.CacheID(context.Background(), spaceID, id, path); err != nil {
|
||||
t.log.Error().Err(err).Str("spaceID", spaceID).Str("id", id).Str("path", path).Msg("could not cache id")
|
||||
}
|
||||
|
||||
@@ -741,7 +743,7 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error {
|
||||
// try to find space
|
||||
spaceCandidate := path
|
||||
for strings.HasPrefix(spaceCandidate, t.options.Root) {
|
||||
spaceID, _, err = t.lookup.(*lookup.Lookup).IDsForPath(context.Background(), spaceCandidate)
|
||||
spaceID, _, err = t.lookup.IDsForPath(context.Background(), spaceCandidate)
|
||||
if err == nil && len(spaceID) > 0 {
|
||||
err = scopeSpace(path)
|
||||
if err != nil {
|
||||
@@ -767,7 +769,7 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error {
|
||||
|
||||
if id != "" {
|
||||
// Check if the item on the previous still exists. In this case it might have been a copy with extended attributes -> set new ID
|
||||
previousPath, ok := t.lookup.(*lookup.Lookup).GetCachedID(context.Background(), spaceID, id)
|
||||
previousPath, ok := t.lookup.GetCachedID(context.Background(), spaceID, id)
|
||||
if ok && previousPath != path {
|
||||
// this id clashes with an existing id -> re-assimilate
|
||||
_, err := os.Stat(previousPath)
|
||||
@@ -775,7 +777,7 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error {
|
||||
_ = t.assimilate(scanItem{Path: path, ForceRescan: true})
|
||||
}
|
||||
}
|
||||
if err := t.lookup.(*lookup.Lookup).CacheID(context.Background(), spaceID, id, path); err != nil {
|
||||
if err := t.lookup.CacheID(context.Background(), spaceID, id, path); err != nil {
|
||||
t.log.Error().Err(err).Str("spaceID", spaceID).Str("id", id).Str("path", path).Msg("could not cache id")
|
||||
}
|
||||
}
|
||||
@@ -788,7 +790,7 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error {
|
||||
})
|
||||
|
||||
for dir, size := range sizes {
|
||||
spaceID, id, err := t.lookup.(*lookup.Lookup).IDsForPath(context.Background(), dir)
|
||||
spaceID, id, err := t.lookup.IDsForPath(context.Background(), dir)
|
||||
if err != nil {
|
||||
t.log.Error().Err(err).Str("path", dir).Msg("could not get ids for path")
|
||||
continue
|
||||
|
||||
14
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/tree.go
generated
vendored
@@ -81,7 +81,7 @@ type scanItem struct {
|
||||
|
||||
// Tree manages a hierarchical tree
|
||||
type Tree struct {
|
||||
lookup node.PathLookup
|
||||
lookup *lookup.Lookup
|
||||
blobstore Blobstore
|
||||
trashbin *trashbin.Trashbin
|
||||
propagator propagator.Propagator
|
||||
@@ -106,7 +106,7 @@ type PermissionCheckFunc func(rp *provider.ResourcePermissions) bool
|
||||
func New(lu node.PathLookup, bs Blobstore, um usermapper.Mapper, trashbin *trashbin.Trashbin, permissions permissions.Permissions, o *options.Options, es events.Stream, cache store.Store, log *zerolog.Logger) (*Tree, error) {
|
||||
scanQueue := make(chan scanItem)
|
||||
t := &Tree{
|
||||
lookup: lu,
|
||||
lookup: lu.(*lookup.Lookup),
|
||||
blobstore: bs,
|
||||
userMapper: um,
|
||||
trashbin: trashbin,
|
||||
@@ -220,7 +220,7 @@ func (t *Tree) TouchFile(ctx context.Context, n *node.Node, markprocessing bool,
|
||||
n.SetType(provider.ResourceType_RESOURCE_TYPE_FILE)
|
||||
|
||||
// Set id in cache
|
||||
if err := t.lookup.(*lookup.Lookup).CacheID(context.Background(), n.SpaceID, n.ID, nodePath); err != nil {
|
||||
if err := t.lookup.CacheID(context.Background(), n.SpaceID, n.ID, nodePath); err != nil {
|
||||
t.log.Error().Err(err).Str("spaceID", n.SpaceID).Str("id", n.ID).Str("path", nodePath).Msg("could not cache id")
|
||||
}
|
||||
|
||||
@@ -325,7 +325,7 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node)
|
||||
if newNode.ID == "" {
|
||||
newNode.ID = oldNode.ID
|
||||
}
|
||||
if err := t.lookup.(*lookup.Lookup).CacheID(ctx, newNode.SpaceID, newNode.ID, filepath.Join(newNode.ParentPath(), newNode.Name)); err != nil {
|
||||
if err := t.lookup.CacheID(ctx, newNode.SpaceID, newNode.ID, filepath.Join(newNode.ParentPath(), newNode.Name)); err != nil {
|
||||
t.log.Error().Err(err).Str("spaceID", newNode.SpaceID).Str("id", newNode.ID).Str("path", filepath.Join(newNode.ParentPath(), newNode.Name)).Msg("could not cache id")
|
||||
}
|
||||
|
||||
@@ -437,7 +437,7 @@ func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, erro
|
||||
for name := range work {
|
||||
path := filepath.Join(dir, name)
|
||||
|
||||
_, nodeID, err := t.lookup.(*lookup.Lookup).IDsForPath(ctx, path)
|
||||
_, nodeID, err := t.lookup.IDsForPath(ctx, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -536,7 +536,7 @@ func (t *Tree) WriteBlob(n *node.Node, source string) error {
|
||||
var err error
|
||||
|
||||
if t.options.EnableFSRevisions {
|
||||
currentPath = t.lookup.(*lookup.Lookup).CurrentPath(n.SpaceID, n.ID)
|
||||
currentPath = t.lookup.CurrentPath(n.SpaceID, n.ID)
|
||||
|
||||
defer func() {
|
||||
_ = t.lookup.CopyMetadata(context.Background(), n, node.NewBaseNode(n.SpaceID, n.ID+node.CurrentIDDelimiter, t.lookup), func(attributeName string, value []byte) (newValue []byte, copy bool) {
|
||||
@@ -612,7 +612,7 @@ func (t *Tree) createDirNode(ctx context.Context, n *node.Node) (err error) {
|
||||
ctx, span := tracer.Start(ctx, "createDirNode")
|
||||
defer span.End()
|
||||
|
||||
idcache := t.lookup.(*lookup.Lookup).IDCache
|
||||
idcache := t.lookup.IDCache
|
||||
// create a directory node
|
||||
parentPath, ok := idcache.Get(ctx, n.SpaceID, n.ParentID)
|
||||
if !ok {
|
||||
|
||||
515
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/hybrid_backend.go
generated
vendored
Normal file
@@ -0,0 +1,515 @@
|
||||
package metadata
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/renameio/v2"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pkg/xattr"
|
||||
"github.com/rogpeppe/go-internal/lockedfile"
|
||||
"github.com/shamaton/msgpack/v2"
|
||||
|
||||
"github.com/opencloud-eu/reva/v2/pkg/storage/cache"
|
||||
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
|
||||
"github.com/opencloud-eu/reva/v2/pkg/storage/utils/filelocks"
|
||||
)
|
||||
|
||||
var _metadataOffloadedAttr = prefixes.OcPrefix + "metadata_offloaded"
|
||||
|
||||
type MetadataPathFunc func(MetadataNode) string
|
||||
|
||||
// HybridBackend stores the file attributes in extended attributes
|
||||
type HybridBackend struct {
|
||||
offloadLimit int
|
||||
metaCache cache.FileMetadataCache
|
||||
metadataPathFunc MetadataPathFunc
|
||||
}
|
||||
|
||||
// NewMessageBackend returns a new HybridBackend instance
|
||||
func NewHybridBackend(offloadLimit int, metadataPathFunc MetadataPathFunc, o cache.Config) HybridBackend {
|
||||
return HybridBackend{
|
||||
offloadLimit: offloadLimit,
|
||||
metaCache: cache.GetFileMetadataCache(o),
|
||||
metadataPathFunc: metadataPathFunc,
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the name of the backend
|
||||
func (HybridBackend) Name() string { return "hybrid" }
|
||||
|
||||
// IdentifyPath returns the space id, node id and mtime of a file
|
||||
func (b HybridBackend) IdentifyPath(_ context.Context, path string) (string, string, time.Time, error) {
|
||||
spaceID, _ := xattr.Get(path, prefixes.SpaceIDAttr)
|
||||
id, _ := xattr.Get(path, prefixes.IDAttr)
|
||||
|
||||
mtimeAttr, _ := xattr.Get(path, prefixes.MTimeAttr)
|
||||
mtime, _ := time.Parse(time.RFC3339Nano, string(mtimeAttr))
|
||||
return string(spaceID), string(id), mtime, nil
|
||||
}
|
||||
|
||||
// Get an extended attribute value for the given key
|
||||
// No file locking is involved here as reading a single xattr is
|
||||
// considered to be atomic.
|
||||
func (b HybridBackend) Get(ctx context.Context, n MetadataNode, key string) ([]byte, error) {
|
||||
attribs := map[string][]byte{}
|
||||
err := b.metaCache.PullFromCache(b.cacheKey(n), &attribs)
|
||||
if err == nil && len(attribs[key]) > 0 {
|
||||
return attribs[key], err
|
||||
}
|
||||
|
||||
if isOffloadingAttribute(key) {
|
||||
// check if key is offloaded
|
||||
offloaded, err := xattr.Get(n.InternalPath(), _metadataOffloadedAttr)
|
||||
if err == nil && string(offloaded) == "1" {
|
||||
msgpackAttribs := map[string][]byte{}
|
||||
msgBytes, err := os.ReadFile(b.MetadataPath(n))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = msgpack.Unmarshal(msgBytes, &msgpackAttribs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if val, ok := msgpackAttribs[key]; ok {
|
||||
return val, nil
|
||||
} else {
|
||||
return nil, &xattr.Error{Op: "HybridBackend.Get", Path: n.InternalPath(), Err: xattr.ENOATTR} // attribute not found
|
||||
}
|
||||
}
|
||||
}
|
||||
return xattr.Get(n.InternalPath(), key)
|
||||
}
|
||||
|
||||
// GetInt64 reads a string as int64 from the xattrs
|
||||
func (b HybridBackend) GetInt64(ctx context.Context, n MetadataNode, key string) (int64, error) {
|
||||
attr, err := b.Get(ctx, n, key)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
v, err := strconv.ParseInt(string(attr), 10, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (b HybridBackend) list(ctx context.Context, n MetadataNode, acquireLock bool) (attribs []string, err error) {
|
||||
filePath := n.InternalPath()
|
||||
attrs, err := xattr.List(filePath)
|
||||
if err == nil {
|
||||
return attrs, nil
|
||||
}
|
||||
|
||||
// listing xattrs failed, try again, either with lock or without
|
||||
if acquireLock {
|
||||
f, err := lockedfile.OpenFile(filePath+filelocks.LockFileSuffix, os.O_CREATE|os.O_WRONLY, 0600)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cleanupLockfile(ctx, f)
|
||||
|
||||
}
|
||||
return xattr.List(filePath)
|
||||
}
|
||||
|
||||
// All reads all extended attributes for a node, protected by a
|
||||
// shared file lock
|
||||
func (b HybridBackend) All(ctx context.Context, n MetadataNode) (map[string][]byte, error) {
|
||||
return b.getAll(ctx, n, false, false, true)
|
||||
}
|
||||
|
||||
func (b HybridBackend) getAll(ctx context.Context, n MetadataNode, skipCache, skipOffloaded, acquireLock bool) (map[string][]byte, error) {
|
||||
attribs := map[string][]byte{}
|
||||
|
||||
if !skipCache {
|
||||
err := b.metaCache.PullFromCache(b.cacheKey(n), &attribs)
|
||||
if err == nil {
|
||||
return attribs, err
|
||||
}
|
||||
}
|
||||
|
||||
attrNames, err := b.list(ctx, n, acquireLock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(attrNames) == 0 {
|
||||
return attribs, nil
|
||||
}
|
||||
|
||||
var (
|
||||
xerrs = 0
|
||||
xerr error
|
||||
)
|
||||
// error handling: Count if there are errors while reading all attribs.
|
||||
// if there were any, return an error.
|
||||
attribs = make(map[string][]byte, len(attrNames))
|
||||
path := n.InternalPath()
|
||||
for _, name := range attrNames {
|
||||
var val []byte
|
||||
if val, xerr = xattr.Get(path, name); xerr != nil && !IsAttrUnset(xerr) {
|
||||
xerrs++
|
||||
} else {
|
||||
attribs[name] = val
|
||||
}
|
||||
}
|
||||
|
||||
if xerrs > 0 {
|
||||
return nil, errors.Wrap(xerr, "Failed to read all xattrs")
|
||||
}
|
||||
|
||||
// merge the attributes from the offload file
|
||||
offloaded, err := xattr.Get(path, _metadataOffloadedAttr)
|
||||
if !skipOffloaded && err == nil && string(offloaded) == "1" {
|
||||
msgpackAttribs := map[string][]byte{}
|
||||
msgBytes, err := os.ReadFile(b.MetadataPath(n))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = msgpack.Unmarshal(msgBytes, &msgpackAttribs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for key, val := range msgpackAttribs {
|
||||
attribs[key] = val
|
||||
}
|
||||
}
|
||||
|
||||
err = b.metaCache.PushToCache(b.cacheKey(n), attribs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return attribs, nil
|
||||
}
|
||||
|
||||
// Set sets one attribute for the given path
|
||||
func (b HybridBackend) Set(ctx context.Context, n MetadataNode, key string, val []byte) (err error) {
|
||||
return b.SetMultiple(ctx, n, map[string][]byte{key: val}, true)
|
||||
}
|
||||
|
||||
// SetMultiple sets a set of attribute for the given path
|
||||
func (b HybridBackend) SetMultiple(ctx context.Context, n MetadataNode, attribs map[string][]byte, acquireLock bool) (err error) {
|
||||
path := n.InternalPath()
|
||||
if acquireLock {
|
||||
err := os.MkdirAll(filepath.Dir(path), 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
lockedFile, err := lockedfile.OpenFile(b.LockfilePath(n), os.O_CREATE|os.O_WRONLY, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cleanupLockfile(ctx, lockedFile)
|
||||
}
|
||||
|
||||
offloadAttr, err := xattr.Get(path, _metadataOffloadedAttr)
|
||||
offloaded := err == nil && string(offloadAttr) == "1"
|
||||
|
||||
// offload if the offloading metadata size exceeds the limit
|
||||
hasOffloadingAttrs := false
|
||||
for key := range attribs {
|
||||
if isOffloadingAttribute(key) {
|
||||
hasOffloadingAttrs = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if hasOffloadingAttrs && !offloaded {
|
||||
mdSize := 0
|
||||
for key := range attribs {
|
||||
if isOffloadingAttribute(key) {
|
||||
mdSize += len(attribs[key]) + len(key)
|
||||
}
|
||||
}
|
||||
existingAttribs, err := b.getAll(ctx, n, true, true, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for key := range existingAttribs {
|
||||
if isOffloadingAttribute(key) {
|
||||
mdSize += len(existingAttribs[key]) + len(key)
|
||||
}
|
||||
}
|
||||
|
||||
if mdSize > b.offloadLimit {
|
||||
err = b.offloadMetadata(ctx, n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
offloaded = true
|
||||
}
|
||||
}
|
||||
|
||||
if offloaded {
|
||||
metaPath := b.MetadataPath(n)
|
||||
var msgBytes []byte
|
||||
msgBytes, err = os.ReadFile(metaPath)
|
||||
|
||||
mpkAttribs := map[string][]byte{}
|
||||
switch {
|
||||
case err != nil:
|
||||
if !errors.Is(err, fs.ErrNotExist) {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
err = msgpack.Unmarshal(msgBytes, &mpkAttribs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// prepare offloaded metadata
|
||||
for key, val := range attribs {
|
||||
if isOffloadingAttribute(key) {
|
||||
mpkAttribs[key] = val
|
||||
delete(attribs, key)
|
||||
}
|
||||
}
|
||||
var d []byte
|
||||
d, err = msgpack.Marshal(mpkAttribs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// overwrite file atomically
|
||||
err = renameio.WriteFile(metaPath, d, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
xerrs := 0
|
||||
var xerr error
|
||||
// error handling: Count if there are errors while setting the attribs.
|
||||
// if there were any, return an error.
|
||||
for key, val := range attribs {
|
||||
if xerr = xattr.Set(path, key, val); xerr != nil {
|
||||
// log
|
||||
xerrs++
|
||||
}
|
||||
}
|
||||
if xerrs > 0 {
|
||||
return errors.Wrap(xerr, "Failed to set all xattrs")
|
||||
}
|
||||
|
||||
attribs, err = b.getAll(ctx, n, true, false, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = b.metaCache.PushToCache(b.cacheKey(n), attribs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b HybridBackend) offloadMetadata(ctx context.Context, n MetadataNode) error {
|
||||
path := n.InternalPath()
|
||||
msgpackAttribs := map[string][]byte{}
|
||||
xerrs := 0
|
||||
var xerr error
|
||||
|
||||
// collect attributes to move
|
||||
existingAttribs, err := b.getAll(ctx, n, true, true, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for key, val := range existingAttribs {
|
||||
if isOffloadingAttribute(key) {
|
||||
msgpackAttribs[key] = val
|
||||
}
|
||||
}
|
||||
|
||||
var d []byte
|
||||
d, err = msgpack.Marshal(msgpackAttribs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = os.MkdirAll(filepath.Dir(b.MetadataPath(n)), 0700)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = renameio.WriteFile(b.MetadataPath(n), d, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// set the metadata offloaded attribute
|
||||
err = xattr.Set(path, _metadataOffloadedAttr, []byte("1"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// remove offloaded attributes from xattrs
|
||||
for key := range msgpackAttribs {
|
||||
xerr = xattr.Remove(path, key)
|
||||
if xerr != nil {
|
||||
xerrs++
|
||||
}
|
||||
}
|
||||
if xerrs > 0 {
|
||||
return errors.Wrap(xerr, "Failed to remove xattrs while offloading")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove an extended attribute key
|
||||
func (b HybridBackend) Remove(ctx context.Context, n MetadataNode, key string, acquireLock bool) error {
|
||||
path := n.InternalPath()
|
||||
if acquireLock {
|
||||
lockedFile, err := lockedfile.OpenFile(path+filelocks.LockFileSuffix, os.O_CREATE|os.O_WRONLY, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cleanupLockfile(ctx, lockedFile)
|
||||
}
|
||||
|
||||
if isOffloadingAttribute(key) {
|
||||
offloadAttr, err := xattr.Get(path, _metadataOffloadedAttr)
|
||||
offloaded := err == nil && string(offloadAttr) == "1"
|
||||
if offloaded {
|
||||
// remove from offloaded metadata
|
||||
|
||||
// 1. read offloaded metadata
|
||||
metaPath := b.MetadataPath(n)
|
||||
var msgBytes []byte
|
||||
msgBytes, err = os.ReadFile(metaPath)
|
||||
|
||||
mpkAttribs := map[string][]byte{}
|
||||
switch {
|
||||
case err != nil:
|
||||
if !errors.Is(err, fs.ErrNotExist) {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
err = msgpack.Unmarshal(msgBytes, &mpkAttribs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if _, ok := mpkAttribs[key]; !ok {
|
||||
return &xattr.Error{Op: "HybridBackend.Remove", Path: n.InternalPath(), Err: xattr.ENOATTR} // attribute not found
|
||||
}
|
||||
|
||||
// 2. remove attribute
|
||||
delete(mpkAttribs, key)
|
||||
|
||||
// 3. write back to file
|
||||
var d []byte
|
||||
d, err = msgpack.Marshal(mpkAttribs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = renameio.WriteFile(b.MetadataPath(n), d, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// remove from xattrs
|
||||
err := xattr.Remove(path, key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// remove from xattrs
|
||||
err := xattr.Remove(path, key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
attribs, err := b.getAll(ctx, n, true, false, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return b.metaCache.PushToCache(b.cacheKey(n), attribs)
|
||||
}
|
||||
|
||||
// IsMetaFile returns whether the given path represents a meta file
|
||||
func (HybridBackend) IsMetaFile(path string) bool { return strings.HasSuffix(path, ".meta.lock") }
|
||||
|
||||
// Purge purges the data of a given path
|
||||
func (b HybridBackend) Purge(ctx context.Context, n MetadataNode) error {
|
||||
path := n.InternalPath()
|
||||
_, err := os.Stat(path)
|
||||
if err == nil {
|
||||
attribs, err := b.getAll(ctx, n, true, false, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for attr := range attribs {
|
||||
if strings.HasPrefix(attr, prefixes.OcPrefix) {
|
||||
err := xattr.Remove(path, attr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return b.metaCache.RemoveMetadata(b.cacheKey(n))
|
||||
}
|
||||
|
||||
// Rename moves the data for a given path to a new path
|
||||
func (b HybridBackend) Rename(oldNode, newNode MetadataNode) error {
|
||||
data := map[string][]byte{}
|
||||
err := b.metaCache.PullFromCache(b.cacheKey(oldNode), &data)
|
||||
if err == nil {
|
||||
err = b.metaCache.PushToCache(b.cacheKey(newNode), data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return b.metaCache.RemoveMetadata(b.cacheKey(oldNode))
|
||||
}
|
||||
|
||||
// MetadataPath returns the path of the file holding the metadata for the given path
|
||||
func (b HybridBackend) MetadataPath(n MetadataNode) string { return b.metadataPathFunc(n) }
|
||||
|
||||
// LockfilePath returns the path of the lock file
|
||||
func (HybridBackend) LockfilePath(n MetadataNode) string { return n.InternalPath() + ".mlock" }
|
||||
|
||||
// Lock locks the metadata for the given path
|
||||
func (b HybridBackend) Lock(n MetadataNode) (UnlockFunc, error) {
|
||||
metaLockPath := b.LockfilePath(n)
|
||||
mlock, err := lockedfile.OpenFile(metaLockPath, os.O_RDWR|os.O_CREATE, 0600)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return func() error {
|
||||
err := mlock.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Remove(metaLockPath)
|
||||
}, nil
|
||||
}
|
||||
|
||||
// AllWithLockedSource reads all extended attributes from the given reader.
|
||||
// The path argument is used for storing the data in the cache
|
||||
func (b HybridBackend) AllWithLockedSource(ctx context.Context, n MetadataNode, _ io.Reader) (map[string][]byte, error) {
|
||||
return b.All(ctx, n)
|
||||
}
|
||||
|
||||
func (b HybridBackend) cacheKey(n MetadataNode) string {
|
||||
// rootPath is guaranteed to have no trailing slash
|
||||
// the cache key shouldn't begin with a slash as some stores drop it which can cause
|
||||
// confusion
|
||||
return n.GetSpaceID() + "/" + n.GetID()
|
||||
}
|
||||
|
||||
func isOffloadingAttribute(key string) bool {
|
||||
return strings.HasPrefix(key, prefixes.GrantPrefix) || strings.HasPrefix(key, prefixes.MetadataPrefix)
|
||||
}
|
||||
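The hybrid backend above keeps most metadata in extended attributes and offloads grant/metadata attributes to a per-node msgpack file once they exceed offloadLimit, writing that file atomically via renameio. A minimal sketch of just that offload file format, using the same libraries; the attribute name and the file path are illustrative assumptions:

```go
package main

import (
	"fmt"
	"os"

	"github.com/google/renameio/v2"
	"github.com/shamaton/msgpack/v2"
)

func main() {
	// Offloaded metadata is a msgpack-encoded map of attribute name -> raw value.
	attribs := map[string][]byte{
		"user.oc.grant.u:alice": []byte("some-grant-blob"), // assumed attribute for this sketch
	}

	d, err := msgpack.Marshal(attribs)
	if err != nil {
		panic(err)
	}

	// renameio writes to a temp file and renames it into place, so readers
	// never observe a partially written metadata file.
	path := "/tmp/example.mpk"
	if err := renameio.WriteFile(path, d, 0600); err != nil {
		panic(err)
	}

	raw, err := os.ReadFile(path)
	if err != nil {
		panic(err)
	}
	loaded := map[string][]byte{}
	if err := msgpack.Unmarshal(raw, &loaded); err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", loaded["user.oc.grant.u:alice"])
}
```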
@@ -126,20 +126,6 @@ func (b MessagePackBackend) GetInt64(ctx context.Context, n MetadataNode, key st
|
||||
return i, nil
|
||||
}
|
||||
|
||||
// List retrieves a list of names of extended attributes associated with the
|
||||
// given path in the file system.
|
||||
func (b MessagePackBackend) List(ctx context.Context, n MetadataNode) ([]string, error) {
|
||||
attribs, err := b.loadAttributes(ctx, n, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
keys := []string{}
|
||||
for k := range attribs {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
// Set sets one attribute for the given path
|
||||
func (b MessagePackBackend) Set(ctx context.Context, n MetadataNode, key string, val []byte) error {
|
||||
return b.SetMultiple(ctx, n, map[string][]byte{key: val}, true)
|
||||
|
||||
@@ -54,7 +54,6 @@ type Backend interface {
|
||||
|
||||
Get(ctx context.Context, n MetadataNode, key string) ([]byte, error)
|
||||
GetInt64(ctx context.Context, n MetadataNode, key string) (int64, error)
|
||||
List(ctx context.Context, n MetadataNode) (attribs []string, err error)
|
||||
Set(ctx context.Context, n MetadataNode, key string, val []byte) error
|
||||
SetMultiple(ctx context.Context, n MetadataNode, attribs map[string][]byte, acquireLock bool) error
|
||||
Remove(ctx context.Context, n MetadataNode, key string, acquireLock bool) error
|
||||
@@ -94,12 +93,6 @@ func (NullBackend) GetInt64(ctx context.Context, n MetadataNode, key string) (in
|
||||
return 0, errUnconfiguredError
|
||||
}
|
||||
|
||||
// List retrieves a list of names of extended attributes associated with the
|
||||
// given path in the file system.
|
||||
func (NullBackend) List(ctx context.Context, n MetadataNode) ([]string, error) {
|
||||
return nil, errUnconfiguredError
|
||||
}
|
||||
|
||||
// Set sets one attribute for the given path
|
||||
func (NullBackend) Set(ctx context.Context, n MetadataNode, key string, val []byte) error {
|
||||
return errUnconfiguredError
|
||||
|
||||
@@ -87,12 +87,6 @@ func (b XattrsBackend) GetInt64(ctx context.Context, n MetadataNode, key string)
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// List retrieves a list of names of extended attributes associated with the
|
||||
// given path in the file system.
|
||||
func (b XattrsBackend) List(ctx context.Context, n MetadataNode) (attribs []string, err error) {
|
||||
return b.list(ctx, n, true)
|
||||
}
|
||||
|
||||
func (b XattrsBackend) list(ctx context.Context, n MetadataNode, acquireLock bool) (attribs []string, err error) {
|
||||
filePath := n.InternalPath()
|
||||
attrs, err := xattr.List(filePath)
|
||||
|
||||
4
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/recycle.go
generated
vendored
@@ -101,8 +101,8 @@ func (tb *DecomposedfsTrashbin) ListRecycle(ctx context.Context, ref *provider.R
|
||||
items := make([]*provider.RecycleItem, 0)
|
||||
|
||||
trashRootPath := filepath.Join(tb.getRecycleRoot(spaceID), lookup.Pathify(key, 4, 2))
|
||||
originalPath, _, timeSuffix, err := readTrashLink(trashRootPath)
|
||||
originalNode := node.NewBaseNode(spaceID, key, tb.fs.lu)
|
||||
originalPath, id, timeSuffix, err := readTrashLink(trashRootPath)
|
||||
originalNode := node.NewBaseNode(spaceID, id+node.TrashIDDelimiter+timeSuffix, tb.fs.lu)
|
||||
if err != nil {
|
||||
sublog.Error().Err(err).Str("trashRoot", trashRootPath).Msg("error reading trash link")
|
||||
return nil, err
|
||||
|
||||
5
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/tree.go
generated
vendored
@@ -884,6 +884,9 @@ func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) (
|
||||
|
||||
trashItem = filepath.Join(t.lookup.InternalRoot(), "spaces", lookup.Pathify(spaceID, 1, 2), "trash", lookup.Pathify(key, 4, 2))
|
||||
resolvedTrashRootNodePath, err := filepath.EvalSymlinks(trashItem)
|
||||
trashedNodeId := nodeFullIDRegep.ReplaceAllString(resolvedTrashRootNodePath, "$1")
|
||||
trashedNodeId = strings.ReplaceAll(trashedNodeId, "/", "")
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -936,7 +939,7 @@ func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) (
|
||||
origin = "/"
|
||||
|
||||
// lookup origin path in extended attributes
|
||||
rootNode := node.NewBaseNode(spaceID, nodeID, t.lookup)
|
||||
rootNode := node.NewBaseNode(spaceID, trashedNodeId, t.lookup)
|
||||
if attrBytes, err = backend.Get(ctx, rootNode, prefixes.TrashOriginAttr); err == nil {
|
||||
origin = filepath.Join(string(attrBytes), path)
|
||||
} else {
|
||||
|
||||
50
vendor/github.com/prometheus/client_golang/prometheus/atomic_update.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// atomicUpdateFloat atomically updates the float64 value pointed to by bits
|
||||
// using the provided updateFunc, with an exponential backoff on contention.
|
||||
func atomicUpdateFloat(bits *uint64, updateFunc func(float64) float64) {
|
||||
const (
|
||||
// both numbers are derived from empirical observations
|
||||
// documented in this PR: https://github.com/prometheus/client_golang/pull/1661
|
||||
maxBackoff = 320 * time.Millisecond
|
||||
initialBackoff = 10 * time.Millisecond
|
||||
)
|
||||
backoff := initialBackoff
|
||||
|
||||
for {
|
||||
loadedBits := atomic.LoadUint64(bits)
|
||||
oldFloat := math.Float64frombits(loadedBits)
|
||||
newFloat := updateFunc(oldFloat)
|
||||
newBits := math.Float64bits(newFloat)
|
||||
|
||||
if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
|
||||
break
|
||||
} else {
|
||||
// Exponential backoff with sleep and cap to avoid infinite wait
|
||||
time.Sleep(backoff)
|
||||
backoff *= 2
|
||||
if backoff > maxBackoff {
|
||||
backoff = maxBackoff
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
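atomicUpdateFloat above generalizes the compare-and-swap loop that counter.Add and gauge.Add previously open-coded (see the following hunks), adding exponential backoff under contention. A standalone sketch of the underlying technique, without the backoff, to show why the float is carried as a uint64 bit pattern:

```go
package main

import (
	"fmt"
	"math"
	"sync"
	"sync/atomic"
)

// casAddFloat stores a float64 in a uint64 bit pattern and updates it with a
// CAS loop; this mirrors the vendored helper minus the contention backoff.
func casAddFloat(bits *uint64, delta float64) {
	for {
		old := atomic.LoadUint64(bits)
		newBits := math.Float64bits(math.Float64frombits(old) + delta)
		if atomic.CompareAndSwapUint64(bits, old, newBits) {
			return
		}
	}
}

func main() {
	var bits uint64
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				casAddFloat(&bits, 0.5)
			}
		}()
	}
	wg.Wait()
	fmt.Println(math.Float64frombits(atomic.LoadUint64(&bits))) // 4000
}
```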
10
vendor/github.com/prometheus/client_golang/prometheus/counter.go
generated
vendored
@@ -134,13 +134,9 @@ func (c *counter) Add(v float64) {
		return
	}

	for {
		oldBits := atomic.LoadUint64(&c.valBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
		if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
			return
		}
	}
	atomicUpdateFloat(&c.valBits, func(oldVal float64) float64 {
		return oldVal + v
	})
}

func (c *counter) AddWithExemplar(v float64, e Labels) {
vendor/github.com/prometheus/client_golang/prometheus/desc.go (generated, vendored, 15 changed lines)
@@ -189,12 +189,15 @@ func (d *Desc) String() string {
			fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
		)
	}
	vlStrings := make([]string, 0, len(d.variableLabels.names))
	for _, vl := range d.variableLabels.names {
		if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
			vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
		} else {
			vlStrings = append(vlStrings, vl)
	vlStrings := []string{}
	if d.variableLabels != nil {
		vlStrings = make([]string, 0, len(d.variableLabels.names))
		for _, vl := range d.variableLabels.names {
			if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
				vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
			} else {
				vlStrings = append(vlStrings, vl)
			}
		}
	}
	return fmt.Sprintf(
vendor/github.com/prometheus/client_golang/prometheus/gauge.go (generated, vendored, 10 changed lines)
@@ -120,13 +120,9 @@ func (g *gauge) Dec() {
}

func (g *gauge) Add(val float64) {
	for {
		oldBits := atomic.LoadUint64(&g.valBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
		if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
			return
		}
	}
	atomicUpdateFloat(&g.valBits, func(oldVal float64) float64 {
		return oldVal + val
	})
}

func (g *gauge) Sub(val float64) {
vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go (generated, vendored, 2 changed lines)
@@ -288,7 +288,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
}

func attachOriginalName(desc, origName string) string {
	return fmt.Sprintf("%s Sourced from %s", desc, origName)
	return fmt.Sprintf("%s Sourced from %s.", desc, origName)
}

// Describe returns all descriptions of the collector.
vendor/github.com/prometheus/client_golang/prometheus/histogram.go (generated, vendored, 259 changed lines)
@@ -14,6 +14,7 @@
package prometheus

import (
	"errors"
	"fmt"
	"math"
	"runtime"
@@ -28,6 +29,11 @@ import (
	"google.golang.org/protobuf/types/known/timestamppb"
)

const (
	nativeHistogramSchemaMaximum = 8
	nativeHistogramSchemaMinimum = -4
)

// nativeHistogramBounds for the frac of observed values. Only relevant for
// schema > 0. The position in the slice is the schema. (0 is never used, just
// here for convenience of using the schema directly as the index.)
@@ -330,11 +336,11 @@ func ExponentialBuckets(start, factor float64, count int) []float64 {
// used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is 0 or negative, if 'min' is 0 or negative.
func ExponentialBucketsRange(min, max float64, count int) []float64 {
func ExponentialBucketsRange(minBucket, maxBucket float64, count int) []float64 {
	if count < 1 {
		panic("ExponentialBucketsRange count needs a positive count")
	}
	if min <= 0 {
	if minBucket <= 0 {
		panic("ExponentialBucketsRange min needs to be greater than 0")
	}

@@ -342,12 +348,12 @@ func ExponentialBucketsRange(min, max float64, count int) []float64 {
	// max = min*growthFactor^(bucketCount-1)

	// We know max/min and highest bucket. Solve for growthFactor.
	growthFactor := math.Pow(max/min, 1.0/float64(count-1))
	growthFactor := math.Pow(maxBucket/minBucket, 1.0/float64(count-1))

	// Now that we know growthFactor, solve for each bucket.
	buckets := make([]float64, count)
	for i := 1; i <= count; i++ {
		buckets[i-1] = min * math.Pow(growthFactor, float64(i-1))
		buckets[i-1] = minBucket * math.Pow(growthFactor, float64(i-1))
	}
	return buckets
}
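Note: the hunk above only renames the parameters, so the public API is unchanged. A quick usage sketch (metric name and observed value are illustrative):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// 5 buckets spread exponentially between 1 and 16: roughly [1, 2, 4, 8, 16].
	buckets := prometheus.ExponentialBucketsRange(1, 16, 5)
	fmt.Println(buckets)

	hist := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "request_duration_seconds",
		Help:    "Example histogram using ExponentialBucketsRange.",
		Buckets: buckets,
	})
	hist.Observe(3.2)
}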
@@ -858,15 +864,35 @@ func (h *histogram) Write(out *dto.Metric) error {
// findBucket returns the index of the bucket for the provided value, or
// len(h.upperBounds) for the +Inf bucket.
func (h *histogram) findBucket(v float64) int {
	// TODO(beorn7): For small numbers of buckets (<30), a linear search is
	// slightly faster than the binary search. If we really care, we could
	// switch from one search strategy to the other depending on the number
	// of buckets.
	//
	// Microbenchmarks (BenchmarkHistogramNoLabels):
	// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
	// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
	n := len(h.upperBounds)
	if n == 0 {
		return 0
	}

	// Early exit: if v is less than or equal to the first upper bound, return 0
	if v <= h.upperBounds[0] {
		return 0
	}

	// Early exit: if v is greater than the last upper bound, return len(h.upperBounds)
	if v > h.upperBounds[n-1] {
		return n
	}

	// For small arrays, use simple linear search
	// "magic number" 35 is result of tests on couple different (AWS and baremetal) servers
	// see more details here: https://github.com/prometheus/client_golang/pull/1662
	if n < 35 {
		for i, bound := range h.upperBounds {
			if v <= bound {
				return i
			}
		}
		// If v is greater than all upper bounds, return len(h.upperBounds)
		return n
	}

	// For larger arrays, use stdlib's binary search
	return sort.SearchFloat64s(h.upperBounds, v)
}
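A self-contained sketch of the same hybrid search strategy on a plain sorted slice (the threshold of 35 is taken from the hunk above; the findUpperBound helper is hypothetical and only illustrates the idea):

package main

import (
	"fmt"
	"sort"
)

// findUpperBound returns the index of the first bound >= v, or len(bounds) if
// v is larger than every bound, switching between linear and binary search
// around the empirically chosen threshold of 35 elements.
func findUpperBound(bounds []float64, v float64) int {
	if len(bounds) < 35 {
		for i, b := range bounds {
			if v <= b {
				return i
			}
		}
		return len(bounds)
	}
	return sort.SearchFloat64s(bounds, v)
}

func main() {
	bounds := []float64{0.1, 0.5, 1, 5, 10}
	fmt.Println(findUpperBound(bounds, 0.7)) // 2
	fmt.Println(findUpperBound(bounds, 42))  // 5 (the +Inf bucket)
}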
@@ -1440,9 +1466,9 @@ func pickSchema(bucketFactor float64) int32 {
	floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
	switch {
	case floor <= -8:
		return 8
		return nativeHistogramSchemaMaximum
	case floor >= 4:
		return -4
		return nativeHistogramSchemaMinimum
	default:
		return -int32(floor)
	}
@@ -1621,13 +1647,9 @@ func waitForCooldown(count uint64, counts *histogramCounts) {
// atomicAddFloat adds the provided float atomically to another float
// represented by the bit pattern the bits pointer is pointing to.
func atomicAddFloat(bits *uint64, v float64) {
	for {
		loadedBits := atomic.LoadUint64(bits)
		newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
		if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
			break
		}
	}
	atomicUpdateFloat(bits, func(oldVal float64) float64 {
		return oldVal + v
	})
}
// atomicDecUint32 atomically decrements the uint32 p points to. See
|
||||
@@ -1835,3 +1857,196 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
|
||||
n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
|
||||
}
|
||||
}
|
||||
|
||||
type constNativeHistogram struct {
|
||||
desc *Desc
|
||||
dto.Histogram
|
||||
labelPairs []*dto.LabelPair
|
||||
}
|
||||
|
||||
func validateCount(sum float64, count uint64, negativeBuckets, positiveBuckets map[int]int64, zeroBucket uint64) error {
|
||||
var bucketPopulationSum int64
|
||||
for _, v := range positiveBuckets {
|
||||
bucketPopulationSum += v
|
||||
}
|
||||
for _, v := range negativeBuckets {
|
||||
bucketPopulationSum += v
|
||||
}
|
||||
bucketPopulationSum += int64(zeroBucket)
|
||||
|
||||
// If the sum of observations is NaN, the number of observations must be greater or equal to the sum of all bucket counts.
|
||||
// Otherwise, the number of observations must be equal to the sum of all bucket counts .
|
||||
|
||||
if math.IsNaN(sum) && bucketPopulationSum > int64(count) ||
|
||||
!math.IsNaN(sum) && bucketPopulationSum != int64(count) {
|
||||
return errors.New("the sum of all bucket populations exceeds the count of observations")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewConstNativeHistogram returns a metric representing a Prometheus native histogram with
|
||||
// fixed values for the count, sum, and positive/negative/zero bucket counts. As those parameters
|
||||
// cannot be changed, the returned value does not implement the Histogram
|
||||
// interface (but only the Metric interface). Users of this package will not
|
||||
// have much use for it in regular operations. However, when implementing custom
|
||||
// OpenTelemetry Collectors, it is useful as a throw-away metric that is generated on the fly
|
||||
// to send it to Prometheus in the Collect method.
|
||||
//
|
||||
// zeroBucket counts all (positive and negative)
|
||||
// observations in the zero bucket (with an absolute value less or equal
|
||||
// the current threshold).
|
||||
// positiveBuckets and negativeBuckets are separate maps for negative and positive
|
||||
// observations. The map's value is an int64, counting observations in
|
||||
// that bucket. The map's key is the
|
||||
// index of the bucket according to the used
|
||||
// Schema. Index 0 is for an upper bound of 1 in positive buckets and for a lower bound of -1 in negative buckets.
|
||||
// NewConstNativeHistogram returns an error if
|
||||
// - the length of labelValues is not consistent with the variable labels in Desc or if Desc is invalid.
|
||||
// - the schema passed is not between 8 and -4
|
||||
// - the sum of counts in all buckets including the zero bucket does not equal the count if sum is not NaN (or exceeds the count if sum is NaN)
|
||||
//
|
||||
// See https://opentelemetry.io/docs/specs/otel/compatibility/prometheus_and_openmetrics/#exponential-histograms for more details about the conversion from OTel to Prometheus.
|
||||
func NewConstNativeHistogram(
|
||||
desc *Desc,
|
||||
count uint64,
|
||||
sum float64,
|
||||
positiveBuckets, negativeBuckets map[int]int64,
|
||||
zeroBucket uint64,
|
||||
schema int32,
|
||||
zeroThreshold float64,
|
||||
createdTimestamp time.Time,
|
||||
labelValues ...string,
|
||||
) (Metric, error) {
|
||||
if desc.err != nil {
|
||||
return nil, desc.err
|
||||
}
|
||||
if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if schema > nativeHistogramSchemaMaximum || schema < nativeHistogramSchemaMinimum {
|
||||
return nil, errors.New("invalid native histogram schema")
|
||||
}
|
||||
if err := validateCount(sum, count, negativeBuckets, positiveBuckets, zeroBucket); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
NegativeSpan, NegativeDelta := makeBucketsFromMap(negativeBuckets)
|
||||
PositiveSpan, PositiveDelta := makeBucketsFromMap(positiveBuckets)
|
||||
ret := &constNativeHistogram{
|
||||
desc: desc,
|
||||
Histogram: dto.Histogram{
|
||||
CreatedTimestamp: timestamppb.New(createdTimestamp),
|
||||
Schema: &schema,
|
||||
ZeroThreshold: &zeroThreshold,
|
||||
SampleCount: &count,
|
||||
SampleSum: &sum,
|
||||
|
||||
NegativeSpan: NegativeSpan,
|
||||
NegativeDelta: NegativeDelta,
|
||||
|
||||
PositiveSpan: PositiveSpan,
|
||||
PositiveDelta: PositiveDelta,
|
||||
|
||||
ZeroCount: proto.Uint64(zeroBucket),
|
||||
},
|
||||
labelPairs: MakeLabelPairs(desc, labelValues),
|
||||
}
|
||||
if *ret.ZeroThreshold == 0 && *ret.ZeroCount == 0 && len(ret.PositiveSpan) == 0 && len(ret.NegativeSpan) == 0 {
|
||||
ret.PositiveSpan = []*dto.BucketSpan{{
|
||||
Offset: proto.Int32(0),
|
||||
Length: proto.Uint32(0),
|
||||
}}
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// MustNewConstNativeHistogram is a version of NewConstNativeHistogram that panics where
|
||||
// NewConstNativeHistogram would have returned an error.
|
||||
func MustNewConstNativeHistogram(
|
||||
desc *Desc,
|
||||
count uint64,
|
||||
sum float64,
|
||||
positiveBuckets, negativeBuckets map[int]int64,
|
||||
zeroBucket uint64,
|
||||
nativeHistogramSchema int32,
|
||||
nativeHistogramZeroThreshold float64,
|
||||
createdTimestamp time.Time,
|
||||
labelValues ...string,
|
||||
) Metric {
|
||||
nativehistogram, err := NewConstNativeHistogram(desc,
|
||||
count,
|
||||
sum,
|
||||
positiveBuckets,
|
||||
negativeBuckets,
|
||||
zeroBucket,
|
||||
nativeHistogramSchema,
|
||||
nativeHistogramZeroThreshold,
|
||||
createdTimestamp,
|
||||
labelValues...)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return nativehistogram
|
||||
}
|
||||
|
||||
func (h *constNativeHistogram) Desc() *Desc {
|
||||
return h.desc
|
||||
}
|
||||
|
||||
func (h *constNativeHistogram) Write(out *dto.Metric) error {
|
||||
out.Histogram = &h.Histogram
|
||||
out.Label = h.labelPairs
|
||||
return nil
|
||||
}
|
||||
|
||||
func makeBucketsFromMap(buckets map[int]int64) ([]*dto.BucketSpan, []int64) {
|
||||
if len(buckets) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
var ii []int
|
||||
for k := range buckets {
|
||||
ii = append(ii, k)
|
||||
}
|
||||
sort.Ints(ii)
|
||||
|
||||
var (
|
||||
spans []*dto.BucketSpan
|
||||
deltas []int64
|
||||
prevCount int64
|
||||
nextI int
|
||||
)
|
||||
|
||||
appendDelta := func(count int64) {
|
||||
*spans[len(spans)-1].Length++
|
||||
deltas = append(deltas, count-prevCount)
|
||||
prevCount = count
|
||||
}
|
||||
|
||||
for n, i := range ii {
|
||||
count := buckets[i]
|
||||
// Multiple spans with only small gaps in between are probably
|
||||
// encoded more efficiently as one larger span with a few empty
|
||||
// buckets. Needs some research to find the sweet spot. For now,
|
||||
// we assume that gaps of one or two buckets should not create
|
||||
// a new span.
|
||||
iDelta := int32(i - nextI)
|
||||
if n == 0 || iDelta > 2 {
|
||||
// We have to create a new span, either because we are
|
||||
// at the very beginning, or because we have found a gap
|
||||
// of more than two buckets.
|
||||
spans = append(spans, &dto.BucketSpan{
|
||||
Offset: proto.Int32(iDelta),
|
||||
Length: proto.Uint32(0),
|
||||
})
|
||||
} else {
|
||||
// We have found a small gap (or no gap at all).
|
||||
// Insert empty buckets as needed.
|
||||
for j := int32(0); j < iDelta; j++ {
|
||||
appendDelta(0)
|
||||
}
|
||||
}
|
||||
appendDelta(count)
|
||||
nextI = i + 1
|
||||
}
|
||||
return spans, deltas
|
||||
}
|
||||
|
||||
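The NewConstNativeHistogram constructor added in the histogram.go hunk above is meant to be called from a custom Collector's Collect method. A hedged sketch of a direct call (desc, bucket counts and the label value are invented for illustration; the counts are chosen so they satisfy the validateCount check shown above):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc(
		"example_latency_native_histogram",
		"Example const native histogram.",
		[]string{"service"}, nil,
	)

	// 3 observations in positive buckets + 1 in the zero bucket = count of 4.
	m, err := prometheus.NewConstNativeHistogram(
		desc,
		4,                         // count
		2.5,                       // sum
		map[int]int64{0: 2, 1: 1}, // positive buckets, keyed by schema index
		nil,                       // negative buckets
		1,                         // zero bucket
		0,                         // schema (bucket growth factor 2)
		1e-9,                      // zero threshold
		time.Now(),                // created timestamp
		"checkout",                // label value for "service"
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Desc())
}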
vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go (generated, vendored, 19 changed lines)
@@ -22,17 +22,18 @@ import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func min(a, b int) int {
|
||||
func minInt(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func max(a, b int) int {
|
||||
func maxInt(a, b int) int {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
@@ -427,12 +428,12 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
|
||||
if codes[0].Tag == 'e' {
|
||||
c := codes[0]
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
|
||||
codes[0] = OpCode{c.Tag, maxInt(i1, i2-n), i2, maxInt(j1, j2-n), j2}
|
||||
}
|
||||
if codes[len(codes)-1].Tag == 'e' {
|
||||
c := codes[len(codes)-1]
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
|
||||
codes[len(codes)-1] = OpCode{c.Tag, i1, minInt(i2, i1+n), j1, minInt(j2, j1+n)}
|
||||
}
|
||||
nn := n + n
|
||||
groups := [][]OpCode{}
|
||||
@@ -443,12 +444,12 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
|
||||
// there is a large range with no changes.
|
||||
if c.Tag == 'e' && i2-i1 > nn {
|
||||
group = append(group, OpCode{
|
||||
c.Tag, i1, min(i2, i1+n),
|
||||
j1, min(j2, j1+n),
|
||||
c.Tag, i1, minInt(i2, i1+n),
|
||||
j1, minInt(j2, j1+n),
|
||||
})
|
||||
groups = append(groups, group)
|
||||
group = []OpCode{}
|
||||
i1, j1 = max(i1, i2-n), max(j1, j2-n)
|
||||
i1, j1 = maxInt(i1, i2-n), maxInt(j1, j2-n)
|
||||
}
|
||||
group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
|
||||
}
|
||||
@@ -515,7 +516,7 @@ func (m *SequenceMatcher) QuickRatio() float64 {
|
||||
// is faster to compute than either .Ratio() or .QuickRatio().
|
||||
func (m *SequenceMatcher) RealQuickRatio() float64 {
|
||||
la, lb := len(m.a), len(m.b)
|
||||
return calculateRatio(min(la, lb), la+lb)
|
||||
return calculateRatio(minInt(la, lb), la+lb)
|
||||
}
|
||||
|
||||
// Convert range to the "ed" format
|
||||
@@ -524,7 +525,7 @@ func formatRangeUnified(start, stop int) string {
|
||||
beginning := start + 1 // lines start numbering with one
|
||||
length := stop - start
|
||||
if length == 1 {
|
||||
return fmt.Sprintf("%d", beginning)
|
||||
return strconv.Itoa(beginning)
|
||||
}
|
||||
if length == 0 {
|
||||
beginning-- // empty ranges begin at line just before the range
|
||||
|
||||
vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go (generated, vendored, 3 changed lines)
@@ -66,7 +66,8 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool)
		name += "_total"
	}

	valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
	// Our current conversion moves to legacy naming, so use legacy validation.
	valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name)
	switch d.Kind {
	case metrics.KindUint64:
	case metrics.KindFloat64:
vendor/github.com/prometheus/client_golang/prometheus/metric.go (generated, vendored, 24 changed lines)
@@ -108,15 +108,23 @@ func BuildFQName(namespace, subsystem, name string) string {
	if name == "" {
		return ""
	}
	switch {
	case namespace != "" && subsystem != "":
		return strings.Join([]string{namespace, subsystem, name}, "_")
	case namespace != "":
		return strings.Join([]string{namespace, name}, "_")
	case subsystem != "":
		return strings.Join([]string{subsystem, name}, "_")

	sb := strings.Builder{}
	sb.Grow(len(namespace) + len(subsystem) + len(name) + 2)

	if namespace != "" {
		sb.WriteString(namespace)
		sb.WriteString("_")
	}
	return name

	if subsystem != "" {
		sb.WriteString(subsystem)
		sb.WriteString("_")
	}

	sb.WriteString(name)

	return sb.String()
}

type invalidMetric struct {
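BuildFQName's behavior is unchanged by the rewrite above: it still joins the non-empty parts with underscores. A quick usage sketch (namespace, subsystem and name values are illustrative):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// All three parts present.
	fmt.Println(prometheus.BuildFQName("opencloud", "graph", "requests_total"))
	// -> opencloud_graph_requests_total

	// An empty subsystem is simply skipped.
	fmt.Println(prometheus.BuildFQName("opencloud", "", "requests_total"))
	// -> opencloud_requests_total

	// An empty name yields an empty string.
	fmt.Println(prometheus.BuildFQName("opencloud", "graph", ""))
	// -> (empty)
}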
vendor/github.com/prometheus/client_golang/prometheus/process_collector.go (generated, vendored, 31 changed lines)
@@ -23,6 +23,7 @@ import (
|
||||
|
||||
type processCollector struct {
|
||||
collectFn func(chan<- Metric)
|
||||
describeFn func(chan<- *Desc)
|
||||
pidFn func() (int, error)
|
||||
reportErrors bool
|
||||
cpuTotal *Desc
|
||||
@@ -122,26 +123,23 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector {
|
||||
// Set up process metric collection if supported by the runtime.
|
||||
if canCollectProcess() {
|
||||
c.collectFn = c.processCollect
|
||||
c.describeFn = c.describe
|
||||
} else {
|
||||
c.collectFn = func(ch chan<- Metric) {
|
||||
c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
|
||||
}
|
||||
c.collectFn = c.errorCollectFn
|
||||
c.describeFn = c.errorDescribeFn
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// Describe returns all descriptions of the collector.
|
||||
func (c *processCollector) Describe(ch chan<- *Desc) {
|
||||
ch <- c.cpuTotal
|
||||
ch <- c.openFDs
|
||||
ch <- c.maxFDs
|
||||
ch <- c.vsize
|
||||
ch <- c.maxVsize
|
||||
ch <- c.rss
|
||||
ch <- c.startTime
|
||||
ch <- c.inBytes
|
||||
ch <- c.outBytes
|
||||
func (c *processCollector) errorCollectFn(ch chan<- Metric) {
|
||||
c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
|
||||
}
|
||||
|
||||
func (c *processCollector) errorDescribeFn(ch chan<- *Desc) {
|
||||
if c.reportErrors {
|
||||
ch <- NewInvalidDesc(errors.New("process metrics not supported on this platform"))
|
||||
}
|
||||
}
|
||||
|
||||
// Collect returns the current state of all metrics of the collector.
|
||||
@@ -149,6 +147,11 @@ func (c *processCollector) Collect(ch chan<- Metric) {
|
||||
c.collectFn(ch)
|
||||
}
|
||||
|
||||
// Describe returns all descriptions of the collector.
|
||||
func (c *processCollector) Describe(ch chan<- *Desc) {
|
||||
c.describeFn(ch)
|
||||
}
|
||||
|
||||
func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
|
||||
if !c.reportErrors {
|
||||
return
|
||||
|
||||
vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.c (generated, vendored, new file, 84 lines)
@@ -0,0 +1,84 @@
|
||||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build darwin && cgo
|
||||
|
||||
#include <mach/mach_init.h>
|
||||
#include <mach/task.h>
|
||||
#include <mach/mach_vm.h>
|
||||
|
||||
// The compiler warns that mach/shared_memory_server.h is deprecated, and to use
|
||||
// mach/shared_region.h instead. But that doesn't define
|
||||
// SHARED_DATA_REGION_SIZE or SHARED_TEXT_REGION_SIZE, so redefine them here and
|
||||
// avoid a warning message when running tests.
|
||||
#define GLOBAL_SHARED_TEXT_SEGMENT 0x90000000U
|
||||
#define SHARED_DATA_REGION_SIZE 0x10000000
|
||||
#define SHARED_TEXT_REGION_SIZE 0x10000000
|
||||
|
||||
|
||||
int get_memory_info(unsigned long long *rss, unsigned long long *vsize)
|
||||
{
|
||||
// This is lightly adapted from how ps(1) obtains its memory info.
|
||||
// https://github.com/apple-oss-distributions/adv_cmds/blob/8744084ea0ff41ca4bb96b0f9c22407d0e48e9b7/ps/tasks.c#L109
|
||||
|
||||
kern_return_t error;
|
||||
task_t task = MACH_PORT_NULL;
|
||||
mach_task_basic_info_data_t info;
|
||||
mach_msg_type_number_t info_count = MACH_TASK_BASIC_INFO_COUNT;
|
||||
|
||||
error = task_info(
|
||||
mach_task_self(),
|
||||
MACH_TASK_BASIC_INFO,
|
||||
(task_info_t) &info,
|
||||
&info_count );
|
||||
|
||||
if( error != KERN_SUCCESS )
|
||||
{
|
||||
return error;
|
||||
}
|
||||
|
||||
*rss = info.resident_size;
|
||||
*vsize = info.virtual_size;
|
||||
|
||||
{
|
||||
vm_region_basic_info_data_64_t b_info;
|
||||
mach_vm_address_t address = GLOBAL_SHARED_TEXT_SEGMENT;
|
||||
mach_vm_size_t size;
|
||||
mach_port_t object_name;
|
||||
|
||||
/*
|
||||
* try to determine if this task has the split libraries
|
||||
* mapped in... if so, adjust its virtual size down by
|
||||
* the 2 segments that are used for split libraries
|
||||
*/
|
||||
info_count = VM_REGION_BASIC_INFO_COUNT_64;
|
||||
|
||||
error = mach_vm_region(
|
||||
mach_task_self(),
|
||||
&address,
|
||||
&size,
|
||||
VM_REGION_BASIC_INFO_64,
|
||||
(vm_region_info_t) &b_info,
|
||||
&info_count,
|
||||
&object_name);
|
||||
|
||||
if (error == KERN_SUCCESS) {
|
||||
if (b_info.reserved && size == (SHARED_TEXT_REGION_SIZE) &&
|
||||
*vsize > (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE)) {
|
||||
*vsize -= (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.go (generated, vendored, new file, 51 lines)
@@ -0,0 +1,51 @@
|
||||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build darwin && cgo
|
||||
|
||||
package prometheus
|
||||
|
||||
/*
|
||||
int get_memory_info(unsigned long long *rss, unsigned long long *vs);
|
||||
*/
|
||||
import "C"
|
||||
import "fmt"
|
||||
|
||||
func getMemory() (*memoryInfo, error) {
|
||||
var rss, vsize C.ulonglong
|
||||
|
||||
if err := C.get_memory_info(&rss, &vsize); err != 0 {
|
||||
return nil, fmt.Errorf("task_info() failed with 0x%x", int(err))
|
||||
}
|
||||
|
||||
return &memoryInfo{vsize: uint64(vsize), rss: uint64(rss)}, nil
|
||||
}
|
||||
|
||||
// describe returns all descriptions of the collector for Darwin.
|
||||
// Ensure that this list of descriptors is kept in sync with the metrics collected
|
||||
// in the processCollect method. Any changes to the metrics in processCollect
|
||||
// (such as adding or removing metrics) should be reflected in this list of descriptors.
|
||||
func (c *processCollector) describe(ch chan<- *Desc) {
|
||||
ch <- c.cpuTotal
|
||||
ch <- c.openFDs
|
||||
ch <- c.maxFDs
|
||||
ch <- c.maxVsize
|
||||
ch <- c.startTime
|
||||
ch <- c.rss
|
||||
ch <- c.vsize
|
||||
|
||||
/* the process could be collected but not implemented yet
|
||||
ch <- c.inBytes
|
||||
ch <- c.outBytes
|
||||
*/
|
||||
}
|
||||
vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go (generated, vendored, new file, 128 lines)
@@ -0,0 +1,128 @@
|
||||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// notImplementedErr is returned by stub functions that replace cgo functions, when cgo
|
||||
// isn't available.
|
||||
var notImplementedErr = errors.New("not implemented")
|
||||
|
||||
type memoryInfo struct {
|
||||
vsize uint64 // Virtual memory size in bytes
|
||||
rss uint64 // Resident memory size in bytes
|
||||
}
|
||||
|
||||
func canCollectProcess() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func getSoftLimit(which int) (uint64, error) {
|
||||
rlimit := syscall.Rlimit{}
|
||||
|
||||
if err := syscall.Getrlimit(which, &rlimit); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return rlimit.Cur, nil
|
||||
}
|
||||
|
||||
func getOpenFileCount() (float64, error) {
|
||||
// Alternately, the undocumented proc_pidinfo(PROC_PIDLISTFDS) can be used to
|
||||
// return a list of open fds, but that requires a way to call C APIs. The
|
||||
// benefits, however, include fewer system calls and not failing when at the
|
||||
// open file soft limit.
|
||||
|
||||
if dir, err := os.Open("/dev/fd"); err != nil {
|
||||
return 0.0, err
|
||||
} else {
|
||||
defer dir.Close()
|
||||
|
||||
// Avoid ReadDir(), as it calls stat(2) on each descriptor. Not only is
|
||||
// that info not used, but KQUEUE descriptors fail stat(2), which causes
|
||||
// the whole method to fail.
|
||||
if names, err := dir.Readdirnames(0); err != nil {
|
||||
return 0.0, err
|
||||
} else {
|
||||
// Subtract 1 to ignore the open /dev/fd descriptor above.
|
||||
return float64(len(names) - 1), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *processCollector) processCollect(ch chan<- Metric) {
|
||||
if procs, err := unix.SysctlKinfoProcSlice("kern.proc.pid", os.Getpid()); err == nil {
|
||||
if len(procs) == 1 {
|
||||
startTime := float64(procs[0].Proc.P_starttime.Nano() / 1e9)
|
||||
ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
|
||||
} else {
|
||||
err = fmt.Errorf("sysctl() returned %d proc structs (expected 1)", len(procs))
|
||||
c.reportError(ch, c.startTime, err)
|
||||
}
|
||||
} else {
|
||||
c.reportError(ch, c.startTime, err)
|
||||
}
|
||||
|
||||
// The proc structure returned by kern.proc.pid above has an Rusage member,
|
||||
// but it is not filled in, so it needs to be fetched by getrusage(2). For
|
||||
// that call, the UTime, STime, and Maxrss members are filled out, but not
|
||||
// Ixrss, Idrss, or Isrss for the memory usage. Memory stats will require
|
||||
// access to the C API to call task_info(TASK_BASIC_INFO).
|
||||
rusage := unix.Rusage{}
|
||||
|
||||
if err := unix.Getrusage(syscall.RUSAGE_SELF, &rusage); err == nil {
|
||||
cpuTime := time.Duration(rusage.Stime.Nano() + rusage.Utime.Nano()).Seconds()
|
||||
ch <- MustNewConstMetric(c.cpuTotal, CounterValue, cpuTime)
|
||||
} else {
|
||||
c.reportError(ch, c.cpuTotal, err)
|
||||
}
|
||||
|
||||
if memInfo, err := getMemory(); err == nil {
|
||||
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss))
|
||||
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize))
|
||||
} else if !errors.Is(err, notImplementedErr) {
|
||||
// Don't report an error when support is not compiled in.
|
||||
c.reportError(ch, c.rss, err)
|
||||
c.reportError(ch, c.vsize, err)
|
||||
}
|
||||
|
||||
if fds, err := getOpenFileCount(); err == nil {
|
||||
ch <- MustNewConstMetric(c.openFDs, GaugeValue, fds)
|
||||
} else {
|
||||
c.reportError(ch, c.openFDs, err)
|
||||
}
|
||||
|
||||
if openFiles, err := getSoftLimit(syscall.RLIMIT_NOFILE); err == nil {
|
||||
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(openFiles))
|
||||
} else {
|
||||
c.reportError(ch, c.maxFDs, err)
|
||||
}
|
||||
|
||||
if addressSpace, err := getSoftLimit(syscall.RLIMIT_AS); err == nil {
|
||||
ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(addressSpace))
|
||||
} else {
|
||||
c.reportError(ch, c.maxVsize, err)
|
||||
}
|
||||
|
||||
// TODO: socket(PF_SYSTEM) to fetch "com.apple.network.statistics" might
|
||||
// be able to get the per-process network send/receive counts.
|
||||
}
|
||||
vendor/github.com/prometheus/client_golang/prometheus/process_collector_nocgo_darwin.go (generated, vendored, new file, 39 lines)
@@ -0,0 +1,39 @@
|
||||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build darwin && !cgo
|
||||
|
||||
package prometheus
|
||||
|
||||
func getMemory() (*memoryInfo, error) {
|
||||
return nil, notImplementedErr
|
||||
}
|
||||
|
||||
// describe returns all descriptions of the collector for Darwin.
|
||||
// Ensure that this list of descriptors is kept in sync with the metrics collected
|
||||
// in the processCollect method. Any changes to the metrics in processCollect
|
||||
// (such as adding or removing metrics) should be reflected in this list of descriptors.
|
||||
func (c *processCollector) describe(ch chan<- *Desc) {
|
||||
ch <- c.cpuTotal
|
||||
ch <- c.openFDs
|
||||
ch <- c.maxFDs
|
||||
ch <- c.maxVsize
|
||||
ch <- c.startTime
|
||||
|
||||
/* the process could be collected but not implemented yet
|
||||
ch <- c.rss
|
||||
ch <- c.vsize
|
||||
ch <- c.inBytes
|
||||
ch <- c.outBytes
|
||||
*/
|
||||
}
|
||||
vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go (generated, vendored, 20 changed lines)
@@ -11,8 +11,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build !windows && !js && !wasip1
|
||||
// +build !windows,!js,!wasip1
|
||||
//go:build !windows && !js && !wasip1 && !darwin
|
||||
// +build !windows,!js,!wasip1,!darwin
|
||||
|
||||
package prometheus
|
||||
|
||||
@@ -78,3 +78,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
|
||||
c.reportError(ch, nil, err)
|
||||
}
|
||||
}
|
||||
|
||||
// describe returns all descriptions of the collector for others than windows, js, wasip1 and darwin.
|
||||
// Ensure that this list of descriptors is kept in sync with the metrics collected
|
||||
// in the processCollect method. Any changes to the metrics in processCollect
|
||||
// (such as adding or removing metrics) should be reflected in this list of descriptors.
|
||||
func (c *processCollector) describe(ch chan<- *Desc) {
|
||||
ch <- c.cpuTotal
|
||||
ch <- c.openFDs
|
||||
ch <- c.maxFDs
|
||||
ch <- c.vsize
|
||||
ch <- c.maxVsize
|
||||
ch <- c.rss
|
||||
ch <- c.startTime
|
||||
ch <- c.inBytes
|
||||
ch <- c.outBytes
|
||||
}
|
||||
|
||||
vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go (generated, vendored, 26 changed lines)
@@ -1,26 +0,0 @@
|
||||
// Copyright 2023 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build wasip1
|
||||
// +build wasip1
|
||||
|
||||
package prometheus
|
||||
|
||||
func canCollectProcess() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (*processCollector) processCollect(chan<- Metric) {
|
||||
// noop on this platform
|
||||
return
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Copyright 2023 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
@@ -11,8 +11,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build js
|
||||
// +build js
|
||||
//go:build wasip1 || js
|
||||
// +build wasip1 js
|
||||
|
||||
package prometheus
|
||||
|
||||
@@ -21,6 +21,13 @@ func canCollectProcess() bool {
|
||||
}
|
||||
|
||||
func (c *processCollector) processCollect(ch chan<- Metric) {
|
||||
// noop on this platform
|
||||
return
|
||||
c.errorCollectFn(ch)
|
||||
}
|
||||
|
||||
// describe returns all descriptions of the collector for wasip1 and js.
|
||||
// Ensure that this list of descriptors is kept in sync with the metrics collected
|
||||
// in the processCollect method. Any changes to the metrics in processCollect
|
||||
// (such as adding or removing metrics) should be reflected in this list of descriptors.
|
||||
func (c *processCollector) describe(ch chan<- *Desc) {
|
||||
c.errorDescribeFn(ch)
|
||||
}
|
||||
vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go (generated, vendored, 21 changed lines)
@@ -79,14 +79,10 @@ func getProcessHandleCount(handle windows.Handle) (uint32, error) {
|
||||
}
|
||||
|
||||
func (c *processCollector) processCollect(ch chan<- Metric) {
|
||||
h, err := windows.GetCurrentProcess()
|
||||
if err != nil {
|
||||
c.reportError(ch, nil, err)
|
||||
return
|
||||
}
|
||||
h := windows.CurrentProcess()
|
||||
|
||||
var startTime, exitTime, kernelTime, userTime windows.Filetime
|
||||
err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
|
||||
err := windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
|
||||
if err != nil {
|
||||
c.reportError(ch, nil, err)
|
||||
return
|
||||
@@ -111,6 +107,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
|
||||
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
|
||||
}
|
||||
|
||||
// describe returns all descriptions of the collector for windows.
|
||||
// Ensure that this list of descriptors is kept in sync with the metrics collected
|
||||
// in the processCollect method. Any changes to the metrics in processCollect
|
||||
// (such as adding or removing metrics) should be reflected in this list of descriptors.
|
||||
func (c *processCollector) describe(ch chan<- *Desc) {
|
||||
ch <- c.cpuTotal
|
||||
ch <- c.openFDs
|
||||
ch <- c.maxFDs
|
||||
ch <- c.vsize
|
||||
ch <- c.rss
|
||||
ch <- c.startTime
|
||||
}
|
||||
|
||||
func fileTimeToSeconds(ft windows.Filetime) float64 {
|
||||
return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
|
||||
}
|
||||
|
||||
vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go (generated, vendored, 23 changed lines)
@@ -207,7 +207,13 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
		if encodingHeader != string(Identity) {
			rsp.Header().Set(contentEncodingHeader, encodingHeader)
		}
		enc := expfmt.NewEncoder(w, contentType)

		var enc expfmt.Encoder
		if opts.EnableOpenMetricsTextCreatedSamples {
			enc = expfmt.NewEncoder(w, contentType, expfmt.WithCreatedLines())
		} else {
			enc = expfmt.NewEncoder(w, contentType)
		}

		// handleError handles the error according to opts.ErrorHandling
		// and returns true if we have to abort after the handling.
@@ -408,6 +414,21 @@ type HandlerOpts struct {
	// (which changes the identity of the resulting series on the Prometheus
	// server).
	EnableOpenMetrics bool
	// EnableOpenMetricsTextCreatedSamples specifies if this handler should add, extra, synthetic
	// Created Timestamps for counters, histograms and summaries, which for the current
	// version of OpenMetrics are defined as extra series with the same name and "_created"
	// suffix. See also the OpenMetrics specification for more details
	// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1
	//
	// Created timestamps are used to improve the accuracy of reset detection,
	// but the way it's designed in OpenMetrics 1.0 it also dramatically increases cardinality
	// if the scraper does not handle those metrics correctly (converting to created timestamp
	// instead of leaving those series as-is). New OpenMetrics versions might improve
	// this situation.
	//
	// Prometheus introduced the feature flag 'created-timestamp-zero-ingestion'
	// in version 2.50.0 to handle this situation.
	EnableOpenMetricsTextCreatedSamples bool
	// ProcessStartTime allows setting process start timevalue that will be exposed
	// with "Process-Start-Time-Unix" response header along with the metrics
	// payload. This allow callers to have efficient transformations to cumulative
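A hedged sketch of wiring up the new HandlerOpts field (registry, listen address and registered collector are illustrative; the option only takes effect when the scraper negotiates the OpenMetrics format):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())

	handler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		// Serve the OpenMetrics text format when the scraper asks for it...
		EnableOpenMetrics: true,
		// ...and emit the synthetic *_created series described above.
		EnableOpenMetricsTextCreatedSamples: true,
	})

	http.Handle("/metrics", handler)
	_ = http.ListenAndServe(":9090", nil)
}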
vendor/github.com/prometheus/client_golang/prometheus/summary.go (generated, vendored, 32 changed lines)
@@ -243,6 +243,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
|
||||
|
||||
s := &summary{
|
||||
desc: desc,
|
||||
now: opts.now,
|
||||
|
||||
objectives: opts.Objectives,
|
||||
sortedObjectives: make([]float64, 0, len(opts.Objectives)),
|
||||
@@ -280,6 +281,8 @@ type summary struct {
|
||||
|
||||
desc *Desc
|
||||
|
||||
now func() time.Time
|
||||
|
||||
objectives map[float64]float64
|
||||
sortedObjectives []float64
|
||||
|
||||
@@ -307,7 +310,7 @@ func (s *summary) Observe(v float64) {
|
||||
s.bufMtx.Lock()
|
||||
defer s.bufMtx.Unlock()
|
||||
|
||||
now := time.Now()
|
||||
now := s.now()
|
||||
if now.After(s.hotBufExpTime) {
|
||||
s.asyncFlush(now)
|
||||
}
|
||||
@@ -326,7 +329,7 @@ func (s *summary) Write(out *dto.Metric) error {
|
||||
s.bufMtx.Lock()
|
||||
s.mtx.Lock()
|
||||
// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
|
||||
s.swapBufs(time.Now())
|
||||
s.swapBufs(s.now())
|
||||
s.bufMtx.Unlock()
|
||||
|
||||
s.flushColdBuf()
|
||||
@@ -468,13 +471,9 @@ func (s *noObjectivesSummary) Observe(v float64) {
|
||||
n := atomic.AddUint64(&s.countAndHotIdx, 1)
|
||||
hotCounts := s.counts[n>>63]
|
||||
|
||||
for {
|
||||
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
|
||||
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
|
||||
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
|
||||
break
|
||||
}
|
||||
}
|
||||
atomicUpdateFloat(&hotCounts.sumBits, func(oldVal float64) float64 {
|
||||
return oldVal + v
|
||||
})
|
||||
// Increment count last as we take it as a signal that the observation
|
||||
// is complete.
|
||||
atomic.AddUint64(&hotCounts.count, 1)
|
||||
@@ -516,14 +515,13 @@ func (s *noObjectivesSummary) Write(out *dto.Metric) error {
|
||||
// Finally add all the cold counts to the new hot counts and reset the cold counts.
|
||||
atomic.AddUint64(&hotCounts.count, count)
|
||||
atomic.StoreUint64(&coldCounts.count, 0)
|
||||
for {
|
||||
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
|
||||
newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
|
||||
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
|
||||
atomic.StoreUint64(&coldCounts.sumBits, 0)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Use atomicUpdateFloat to update hotCounts.sumBits atomically.
|
||||
atomicUpdateFloat(&hotCounts.sumBits, func(oldVal float64) float64 {
|
||||
return oldVal + sum.GetSampleSum()
|
||||
})
|
||||
atomic.StoreUint64(&coldCounts.sumBits, 0)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
vendor/github.com/prometheus/common/expfmt/decode.go (generated, vendored, 14 changed lines)
@@ -45,7 +45,7 @@ func ResponseFormat(h http.Header) Format {
|
||||
|
||||
mediatype, params, err := mime.ParseMediaType(ct)
|
||||
if err != nil {
|
||||
return fmtUnknown
|
||||
return FmtUnknown
|
||||
}
|
||||
|
||||
const textType = "text/plain"
|
||||
@@ -53,21 +53,21 @@ func ResponseFormat(h http.Header) Format {
|
||||
switch mediatype {
|
||||
case ProtoType:
|
||||
if p, ok := params["proto"]; ok && p != ProtoProtocol {
|
||||
return fmtUnknown
|
||||
return FmtUnknown
|
||||
}
|
||||
if e, ok := params["encoding"]; ok && e != "delimited" {
|
||||
return fmtUnknown
|
||||
return FmtUnknown
|
||||
}
|
||||
return fmtProtoDelim
|
||||
return FmtProtoDelim
|
||||
|
||||
case textType:
|
||||
if v, ok := params["version"]; ok && v != TextVersion {
|
||||
return fmtUnknown
|
||||
return FmtUnknown
|
||||
}
|
||||
return fmtText
|
||||
return FmtText
|
||||
}
|
||||
|
||||
return fmtUnknown
|
||||
return FmtUnknown
|
||||
}
|
||||
|
||||
// NewDecoder returns a new decoder based on the given input format.
|
||||
|
||||
vendor/github.com/prometheus/common/expfmt/encode.go (generated, vendored, 28 changed lines)
@@ -68,7 +68,7 @@ func Negotiate(h http.Header) Format {
|
||||
if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
|
||||
switch Format(escapeParam) {
|
||||
case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
|
||||
escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
|
||||
escapingScheme = Format("; escaping=" + escapeParam)
|
||||
default:
|
||||
// If the escaping parameter is unknown, ignore it.
|
||||
}
|
||||
@@ -77,18 +77,18 @@ func Negotiate(h http.Header) Format {
|
||||
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
|
||||
switch ac.Params["encoding"] {
|
||||
case "delimited":
|
||||
return fmtProtoDelim + escapingScheme
|
||||
return FmtProtoDelim + escapingScheme
|
||||
case "text":
|
||||
return fmtProtoText + escapingScheme
|
||||
return FmtProtoText + escapingScheme
|
||||
case "compact-text":
|
||||
return fmtProtoCompact + escapingScheme
|
||||
return FmtProtoCompact + escapingScheme
|
||||
}
|
||||
}
|
||||
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
|
||||
return fmtText + escapingScheme
|
||||
return FmtText + escapingScheme
|
||||
}
|
||||
}
|
||||
return fmtText + escapingScheme
|
||||
return FmtText + escapingScheme
|
||||
}
|
||||
|
||||
// NegotiateIncludingOpenMetrics works like Negotiate but includes
|
||||
@@ -101,7 +101,7 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
|
||||
if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
|
||||
switch Format(escapeParam) {
|
||||
case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
|
||||
escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
|
||||
escapingScheme = Format("; escaping=" + escapeParam)
|
||||
default:
|
||||
// If the escaping parameter is unknown, ignore it.
|
||||
}
|
||||
@@ -110,26 +110,26 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
|
||||
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
|
||||
switch ac.Params["encoding"] {
|
||||
case "delimited":
|
||||
return fmtProtoDelim + escapingScheme
|
||||
return FmtProtoDelim + escapingScheme
|
||||
case "text":
|
||||
return fmtProtoText + escapingScheme
|
||||
return FmtProtoText + escapingScheme
|
||||
case "compact-text":
|
||||
return fmtProtoCompact + escapingScheme
|
||||
return FmtProtoCompact + escapingScheme
|
||||
}
|
||||
}
|
||||
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
|
||||
return fmtText + escapingScheme
|
||||
return FmtText + escapingScheme
|
||||
}
|
||||
if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
|
||||
switch ver {
|
||||
case OpenMetricsVersion_1_0_0:
|
||||
return fmtOpenMetrics_1_0_0 + escapingScheme
|
||||
return FmtOpenMetrics_1_0_0 + escapingScheme
|
||||
default:
|
||||
return fmtOpenMetrics_0_0_1 + escapingScheme
|
||||
return FmtOpenMetrics_0_0_1 + escapingScheme
|
||||
}
|
||||
}
|
||||
}
|
||||
return fmtText + escapingScheme
|
||||
return FmtText + escapingScheme
|
||||
}
|
||||
|
||||
// NewEncoder returns a new encoder based on content type negotiation. All
|
||||
|
||||
vendor/github.com/prometheus/common/expfmt/expfmt.go (generated, vendored, 78 changed lines)
@@ -15,7 +15,7 @@
|
||||
package expfmt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
@@ -32,24 +32,31 @@ type Format string
|
||||
// it on the wire, new content-type strings will have to be agreed upon and
|
||||
// added here.
|
||||
const (
|
||||
TextVersion = "0.0.4"
|
||||
ProtoType = `application/vnd.google.protobuf`
|
||||
ProtoProtocol = `io.prometheus.client.MetricFamily`
|
||||
protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
|
||||
TextVersion = "0.0.4"
|
||||
ProtoType = `application/vnd.google.protobuf`
|
||||
ProtoProtocol = `io.prometheus.client.MetricFamily`
|
||||
// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
|
||||
ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
|
||||
OpenMetricsType = `application/openmetrics-text`
|
||||
OpenMetricsVersion_0_0_1 = "0.0.1"
|
||||
OpenMetricsVersion_1_0_0 = "1.0.0"
|
||||
|
||||
// The Content-Type values for the different wire protocols. Note that these
|
||||
// values are now unexported. If code was relying on comparisons to these
|
||||
// constants, instead use FormatType().
|
||||
fmtUnknown Format = `<unknown>`
|
||||
fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
|
||||
fmtProtoDelim Format = protoFmt + ` encoding=delimited`
|
||||
fmtProtoText Format = protoFmt + ` encoding=text`
|
||||
fmtProtoCompact Format = protoFmt + ` encoding=compact-text`
|
||||
fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
|
||||
fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
|
||||
// The Content-Type values for the different wire protocols. Do not do direct
|
||||
// comparisons to these constants, instead use the comparison functions.
|
||||
// Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead.
|
||||
FmtUnknown Format = `<unknown>`
|
||||
// Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead.
|
||||
FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
|
||||
// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead.
|
||||
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
|
||||
// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead.
|
||||
FmtProtoText Format = ProtoFmt + ` encoding=text`
|
||||
// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
|
||||
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
|
||||
// Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
|
||||
FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
|
||||
// Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
|
||||
FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -79,17 +86,17 @@ const (
|
||||
func NewFormat(t FormatType) Format {
|
||||
switch t {
|
||||
case TypeProtoCompact:
|
||||
return fmtProtoCompact
|
||||
return FmtProtoCompact
|
||||
case TypeProtoDelim:
|
||||
return fmtProtoDelim
|
||||
return FmtProtoDelim
|
||||
case TypeProtoText:
|
||||
return fmtProtoText
|
||||
return FmtProtoText
|
||||
case TypeTextPlain:
|
||||
return fmtText
|
||||
return FmtText
|
||||
case TypeOpenMetrics:
|
||||
return fmtOpenMetrics_1_0_0
|
||||
return FmtOpenMetrics_1_0_0
|
||||
default:
|
||||
return fmtUnknown
|
||||
return FmtUnknown
|
||||
}
|
||||
}
|
||||
|
||||
@@ -97,12 +104,35 @@ func NewFormat(t FormatType) Format {
|
||||
// specified version number.
|
||||
func NewOpenMetricsFormat(version string) (Format, error) {
|
||||
if version == OpenMetricsVersion_0_0_1 {
|
||||
return fmtOpenMetrics_0_0_1, nil
|
||||
return FmtOpenMetrics_0_0_1, nil
|
||||
}
|
||||
if version == OpenMetricsVersion_1_0_0 {
|
||||
return fmtOpenMetrics_1_0_0, nil
|
||||
return FmtOpenMetrics_1_0_0, nil
|
||||
}
|
||||
return fmtUnknown, fmt.Errorf("unknown open metrics version string")
|
||||
return FmtUnknown, errors.New("unknown open metrics version string")
|
||||
}
|
||||
|
||||
// WithEscapingScheme returns a copy of Format with the specified escaping
|
||||
// scheme appended to the end. If an escaping scheme already exists it is
|
||||
// removed.
|
||||
func (f Format) WithEscapingScheme(s model.EscapingScheme) Format {
|
||||
var terms []string
|
||||
for _, p := range strings.Split(string(f), ";") {
|
||||
toks := strings.Split(p, "=")
|
||||
if len(toks) != 2 {
|
||||
trimmed := strings.TrimSpace(p)
|
||||
if len(trimmed) > 0 {
|
||||
terms = append(terms, trimmed)
|
||||
}
|
||||
continue
|
||||
}
|
||||
key := strings.TrimSpace(toks[0])
|
||||
if key != model.EscapingKey {
|
||||
terms = append(terms, strings.TrimSpace(p))
|
||||
}
|
||||
}
|
||||
terms = append(terms, model.EscapingKey+"="+s.String())
|
||||
return Format(strings.Join(terms, "; "))
|
||||
}
|
||||
|
||||
// FormatType deduces an overall FormatType for the given format.
|
||||
|
||||
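The expfmt hunks above re-export the Fmt* constants only as deprecated aliases and point users at the constructor helpers instead. A small sketch of the recommended calls, grounded on the functions shown in the hunks (printed output is whatever content-type string the library produces):

package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	// Build a Format via the constructor instead of comparing against the
	// deprecated Fmt* constants.
	f := expfmt.NewFormat(expfmt.TypeProtoDelim)
	fmt.Println(f)

	// Append an escaping scheme; an existing one would be replaced.
	fmt.Println(f.WithEscapingScheme(model.UnderscoreEscaping))

	// OpenMetrics needs an explicit version and may return an error.
	om, err := expfmt.NewOpenMetricsFormat(expfmt.OpenMetricsVersion_1_0_0)
	if err != nil {
		panic(err)
	}
	fmt.Println(om)
}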
vendor/github.com/prometheus/common/expfmt/openmetrics_create.go (generated, vendored, 10 changed lines)
@@ -38,7 +38,7 @@ type EncoderOption func(*encoderOption)
|
||||
|
||||
// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder
|
||||
// to include _created lines (See
|
||||
// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1).
|
||||
// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1).
|
||||
// Created timestamps can improve the accuracy of series reset detection, but
|
||||
// come with a bandwidth cost.
|
||||
//
|
||||
@@ -102,7 +102,7 @@ func WithUnit() EncoderOption {
|
||||
//
|
||||
// - According to the OM specs, the `# UNIT` line is optional, but if populated,
|
||||
// the unit has to be present in the metric name as its suffix:
|
||||
// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit).
|
||||
// (see https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#unit).
|
||||
// However, in order to accommodate any potential scenario where such a change in the
|
||||
// metric name is not desirable, the users are here given the choice of either explicitly
|
||||
// opt in, in case they wish for the unit to be included in the output AND in the metric name
|
||||
@@ -152,8 +152,8 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E
|
||||
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") {
|
||||
compliantName = name[:len(name)-6]
|
||||
}
|
||||
if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) {
|
||||
compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit)
|
||||
if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, "_"+*in.Unit) {
|
||||
compliantName = compliantName + "_" + *in.Unit
|
||||
}
|
||||
|
||||
// Comments, first HELP, then TYPE.
|
||||
@@ -477,7 +477,7 @@ func writeOpenMetricsNameAndLabelPairs(
|
||||
if name != "" {
|
||||
// If the name does not pass the legacy validity check, we must put the
|
||||
// metric name inside the braces, quoted.
|
||||
if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
|
||||
if !model.IsValidLegacyMetricName(name) {
|
||||
metricInsideBraces = true
|
||||
err := w.WriteByte(separator)
|
||||
written++
|
||||
|
||||
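A sketch (not from the PR) of the encoder path the hunks above adjust: MetricFamilyToOpenMetrics with the WithUnit option, which ensures the unit appears as the metric-name suffix via the plain string concatenation shown above. The metric family below is hand-built purely for illustration.

    package main

    import (
        "os"

        dto "github.com/prometheus/client_model/go"
        "google.golang.org/protobuf/proto"

        "github.com/prometheus/common/expfmt"
    )

    func main() {
        // Hand-built counter family; in real code this usually comes from a Gatherer.
        mf := &dto.MetricFamily{
            Name: proto.String("http_requests_total"),
            Type: dto.MetricType_COUNTER.Enum(),
            Unit: proto.String("requests"),
            Metric: []*dto.Metric{
                {Counter: &dto.Counter{Value: proto.Float64(42)}},
            },
        }
        // WithUnit makes sure the unit suffix is present in the compliant name;
        // WithCreatedLines would additionally emit _created series.
        if _, err := expfmt.MetricFamilyToOpenMetrics(os.Stdout, mf, expfmt.WithUnit()); err != nil {
            panic(err)
        }
    }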
4  vendor/github.com/prometheus/common/expfmt/text_create.go  generated  vendored
@@ -354,7 +354,7 @@ func writeNameAndLabelPairs(
if name != "" {
// If the name does not pass the legacy validity check, we must put the
// metric name inside the braces.
if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
if !model.IsValidLegacyMetricName(name) {
metricInsideBraces = true
err := w.WriteByte(separator)
written++
@@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) {
// writeName writes a string as-is if it complies with the legacy naming
// scheme, or escapes it in double quotes if not.
func writeName(w enhancedWriter, name string) (int, error) {
if model.IsValidLegacyMetricName(model.LabelValue(name)) {
if model.IsValidLegacyMetricName(name) {
return w.WriteString(name)
}
var written int
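For code outside the vendored tree, the relevant change in these two writers is the new IsValidLegacyMetricName signature, which now takes a plain string instead of a model.LabelValue. A minimal caller-side sketch (the metric names are made up):

    package main

    import (
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        // In the previously vendored version (v0.55.0) this required wrapping:
        //   model.IsValidLegacyMetricName(model.LabelValue(name))
        // With the version vendored here (v0.62.0) the argument is a plain string.
        fmt.Println(model.IsValidLegacyMetricName("http_requests_total")) // true
        fmt.Println(model.IsValidLegacyMetricName("http.requests.total")) // false: '.' is not legacy-valid
    }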
164  vendor/github.com/prometheus/common/expfmt/text_parse.go  generated  vendored
@@ -22,9 +22,9 @@ import (
"math"
"strconv"
"strings"
"unicode/utf8"

dto "github.com/prometheus/client_model/go"

"google.golang.org/protobuf/proto"

"github.com/prometheus/common/model"
@@ -60,6 +60,7 @@ type TextParser struct {
currentMF *dto.MetricFamily
currentMetric *dto.Metric
currentLabelPair *dto.LabelPair
currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line.

// The remaining member variables are only used for summaries/histograms.
currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
@@ -74,6 +75,9 @@ type TextParser struct {
// count and sum of that summary/histogram.
currentIsSummaryCount, currentIsSummarySum bool
currentIsHistogramCount, currentIsHistogramSum bool
// These indicate if the metric name from the current line being parsed is inside
// braces and if that metric name was found respectively.
currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool
}

// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
@@ -137,12 +141,15 @@ func (p *TextParser) reset(in io.Reader) {
}
p.currentQuantile = math.NaN()
p.currentBucket = math.NaN()
p.currentMF = nil
}

// startOfLine represents the state where the next byte read from p.buf is the
// start of a line (or whitespace leading up to it).
func (p *TextParser) startOfLine() stateFn {
p.lineCount++
p.currentMetricIsInsideBraces = false
p.currentMetricInsideBracesIsPresent = false
if p.skipBlankTab(); p.err != nil {
// This is the only place that we expect to see io.EOF,
// which is not an error but the signal that we are done.
@@ -158,6 +165,9 @@ func (p *TextParser) startOfLine() stateFn {
return p.startComment
case '\n':
return p.startOfLine // Empty line, start the next one.
case '{':
p.currentMetricIsInsideBraces = true
return p.readingLabels
}
return p.readingMetricName
}
@@ -275,6 +285,8 @@ func (p *TextParser) startLabelName() stateFn {
return nil // Unexpected end of input.
}
if p.currentByte == '}' {
p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
p.currentLabelPairs = nil
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
@@ -287,6 +299,45 @@ func (p *TextParser) startLabelName() stateFn {
p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
return nil
}
if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
if p.currentByte != '=' {
if p.currentMetricIsInsideBraces {
if p.currentMetricInsideBracesIsPresent {
p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName()))
return nil
}
switch p.currentByte {
case ',':
p.setOrCreateCurrentMF()
if p.currentMF.Type == nil {
p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
}
p.currentMetric = &dto.Metric{}
p.currentMetricInsideBracesIsPresent = true
return p.startLabelName
case '}':
p.setOrCreateCurrentMF()
if p.currentMF.Type == nil {
p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
}
p.currentMetric = &dto.Metric{}
p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
p.currentLabelPairs = nil
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
return p.readingValue
default:
p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte))
return nil
}
}
p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
p.currentLabelPairs = nil
return nil
}
p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
@@ -296,23 +347,17 @@ func (p *TextParser) startLabelName() stateFn {
// labels to 'real' labels.
if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
}
if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
if p.currentByte != '=' {
p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
return nil
p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
}
// Check for duplicate label names.
labels := make(map[string]struct{})
for _, l := range p.currentMetric.Label {
for _, l := range p.currentLabelPairs {
lName := l.GetName()
if _, exists := labels[lName]; !exists {
labels[lName] = struct{}{}
} else {
p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
p.currentLabelPairs = nil
return nil
}
}
@@ -345,6 +390,7 @@ func (p *TextParser) startLabelValue() stateFn {
if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
// Create a more helpful error message.
p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
p.currentLabelPairs = nil
return nil
}
} else {
@@ -371,12 +417,19 @@ func (p *TextParser) startLabelValue() stateFn {
return p.startLabelName

case '}':
if p.currentMF == nil {
p.parseError("invalid metric name")
return nil
}
p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
p.currentLabelPairs = nil
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
return p.readingValue
default:
p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
p.currentLabelPairs = nil
return nil
}
}
@@ -585,6 +638,8 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
p.currentToken.WriteByte(p.currentByte)
case 'n':
p.currentToken.WriteByte('\n')
case '"':
p.currentToken.WriteByte('"')
default:
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
return
@@ -610,13 +665,45 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
// but not into p.currentToken.
func (p *TextParser) readTokenAsMetricName() {
p.currentToken.Reset()
// A UTF-8 metric name must be quoted and may have escaped characters.
quoted := false
escaped := false
if !isValidMetricNameStart(p.currentByte) {
return
}
for {
p.currentToken.WriteByte(p.currentByte)
for p.err == nil {
if escaped {
switch p.currentByte {
case '\\':
p.currentToken.WriteByte(p.currentByte)
case 'n':
p.currentToken.WriteByte('\n')
case '"':
p.currentToken.WriteByte('"')
default:
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
return
}
escaped = false
} else {
switch p.currentByte {
case '"':
quoted = !quoted
if !quoted {
p.currentByte, p.err = p.buf.ReadByte()
return
}
case '\n':
p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String()))
return
case '\\':
escaped = true
default:
p.currentToken.WriteByte(p.currentByte)
}
}
p.currentByte, p.err = p.buf.ReadByte()
if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') {
return
}
}
@@ -628,13 +715,45 @@ func (p *TextParser) readTokenAsMetricName() {
// but not into p.currentToken.
func (p *TextParser) readTokenAsLabelName() {
p.currentToken.Reset()
// A UTF-8 label name must be quoted and may have escaped characters.
quoted := false
escaped := false
if !isValidLabelNameStart(p.currentByte) {
return
}
for {
p.currentToken.WriteByte(p.currentByte)
for p.err == nil {
if escaped {
switch p.currentByte {
case '\\':
p.currentToken.WriteByte(p.currentByte)
case 'n':
p.currentToken.WriteByte('\n')
case '"':
p.currentToken.WriteByte('"')
default:
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
return
}
escaped = false
} else {
switch p.currentByte {
case '"':
quoted = !quoted
if !quoted {
p.currentByte, p.err = p.buf.ReadByte()
return
}
case '\n':
p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String()))
return
case '\\':
escaped = true
default:
p.currentToken.WriteByte(p.currentByte)
}
}
p.currentByte, p.err = p.buf.ReadByte()
if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') {
return
}
}
@@ -660,6 +779,7 @@ func (p *TextParser) readTokenAsLabelValue() {
p.currentToken.WriteByte('\n')
default:
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
p.currentLabelPairs = nil
return
}
escaped = false
@@ -718,19 +838,19 @@ func (p *TextParser) setOrCreateCurrentMF() {
}

func isValidLabelNameStart(b byte) bool {
return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"'
}

func isValidLabelNameContinuation(b byte) bool {
return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
func isValidLabelNameContinuation(b byte, quoted bool) bool {
return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b)))
}

func isValidMetricNameStart(b byte) bool {
return isValidLabelNameStart(b) || b == ':'
}

func isValidMetricNameContinuation(b byte) bool {
return isValidLabelNameContinuation(b) || b == ':'
func isValidMetricNameContinuation(b byte, quoted bool) bool {
return isValidLabelNameContinuation(b, quoted) || b == ':'
}

func isBlankOrTab(b byte) bool {
@@ -775,7 +895,7 @@ func histogramMetricName(name string) string {

func parseFloat(s string) (float64, error) {
if strings.ContainsAny(s, "pP_") {
return 0, fmt.Errorf("unsupported character in float")
return 0, errors.New("unsupported character in float")
}
return strconv.ParseFloat(s, 64)
}
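The parser changes above add handling for metric names that appear quoted inside the label braces (UTF-8 names). A rough sketch of exercising that path through the exported TextParser; the sample input line is an assumption about the quoted-name text syntax, not something taken from this diff:

    package main

    import (
        "fmt"
        "strings"

        "github.com/prometheus/common/expfmt"
    )

    func main() {
        // Assumed input: the metric name is quoted inside the braces, which is
        // the case the new currentMetricIsInsideBraces handling deals with.
        input := "{\"my.utf8.metric\",code=\"200\"} 3\n"

        var parser expfmt.TextParser
        families, err := parser.TextToMetricFamilies(strings.NewReader(input))
        if err != nil {
            fmt.Println("parse error:", err)
            return
        }
        for name := range families {
            fmt.Println(name) // expected: my.utf8.metric
        }
    }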
7  vendor/github.com/prometheus/common/model/alert.go  generated  vendored
@@ -14,6 +14,7 @@
package model

import (
"errors"
"fmt"
"time"
)
@@ -89,16 +90,16 @@ func (a *Alert) StatusAt(ts time.Time) AlertStatus {
// Validate checks whether the alert data is inconsistent.
func (a *Alert) Validate() error {
if a.StartsAt.IsZero() {
return fmt.Errorf("start time missing")
return errors.New("start time missing")
}
if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
return fmt.Errorf("start time must be before end time")
return errors.New("start time must be before end time")
}
if err := a.Labels.Validate(); err != nil {
return fmt.Errorf("invalid label set: %w", err)
}
if len(a.Labels) == 0 {
return fmt.Errorf("at least one label pair required")
return errors.New("at least one label pair required")
}
if err := a.Annotations.Validate(); err != nil {
return fmt.Errorf("invalid annotations: %w", err)
27  vendor/github.com/prometheus/common/model/labels.go  generated  vendored
@@ -97,26 +97,35 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
// therewith.
type LabelName string

// IsValid returns true iff name matches the pattern of LabelNameRE for legacy
// names, and iff it's valid UTF-8 if NameValidationScheme is set to
// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the
// check but a much faster hardcoded implementation.
// IsValid returns true iff the name matches the pattern of LabelNameRE when
// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if
// NameValidationScheme is set to UTF8Validation.
func (ln LabelName) IsValid() bool {
if len(ln) == 0 {
return false
}
switch NameValidationScheme {
case LegacyValidation:
for i, b := range ln {
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
return false
}
}
return ln.IsValidLegacy()
case UTF8Validation:
return utf8.ValidString(string(ln))
default:
panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
}
}

// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for
// legacy names. It does not use LabelNameRE for the check but a much faster
// hardcoded implementation.
func (ln LabelName) IsValidLegacy() bool {
if len(ln) == 0 {
return false
}
for i, b := range ln {
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
return false
}
}
return true
}
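A short sketch (not part of the diff) contrasting the two validators above: IsValid follows the package-level NameValidationScheme, while the new IsValidLegacy always applies the old character rules.

    package main

    import (
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        name := model.LabelName("label.with.dots")

        // IsValid consults NameValidationScheme, which the metric.go hunks
        // further down now default to UTF8Validation, so a dotted name passes.
        fmt.Println(name.IsValid()) // true under UTF8Validation

        // IsValidLegacy always enforces [a-zA-Z_][a-zA-Z0-9_]*.
        fmt.Println(name.IsValidLegacy()) // false
    }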
2  vendor/github.com/prometheus/common/model/labelset_string.go  generated  vendored
@@ -11,8 +11,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build go1.21

package model

import (
39  vendor/github.com/prometheus/common/model/labelset_string_go120.go  generated  vendored
@@ -1,39 +0,0 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !go1.21

package model

import (
"fmt"
"sort"
"strings"
)

// String was optimized using functions not available for go 1.20
// or lower. We keep the old implementation for compatibility with client_golang.
// Once client golang drops support for go 1.20 (scheduled for August 2024), this
// file can be removed.
func (l LabelSet) String() string {
labelNames := make([]string, 0, len(l))
for name := range l {
labelNames = append(labelNames, string(name))
}
sort.Strings(labelNames)
lstrs := make([]string, 0, len(l))
for _, name := range labelNames {
lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)]))
}
return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
}
76  vendor/github.com/prometheus/common/model/metric.go  generated  vendored
@@ -14,9 +14,11 @@
package model

import (
"errors"
"fmt"
"regexp"
"sort"
"strconv"
"strings"
"unicode/utf8"

@@ -26,18 +28,21 @@ import (

var (
// NameValidationScheme determines the method of name validation to be used by
// all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode
// in isolation from other components that don't support UTF-8 may result in
// bugs or other undefined behavior. This value is intended to be set by
// UTF-8-aware binaries as part of their startup. To avoid need for locking,
// this value should be set once, ideally in an init(), before multiple
// goroutines are started.
NameValidationScheme = LegacyValidation
// all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8
// mode in isolation from other components that don't support UTF-8 may result
// in bugs or other undefined behavior. This value can be set to
// LegacyValidation during startup if a binary is not UTF-8-aware binaries. To
// avoid need for locking, this value should be set once, ideally in an
// init(), before multiple goroutines are started.
NameValidationScheme = UTF8Validation

// NameEscapingScheme defines the default way that names will be
// escaped when presented to systems that do not support UTF-8 names. If the
// Content-Type "escaping" term is specified, that will override this value.
NameEscapingScheme = ValueEncodingEscaping
// NameEscapingScheme defines the default way that names will be escaped when
// presented to systems that do not support UTF-8 names. If the Content-Type
// "escaping" term is specified, that will override this value.
// NameEscapingScheme should not be set to the NoEscaping value. That string
// is used in content negotiation to indicate that a system supports UTF-8 and
// has that feature enabled.
NameEscapingScheme = UnderscoreEscaping
)

// ValidationScheme is a Go enum for determining how metric and label names will
@@ -161,7 +166,7 @@ func (m Metric) FastFingerprint() Fingerprint {
func IsValidMetricName(n LabelValue) bool {
switch NameValidationScheme {
case LegacyValidation:
return IsValidLegacyMetricName(n)
return IsValidLegacyMetricName(string(n))
case UTF8Validation:
if len(n) == 0 {
return false
@@ -176,7 +181,7 @@ func IsValidMetricName(n LabelValue) bool {
// legacy validation scheme regardless of the value of NameValidationScheme.
// This function, however, does not use MetricNameRE for the check but a much
// faster hardcoded implementation.
func IsValidLegacyMetricName(n LabelValue) bool {
func IsValidLegacyMetricName(n string) bool {
if len(n) == 0 {
return false
}
@@ -208,7 +213,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
}

// If the name is nil, copy as-is, don't try to escape.
if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) {
if v.Name == nil || IsValidLegacyMetricName(v.GetName()) {
out.Name = v.Name
} else {
out.Name = proto.String(EscapeName(v.GetName(), scheme))
@@ -230,7 +235,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF

for _, l := range m.Label {
if l.GetName() == MetricNameLabel {
if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) {
if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) {
escaped.Label = append(escaped.Label, l)
continue
}
@@ -240,7 +245,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
})
continue
}
if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) {
if l.Name == nil || IsValidLegacyMetricName(l.GetName()) {
escaped.Label = append(escaped.Label, l)
continue
}
@@ -256,20 +261,16 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF

func metricNeedsEscaping(m *dto.Metric) bool {
for _, l := range m.Label {
if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) {
if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) {
return true
}
if !IsValidLegacyMetricName(LabelValue(l.GetName())) {
if !IsValidLegacyMetricName(l.GetName()) {
return true
}
}
return false
}

const (
lowerhex = "0123456789abcdef"
)

// EscapeName escapes the incoming name according to the provided escaping
// scheme. Depending on the rules of escaping, this may cause no change in the
// string that is returned. (Especially NoEscaping, which by definition is a
@@ -283,7 +284,7 @@ func EscapeName(name string, scheme EscapingScheme) string {
case NoEscaping:
return name
case UnderscoreEscaping:
if IsValidLegacyMetricName(LabelValue(name)) {
if IsValidLegacyMetricName(name) {
return name
}
for i, b := range name {
@@ -304,31 +305,25 @@ func EscapeName(name string, scheme EscapingScheme) string {
} else if isValidLegacyRune(b, i) {
escaped.WriteRune(b)
} else {
escaped.WriteRune('_')
escaped.WriteString("__")
}
}
return escaped.String()
case ValueEncodingEscaping:
if IsValidLegacyMetricName(LabelValue(name)) {
if IsValidLegacyMetricName(name) {
return name
}
escaped.WriteString("U__")
for i, b := range name {
if isValidLegacyRune(b, i) {
if b == '_' {
escaped.WriteString("__")
} else if isValidLegacyRune(b, i) {
escaped.WriteRune(b)
} else if !utf8.ValidRune(b) {
escaped.WriteString("_FFFD_")
} else if b < 0x100 {
} else {
escaped.WriteRune('_')
for s := 4; s >= 0; s -= 4 {
escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
}
escaped.WriteRune('_')
} else if b < 0x10000 {
escaped.WriteRune('_')
for s := 12; s >= 0; s -= 4 {
escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
}
escaped.WriteString(strconv.FormatInt(int64(b), 16))
escaped.WriteRune('_')
}
}
@@ -386,8 +381,9 @@ func UnescapeName(name string, scheme EscapingScheme) string {
// We think we are in a UTF-8 code, process it.
var utf8Val uint
for j := 0; i < len(escapedName); j++ {
// This is too many characters for a utf8 value.
if j > 4 {
// This is too many characters for a utf8 value based on the MaxRune
// value of '\U0010FFFF'.
if j >= 6 {
return name
}
// Found a closing underscore, convert to a rune, check validity, and append.
@@ -440,7 +436,7 @@ func (e EscapingScheme) String() string {

func ToEscapingScheme(s string) (EscapingScheme, error) {
if s == "" {
return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme")
return NoEscaping, errors.New("got empty string instead of escaping scheme")
}
switch s {
case AllowUTF8:
@@ -452,6 +448,6 @@ func ToEscapingScheme(s string) (EscapingScheme, error) {
case EscapeValues:
return ValueEncodingEscaping, nil
default:
return NoEscaping, fmt.Errorf("unknown format scheme " + s)
return NoEscaping, fmt.Errorf("unknown format scheme %s", s)
}
}
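Two behavioural notes fall out of the metric.go hunks above: the NameValidationScheme default flips from LegacyValidation to UTF8Validation, and the default NameEscapingScheme becomes UnderscoreEscaping. A sketch, using only identifiers shown above, of pinning the legacy scheme and of round-tripping a UTF-8 name through the escaping helpers:

    package main

    import (
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        // A binary that is not UTF-8 aware can pin the old default once at
        // startup, as the updated doc comment above suggests.
        model.NameValidationScheme = model.LegacyValidation

        // EscapeName rewrites a non-legacy name for systems without UTF-8
        // support; UnescapeName reverses the value-encoding variant.
        escaped := model.EscapeName("http.requests.total", model.ValueEncodingEscaping)
        fmt.Println(escaped)
        fmt.Println(model.UnescapeName(escaped, model.ValueEncodingEscaping)) // http.requests.total
    }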
17  vendor/github.com/prometheus/common/model/silence.go  generated  vendored
@@ -15,6 +15,7 @@ package model

import (
"encoding/json"
"errors"
"fmt"
"regexp"
"time"
@@ -34,7 +35,7 @@ func (m *Matcher) UnmarshalJSON(b []byte) error {
}

if len(m.Name) == 0 {
return fmt.Errorf("label name in matcher must not be empty")
return errors.New("label name in matcher must not be empty")
}
if m.IsRegex {
if _, err := regexp.Compile(m.Value); err != nil {
@@ -77,7 +78,7 @@ type Silence struct {
// Validate returns true iff all fields of the silence have valid values.
func (s *Silence) Validate() error {
if len(s.Matchers) == 0 {
return fmt.Errorf("at least one matcher required")
return errors.New("at least one matcher required")
}
for _, m := range s.Matchers {
if err := m.Validate(); err != nil {
@@ -85,22 +86,22 @@ func (s *Silence) Validate() error {
}
}
if s.StartsAt.IsZero() {
return fmt.Errorf("start time missing")
return errors.New("start time missing")
}
if s.EndsAt.IsZero() {
return fmt.Errorf("end time missing")
return errors.New("end time missing")
}
if s.EndsAt.Before(s.StartsAt) {
return fmt.Errorf("start time must be before end time")
return errors.New("start time must be before end time")
}
if s.CreatedBy == "" {
return fmt.Errorf("creator information missing")
return errors.New("creator information missing")
}
if s.Comment == "" {
return fmt.Errorf("comment missing")
return errors.New("comment missing")
}
if s.CreatedAt.IsZero() {
return fmt.Errorf("creation timestamp missing")
return errors.New("creation timestamp missing")
}
return nil
}
3  vendor/github.com/prometheus/common/model/value_float.go  generated  vendored
@@ -15,6 +15,7 @@ package model

import (
"encoding/json"
"errors"
"fmt"
"math"
"strconv"
@@ -39,7 +40,7 @@ func (v SampleValue) MarshalJSON() ([]byte, error) {
// UnmarshalJSON implements json.Unmarshaler.
func (v *SampleValue) UnmarshalJSON(b []byte) error {
if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
return fmt.Errorf("sample value must be a quoted string")
return errors.New("sample value must be a quoted string")
}
f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
if err != nil {
7  vendor/github.com/prometheus/common/model/value_histogram.go  generated  vendored
@@ -15,6 +15,7 @@ package model

import (
"encoding/json"
"errors"
"fmt"
"strconv"
"strings"
@@ -32,7 +33,7 @@ func (v FloatString) MarshalJSON() ([]byte, error) {

func (v *FloatString) UnmarshalJSON(b []byte) error {
if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
return fmt.Errorf("float value must be a quoted string")
return errors.New("float value must be a quoted string")
}
f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
if err != nil {
@@ -141,7 +142,7 @@ type SampleHistogramPair struct {

func (s SampleHistogramPair) MarshalJSON() ([]byte, error) {
if s.Histogram == nil {
return nil, fmt.Errorf("histogram is nil")
return nil, errors.New("histogram is nil")
}
t, err := json.Marshal(s.Timestamp)
if err != nil {
@@ -164,7 +165,7 @@ func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error {
return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
}
if s.Histogram == nil {
return fmt.Errorf("histogram is null")
return errors.New("histogram is null")
}
return nil
}
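The error-construction changes here are mechanical, but the nil-histogram guard is easy to trip over when building SampleHistogramPair values by hand; a small sketch (not from the diff):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        // MarshalJSON rejects a pair without a histogram; after the bump the
        // error text is the same, it is just built with errors.New.
        pair := model.SampleHistogramPair{Timestamp: model.Now()}
        if _, err := json.Marshal(pair); err != nil {
            fmt.Println("expected error:", err)
        }
    }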
24  vendor/modules.txt  vendored
@@ -410,7 +410,7 @@ github.com/emvi/iso-639-1
# github.com/evanphx/json-patch/v5 v5.5.0
## explicit; go 1.12
github.com/evanphx/json-patch/v5
# github.com/fatih/color v1.14.1
# github.com/fatih/color v1.18.0
## explicit; go 1.17
github.com/fatih/color
# github.com/felixge/httpsnoop v1.0.4
@@ -517,7 +517,7 @@ github.com/go-jose/go-jose/v3
github.com/go-jose/go-jose/v3/cipher
github.com/go-jose/go-jose/v3/json
github.com/go-jose/go-jose/v3/jwt
# github.com/go-jose/go-jose/v4 v4.0.2
# github.com/go-jose/go-jose/v4 v4.0.5
## explicit; go 1.21
github.com/go-jose/go-jose/v4
github.com/go-jose/go-jose/v4/cipher
@@ -910,13 +910,13 @@ github.com/longsleep/rndm
# github.com/mattermost/xml-roundtrip-validator v0.1.0
## explicit; go 1.14
github.com/mattermost/xml-roundtrip-validator
# github.com/mattn/go-colorable v0.1.13
## explicit; go 1.15
# github.com/mattn/go-colorable v0.1.14
## explicit; go 1.18
github.com/mattn/go-colorable
# github.com/mattn/go-isatty v0.0.20
## explicit; go 1.15
github.com/mattn/go-isatty
# github.com/mattn/go-runewidth v0.0.15
# github.com/mattn/go-runewidth v0.0.16
## explicit; go 1.9
github.com/mattn/go-runewidth
# github.com/mattn/go-sqlite3 v1.14.24
@@ -1000,8 +1000,8 @@ github.com/nats-io/nats-server/v2/server/certstore
github.com/nats-io/nats-server/v2/server/pse
github.com/nats-io/nats-server/v2/server/stree
github.com/nats-io/nats-server/v2/server/sysmem
# github.com/nats-io/nats.go v1.38.0
## explicit; go 1.20
# github.com/nats-io/nats.go v1.39.1
## explicit; go 1.22.0
github.com/nats-io/nats.go
github.com/nats-io/nats.go/encoders/builtin
github.com/nats-io/nats.go/internal/parser
@@ -1189,7 +1189,7 @@ github.com/open-policy-agent/opa/v1/types
github.com/open-policy-agent/opa/v1/util
github.com/open-policy-agent/opa/v1/util/decoding
github.com/open-policy-agent/opa/v1/version
# github.com/opencloud-eu/reva/v2 v2.27.3-0.20250220153133-2081521720f4
# github.com/opencloud-eu/reva/v2 v2.27.3-0.20250225150735-7d4559bbf520
## explicit; go 1.23.1
github.com/opencloud-eu/reva/v2/cmd/revad/internal/grace
github.com/opencloud-eu/reva/v2/cmd/revad/runtime
@@ -1633,8 +1633,8 @@ github.com/prometheus/alertmanager/matchers/parse
github.com/prometheus/alertmanager/pkg/labels
github.com/prometheus/alertmanager/template
github.com/prometheus/alertmanager/types
# github.com/prometheus/client_golang v1.20.5
## explicit; go 1.20
# github.com/prometheus/client_golang v1.21.0
## explicit; go 1.21
github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil
github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header
github.com/prometheus/client_golang/prometheus
@@ -1644,8 +1644,8 @@ github.com/prometheus/client_golang/prometheus/promhttp
# github.com/prometheus/client_model v0.6.1
## explicit; go 1.19
github.com/prometheus/client_model/go
# github.com/prometheus/common v0.55.0
## explicit; go 1.20
# github.com/prometheus/common v0.62.0
## explicit; go 1.21
github.com/prometheus/common/expfmt
github.com/prometheus/common/model
# github.com/prometheus/procfs v0.15.1