mirror of https://github.com/opencloud-eu/opencloud.git, synced 2026-01-08 05:09:46 -06:00
Merge pull request #195 from opencloud-eu/dependabot/go_modules/github.com/open-policy-agent/opa-1.1.0
Bump github.com/open-policy-agent/opa from 0.70.0 to 1.1.0
go.mod (7 changes)
@@ -63,7 +63,7 @@ require (
	github.com/onsi/ginkgo v1.16.5
	github.com/onsi/ginkgo/v2 v2.22.2
	github.com/onsi/gomega v1.36.2
	github.com/open-policy-agent/opa v0.70.0
	github.com/open-policy-agent/opa v1.1.0
	github.com/opencloud-eu/reva/v2 v2.27.3-0.20250127153848-a84e6c39c206
	github.com/orcaman/concurrent-map v1.0.0
	github.com/owncloud/libre-graph-api-go v1.0.5-0.20240829135935-80dc00d6f5ea
@@ -178,7 +178,7 @@ require (
	github.com/evanphx/json-patch/v5 v5.5.0 // indirect
	github.com/fatih/color v1.14.1 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/fsnotify/fsnotify v1.7.0 // indirect
	github.com/fsnotify/fsnotify v1.8.0 // indirect
	github.com/gdexlab/go-render v1.0.1 // indirect
	github.com/go-acme/lego/v4 v4.4.0 // indirect
	github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect
@@ -216,7 +216,6 @@ require (
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/snappy v0.0.4 // indirect
	github.com/gomodule/redigo v1.9.2 // indirect
	github.com/google/flatbuffers v2.0.8+incompatible // indirect
	github.com/google/go-querystring v1.1.0 // indirect
	github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
	github.com/google/renameio/v2 v2.0.0 // indirect
@@ -297,7 +296,7 @@ require (
	github.com/spf13/pflag v1.0.6 // indirect
	github.com/stretchr/objx v0.5.2 // indirect
	github.com/studio-b12/gowebdav v0.9.0 // indirect
	github.com/tchap/go-patricia/v2 v2.3.1 // indirect
	github.com/tchap/go-patricia/v2 v2.3.2 // indirect
	github.com/tidwall/match v1.1.1 // indirect
	github.com/tidwall/pretty v1.2.1 // indirect
	github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208 // indirect
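This bump crosses OPA's 1.0 major release. The Go module path is unchanged (a v1 module needs no /vN suffix), but OPA 1.x treats Rego v1 as the default policy syntax. Policies written with `import rego.v1` evaluate the same way on both sides of the upgrade, which makes them a safe migration path. A minimal sketch of evaluating such a policy through OPA's Go API — the package name, query, and input below are made up for illustration, not taken from this repository:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/open-policy-agent/opa/rego"
    )

    func main() {
    	// Hypothetical policy. `import rego.v1` makes the v1 keywords
    	// (`if`, `contains`) valid under both OPA 0.70.0 and 1.1.0.
    	const module = `package authz

    import rego.v1

    allow if input.user == "alice"
    `
    	r := rego.New(
    		rego.Query("data.authz.allow"),
    		rego.Module("authz.rego", module),
    		rego.Input(map[string]any{"user": "alice"}),
    	)

    	rs, err := r.Eval(context.Background())
    	if err != nil {
    		panic(err) // parse errors here usually mean pre-1.0 syntax
    	}
    	fmt.Println("allowed:", rs.Allowed())
    }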
go.sum (23 changes)
@@ -207,7 +207,6 @@ github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/ceph/go-ceph v0.30.0 h1:p/+rNnn9dUByrDhXfBFilVriRZKJghMJcts8N2wQ+ws=
github.com/ceph/go-ceph v0.30.0/go.mod h1:OJFju/Xmtb7ihHo/aXOayw6RhVOUGNke5EwTipwaf6A=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -263,10 +262,12 @@ github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS3
github.com/deepmap/oapi-codegen v1.3.11/go.mod h1:suMvK7+rKlx3+tpa8ByptmvoXbAV70wERKTOGH3hLp0=
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I=
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE=
github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg=
github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw=
github.com/dgraph-io/badger/v4 v4.5.1 h1:7DCIXrQjo1LKmM96YD+hLVJ2EEsyyoWxJfpdd56HLps=
github.com/dgraph-io/badger/v4 v4.5.1/go.mod h1:qn3Be0j3TfV4kPbVoK0arXCD1/nr1ftth6sbL5jxdoA=
github.com/dgraph-io/ristretto v0.2.0 h1:XAfl+7cmoUDWW/2Lx8TGZQjjxIQ2Ley9DSf52dru4WE=
github.com/dgraph-io/ristretto v0.2.0/go.mod h1:8uBHCU/PBV4Ag0CJrP47b9Ofby5dqWNh4FicAdoqFNU=
github.com/dgraph-io/ristretto/v2 v2.1.0 h1:59LjpOJLNDULHh8MC4UaegN52lC4JnO2dITsie/Pa8I=
github.com/dgraph-io/ristretto/v2 v2.1.0/go.mod h1:uejeqfYXpUomfse0+lO+13ATz4TypQYLJZzBSAemuB4=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
@@ -320,8 +321,8 @@ github.com/fschade/icap-client v0.0.0-20240802074440-aade4a234387 h1:Y3wZgTr29sL
github.com/fschade/icap-client v0.0.0-20240802074440-aade4a234387/go.mod h1:HpntrRsQA6RKNXy2Nbr4kVj+NO3OYWpAQUVxeya+3sU=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
github.com/gdexlab/go-render v1.0.1 h1:rxqB3vo5s4n1kF0ySmoNeSPRYkEsyHgln4jFIQY7v0U=
@@ -506,8 +507,8 @@ github.com/gomodule/redigo v1.9.2 h1:HrutZBLhSIU8abiSfW8pj8mPhOyMYjZT/wcA4/L9L9s
github.com/gomodule/redigo v1.9.2/go.mod h1:KsU3hiK/Ay8U42qpaJk+kuNa3C+spxapWpM+ywhcgtw=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/flatbuffers v2.0.8+incompatible h1:ivUb1cGomAB101ZM1T0nOiWz9pSrTMoa9+EiY7igmkM=
github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/flatbuffers v24.12.23+incompatible h1:ubBKR94NR4pXUCY/MUsRVzd9umNW7ht7EG9hHfS9FX8=
github.com/google/flatbuffers v24.12.23+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -859,8 +860,8 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/open-policy-agent/opa v0.70.0 h1:B3cqCN2iQAyKxK6+GI+N40uqkin+wzIrM7YA60t9x1U=
github.com/open-policy-agent/opa v0.70.0/go.mod h1:Y/nm5NY0BX0BqjBriKUiV81sCl8XOjjvqQG7dXrggtI=
github.com/open-policy-agent/opa v1.1.0 h1:HMz2evdEMTyNqtdLjmu3Vyx06BmhNYAx67Yz3Ll9q2s=
github.com/open-policy-agent/opa v1.1.0/go.mod h1:T1pASQ1/vwfTa+e2fYcfpLCvWgYtqtiUv+IuA/dLPQs=
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250127153848-a84e6c39c206 h1:sTbtA2hU40r6eh24aswG0oP7NiJrVyEiqM1nn72TrHA=
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250127153848-a84e6c39c206/go.mod h1:lk0GfBt0cLaOcc1nWJikinTK5ibFtKRxp10ATxtCalU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
@@ -1068,8 +1069,8 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM=
github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
github.com/test-go/testify v1.1.4 h1:Tf9lntrKUMHiXQ07qBScBTSA0dhYQlu83hswqelv1iE=
github.com/test-go/testify v1.1.4/go.mod h1:rH7cfJo/47vWGdi4GPj16x3/t1xGOj2YxzmNQzk2ghU=
github.com/thanhpk/randstr v1.0.6 h1:psAOktJFD4vV9NEVb3qkhRSMvYh4ORRaj1+w/hn4B+o=
vendor/github.com/fsnotify/fsnotify/.cirrus.yml (generated, vendored; 7 changes)
@@ -1,7 +1,7 @@
freebsd_task:
  name: 'FreeBSD'
  freebsd_instance:
    image_family: freebsd-13-2
    image_family: freebsd-14-1
  install_script:
    - pkg update -f
    - pkg install -y go
@@ -9,5 +9,6 @@ freebsd_task:
    # run tests as user "cirrus" instead of root
    - pw useradd cirrus -m
    - chown -R cirrus:cirrus .
    - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
    - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
    - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./...
vendor/github.com/fsnotify/fsnotify/.editorconfig (generated, vendored; 12 changes)
@@ -1,12 +0,0 @@
root = true

[*.go]
indent_style = tab
indent_size = 4
insert_final_newline = true

[*.{yml,yaml}]
indent_style = space
indent_size = 2
insert_final_newline = true
trim_trailing_whitespace = true
vendor/github.com/fsnotify/fsnotify/.gitattributes (generated, vendored; 1 change)
@@ -1 +0,0 @@
go.sum linguist-generated
vendor/github.com/fsnotify/fsnotify/.gitignore (generated, vendored; 3 changes)
@@ -5,3 +5,6 @@
# Output of go build ./cmd/fsnotify
/fsnotify
/fsnotify.exe

/test/kqueue
/test/a.out
vendor/github.com/fsnotify/fsnotify/CHANGELOG.md (generated, vendored; 34 changes)
@@ -1,8 +1,36 @@
# Changelog

Unreleased
----------
Nothing yet.

1.8.0 2024-10-31
----------------

### Additions

- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619])

### Changes and fixes

- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610])
- kqueue: ignore events with Ident=0 ([#590])
- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617])
- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625])
- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620])
- inotify: fix panic when calling Remove() in a goroutine ([#650])
- fen: allow watching subdirectories of watched directories ([#621])

[#590]: https://github.com/fsnotify/fsnotify/pull/590
[#610]: https://github.com/fsnotify/fsnotify/pull/610
[#617]: https://github.com/fsnotify/fsnotify/pull/617
[#619]: https://github.com/fsnotify/fsnotify/pull/619
[#620]: https://github.com/fsnotify/fsnotify/pull/620
[#621]: https://github.com/fsnotify/fsnotify/pull/621
[#625]: https://github.com/fsnotify/fsnotify/pull/625
[#650]: https://github.com/fsnotify/fsnotify/pull/650

1.7.0 - 2023-10-22
------------------
vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md (generated, vendored; 120 changes)
@@ -1,7 +1,7 @@
Thank you for your interest in contributing to fsnotify! We try to review and
merge PRs in a reasonable timeframe, but please be aware that:

- To avoid "wasted" work, please discus changes on the issue tracker first. You
- To avoid "wasted" work, please discuss changes on the issue tracker first. You
  can just send PRs, but they may end up being rejected for one reason or the
  other.

@@ -20,6 +20,124 @@ platforms. Testing different platforms locally can be done with something like

Use the `-short` flag to make the "stress test" run faster.

Writing new tests
-----------------
Scripts in the testdata directory allow creating test cases in a "shell-like"
syntax. The basic format is:

    script

    Output:
    desired output

For example:

    # Create a new empty file with some data.
    watch /
    echo data >/file

    Output:
        create /file
        write /file

Just create a new file to add a new test; select which tests to run with
`-run TestScript/[path]`.

script
------
The script is a "shell-like" script:

    cmd arg arg

Comments are supported with `#`:

    # Comment
    cmd arg arg # Comment

All operations are done in a temp directory; a path like "/foo" is rewritten to
"/tmp/TestFoo/foo".

Arguments can be quoted with `"` or `'`; there are no escapes and they're
functionally identical right now, but this may change in the future, so best to
assume shell-like rules.

    touch "/file with spaces"

End-of-line escapes with `\` are not supported.

### Supported commands

    watch path [ops]    # Watch the path, reporting events for it. Nothing is
                        # watched by default. Optionally a list of ops can be
                        # given, as with AddWith(path, WithOps(...)).
    unwatch path        # Stop watching the path.
    watchlist n         # Assert watchlist length.

    stop                # Stop running the script; for debugging.
    debug [yes/no]      # Enable/disable FSNOTIFY_DEBUG (tests are run in
                          parallel by default, so -parallel=1 is probably a good
                          idea).

    touch path
    mkdir [-p] dir
    ln -s target link   # Only ln -s supported.
    mkfifo path
    mknod dev path
    mv src dst
    rm [-r] path
    chmod mode path     # Octal only
    sleep time-in-ms

    cat path            # Read path (does nothing with the data; just reads it).
    echo str >>path     # Append "str" to "path".
    echo str >path      # Truncate "path" and write "str".

    require reason      # Skip the test if "reason" is true; "skip" and
    skip reason         # "require" behave identical; it supports both for
                        # readability. Possible reasons are:
                        #
                        #   always    Always skip this test.
                        #   symlink   Symlinks are supported (requires admin
                        #             permissions on Windows).
                        #   mkfifo    Platform doesn't support FIFO named sockets.
                        #   mknod     Platform doesn't support device nodes.

output
------
After `Output:` the desired output is given; this is indented by convention, but
that's not required.

The format of that is:

    # Comment
    event path # Comment

    system:
        event path
    system2:
        event path

Every event is one line, and any whitespace between the event and path are
ignored. The path can optionally be surrounded in ". Anything after a "#" is
ignored.

Platform-specific tests can be added after GOOS; for example:

    watch /
    touch /file

    Output:
        # Tested if nothing else matches
        create /file

        # Windows-specific test.
        windows:
            write /file

You can specify multiple platforms with a comma (e.g. "windows, linux:").
"kqueue" is a shortcut for all kqueue systems (BSD, macOS).

[goon]: https://github.com/arp242/goon
[Vagrant]: https://www.vagrantup.com/
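To make the format above concrete, a complete (hypothetical) testdata script combining a watch, two operations, and the expected events could look like this — the file name, paths, and event spellings are illustrative, not taken from the fsnotify test suite:

    # Watch the temp root, then create and delete a file in it.
    watch /
    touch /file
    rm /file

    Output:
        create /file
        remove /file

Selecting it with `-run TestScript/[path]` runs just this case, as noted above.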
vendor/github.com/fsnotify/fsnotify/backend_fen.go (generated, vendored; 324 changes)
@@ -1,8 +1,8 @@
//go:build solaris
// +build solaris

// Note: the documentation on the Watcher type and methods is generated from
// mkdoc.zsh
// FEN backend for illumos (supported) and Solaris (untested, but should work).
//
// See port_create(3c) etc. for docs. https://www.illumos.org/man/3C/port_create

package fsnotify

@@ -12,150 +12,33 @@ import (
	"os"
	"path/filepath"
	"sync"
	"time"

	"github.com/fsnotify/fsnotify/internal"
	"golang.org/x/sys/unix"
)

// Watcher watches a set of paths, delivering events on a channel.
//
// A watcher should not be copied (e.g. pass it by pointer, rather than by
// value).
//
// # Linux notes
//
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
//	fp := os.Open("file")
//	os.Remove("file")  // Triggers Chmod
//	fp.Close()         // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
// for the number of watches per user, and fs.inotify.max_user_instances
// specifies the maximum number of inotify instances per user. Every Watcher you
// create is an "instance", and every path you add is a "watch".
//
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
// /proc/sys/fs/inotify/max_user_instances
//
// To increase them you can use sysctl or write the value to the /proc file:
//
//	# Default values on Linux 5.18
//	sysctl fs.inotify.max_user_watches=124983
//	sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
//	fs.inotify.max_user_watches=124983
//	fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
//
// # kqueue notes (macOS, BSD)
//
// kqueue requires opening a file descriptor for every file that's being watched;
// so if you're watching a directory with five files then that's six file
// descriptors. You will run in to your system's "max open files" limit faster on
// these platforms.
//
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # Windows notes
//
// Paths can be added as "C:\path\to\dir", but forward slashes
// ("C:/path/to/dir") will also work.
//
// When a watched directory is removed it will always send an event for the
// directory itself, but may not send events for all files in that directory.
// Sometimes it will send events for all times, sometimes it will send no
// events, and often only for some files.
//
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
// value that is guaranteed to work with SMB filesystems. If you have many
// events in quick succession this may not be enough, and you will have to use
// [WithBufferSize] to increase the value.
type Watcher struct {
	// Events sends the filesystem change events.
	//
	// fsnotify can send the following events; a "path" here can refer to a
	// file, directory, symbolic link, or special file like a FIFO.
	//
	//   fsnotify.Create    A new path was created; this may be followed by one
	//                      or more Write events if data also gets written to a
	//                      file.
	//
	//   fsnotify.Remove    A path was removed.
	//
	//   fsnotify.Rename    A path was renamed. A rename is always sent with the
	//                      old path as Event.Name, and a Create event will be
	//                      sent with the new name. Renames are only sent for
	//                      paths that are currently watched; e.g. moving an
	//                      unmonitored file into a monitored directory will
	//                      show up as just a Create. Similarly, renaming a file
	//                      to outside a monitored directory will show up as
	//                      only a Rename.
	//
	//   fsnotify.Write     A file or named pipe was written to. A Truncate will
	//                      also trigger a Write. A single "write action"
	//                      initiated by the user may show up as one or multiple
	//                      writes, depending on when the system syncs things to
	//                      disk. For example when compiling a large Go program
	//                      you may get hundreds of Write events, and you may
	//                      want to wait until you've stopped receiving them
	//                      (see the dedup example in cmd/fsnotify).
	//
	//                      Some systems may send Write event for directories
	//                      when the directory content changes.
	//
	//   fsnotify.Chmod     Attributes were changed. On Linux this is also sent
	//                      when a file is removed (or more accurately, when a
	//                      link to an inode is removed). On kqueue it's sent
	//                      when a file is truncated. On Windows it's never
	//                      sent.
type fen struct {
	Events chan Event

	// Errors sends any errors.
	//
	// ErrEventOverflow is used to indicate there are too many events:
	//
	//  - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
	//  - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
	//  - kqueue, fen: Not used.
	Errors chan error

	mu      sync.Mutex
	port    *unix.EventPort
	done    chan struct{}       // Channel for sending a "quit message" to the reader goroutine
	dirs    map[string]struct{} // Explicitly watched directories
	watches map[string]struct{} // Explicitly watched non-directories
	done    chan struct{} // Channel for sending a "quit message" to the reader goroutine
	dirs    map[string]Op // Explicitly watched directories
	watches map[string]Op // Explicitly watched non-directories
}

// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
	return NewBufferedWatcher(0)
func newBackend(ev chan Event, errs chan error) (backend, error) {
	return newBufferedBackend(0, ev, errs)
}

// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
// channel.
//
// The main use case for this is situations with a very large number of events
// where the kernel buffer size can't be increased (e.g. due to lack of
// permissions). An unbuffered Watcher will perform better for almost all use
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
func NewBufferedWatcher(sz uint) (*Watcher, error) {
	w := &Watcher{
		Events:  make(chan Event, sz),
		Errors:  make(chan error),
		dirs:    make(map[string]struct{}),
		watches: make(map[string]struct{}),
func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
	w := &fen{
		Events:  ev,
		Errors:  errs,
		dirs:    make(map[string]Op),
		watches: make(map[string]Op),
		done:    make(chan struct{}),
	}

@@ -171,27 +54,30 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) {

// sendEvent attempts to send an event to the user, returning true if the event
// was put in the channel successfully and false if the watcher has been closed.
func (w *Watcher) sendEvent(name string, op Op) (sent bool) {
func (w *fen) sendEvent(name string, op Op) (sent bool) {
	select {
	case w.Events <- Event{Name: name, Op: op}:
		return true
	case <-w.done:
		return false
	case w.Events <- Event{Name: name, Op: op}:
		return true
	}
}

// sendError attempts to send an error to the user, returning true if the error
// was put in the channel successfully and false if the watcher has been closed.
func (w *Watcher) sendError(err error) (sent bool) {
	select {
	case w.Errors <- err:
func (w *fen) sendError(err error) (sent bool) {
	if err == nil {
		return true
	}
	select {
	case <-w.done:
		return false
	case w.Errors <- err:
		return true
	}
}

func (w *Watcher) isClosed() bool {
func (w *fen) isClosed() bool {
	select {
	case <-w.done:
		return true
@@ -200,8 +86,7 @@ func (w *Watcher) isClosed() bool {
	}
}

// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error {
func (w *fen) Close() error {
	// Take the lock used by associateFile to prevent lingering events from
	// being processed after the close
	w.mu.Lock()
@@ -213,60 +98,21 @@ func (w *Watcher) Close() error {
	return w.port.Close()
}

// Add starts monitoring the path for changes.
//
// A path can only be watched once; watching it more than once is a no-op and will
// not return an error. Paths that do not yet exist on the filesystem cannot be
// watched.
//
// A watch will be automatically removed if the watched path is deleted or
// renamed. The exception is the Windows backend, which doesn't remove the
// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// Returns [ErrClosed] if [Watcher.Close] was called.
//
// See [Watcher.AddWith] for a version that allows adding options.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
// after the watcher is started. Subdirectories are not watched (i.e. it's
// non-recursive).
//
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many programs (especially editors) update files atomically: it
// will write to a temporary file which is then moved to to destination,
// overwriting the original (or some variant thereof). The watcher on the
// original file is now lost, as that no longer exists.
//
// The upshot of this is that a power failure or crash won't leave a
// half-written file.
//
// Watch the parent directory and use Event.Name to filter out files you're not
// interested in. There is an example of this in cmd/fsnotify/file.go.
func (w *Watcher) Add(name string) error { return w.AddWith(name) }
func (w *fen) Add(name string) error { return w.AddWith(name) }

// AddWith is like [Watcher.Add], but allows adding options. When using Add()
// the defaults described below are used.
//
// Possible options are:
//
//   - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
//     other platforms. The default is 64K (65536 bytes).
func (w *Watcher) AddWith(name string, opts ...addOpt) error {
func (w *fen) AddWith(name string, opts ...addOpt) error {
	if w.isClosed() {
		return ErrClosed
	}
	if w.port.PathIsWatched(name) {
		return nil
	if debug {
		fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
			time.Now().Format("15:04:05.000000000"), name)
	}

	_ = getOptions(opts...)
	with := getOptions(opts...)
	if !w.xSupports(with.op) {
		return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
	}

	// Currently we resolve symlinks that were explicitly requested to be
	// watched. Otherwise we would use LStat here.
@@ -283,7 +129,7 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error {
	}

	w.mu.Lock()
	w.dirs[name] = struct{}{}
	w.dirs[name] = with.op
	w.mu.Unlock()
	return nil
}
@@ -294,26 +140,22 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error {
	}

	w.mu.Lock()
	w.watches[name] = struct{}{}
	w.watches[name] = with.op
	w.mu.Unlock()
	return nil
}

// Remove stops monitoring the path for changes.
//
// Directories are always removed non-recursively. For example, if you added
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(name string) error {
func (w *fen) Remove(name string) error {
	if w.isClosed() {
		return nil
	}
	if !w.port.PathIsWatched(name) {
		return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
	}
	if debug {
		fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
			time.Now().Format("15:04:05.000000000"), name)
	}

	// The user has expressed an intent. Immediately remove this name from
	// whichever watch list it might be in. If it's not in there the delete
@@ -346,7 +188,7 @@ func (w *Watcher) Remove(name string) error {
}

// readEvents contains the main loop that runs in a goroutine watching for events.
func (w *Watcher) readEvents() {
func (w *fen) readEvents() {
	// If this function returns, the watcher has been closed and we can close
	// these channels
	defer func() {
@@ -382,17 +224,19 @@ func (w *Watcher) readEvents() {
			continue
		}

		if debug {
			internal.Debug(pevent.Path, pevent.Events)
		}

		err = w.handleEvent(&pevent)
		if err != nil {
			if !w.sendError(err) {
				return
			}
		if !w.sendError(err) {
			return
		}
		}
	}
}

func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
	files, err := os.ReadDir(path)
	if err != nil {
		return err
@@ -418,7 +262,7 @@ func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, ha
// bitmap matches more than one event type (e.g. the file was both modified and
// had the attributes changed between when the association was created and the
// when event was returned)
func (w *Watcher) handleEvent(event *unix.PortEvent) error {
func (w *fen) handleEvent(event *unix.PortEvent) error {
	var (
		events = event.Events
		path   = event.Path
@@ -510,15 +354,9 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error {
	}

	if events&unix.FILE_MODIFIED != 0 {
		if fmode.IsDir() {
			if watchedDir {
				if err := w.updateDirectory(path); err != nil {
					return err
				}
			} else {
				if !w.sendEvent(path, Write) {
					return nil
				}
		if fmode.IsDir() && watchedDir {
			if err := w.updateDirectory(path); err != nil {
				return err
			}
		} else {
			if !w.sendEvent(path, Write) {
@@ -543,7 +381,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error {
	return nil
}

func (w *Watcher) updateDirectory(path string) error {
func (w *fen) updateDirectory(path string) error {
	// The directory was modified, so we must find unwatched entities and watch
	// them. If something was removed from the directory, nothing will happen,
	// as everything else should still be watched.
@@ -563,10 +401,8 @@ func (w *Watcher) updateDirectory(path string) error {
			return err
		}
		err = w.associateFile(path, finfo, false)
		if err != nil {
			if !w.sendError(err) {
				return nil
			}
		if !w.sendError(err) {
			return nil
		}
		if !w.sendEvent(path, Create) {
			return nil
@@ -575,7 +411,7 @@ func (w *Watcher) updateDirectory(path string) error {
	return nil
}

func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error {
func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error {
	if w.isClosed() {
		return ErrClosed
	}
@@ -593,34 +429,34 @@ func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) erro
	// cleared up that discrepancy. The most likely cause is that the event
	// has fired but we haven't processed it yet.
	err := w.port.DissociatePath(path)
	if err != nil && err != unix.ENOENT {
	if err != nil && !errors.Is(err, unix.ENOENT) {
		return err
	}
	}
	// FILE_NOFOLLOW means we watch symlinks themselves rather than their
	// targets.
	events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW
	if follow {
		// We *DO* follow symlinks for explicitly watched entries.
		events = unix.FILE_MODIFIED | unix.FILE_ATTRIB

	var events int
	if !follow {
		// Watch symlinks themselves rather than their targets unless this entry
		// is explicitly watched.
		events |= unix.FILE_NOFOLLOW
	}
	return w.port.AssociatePath(path, stat,
		events,
		stat.Mode())
	if true { // TODO: implement withOps()
		events |= unix.FILE_MODIFIED
	}
	if true {
		events |= unix.FILE_ATTRIB
	}
	return w.port.AssociatePath(path, stat, events, stat.Mode())
}

func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error {
func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error {
	if !w.port.PathIsWatched(path) {
		return nil
	}
	return w.port.DissociatePath(path)
}

// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
// yet removed).
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string {
func (w *fen) WatchList() []string {
	if w.isClosed() {
		return nil
	}
@@ -638,3 +474,11 @@ func (w *Watcher) WatchList() []string {

	return entries
}

func (w *fen) xSupports(op Op) bool {
	if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
		op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
		return false
	}
	return true
}
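The doc comments in the hunks above describe the Events and Errors channels and recommend watching a parent directory rather than individual files. A minimal consumer of the public fsnotify API (which this vendor update does not change) looks roughly like this; the watched path is a placeholder:

    package main

    import (
    	"log"

    	"github.com/fsnotify/fsnotify"
    )

    func main() {
    	w, err := fsnotify.NewWatcher()
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer w.Close()

    	// Watch the parent directory and filter by Event.Name, per the
    	// advice in the doc comments above.
    	if err := w.Add("/tmp/watched-dir"); err != nil { // placeholder path
    		log.Fatal(err)
    	}

    	for {
    		select {
    		case ev, ok := <-w.Events:
    			if !ok { // channel was closed by Close()
    				return
    			}
    			if ev.Op.Has(fsnotify.Write) {
    				log.Println("modified:", ev.Name)
    			}
    		case err, ok := <-w.Errors:
    			if !ok {
    				return
    			}
    			log.Println("error:", err)
    		}
    	}
    }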
594
vendor/github.com/fsnotify/fsnotify/backend_inotify.go
generated
vendored
594
vendor/github.com/fsnotify/fsnotify/backend_inotify.go
generated
vendored
@@ -1,8 +1,4 @@
|
||||
//go:build linux && !appengine
|
||||
// +build linux,!appengine
|
||||
|
||||
// Note: the documentation on the Watcher type and methods is generated from
|
||||
// mkdoc.zsh
|
||||
|
||||
package fsnotify
|
||||
|
||||
@@ -10,127 +6,20 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/fsnotify/fsnotify/internal"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
//
|
||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
||||
// value).
|
||||
//
|
||||
// # Linux notes
|
||||
//
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
||||
// create is an "instance", and every path you add is a "watch".
|
||||
//
|
||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
||||
// /proc/sys/fs/inotify/max_user_instances
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
//
|
||||
// # kqueue notes (macOS, BSD)
|
||||
//
|
||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
||||
// so if you're watching a directory with five files then that's six file
|
||||
// descriptors. You will run in to your system's "max open files" limit faster on
|
||||
// these platforms.
|
||||
//
|
||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # Windows notes
|
||||
//
|
||||
// Paths can be added as "C:\path\to\dir", but forward slashes
|
||||
// ("C:/path/to/dir") will also work.
|
||||
//
|
||||
// When a watched directory is removed it will always send an event for the
|
||||
// directory itself, but may not send events for all files in that directory.
|
||||
// Sometimes it will send events for all times, sometimes it will send no
|
||||
// events, and often only for some files.
|
||||
//
|
||||
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
|
||||
// value that is guaranteed to work with SMB filesystems. If you have many
|
||||
// events in quick succession this may not be enough, and you will have to use
|
||||
// [WithBufferSize] to increase the value.
|
||||
type Watcher struct {
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
// fsnotify can send the following events; a "path" here can refer to a
|
||||
// file, directory, symbolic link, or special file like a FIFO.
|
||||
//
|
||||
// fsnotify.Create A new path was created; this may be followed by one
|
||||
// or more Write events if data also gets written to a
|
||||
// file.
|
||||
//
|
||||
// fsnotify.Remove A path was removed.
|
||||
//
|
||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
||||
// old path as Event.Name, and a Create event will be
|
||||
// sent with the new name. Renames are only sent for
|
||||
// paths that are currently watched; e.g. moving an
|
||||
// unmonitored file into a monitored directory will
|
||||
// show up as just a Create. Similarly, renaming a file
|
||||
// to outside a monitored directory will show up as
|
||||
// only a Rename.
|
||||
//
|
||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
||||
// also trigger a Write. A single "write action"
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, and you may
|
||||
// want to wait until you've stopped receiving them
|
||||
// (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// Some systems may send Write event for directories
|
||||
// when the directory content changes.
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// when a file is truncated. On Windows it's never
|
||||
// sent.
|
||||
type inotify struct {
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
//
|
||||
// ErrEventOverflow is used to indicate there are too many events:
|
||||
//
|
||||
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
|
||||
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
|
||||
// - kqueue, fen: Not used.
|
||||
Errors chan error
|
||||
|
||||
// Store fd here as os.File.Read() will no longer return on close after
|
||||
@@ -139,8 +28,26 @@ type Watcher struct {
|
||||
inotifyFile *os.File
|
||||
watches *watches
|
||||
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||
closeMu sync.Mutex
|
||||
doneMu sync.Mutex
|
||||
doneResp chan struct{} // Channel to respond to Close
|
||||
|
||||
// Store rename cookies in an array, with the index wrapping to 0. Almost
|
||||
// all of the time what we get is a MOVED_FROM to set the cookie and the
|
||||
// next event inotify sends will be MOVED_TO to read it. However, this is
|
||||
// not guaranteed – as described in inotify(7) – and we may get other events
|
||||
// between the two MOVED_* events (including other MOVED_* ones).
|
||||
//
|
||||
// A second issue is that moving a file outside the watched directory will
|
||||
// trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to
|
||||
// read and delete it. So just storing it in a map would slowly leak memory.
|
||||
//
|
||||
// Doing it like this gives us a simple fast LRU-cache that won't allocate.
|
||||
// Ten items should be more than enough for our purpose, and a loop over
|
||||
// such a short array is faster than a map access anyway (not that it hugely
|
||||
// matters since we're talking about hundreds of ns at the most, but still).
|
||||
cookies [10]koekje
|
||||
cookieIndex uint8
|
||||
cookiesMu sync.Mutex
|
||||
}
|
||||
|
||||
type (
|
||||
@@ -150,9 +57,14 @@ type (
|
||||
path map[string]uint32 // pathname → wd
|
||||
}
|
||||
watch struct {
|
||||
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||
path string // Watch path.
|
||||
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||
path string // Watch path.
|
||||
recurse bool // Recursion with ./...?
|
||||
}
|
||||
koekje struct {
|
||||
cookie uint32
|
||||
path string
|
||||
}
|
||||
)
|
||||
|
||||
@@ -179,23 +91,45 @@ func (w *watches) add(ww *watch) {
|
||||
func (w *watches) remove(wd uint32) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
delete(w.path, w.wd[wd].path)
|
||||
watch := w.wd[wd] // Could have had Remove() called. See #616.
|
||||
if watch == nil {
|
||||
return
|
||||
}
|
||||
delete(w.path, watch.path)
|
||||
delete(w.wd, wd)
|
||||
}
|
||||
|
||||
func (w *watches) removePath(path string) (uint32, bool) {
|
||||
func (w *watches) removePath(path string) ([]uint32, error) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
path, recurse := recursivePath(path)
|
||||
wd, ok := w.path[path]
|
||||
if !ok {
|
||||
return 0, false
|
||||
return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path)
|
||||
}
|
||||
|
||||
watch := w.wd[wd]
|
||||
if recurse && !watch.recurse {
|
||||
return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path)
|
||||
}
|
||||
|
||||
delete(w.path, path)
|
||||
delete(w.wd, wd)
|
||||
if !watch.recurse {
|
||||
return []uint32{wd}, nil
|
||||
}
|
||||
|
||||
return wd, true
|
||||
wds := make([]uint32, 0, 8)
|
||||
wds = append(wds, wd)
|
||||
for p, rwd := range w.path {
|
||||
if filepath.HasPrefix(p, path) {
|
||||
delete(w.path, p)
|
||||
delete(w.wd, rwd)
|
||||
wds = append(wds, rwd)
|
||||
}
|
||||
}
|
||||
return wds, nil
|
||||
}
|
||||
|
||||
func (w *watches) byPath(path string) *watch {
|
||||
@@ -236,20 +170,11 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewWatcher creates a new Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
return NewBufferedWatcher(0)
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
return newBufferedBackend(0, ev, errs)
|
||||
}
|
||||
|
||||
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
|
||||
// channel.
|
||||
//
|
||||
// The main use case for this is situations with a very large number of events
|
||||
// where the kernel buffer size can't be increased (e.g. due to lack of
|
||||
// permissions). An unbuffered Watcher will perform better for almost all use
|
||||
// cases, and whenever possible you will be better off increasing the kernel
|
||||
// buffers instead of adding a large userspace buffer.
|
||||
func NewBufferedWatcher(sz uint) (*Watcher, error) {
|
||||
func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
|
||||
// Need to set nonblocking mode for SetDeadline to work, otherwise blocking
|
||||
// I/O operations won't terminate on close.
|
||||
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
|
||||
@@ -257,12 +182,12 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
w := &Watcher{
|
||||
w := &inotify{
|
||||
Events: ev,
|
||||
Errors: errs,
|
||||
fd: fd,
|
||||
inotifyFile: os.NewFile(uintptr(fd), ""),
|
||||
watches: newWatches(),
|
||||
Events: make(chan Event, sz),
|
||||
Errors: make(chan error),
|
||||
done: make(chan struct{}),
|
||||
doneResp: make(chan struct{}),
|
||||
}
|
||||
@@ -272,26 +197,29 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) {
|
||||
}
|
||||
|
||||
// Returns true if the event was sent, or false if watcher is closed.
|
||||
func (w *Watcher) sendEvent(e Event) bool {
|
||||
func (w *inotify) sendEvent(e Event) bool {
|
||||
select {
|
||||
case w.Events <- e:
|
||||
return true
|
||||
case <-w.done:
|
||||
return false
|
||||
case w.Events <- e:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Returns true if the error was sent, or false if watcher is closed.
|
||||
func (w *Watcher) sendError(err error) bool {
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
func (w *inotify) sendError(err error) bool {
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
select {
|
||||
case <-w.done:
|
||||
return false
|
||||
case w.Errors <- err:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Watcher) isClosed() bool {
|
||||
func (w *inotify) isClosed() bool {
|
||||
select {
|
||||
case <-w.done:
|
||||
return true
|
||||
@@ -300,15 +228,14 @@ func (w *Watcher) isClosed() bool {
|
||||
}
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the Events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
w.closeMu.Lock()
|
||||
func (w *inotify) Close() error {
|
||||
w.doneMu.Lock()
|
||||
if w.isClosed() {
|
||||
w.closeMu.Unlock()
|
||||
w.doneMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
close(w.done)
|
||||
w.closeMu.Unlock()
|
||||
w.doneMu.Unlock()
|
||||
|
||||
// Causes any blocking reads to return with an error, provided the file
|
||||
// still supports deadline operations.
|
||||
@@ -323,78 +250,104 @@ func (w *Watcher) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; watching it more than once is a no-op and will
|
||||
// not return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// watched.
|
||||
//
|
||||
// A watch will be automatically removed if the watched path is deleted or
|
||||
// renamed. The exception is the Windows backend, which doesn't remove the
|
||||
// watcher on renames.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// Returns [ErrClosed] if [Watcher.Close] was called.
|
||||
//
|
||||
// See [Watcher.AddWith] for a version that allows adding options.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
||||
// non-recursive).
|
||||
//
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many programs (especially editors) update files atomically: it
|
||||
// will write to a temporary file which is then moved to to destination,
|
||||
// overwriting the original (or some variant thereof). The watcher on the
|
||||
// original file is now lost, as that no longer exists.
|
||||
//
|
||||
// The upshot of this is that a power failure or crash won't leave a
|
||||
// half-written file.
|
||||
//
|
||||
// Watch the parent directory and use Event.Name to filter out files you're not
|
||||
// interested in. There is an example of this in cmd/fsnotify/file.go.
|
||||
func (w *Watcher) Add(name string) error { return w.AddWith(name) }
|
||||
func (w *inotify) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
|
||||
// the defaults described below are used.
|
||||
//
|
||||
// Possible options are:
|
||||
//
|
||||
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
|
||||
// other platforms. The default is 64K (65536 bytes).
|
||||
func (w *Watcher) AddWith(name string, opts ...addOpt) error {
|
||||
func (w *inotify) AddWith(path string, opts ...addOpt) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), path)
|
||||
}
|
||||
|
||||
name = filepath.Clean(name)
|
||||
_ = getOptions(opts...)
|
||||
with := getOptions(opts...)
|
||||
if !w.xSupports(with.op) {
|
||||
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||
}
|
||||
|
||||
var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
|
||||
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
|
||||
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
|
||||
path, recurse := recursivePath(path)
|
||||
if recurse {
|
||||
return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !d.IsDir() {
|
||||
if root == path {
|
||||
return fmt.Errorf("fsnotify: not a directory: %q", path)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
return w.watches.updatePath(name, func(existing *watch) (*watch, error) {
|
||||
// Send a Create event when adding new directory from a recursive
|
||||
// watch; this is for "mkdir -p one/two/three". Usually all those
|
||||
// directories will be created before we can set up watchers on the
|
||||
// subdirectories, so only "one" would be sent as a Create event and
|
||||
// not "one/two" and "one/two/three" (inotifywait -r has the same
|
||||
// problem).
|
||||
if with.sendCreate && root != path {
|
||||
w.sendEvent(Event{Name: root, Op: Create})
|
||||
}
|
||||
|
||||
return w.add(root, with, true)
|
||||
})
|
||||
}
|
||||
|
||||
return w.add(path, with, false)
|
||||
}
|
||||
|
||||
func (w *inotify) add(path string, with withOpts, recurse bool) error {
|
||||
var flags uint32
|
||||
if with.noFollow {
|
||||
flags |= unix.IN_DONT_FOLLOW
|
||||
}
|
||||
if with.op.Has(Create) {
|
||||
flags |= unix.IN_CREATE
|
||||
}
|
||||
if with.op.Has(Write) {
|
||||
flags |= unix.IN_MODIFY
|
||||
}
|
||||
if with.op.Has(Remove) {
|
||||
flags |= unix.IN_DELETE | unix.IN_DELETE_SELF
|
||||
}
|
||||
if with.op.Has(Rename) {
|
||||
flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF
|
||||
}
|
||||
if with.op.Has(Chmod) {
|
||||
flags |= unix.IN_ATTRIB
|
||||
}
|
||||
if with.op.Has(xUnportableOpen) {
|
||||
flags |= unix.IN_OPEN
|
||||
}
|
||||
if with.op.Has(xUnportableRead) {
|
||||
flags |= unix.IN_ACCESS
|
||||
}
|
||||
if with.op.Has(xUnportableCloseWrite) {
|
||||
flags |= unix.IN_CLOSE_WRITE
|
||||
}
|
||||
if with.op.Has(xUnportableCloseRead) {
|
||||
flags |= unix.IN_CLOSE_NOWRITE
|
||||
}
|
||||
return w.register(path, flags, recurse)
|
||||
}
|
||||
|
||||
func (w *inotify) register(path string, flags uint32, recurse bool) error {
|
||||
return w.watches.updatePath(path, func(existing *watch) (*watch, error) {
|
||||
if existing != nil {
|
||||
flags |= existing.flags | unix.IN_MASK_ADD
|
||||
}
|
||||
|
||||
wd, err := unix.InotifyAddWatch(w.fd, name, flags)
|
||||
wd, err := unix.InotifyAddWatch(w.fd, path, flags)
|
||||
if wd == -1 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if existing == nil {
|
||||
return &watch{
|
||||
wd: uint32(wd),
|
||||
path: name,
|
||||
flags: flags,
|
||||
wd: uint32(wd),
|
||||
path: path,
|
||||
flags: flags,
|
||||
recurse: recurse,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -404,49 +357,44 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error {
|
||||
})
|
||||
}
|
||||
|
||||
// Remove stops monitoring the path for changes.
|
||||
//
|
||||
// Directories are always removed non-recursively. For example, if you added
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
func (w *inotify) Remove(name string) error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
return w.remove(filepath.Clean(name))
|
||||
}

func (w *Watcher) remove(name string) error {
	wd, ok := w.watches.removePath(name)
	if !ok {
		return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
func (w *inotify) remove(name string) error {
	wds, err := w.watches.removePath(name)
	if err != nil {
		return err
	}

	success, errno := unix.InotifyRmWatch(w.fd, wd)
	if success == -1 {
		// TODO: Perhaps it's not helpful to return an error here in every case;
		// The only two possible errors are:
		//
		// - EBADF, which happens when w.fd is not a valid file descriptor
		//   of any kind.
		// - EINVAL, which is when fd is not an inotify descriptor or wd
		//   is not a valid watch descriptor. Watch descriptors are
		//   invalidated when they are removed explicitly or implicitly;
		//   explicitly by inotify_rm_watch, implicitly when the file they
		//   are watching is deleted.
		return errno
	for _, wd := range wds {
		_, err := unix.InotifyRmWatch(w.fd, wd)
		if err != nil {
			// TODO: Perhaps it's not helpful to return an error here in every
			// case; the only two possible errors are:
			//
			// EBADF, which happens when w.fd is not a valid file descriptor of
			// any kind.
			//
			// EINVAL, which is when fd is not an inotify descriptor or wd is
			// not a valid watch descriptor. Watch descriptors are invalidated
			// when they are removed explicitly or implicitly; explicitly by
			// inotify_rm_watch, implicitly when the file they are watching is
			// deleted.
			return err
		}
	}
	return nil
}

// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
// yet removed).
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string {
func (w *inotify) WatchList() []string {
	if w.isClosed() {
		return nil
	}
@@ -463,7 +411,7 @@ func (w *Watcher) WatchList() []string {

// readEvents reads from the inotify file descriptor, converts the
// received events into Event objects and sends them via the Events channel
func (w *Watcher) readEvents() {
func (w *inotify) readEvents() {
	defer func() {
		close(w.doneResp)
		close(w.Errors)
@@ -506,15 +454,17 @@ func (w *Watcher) readEvents() {
			continue
		}

		var offset uint32
		// We don't know how many events we just read into the buffer
		// While the offset points to at least one whole event...
		var offset uint32
		for offset <= uint32(n-unix.SizeofInotifyEvent) {
			var (
				// Point "raw" to the event in the buffer
				raw     = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
				mask    = uint32(raw.Mask)
				nameLen = uint32(raw.Len)
				// Move to the next event in the buffer
				next = func() { offset += unix.SizeofInotifyEvent + nameLen }
			)

			if mask&unix.IN_Q_OVERFLOW != 0 {
@@ -523,21 +473,53 @@ func (w *Watcher) readEvents() {
				}
			}

			// If the event happened to the watched directory or the watched file, the kernel
			// doesn't append the filename to the event, but we would like to always fill
			// the "Name" field with a valid filename. We retrieve the path of the watch from
			// the "paths" map.
			// If the event happened to the watched directory or the watched
			// file, the kernel doesn't append the filename to the event, but
			// we would like to always fill the "Name" field with a valid
			// filename. We retrieve the path of the watch from the "paths"
			// map.
			watch := w.watches.byWd(uint32(raw.Wd))
			// Can be nil if Remove() was called in another goroutine for this
			// path in between reading the events from the kernel and reading
			// the internal state. Not much we can do about it, so just skip.
			// See #616.
			if watch == nil {
				next()
				continue
			}

			name := watch.path
			if nameLen > 0 {
				// Point "bytes" at the first byte of the filename
				bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
				// The filename is padded with NULL bytes. TrimRight() gets rid of those.
				name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
			}

			if debug {
				internal.Debug(name, raw.Mask, raw.Cookie)
			}

			if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0
				next()
				continue
			}

			// inotify will automatically remove the watch on deletes; just need
			// to clean our state here.
			if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
			if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
				w.watches.remove(watch.wd)
			}

			// We can't really update the state when a watched path is moved;
			// only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove
			// the watch.
			if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
			if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
				if watch.recurse {
					next() // Do nothing
					continue
				}

				err := w.remove(watch.path)
				if err != nil && !errors.Is(err, ErrNonExistentWatch) {
					if !w.sendError(err) {
@@ -546,34 +528,69 @@ func (w *Watcher) readEvents() {
					}
				}

				var name string
				if watch != nil {
					name = watch.path
				}
				if nameLen > 0 {
					// Point "bytes" at the first byte of the filename
					bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
					// The filename is padded with NULL bytes. TrimRight() gets rid of those.
					name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
				}

				event := w.newEvent(name, mask)

				// Send the events that are not ignored on the events channel
				if mask&unix.IN_IGNORED == 0 {
					if !w.sendEvent(event) {
						return
			// Skip if we're watching both this path and the parent; the parent
			// will already send a delete so no need to do it twice.
			if mask&unix.IN_DELETE_SELF != 0 {
				if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok {
					next()
					continue
				}
			}

				// Move to the next event in the buffer
				offset += unix.SizeofInotifyEvent + nameLen
			ev := w.newEvent(name, mask, raw.Cookie)
			// Need to update watch path for recurse.
			if watch.recurse {
				isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR
				// New directory created: set up watch on it.
				if isDir && ev.Has(Create) {
					err := w.register(ev.Name, watch.flags, true)
					if !w.sendError(err) {
						return
					}

					// This was a directory rename, so we need to update all
					// the children.
					//
					// TODO: this is of course pretty slow; we should use a
					// better data structure for storing all of this, e.g. store
					// children in the watch. I have some code for this in my
					// kqueue refactor we can use in the future. For now I'm
					// okay with this as it's not publicly available.
					// Correctness first, performance second.
					if ev.renamedFrom != "" {
						w.watches.mu.Lock()
						for k, ww := range w.watches.wd {
							if k == watch.wd || ww.path == ev.Name {
								continue
							}
							if strings.HasPrefix(ww.path, ev.renamedFrom) {
								ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1)
								w.watches.wd[k] = ww
							}
						}
						w.watches.mu.Unlock()
					}
				}
			}

			// Send the events that are not ignored on the events channel
			if !w.sendEvent(ev) {
				return
			}
			next()
		}
	}
}

// newEvent returns a platform-independent Event based on an inotify mask.
func (w *Watcher) newEvent(name string, mask uint32) Event {
func (w *inotify) isRecursive(path string) bool {
	ww := w.watches.byPath(path)
	if ww == nil { // path could be a file, so also check the Dir.
		ww = w.watches.byPath(filepath.Dir(path))
	}
	return ww != nil && ww.recurse
}

func (w *inotify) newEvent(name string, mask, cookie uint32) Event {
	e := Event{Name: name}
	if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
		e.Op |= Create
@@ -584,11 +601,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event {
	if mask&unix.IN_MODIFY == unix.IN_MODIFY {
		e.Op |= Write
	}
	if mask&unix.IN_OPEN == unix.IN_OPEN {
		e.Op |= xUnportableOpen
	}
	if mask&unix.IN_ACCESS == unix.IN_ACCESS {
		e.Op |= xUnportableRead
	}
	if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE {
		e.Op |= xUnportableCloseWrite
	}
	if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE {
		e.Op |= xUnportableCloseRead
	}
	if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
		e.Op |= Rename
	}
	if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
		e.Op |= Chmod
	}

	if cookie != 0 {
		if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
			w.cookiesMu.Lock()
			w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name}
			w.cookieIndex++
			if w.cookieIndex > 9 {
				w.cookieIndex = 0
			}
			w.cookiesMu.Unlock()
		} else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
			w.cookiesMu.Lock()
			var prev string
			for _, c := range w.cookies {
				if c.cookie == cookie {
					prev = c.path
					break
				}
			}
			w.cookiesMu.Unlock()
			e.renamedFrom = prev
		}
	}
	return e
}
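The koekje bookkeeping above is what pairs IN_MOVED_FROM with IN_MOVED_TO through the kernel's rename cookie, so the Create for the new name can carry the old one. From the public API it comes out roughly as follows (an editorial sketch, with w a *fsnotify.Watcher watching /tmp/dir; renamedFrom is unexported, so it is only visible through Event.String):

	// After `mv /tmp/dir/old /tmp/dir/new` two events share one cookie:
	//
	//	RENAME  "/tmp/dir/old"
	//	CREATE  "/tmp/dir/new" ← "/tmp/dir/old"
	for ev := range w.Events {
		fmt.Println(ev) // the `← old` suffix appears when the cookie matched
	}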

func (w *inotify) xSupports(op Op) bool {
	return true // Supports everything.
}

func (w *inotify) state() {
	w.watches.mu.Lock()
	defer w.watches.mu.Unlock()
	for wd, ww := range w.watches.wd {
		fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path)
	}
}

747 vendor/github.com/fsnotify/fsnotify/backend_kqueue.go (generated, vendored)
File diff suppressed because it is too large
204 vendor/github.com/fsnotify/fsnotify/backend_other.go (generated, vendored)
@@ -1,205 +1,23 @@
//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows)
// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows

// Note: the documentation on the Watcher type and methods is generated from
// mkdoc.zsh

package fsnotify

import "errors"

// Watcher watches a set of paths, delivering events on a channel.
//
// A watcher should not be copied (e.g. pass it by pointer, rather than by
// value).
//
// # Linux notes
//
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
//	fp := os.Open("file")
//	os.Remove("file")        // Triggers Chmod
//	fp.Close()               // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
// for the number of watches per user, and fs.inotify.max_user_instances
// specifies the maximum number of inotify instances per user. Every Watcher you
// create is an "instance", and every path you add is a "watch".
//
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
// /proc/sys/fs/inotify/max_user_instances
//
// To increase them you can use sysctl or write the value to the /proc file:
//
//	# Default values on Linux 5.18
//	sysctl fs.inotify.max_user_watches=124983
//	sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
//	fs.inotify.max_user_watches=124983
//	fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
//
// # kqueue notes (macOS, BSD)
//
// kqueue requires opening a file descriptor for every file that's being watched;
// so if you're watching a directory with five files then that's six file
// descriptors. You will run into your system's "max open files" limit faster on
// these platforms.
//
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # Windows notes
//
// Paths can be added as "C:\path\to\dir", but forward slashes
// ("C:/path/to/dir") will also work.
//
// When a watched directory is removed it will always send an event for the
// directory itself, but may not send events for all files in that directory.
// Sometimes it will send events for all files, sometimes it will send no
// events, and often only for some files.
//
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
// value that is guaranteed to work with SMB filesystems. If you have many
// events in quick succession this may not be enough, and you will have to use
// [WithBufferSize] to increase the value.
type Watcher struct {
	// Events sends the filesystem change events.
	//
	// fsnotify can send the following events; a "path" here can refer to a
	// file, directory, symbolic link, or special file like a FIFO.
	//
	//   fsnotify.Create    A new path was created; this may be followed by one
	//                      or more Write events if data also gets written to a
	//                      file.
	//
	//   fsnotify.Remove    A path was removed.
	//
	//   fsnotify.Rename    A path was renamed. A rename is always sent with the
	//                      old path as Event.Name, and a Create event will be
	//                      sent with the new name. Renames are only sent for
	//                      paths that are currently watched; e.g. moving an
	//                      unmonitored file into a monitored directory will
	//                      show up as just a Create. Similarly, renaming a file
	//                      to outside a monitored directory will show up as
	//                      only a Rename.
	//
	//   fsnotify.Write     A file or named pipe was written to. A Truncate will
	//                      also trigger a Write. A single "write action"
	//                      initiated by the user may show up as one or multiple
	//                      writes, depending on when the system syncs things to
	//                      disk. For example when compiling a large Go program
	//                      you may get hundreds of Write events, and you may
	//                      want to wait until you've stopped receiving them
	//                      (see the dedup example in cmd/fsnotify).
	//
	//                      Some systems may send Write event for directories
	//                      when the directory content changes.
	//
	//   fsnotify.Chmod     Attributes were changed. On Linux this is also sent
	//                      when a file is removed (or more accurately, when a
	//                      link to an inode is removed). On kqueue it's sent
	//                      when a file is truncated. On Windows it's never
	//                      sent.
type other struct {
	Events chan Event

	// Errors sends any errors.
	//
	// ErrEventOverflow is used to indicate there are too many events:
	//
	//  - inotify:      There are too many queued events (fs.inotify.max_queued_events sysctl)
	//  - windows:      The buffer size is too small; WithBufferSize() can be used to increase it.
	//  - kqueue, fen:  Not used.
	Errors chan error
}

// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
func newBackend(ev chan Event, errs chan error) (backend, error) {
	return nil, errors.New("fsnotify not supported on the current platform")
}

// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
// channel.
//
// The main use case for this is situations with a very large number of events
// where the kernel buffer size can't be increased (e.g. due to lack of
// permissions). An unbuffered Watcher will perform better for almost all use
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() }

// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error { return nil }

// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
// yet removed).
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string { return nil }

// Add starts monitoring the path for changes.
//
// A path can only be watched once; watching it more than once is a no-op and will
// not return an error. Paths that do not yet exist on the filesystem cannot be
// watched.
//
// A watch will be automatically removed if the watched path is deleted or
// renamed. The exception is the Windows backend, which doesn't remove the
// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// Returns [ErrClosed] if [Watcher.Close] was called.
//
// See [Watcher.AddWith] for a version that allows adding options.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
// after the watcher is started. Subdirectories are not watched (i.e. it's
// non-recursive).
//
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many programs (especially editors) update files atomically: it
// will write to a temporary file which is then moved to the destination,
// overwriting the original (or some variant thereof). The watcher on the
// original file is now lost, as that no longer exists.
//
// The upshot of this is that a power failure or crash won't leave a
// half-written file.
//
// Watch the parent directory and use Event.Name to filter out files you're not
// interested in. There is an example of this in cmd/fsnotify/file.go.
func (w *Watcher) Add(name string) error { return nil }

// AddWith is like [Watcher.Add], but allows adding options. When using Add()
// the defaults described below are used.
//
// Possible options are:
//
//   - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
//     other platforms. The default is 64K (65536 bytes).
func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil }

// Remove stops monitoring the path for changes.
//
// Directories are always removed non-recursively. For example, if you added
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(name string) error { return nil }
func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
	return newBackend(ev, errs)
}
func (w *other) Close() error                              { return nil }
func (w *other) WatchList() []string                       { return nil }
func (w *other) Add(name string) error                     { return nil }
func (w *other) AddWith(name string, opts ...addOpt) error { return nil }
func (w *other) Remove(name string) error                  { return nil }
func (w *other) xSupports(op Op) bool                      { return false }

305 vendor/github.com/fsnotify/fsnotify/backend_windows.go (generated, vendored)
@@ -1,12 +1,8 @@
//go:build windows
// +build windows

// Windows backend based on ReadDirectoryChangesW()
//
// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
//
// Note: the documentation on the Watcher type and methods is generated from
// mkdoc.zsh

package fsnotify

@@ -19,123 +15,15 @@ import (
	"runtime"
	"strings"
	"sync"
	"time"
	"unsafe"

	"github.com/fsnotify/fsnotify/internal"
	"golang.org/x/sys/windows"
)

// Watcher watches a set of paths, delivering events on a channel.
//
// A watcher should not be copied (e.g. pass it by pointer, rather than by
// value).
//
// # Linux notes
//
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
//	fp := os.Open("file")
//	os.Remove("file")        // Triggers Chmod
//	fp.Close()               // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
// for the number of watches per user, and fs.inotify.max_user_instances
// specifies the maximum number of inotify instances per user. Every Watcher you
// create is an "instance", and every path you add is a "watch".
//
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
// /proc/sys/fs/inotify/max_user_instances
//
// To increase them you can use sysctl or write the value to the /proc file:
//
//	# Default values on Linux 5.18
//	sysctl fs.inotify.max_user_watches=124983
//	sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
//	fs.inotify.max_user_watches=124983
//	fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
//
// # kqueue notes (macOS, BSD)
//
// kqueue requires opening a file descriptor for every file that's being watched;
// so if you're watching a directory with five files then that's six file
// descriptors. You will run into your system's "max open files" limit faster on
// these platforms.
//
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # Windows notes
//
// Paths can be added as "C:\path\to\dir", but forward slashes
// ("C:/path/to/dir") will also work.
//
// When a watched directory is removed it will always send an event for the
// directory itself, but may not send events for all files in that directory.
// Sometimes it will send events for all files, sometimes it will send no
// events, and often only for some files.
//
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
// value that is guaranteed to work with SMB filesystems. If you have many
// events in quick succession this may not be enough, and you will have to use
// [WithBufferSize] to increase the value.
type Watcher struct {
	// Events sends the filesystem change events.
	//
	// fsnotify can send the following events; a "path" here can refer to a
	// file, directory, symbolic link, or special file like a FIFO.
	//
	//   fsnotify.Create    A new path was created; this may be followed by one
	//                      or more Write events if data also gets written to a
	//                      file.
	//
	//   fsnotify.Remove    A path was removed.
	//
	//   fsnotify.Rename    A path was renamed. A rename is always sent with the
	//                      old path as Event.Name, and a Create event will be
	//                      sent with the new name. Renames are only sent for
	//                      paths that are currently watched; e.g. moving an
	//                      unmonitored file into a monitored directory will
	//                      show up as just a Create. Similarly, renaming a file
	//                      to outside a monitored directory will show up as
	//                      only a Rename.
	//
	//   fsnotify.Write     A file or named pipe was written to. A Truncate will
	//                      also trigger a Write. A single "write action"
	//                      initiated by the user may show up as one or multiple
	//                      writes, depending on when the system syncs things to
	//                      disk. For example when compiling a large Go program
	//                      you may get hundreds of Write events, and you may
	//                      want to wait until you've stopped receiving them
	//                      (see the dedup example in cmd/fsnotify).
	//
	//                      Some systems may send Write event for directories
	//                      when the directory content changes.
	//
	//   fsnotify.Chmod     Attributes were changed. On Linux this is also sent
	//                      when a file is removed (or more accurately, when a
	//                      link to an inode is removed). On kqueue it's sent
	//                      when a file is truncated. On Windows it's never
	//                      sent.
type readDirChangesW struct {
	Events chan Event

	// Errors sends any errors.
	//
	// ErrEventOverflow is used to indicate there are too many events:
	//
	//  - inotify:      There are too many queued events (fs.inotify.max_queued_events sysctl)
	//  - windows:      The buffer size is too small; WithBufferSize() can be used to increase it.
	//  - kqueue, fen:  Not used.
	Errors chan error

	port windows.Handle // Handle to completion port
@@ -147,48 +35,40 @@ type Watcher struct {
	closed bool // Set to true when Close() is first called
}

// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
	return NewBufferedWatcher(50)
func newBackend(ev chan Event, errs chan error) (backend, error) {
	return newBufferedBackend(50, ev, errs)
}

// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
// channel.
//
// The main use case for this is situations with a very large number of events
// where the kernel buffer size can't be increased (e.g. due to lack of
// permissions). An unbuffered Watcher will perform better for almost all use
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
func NewBufferedWatcher(sz uint) (*Watcher, error) {
func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
	port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
	if err != nil {
		return nil, os.NewSyscallError("CreateIoCompletionPort", err)
	}
	w := &Watcher{
	w := &readDirChangesW{
		Events:  ev,
		Errors:  errs,
		port:    port,
		watches: make(watchMap),
		input:   make(chan *input, 1),
		Events:  make(chan Event, sz),
		Errors:  make(chan error),
		quit:    make(chan chan<- error, 1),
	}
	go w.readEvents()
	return w, nil
}

func (w *Watcher) isClosed() bool {
func (w *readDirChangesW) isClosed() bool {
	w.mu.Lock()
	defer w.mu.Unlock()
	return w.closed
}

func (w *Watcher) sendEvent(name string, mask uint64) bool {
func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool {
	if mask == 0 {
		return false
	}

	event := w.newEvent(name, uint32(mask))
	event.renamedFrom = renamedFrom
	select {
	case ch := <-w.quit:
		w.quit <- ch
@@ -198,17 +78,19 @@ func (w *Watcher) sendEvent(name string, mask uint64) bool {
}

// Returns true if the error was sent, or false if watcher is closed.
func (w *Watcher) sendError(err error) bool {
func (w *readDirChangesW) sendError(err error) bool {
	if err == nil {
		return true
	}
	select {
	case w.Errors <- err:
		return true
	case <-w.quit:
		return false
	}
	return false
}

// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error {
func (w *readDirChangesW) Close() error {
	if w.isClosed() {
		return nil
	}
@@ -226,57 +108,21 @@ func (w *Watcher) Close() error {
	return <-ch
}

// Add starts monitoring the path for changes.
//
// A path can only be watched once; watching it more than once is a no-op and will
// not return an error. Paths that do not yet exist on the filesystem cannot be
// watched.
//
// A watch will be automatically removed if the watched path is deleted or
// renamed. The exception is the Windows backend, which doesn't remove the
// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// Returns [ErrClosed] if [Watcher.Close] was called.
//
// See [Watcher.AddWith] for a version that allows adding options.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
// after the watcher is started. Subdirectories are not watched (i.e. it's
// non-recursive).
//
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many programs (especially editors) update files atomically: it
// will write to a temporary file which is then moved to the destination,
// overwriting the original (or some variant thereof). The watcher on the
// original file is now lost, as that no longer exists.
//
// The upshot of this is that a power failure or crash won't leave a
// half-written file.
//
// Watch the parent directory and use Event.Name to filter out files you're not
// interested in. There is an example of this in cmd/fsnotify/file.go.
func (w *Watcher) Add(name string) error { return w.AddWith(name) }
func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) }

// AddWith is like [Watcher.Add], but allows adding options. When using Add()
// the defaults described below are used.
//
// Possible options are:
//
//   - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
//     other platforms. The default is 64K (65536 bytes).
func (w *Watcher) AddWith(name string, opts ...addOpt) error {
func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error {
	if w.isClosed() {
		return ErrClosed
	}
	if debug {
		fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
			time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
	}

	with := getOptions(opts...)
	if !w.xSupports(with.op) {
		return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
	}
	if with.bufsize < 4096 {
		return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes")
	}
@@ -295,18 +141,14 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error {
	return <-in.reply
}

// Remove stops monitoring the path for changes.
//
// Directories are always removed non-recursively. For example, if you added
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(name string) error {
func (w *readDirChangesW) Remove(name string) error {
	if w.isClosed() {
		return nil
	}
	if debug {
		fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
			time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
	}

	in := &input{
		op: opRemoveWatch,
@@ -320,11 +162,7 @@ func (w *Watcher) Remove(name string) error {
	return <-in.reply
}

// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
// yet removed).
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string {
func (w *readDirChangesW) WatchList() []string {
	if w.isClosed() {
		return nil
	}
@@ -335,7 +173,13 @@ func (w *Watcher) WatchList() []string {
	entries := make([]string, 0, len(w.watches))
	for _, entry := range w.watches {
		for _, watchEntry := range entry {
			entries = append(entries, watchEntry.path)
			for name := range watchEntry.names {
				entries = append(entries, filepath.Join(watchEntry.path, name))
			}
			// the directory itself is being watched
			if watchEntry.mask != 0 {
				entries = append(entries, watchEntry.path)
			}
		}
	}

@@ -361,7 +205,7 @@ const (
	sysFSIGNORED = 0x8000
)

func (w *Watcher) newEvent(name string, mask uint32) Event {
func (w *readDirChangesW) newEvent(name string, mask uint32) Event {
	e := Event{Name: name}
	if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
		e.Op |= Create
@@ -417,7 +261,7 @@ type (
	watchMap map[uint32]indexMap
)

func (w *Watcher) wakeupReader() error {
func (w *readDirChangesW) wakeupReader() error {
	err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil)
	if err != nil {
		return os.NewSyscallError("PostQueuedCompletionStatus", err)
@@ -425,7 +269,7 @@ func (w *Watcher) wakeupReader() error {
	return nil
}

func (w *Watcher) getDir(pathname string) (dir string, err error) {
func (w *readDirChangesW) getDir(pathname string) (dir string, err error) {
	attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname))
	if err != nil {
		return "", os.NewSyscallError("GetFileAttributes", err)
@@ -439,7 +283,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) {
	return
}

func (w *Watcher) getIno(path string) (ino *inode, err error) {
func (w *readDirChangesW) getIno(path string) (ino *inode, err error) {
	h, err := windows.CreateFile(windows.StringToUTF16Ptr(path),
		windows.FILE_LIST_DIRECTORY,
		windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE,
@@ -482,9 +326,8 @@ func (m watchMap) set(ino *inode, watch *watch) {
}

// Must run within the I/O thread.
func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error {
	//pathname, recurse := recursivePath(pathname)
	recurse := false
func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error {
	pathname, recurse := recursivePath(pathname)

	dir, err := w.getDir(pathname)
	if err != nil {
@@ -538,7 +381,7 @@ func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error {
}

// Must run within the I/O thread.
func (w *Watcher) remWatch(pathname string) error {
func (w *readDirChangesW) remWatch(pathname string) error {
	pathname, recurse := recursivePath(pathname)

	dir, err := w.getDir(pathname)
@@ -566,11 +409,11 @@ func (w *Watcher) remWatch(pathname string) error {
		return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname)
	}
	if pathname == dir {
		w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
		w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
		watch.mask = 0
	} else {
		name := filepath.Base(pathname)
		w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
		w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED)
		delete(watch.names, name)
	}

@@ -578,23 +421,23 @@ func (w *Watcher) remWatch(pathname string) error {
}

// Must run within the I/O thread.
func (w *Watcher) deleteWatch(watch *watch) {
func (w *readDirChangesW) deleteWatch(watch *watch) {
	for name, mask := range watch.names {
		if mask&provisional == 0 {
			w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
			w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED)
		}
		delete(watch.names, name)
	}
	if watch.mask != 0 {
		if watch.mask&provisional == 0 {
			w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
			w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
		}
		watch.mask = 0
	}
}

// Must run within the I/O thread.
func (w *Watcher) startRead(watch *watch) error {
func (w *readDirChangesW) startRead(watch *watch) error {
	err := windows.CancelIo(watch.ino.handle)
	if err != nil {
		w.sendError(os.NewSyscallError("CancelIo", err))
@@ -624,7 +467,7 @@ func (w *Watcher) startRead(watch *watch) error {
		err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
		if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
			// Watched directory was probably removed
			w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
			w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
			err = nil
		}
		w.deleteWatch(watch)
@@ -637,7 +480,7 @@ func (w *Watcher) startRead(watch *watch) error {
// readEvents reads from the I/O completion port, converts the
// received events into Event objects and sends them via the Events channel.
// Entry point to the I/O thread.
func (w *Watcher) readEvents() {
func (w *readDirChangesW) readEvents() {
	var (
		n   uint32
		key uintptr
@@ -700,7 +543,7 @@ func (w *Watcher) readEvents() {
			}
		case windows.ERROR_ACCESS_DENIED:
			// Watched directory was probably removed
			w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
			w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
			w.deleteWatch(watch)
			w.startRead(watch)
			continue
@@ -733,6 +576,10 @@ func (w *Watcher) readEvents() {
			name := windows.UTF16ToString(buf)
			fullname := filepath.Join(watch.path, name)

			if debug {
				internal.Debug(fullname, raw.Action)
			}

			var mask uint64
			switch raw.Action {
			case windows.FILE_ACTION_REMOVED:
@@ -761,21 +608,22 @@ func (w *Watcher) readEvents() {
				}
			}

			sendNameEvent := func() {
				w.sendEvent(fullname, watch.names[name]&mask)
			}
			if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME {
				sendNameEvent()
				w.sendEvent(fullname, "", watch.names[name]&mask)
			}
			if raw.Action == windows.FILE_ACTION_REMOVED {
				w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
				w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED)
				delete(watch.names, name)
			}

			w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action))
			if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
				w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action))
			} else {
				w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action))
			}

			if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
				fullname = filepath.Join(watch.path, watch.rename)
				sendNameEvent()
				w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask)
			}

			// Move to the next event in the buffer
@@ -787,8 +635,7 @@ func (w *Watcher) readEvents() {
			// Error!
			if offset >= n {
				//lint:ignore ST1005 Windows should be capitalized
				w.sendError(errors.New(
					"Windows system assumed buffer larger than it is, events have likely been missed"))
				w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed"))
				break
			}
		}
@@ -799,7 +646,7 @@ func (w *Watcher) readEvents() {
	}
}

func (w *Watcher) toWindowsFlags(mask uint64) uint32 {
func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 {
	var m uint32
	if mask&sysFSMODIFY != 0 {
		m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
@@ -810,7 +657,7 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 {
	return m
}

func (w *Watcher) toFSnotifyFlags(action uint32) uint64 {
func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 {
	switch action {
	case windows.FILE_ACTION_ADDED:
		return sysFSCREATE
@@ -825,3 +672,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 {
	}
	return 0
}

func (w *readDirChangesW) xSupports(op Op) bool {
	if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
		op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
		return false
	}
	return true
}

368 vendor/github.com/fsnotify/fsnotify/fsnotify.go (generated, vendored)
@@ -3,19 +3,146 @@
//
// Currently supported systems:
//
//	Linux 2.6.32+ via inotify
//	BSD, macOS    via kqueue
//	Windows       via ReadDirectoryChangesW
//	illumos       via FEN
//   - Linux      via inotify
//   - BSD, macOS via kqueue
//   - Windows    via ReadDirectoryChangesW
//   - illumos    via FEN
//
// # FSNOTIFY_DEBUG
//
// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to
// stderr. This can be useful to track down some problems, especially in cases
// where fsnotify is used as an indirect dependency.
//
// Every event will be printed as soon as there's something useful to print,
// with as little processing from fsnotify.
//
// Example output:
//
//	FSNOTIFY_DEBUG: 11:34:23.633087586   256:IN_CREATE → "/tmp/file-1"
//	FSNOTIFY_DEBUG: 11:34:23.633202319     4:IN_ATTRIB → "/tmp/file-1"
//	FSNOTIFY_DEBUG: 11:34:28.989728764   512:IN_DELETE → "/tmp/file-1"
package fsnotify

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// Watcher watches a set of paths, delivering events on a channel.
//
// A watcher should not be copied (e.g. pass it by pointer, rather than by
// value).
//
// # Linux notes
//
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
//	fp := os.Open("file")
//	os.Remove("file")        // Triggers Chmod
//	fp.Close()               // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
// for the number of watches per user, and fs.inotify.max_user_instances
// specifies the maximum number of inotify instances per user. Every Watcher you
// create is an "instance", and every path you add is a "watch".
//
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
// /proc/sys/fs/inotify/max_user_instances
//
// To increase them you can use sysctl or write the value to the /proc file:
//
//	# Default values on Linux 5.18
//	sysctl fs.inotify.max_user_watches=124983
//	sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
//	fs.inotify.max_user_watches=124983
//	fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
//
// # kqueue notes (macOS, BSD)
//
// kqueue requires opening a file descriptor for every file that's being watched;
// so if you're watching a directory with five files then that's six file
// descriptors. You will run into your system's "max open files" limit faster on
// these platforms.
//
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # Windows notes
//
// Paths can be added as "C:\\path\\to\\dir", but forward slashes
// ("C:/path/to/dir") will also work.
//
// When a watched directory is removed it will always send an event for the
// directory itself, but may not send events for all files in that directory.
// Sometimes it will send events for all files, sometimes it will send no
// events, and often only for some files.
//
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
// value that is guaranteed to work with SMB filesystems. If you have many
// events in quick succession this may not be enough, and you will have to use
// [WithBufferSize] to increase the value.
type Watcher struct {
	b backend

	// Events sends the filesystem change events.
	//
	// fsnotify can send the following events; a "path" here can refer to a
	// file, directory, symbolic link, or special file like a FIFO.
	//
	//   fsnotify.Create    A new path was created; this may be followed by one
	//                      or more Write events if data also gets written to a
	//                      file.
	//
	//   fsnotify.Remove    A path was removed.
	//
	//   fsnotify.Rename    A path was renamed. A rename is always sent with the
	//                      old path as Event.Name, and a Create event will be
	//                      sent with the new name. Renames are only sent for
	//                      paths that are currently watched; e.g. moving an
	//                      unmonitored file into a monitored directory will
	//                      show up as just a Create. Similarly, renaming a file
	//                      to outside a monitored directory will show up as
	//                      only a Rename.
	//
	//   fsnotify.Write     A file or named pipe was written to. A Truncate will
	//                      also trigger a Write. A single "write action"
	//                      initiated by the user may show up as one or multiple
	//                      writes, depending on when the system syncs things to
	//                      disk. For example when compiling a large Go program
	//                      you may get hundreds of Write events, and you may
	//                      want to wait until you've stopped receiving them
	//                      (see the dedup example in cmd/fsnotify).
	//
	//                      Some systems may send Write event for directories
	//                      when the directory content changes.
	//
	//   fsnotify.Chmod     Attributes were changed. On Linux this is also sent
	//                      when a file is removed (or more accurately, when a
	//                      link to an inode is removed). On kqueue it's sent
	//                      when a file is truncated. On Windows it's never
	//                      sent.
	Events chan Event

	// Errors sends any errors.
	Errors chan error
}

// Event represents a file system notification.
type Event struct {
	// Path to the file or directory.
@@ -30,6 +157,16 @@ type Event struct {
	// This is a bitmask and some systems may send multiple operations at once.
	// Use the Event.Has() method instead of comparing with ==.
	Op Op

	// Create events will have this set to the old path if it's a rename. This
	// only works when both the source and destination are watched. It's not
	// reliable when watching individual files, only directories.
	//
	// For example "mv /tmp/file /tmp/rename" will emit:
	//
	//	Event{Op: Rename, Name: "/tmp/file"}
	//	Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"}
	renamedFrom string
}

// Op describes a set of file operations.
@@ -50,7 +187,7 @@ const (
	// example "remove to trash" is often a rename).
	Remove

	// The path was renamed to something else; any watched on it will be
	// The path was renamed to something else; any watches on it will be
	// removed.
	Rename

@@ -60,15 +197,155 @@ const (
	// get triggered very frequently by some software. For example, Spotlight
	// indexing on macOS, anti-virus software, backup software, etc.
	Chmod

	// File descriptor was opened.
	//
	// Only works on Linux and FreeBSD.
	xUnportableOpen

	// File was read from.
	//
	// Only works on Linux and FreeBSD.
	xUnportableRead

	// File opened for writing was closed.
	//
	// Only works on Linux and FreeBSD.
	//
	// The advantage of using this over Write is that it's more reliable than
	// waiting for Write events to stop. It's also faster (if you're not
	// listening to Write events): copying a file of a few GB can easily
	// generate tens of thousands of Write events in a short span of time.
	xUnportableCloseWrite

	// File opened for reading was closed.
	//
	// Only works on Linux and FreeBSD.
	xUnportableCloseRead
)

// Common errors that can be reported.
var (
	// ErrNonExistentWatch is used when Remove() is called on a path that's not
	// added.
	ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch")
	ErrEventOverflow    = errors.New("fsnotify: queue or buffer overflow")
	ErrClosed           = errors.New("fsnotify: watcher already closed")

	// ErrClosed is used when trying to operate on a closed Watcher.
	ErrClosed = errors.New("fsnotify: watcher already closed")

	// ErrEventOverflow is reported from the Errors channel when there are too
	// many events:
	//
	//  - inotify:      inotify returns IN_Q_OVERFLOW – because there are too
	//                  many queued events (the fs.inotify.max_queued_events
	//                  sysctl can be used to increase this).
	//  - windows:      The buffer size is too small; WithBufferSize() can be used to increase it.
	//  - kqueue, fen:  Not used.
	ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow")

	// ErrUnsupported is returned by AddWith() when WithOps() specified an
	// Unportable event that's not supported on this platform.
	xErrUnsupported = errors.New("fsnotify: not supported with this backend")
)
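Because ErrEventOverflow is delivered on the Errors channel rather than returned from a call, callers have to handle it there. A minimal sketch of one common recovery strategy (an editorial illustration; rescan is a hypothetical application hook, not part of fsnotify):

	go func() {
		for err := range w.Errors {
			if errors.Is(err, fsnotify.ErrEventOverflow) {
				// Events were dropped by the kernel queue or the Windows
				// buffer; rescan the watched tree to resynchronize.
				rescan()
				continue
			}
			log.Println("watch error:", err)
		}
	}()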

// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
	ev, errs := make(chan Event), make(chan error)
	b, err := newBackend(ev, errs)
	if err != nil {
		return nil, err
	}
	return &Watcher{b: b, Events: ev, Errors: errs}, nil
}

// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
// channel.
//
// The main use case for this is situations with a very large number of events
// where the kernel buffer size can't be increased (e.g. due to lack of
// permissions). An unbuffered Watcher will perform better for almost all use
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
func NewBufferedWatcher(sz uint) (*Watcher, error) {
	ev, errs := make(chan Event, sz), make(chan error)
	b, err := newBufferedBackend(sz, ev, errs)
	if err != nil {
		return nil, err
	}
	return &Watcher{b: b, Events: ev, Errors: errs}, nil
}
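The two constructors differ only in the capacity of the Events channel; a short usage sketch (an editorial illustration):

	// Unbuffered: fine for almost all uses, and applies natural backpressure.
	w, err := fsnotify.NewWatcher()

	// Buffered: can smooth over short bursts when the kernel buffers can't be
	// raised, at the cost of delaying delivery.
	wb, err2 := fsnotify.NewBufferedWatcher(4096)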

// Add starts monitoring the path for changes.
//
// A path can only be watched once; watching it more than once is a no-op and will
// not return an error. Paths that do not yet exist on the filesystem cannot be
// watched.
//
// A watch will be automatically removed if the watched path is deleted or
// renamed. The exception is the Windows backend, which doesn't remove the
// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// Returns [ErrClosed] if [Watcher.Close] was called.
//
// See [Watcher.AddWith] for a version that allows adding options.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
// after the watcher is started. Subdirectories are not watched (i.e. it's
// non-recursive).
//
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many programs (especially editors) update files atomically: it
// will write to a temporary file which is then moved to the destination,
// overwriting the original (or some variant thereof). The watcher on the
// original file is now lost, as that no longer exists.
//
// The upshot of this is that a power failure or crash won't leave a
// half-written file.
//
// Watch the parent directory and use Event.Name to filter out files you're not
// interested in. There is an example of this in cmd/fsnotify/file.go.
func (w *Watcher) Add(path string) error { return w.b.Add(path) }
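The advice above (watch the parent, filter on Event.Name) looks like this in practice. A sketch modeled on cmd/fsnotify/file.go, where the directory, file name, and log message are placeholders:

package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watch the directory, not the file: editors replace files atomically,
	// which would silently kill a watch on the file itself.
	if err := w.Add("/etc/myapp"); err != nil {
		log.Fatal(err)
	}

	for ev := range w.Events {
		if filepath.Base(ev.Name) != "config.yml" {
			continue // some other file in the same directory
		}
		if ev.Has(fsnotify.Write) || ev.Has(fsnotify.Create) {
			log.Println("config changed:", ev)
		}
	}
}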
|
||||
|
||||
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
|
||||
// the defaults described below are used.
|
||||
//
|
||||
// Possible options are:
|
||||
//
|
||||
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
|
||||
// other platforms. The default is 64K (65536 bytes).
|
||||
func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) }
|
||||
|
// Remove stops monitoring the path for changes.
//
// Directories are always removed non-recursively. For example, if you added
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(path string) error { return w.b.Remove(path) }

// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error { return w.b.Close() }

// WatchList returns all paths explicitly added with [Watcher.Add] (and not yet
// removed).
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string { return w.b.WatchList() }

// Supports reports if all the listed operations are supported by this platform.
//
// Create, Write, Remove, Rename, and Chmod are always supported. It can only
// return false for an Op starting with Unportable.
func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) }

func (o Op) String() string {
	var b strings.Builder
	if o.Has(Create) {
@@ -80,6 +357,18 @@ func (o Op) String() string {
	if o.Has(Write) {
		b.WriteString("|WRITE")
	}
	if o.Has(xUnportableOpen) {
		b.WriteString("|OPEN")
	}
	if o.Has(xUnportableRead) {
		b.WriteString("|READ")
	}
	if o.Has(xUnportableCloseWrite) {
		b.WriteString("|CLOSE_WRITE")
	}
	if o.Has(xUnportableCloseRead) {
		b.WriteString("|CLOSE_READ")
	}
	if o.Has(Rename) {
		b.WriteString("|RENAME")
	}
@@ -100,24 +389,48 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) }

// String returns a string representation of the event with its path.
func (e Event) String() string {
	if e.renamedFrom != "" {
		return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom)
	}
	return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
}

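Ops are bit flags, so Has and Event.Has are simple mask tests and Op.String renders the set pipe-separated; a small sketch:

package main

import (
	"fmt"

	"github.com/fsnotify/fsnotify"
)

func main() {
	ev := fsnotify.Event{Name: "/tmp/x", Op: fsnotify.Create | fsnotify.Write}
	fmt.Println(ev)                        // prints something like: CREATE|WRITE  "/tmp/x"
	fmt.Println(ev.Has(fsnotify.Write))    // true
	fmt.Println(ev.Op.Has(fsnotify.Chmod)) // false
}
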
type (
	backend interface {
		Add(string) error
		AddWith(string, ...addOpt) error
		Remove(string) error
		WatchList() []string
		Close() error
		xSupports(Op) bool
	}
	addOpt   func(opt *withOpts)
	withOpts struct {
		bufsize    int
		op         Op
		noFollow   bool
		sendCreate bool
	}
)

var debug = func() bool {
	// Check for exactly "1" (rather than mere existence) so we can add
	// options/flags in the future. I don't know if we ever want that, but it's
	// nice to leave the option open.
	return os.Getenv("FSNOTIFY_DEBUG") == "1"
}()

var defaultOpts = withOpts{
	bufsize: 65536, // 64K
	op:      Create | Write | Remove | Rename | Chmod,
}

func getOptions(opts ...addOpt) withOpts {
	with := defaultOpts
	for _, o := range opts {
		if o != nil {
			o(&with)
		}
	}
	return with
}

@@ -136,9 +449,44 @@ func WithBufferSize(bytes int) addOpt {
	return func(opt *withOpts) { opt.bufsize = bytes }
}

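addOpt/withOpts/getOptions above are the standard functional-options pattern. A standalone sketch of the same idea (names here are hypothetical, not fsnotify's internals):

package main

import "fmt"

type opts struct{ bufsize int }

type opt func(*opts)

func withBuf(n int) opt { return func(o *opts) { o.bufsize = n } }

// apply starts from defaults and lets each option mutate the struct,
// skipping nil options the way getOptions does.
func apply(options ...opt) opts {
	o := opts{bufsize: 65536} // default
	for _, f := range options {
		if f != nil {
			f(&o)
		}
	}
	return o
}

func main() { fmt.Println(apply(withBuf(4096)).bufsize) } // 4096
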
// WithOps sets which operations to listen for. The default is [Create],
// [Write], [Remove], [Rename], and [Chmod].
//
// Excluding operations you're not interested in can save quite a bit of CPU
// time; in some use cases there may be hundreds of thousands of useless Write
// or Chmod operations per second.
//
// This can also be used to add unportable operations not supported by all
// platforms; unportable operations all start with "Unportable":
// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and
// [UnportableCloseRead].
//
// AddWith returns an error when using an unportable operation that's not
// supported. Use [Watcher.Support] to check for support.
func withOps(op Op) addOpt {
	return func(opt *withOpts) { opt.op = op }
}

// WithNoFollow disables following symlinks, so the symlinks themselves are
// watched.
func withNoFollow() addOpt {
	return func(opt *withOpts) { opt.noFollow = true }
}

// "Internal" option for recursive watches on inotify.
func withCreate() addOpt {
	return func(opt *withOpts) { opt.sendCreate = true }
}

var enableRecurse = false

// Check if this path is recursive (ends with "/..." or "\..."), and return the
// path with the /... stripped.
func recursivePath(path string) (string, bool) {
	path = filepath.Clean(path)
	if !enableRecurse { // Only enabled in tests for now.
		return path, false
	}
	if filepath.Base(path) == "..." {
		return filepath.Dir(path), true
	}

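The "/..." convention above can be illustrated with a standalone re-implementation (for illustration only; the vendored function additionally gates on enableRecurse, and the diff cuts it off before its final return):

package main

import (
	"fmt"
	"path/filepath"
)

func splitRecursive(path string) (string, bool) {
	path = filepath.Clean(path)
	if filepath.Base(path) == "..." {
		return filepath.Dir(path), true
	}
	return path, false
}

func main() {
	fmt.Println(splitRecursive("/tmp/dir/...")) // /tmp/dir true
	fmt.Println(splitRecursive("/tmp/dir"))     // /tmp/dir false
}
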
39 vendor/github.com/fsnotify/fsnotify/internal/darwin.go generated vendored Normal file
@@ -0,0 +1,39 @@
//go:build darwin

package internal

import (
	"syscall"

	"golang.org/x/sys/unix"
)

var (
	SyscallEACCES = syscall.EACCES
	UnixEACCES    = unix.EACCES
)

var maxfiles uint64

// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
func SetRlimit() {
	var l syscall.Rlimit
	err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
	if err == nil && l.Cur != l.Max {
		l.Cur = l.Max
		syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
	}
	maxfiles = l.Cur

	if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles {
		maxfiles = uint64(n)
	}

	if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles {
		maxfiles = uint64(n)
	}
}

func Maxfiles() uint64                              { return maxfiles }
func Mkfifo(path string, mode uint32) error         { return unix.Mkfifo(path, mode) }
func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) }
57 vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go generated vendored Normal file
@@ -0,0 +1,57 @@
package internal

import "golang.org/x/sys/unix"

var names = []struct {
	n string
	m uint32
}{
	{"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE},
	{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
	{"NOTE_BACKGROUND", unix.NOTE_BACKGROUND},
	{"NOTE_CHILD", unix.NOTE_CHILD},
	{"NOTE_CRITICAL", unix.NOTE_CRITICAL},
	{"NOTE_DELETE", unix.NOTE_DELETE},
	{"NOTE_EXEC", unix.NOTE_EXEC},
	{"NOTE_EXIT", unix.NOTE_EXIT},
	{"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS},
	{"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR},
	{"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL},
	{"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL},
	{"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK},
	{"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY},
	{"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED},
	{"NOTE_EXTEND", unix.NOTE_EXTEND},
	{"NOTE_FFAND", unix.NOTE_FFAND},
	{"NOTE_FFCOPY", unix.NOTE_FFCOPY},
	{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
	{"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
	{"NOTE_FFNOP", unix.NOTE_FFNOP},
	{"NOTE_FFOR", unix.NOTE_FFOR},
	{"NOTE_FORK", unix.NOTE_FORK},
	{"NOTE_FUNLOCK", unix.NOTE_FUNLOCK},
	{"NOTE_LEEWAY", unix.NOTE_LEEWAY},
	{"NOTE_LINK", unix.NOTE_LINK},
	{"NOTE_LOWAT", unix.NOTE_LOWAT},
	{"NOTE_MACHTIME", unix.NOTE_MACHTIME},
	{"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME},
	{"NOTE_NONE", unix.NOTE_NONE},
	{"NOTE_NSECONDS", unix.NOTE_NSECONDS},
	{"NOTE_OOB", unix.NOTE_OOB},
	//{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!)
	{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
	{"NOTE_REAP", unix.NOTE_REAP},
	{"NOTE_RENAME", unix.NOTE_RENAME},
	{"NOTE_REVOKE", unix.NOTE_REVOKE},
	{"NOTE_SECONDS", unix.NOTE_SECONDS},
	{"NOTE_SIGNAL", unix.NOTE_SIGNAL},
	{"NOTE_TRACK", unix.NOTE_TRACK},
	{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
	{"NOTE_TRIGGER", unix.NOTE_TRIGGER},
	{"NOTE_USECONDS", unix.NOTE_USECONDS},
	{"NOTE_VM_ERROR", unix.NOTE_VM_ERROR},
	{"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE},
	{"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE},
	{"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE},
	{"NOTE_WRITE", unix.NOTE_WRITE},
}
33 vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go generated vendored Normal file
@@ -0,0 +1,33 @@
package internal

import "golang.org/x/sys/unix"

var names = []struct {
	n string
	m uint32
}{
	{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
	{"NOTE_CHILD", unix.NOTE_CHILD},
	{"NOTE_DELETE", unix.NOTE_DELETE},
	{"NOTE_EXEC", unix.NOTE_EXEC},
	{"NOTE_EXIT", unix.NOTE_EXIT},
	{"NOTE_EXTEND", unix.NOTE_EXTEND},
	{"NOTE_FFAND", unix.NOTE_FFAND},
	{"NOTE_FFCOPY", unix.NOTE_FFCOPY},
	{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
	{"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
	{"NOTE_FFNOP", unix.NOTE_FFNOP},
	{"NOTE_FFOR", unix.NOTE_FFOR},
	{"NOTE_FORK", unix.NOTE_FORK},
	{"NOTE_LINK", unix.NOTE_LINK},
	{"NOTE_LOWAT", unix.NOTE_LOWAT},
	{"NOTE_OOB", unix.NOTE_OOB},
	{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
	{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
	{"NOTE_RENAME", unix.NOTE_RENAME},
	{"NOTE_REVOKE", unix.NOTE_REVOKE},
	{"NOTE_TRACK", unix.NOTE_TRACK},
	{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
	{"NOTE_TRIGGER", unix.NOTE_TRIGGER},
	{"NOTE_WRITE", unix.NOTE_WRITE},
}
42 vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go generated vendored Normal file
@@ -0,0 +1,42 @@
package internal

import "golang.org/x/sys/unix"

var names = []struct {
	n string
	m uint32
}{
	{"NOTE_ABSTIME", unix.NOTE_ABSTIME},
	{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
	{"NOTE_CHILD", unix.NOTE_CHILD},
	{"NOTE_CLOSE", unix.NOTE_CLOSE},
	{"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE},
	{"NOTE_DELETE", unix.NOTE_DELETE},
	{"NOTE_EXEC", unix.NOTE_EXEC},
	{"NOTE_EXIT", unix.NOTE_EXIT},
	{"NOTE_EXTEND", unix.NOTE_EXTEND},
	{"NOTE_FFAND", unix.NOTE_FFAND},
	{"NOTE_FFCOPY", unix.NOTE_FFCOPY},
	{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
	{"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
	{"NOTE_FFNOP", unix.NOTE_FFNOP},
	{"NOTE_FFOR", unix.NOTE_FFOR},
	{"NOTE_FILE_POLL", unix.NOTE_FILE_POLL},
	{"NOTE_FORK", unix.NOTE_FORK},
	{"NOTE_LINK", unix.NOTE_LINK},
	{"NOTE_LOWAT", unix.NOTE_LOWAT},
	{"NOTE_MSECONDS", unix.NOTE_MSECONDS},
	{"NOTE_NSECONDS", unix.NOTE_NSECONDS},
	{"NOTE_OPEN", unix.NOTE_OPEN},
	{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
	{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
	{"NOTE_READ", unix.NOTE_READ},
	{"NOTE_RENAME", unix.NOTE_RENAME},
	{"NOTE_REVOKE", unix.NOTE_REVOKE},
	{"NOTE_SECONDS", unix.NOTE_SECONDS},
	{"NOTE_TRACK", unix.NOTE_TRACK},
	{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
	{"NOTE_TRIGGER", unix.NOTE_TRIGGER},
	{"NOTE_USECONDS", unix.NOTE_USECONDS},
	{"NOTE_WRITE", unix.NOTE_WRITE},
}
32 vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go generated vendored Normal file
@@ -0,0 +1,32 @@
//go:build freebsd || openbsd || netbsd || dragonfly || darwin

package internal

import (
	"fmt"
	"os"
	"strings"
	"time"

	"golang.org/x/sys/unix"
)

func Debug(name string, kevent *unix.Kevent_t) {
	mask := uint32(kevent.Fflags)

	var (
		l       []string
		unknown = mask
	)
	for _, n := range names {
		if mask&n.m == n.m {
			l = append(l, n.n)
			unknown ^= n.m
		}
	}
	if unknown > 0 {
		l = append(l, fmt.Sprintf("0x%x", unknown))
	}
	fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n",
		time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name)
}
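All the per-platform debug files share this shape: a names table plus a decode loop that collects matched flags and reports leftover bits. A standalone sketch of the decode step (the example masks are made up):

package main

import "fmt"

type flag struct {
	n string
	m uint32
}

func decode(mask uint32, names []flag) []string {
	var l []string
	unknown := mask
	for _, n := range names {
		if mask&n.m == n.m {
			l = append(l, n.n)
			unknown ^= n.m // clear recognised bits
		}
	}
	if unknown > 0 {
		l = append(l, fmt.Sprintf("0x%x", unknown)) // anything left is unnamed
	}
	return l
}

func main() {
	names := []flag{{"NOTE_WRITE", 0x2}, {"NOTE_EXTEND", 0x4}}
	fmt.Println(decode(0x2|0x4|0x100, names)) // [NOTE_WRITE NOTE_EXTEND 0x100]
}
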
56 vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go generated vendored Normal file
@@ -0,0 +1,56 @@
package internal

import (
	"fmt"
	"os"
	"strings"
	"time"

	"golang.org/x/sys/unix"
)

func Debug(name string, mask, cookie uint32) {
	names := []struct {
		n string
		m uint32
	}{
		{"IN_ACCESS", unix.IN_ACCESS},
		{"IN_ATTRIB", unix.IN_ATTRIB},
		{"IN_CLOSE", unix.IN_CLOSE},
		{"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE},
		{"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE},
		{"IN_CREATE", unix.IN_CREATE},
		{"IN_DELETE", unix.IN_DELETE},
		{"IN_DELETE_SELF", unix.IN_DELETE_SELF},
		{"IN_IGNORED", unix.IN_IGNORED},
		{"IN_ISDIR", unix.IN_ISDIR},
		{"IN_MODIFY", unix.IN_MODIFY},
		{"IN_MOVE", unix.IN_MOVE},
		{"IN_MOVED_FROM", unix.IN_MOVED_FROM},
		{"IN_MOVED_TO", unix.IN_MOVED_TO},
		{"IN_MOVE_SELF", unix.IN_MOVE_SELF},
		{"IN_OPEN", unix.IN_OPEN},
		{"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW},
		{"IN_UNMOUNT", unix.IN_UNMOUNT},
	}

	var (
		l       []string
		unknown = mask
	)
	for _, n := range names {
		if mask&n.m == n.m {
			l = append(l, n.n)
			unknown ^= n.m
		}
	}
	if unknown > 0 {
		l = append(l, fmt.Sprintf("0x%x", unknown))
	}
	var c string
	if cookie > 0 {
		c = fmt.Sprintf("(cookie: %d) ", cookie)
	}
	fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n",
		time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name)
}
25 vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go generated vendored Normal file
@@ -0,0 +1,25 @@
package internal

import "golang.org/x/sys/unix"

var names = []struct {
	n string
	m uint32
}{
	{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
	{"NOTE_CHILD", unix.NOTE_CHILD},
	{"NOTE_DELETE", unix.NOTE_DELETE},
	{"NOTE_EXEC", unix.NOTE_EXEC},
	{"NOTE_EXIT", unix.NOTE_EXIT},
	{"NOTE_EXTEND", unix.NOTE_EXTEND},
	{"NOTE_FORK", unix.NOTE_FORK},
	{"NOTE_LINK", unix.NOTE_LINK},
	{"NOTE_LOWAT", unix.NOTE_LOWAT},
	{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
	{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
	{"NOTE_RENAME", unix.NOTE_RENAME},
	{"NOTE_REVOKE", unix.NOTE_REVOKE},
	{"NOTE_TRACK", unix.NOTE_TRACK},
	{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
	{"NOTE_WRITE", unix.NOTE_WRITE},
}
28 vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go generated vendored Normal file
@@ -0,0 +1,28 @@
package internal

import "golang.org/x/sys/unix"

var names = []struct {
	n string
	m uint32
}{
	{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
	// {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386?
	{"NOTE_CHILD", unix.NOTE_CHILD},
	{"NOTE_DELETE", unix.NOTE_DELETE},
	{"NOTE_EOF", unix.NOTE_EOF},
	{"NOTE_EXEC", unix.NOTE_EXEC},
	{"NOTE_EXIT", unix.NOTE_EXIT},
	{"NOTE_EXTEND", unix.NOTE_EXTEND},
	{"NOTE_FORK", unix.NOTE_FORK},
	{"NOTE_LINK", unix.NOTE_LINK},
	{"NOTE_LOWAT", unix.NOTE_LOWAT},
	{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
	{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
	{"NOTE_RENAME", unix.NOTE_RENAME},
	{"NOTE_REVOKE", unix.NOTE_REVOKE},
	{"NOTE_TRACK", unix.NOTE_TRACK},
	{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
	{"NOTE_TRUNCATE", unix.NOTE_TRUNCATE},
	{"NOTE_WRITE", unix.NOTE_WRITE},
}
45 vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go generated vendored Normal file
@@ -0,0 +1,45 @@
package internal

import (
	"fmt"
	"os"
	"strings"
	"time"

	"golang.org/x/sys/unix"
)

func Debug(name string, mask int32) {
	names := []struct {
		n string
		m int32
	}{
		{"FILE_ACCESS", unix.FILE_ACCESS},
		{"FILE_MODIFIED", unix.FILE_MODIFIED},
		{"FILE_ATTRIB", unix.FILE_ATTRIB},
		{"FILE_TRUNC", unix.FILE_TRUNC},
		{"FILE_NOFOLLOW", unix.FILE_NOFOLLOW},
		{"FILE_DELETE", unix.FILE_DELETE},
		{"FILE_RENAME_TO", unix.FILE_RENAME_TO},
		{"FILE_RENAME_FROM", unix.FILE_RENAME_FROM},
		{"UNMOUNTED", unix.UNMOUNTED},
		{"MOUNTEDOVER", unix.MOUNTEDOVER},
		{"FILE_EXCEPTION", unix.FILE_EXCEPTION},
	}

	var (
		l       []string
		unknown = mask
	)
	for _, n := range names {
		if mask&n.m == n.m {
			l = append(l, n.n)
			unknown ^= n.m
		}
	}
	if unknown > 0 {
		l = append(l, fmt.Sprintf("0x%x", unknown))
	}
	fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n",
		time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name)
}
40 vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go generated vendored Normal file
@@ -0,0 +1,40 @@
package internal

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"

	"golang.org/x/sys/windows"
)

func Debug(name string, mask uint32) {
	names := []struct {
		n string
		m uint32
	}{
		{"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED},
		{"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED},
		{"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED},
		{"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME},
		{"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME},
	}

	var (
		l       []string
		unknown = mask
	)
	for _, n := range names {
		if mask&n.m == n.m {
			l = append(l, n.n)
			unknown ^= n.m
		}
	}
	if unknown > 0 {
		l = append(l, fmt.Sprintf("0x%x", unknown))
	}
	fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n",
		time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name))
}
31 vendor/github.com/fsnotify/fsnotify/internal/freebsd.go generated vendored Normal file
@@ -0,0 +1,31 @@
//go:build freebsd

package internal

import (
	"syscall"

	"golang.org/x/sys/unix"
)

var (
	SyscallEACCES = syscall.EACCES
	UnixEACCES    = unix.EACCES
)

var maxfiles uint64

func SetRlimit() {
	// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
	var l syscall.Rlimit
	err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
	if err == nil && l.Cur != l.Max {
		l.Cur = l.Max
		syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
	}
	maxfiles = uint64(l.Cur)
}

func Maxfiles() uint64                              { return maxfiles }
func Mkfifo(path string, mode uint32) error         { return unix.Mkfifo(path, mode) }
func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) }
2 vendor/github.com/fsnotify/fsnotify/internal/internal.go generated vendored Normal file
@@ -0,0 +1,2 @@
// Package internal contains some helpers.
package internal
31 vendor/github.com/fsnotify/fsnotify/internal/unix.go generated vendored Normal file
@@ -0,0 +1,31 @@
//go:build !windows && !darwin && !freebsd

package internal

import (
	"syscall"

	"golang.org/x/sys/unix"
)

var (
	SyscallEACCES = syscall.EACCES
	UnixEACCES    = unix.EACCES
)

var maxfiles uint64

func SetRlimit() {
	// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
	var l syscall.Rlimit
	err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
	if err == nil && l.Cur != l.Max {
		l.Cur = l.Max
		syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
	}
	maxfiles = uint64(l.Cur)
}

func Maxfiles() uint64                              { return maxfiles }
func Mkfifo(path string, mode uint32) error         { return unix.Mkfifo(path, mode) }
func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) }
7 vendor/github.com/fsnotify/fsnotify/internal/unix2.go generated vendored Normal file
@@ -0,0 +1,7 @@
//go:build !windows

package internal

func HasPrivilegesForSymlink() bool {
	return true
}
41 vendor/github.com/fsnotify/fsnotify/internal/windows.go generated vendored Normal file
@@ -0,0 +1,41 @@
//go:build windows

package internal

import (
	"errors"

	"golang.org/x/sys/windows"
)

// Just a dummy.
var (
	SyscallEACCES = errors.New("dummy")
	UnixEACCES    = errors.New("dummy")
)

func SetRlimit()                                    {}
func Maxfiles() uint64                              { return 1<<64 - 1 }
func Mkfifo(path string, mode uint32) error         { return errors.New("no FIFOs on Windows") }
func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") }

func HasPrivilegesForSymlink() bool {
	var sid *windows.SID
	err := windows.AllocateAndInitializeSid(
		&windows.SECURITY_NT_AUTHORITY,
		2,
		windows.SECURITY_BUILTIN_DOMAIN_RID,
		windows.DOMAIN_ALIAS_RID_ADMINS,
		0, 0, 0, 0, 0, 0,
		&sid)
	if err != nil {
		return false
	}
	defer windows.FreeSid(sid)
	token := windows.Token(0)
	member, err := token.IsMember(sid)
	if err != nil {
		return false
	}
	return member || token.IsElevated()
}
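HasPrivilegesForSymlink exists because unprivileged Windows accounts usually cannot create symlinks; a hypothetical test-side sketch of gating on it (not code from this diff):

//go:build windows

package internal_test

import (
	"os"
	"path/filepath"
	"testing"

	"github.com/fsnotify/fsnotify/internal"
)

func TestSymlinkWatch(t *testing.T) {
	if !internal.HasPrivilegesForSymlink() {
		t.Skip("needs admin membership or an elevated token to create symlinks")
	}
	dir := t.TempDir()
	target := filepath.Join(dir, "target")
	if err := os.WriteFile(target, []byte("x"), 0o644); err != nil {
		t.Fatal(err)
	}
	if err := os.Symlink(target, filepath.Join(dir, "link")); err != nil {
		t.Fatal(err)
	}
}
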
259 vendor/github.com/fsnotify/fsnotify/mkdoc.zsh generated vendored
@@ -1,259 +0,0 @@
#!/usr/bin/env zsh
[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1
setopt err_exit no_unset pipefail extended_glob

# Simple script to update the godoc comments on all watchers so you don't need
# to update the same comment 5 times.

watcher=$(<<EOF
// Watcher watches a set of paths, delivering events on a channel.
//
// A watcher should not be copied (e.g. pass it by pointer, rather than by
// value).
//
// # Linux notes
//
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
//	fp := os.Open("file")
//	os.Remove("file")        // Triggers Chmod
//	fp.Close()               // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
// for the number of watches per user, and fs.inotify.max_user_instances
// specifies the maximum number of inotify instances per user. Every Watcher you
// create is an "instance", and every path you add is a "watch".
//
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
// /proc/sys/fs/inotify/max_user_instances
//
// To increase them you can use sysctl or write the value to the /proc file:
//
//	# Default values on Linux 5.18
//	sysctl fs.inotify.max_user_watches=124983
//	sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
//	fs.inotify.max_user_watches=124983
//	fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
//
// # kqueue notes (macOS, BSD)
//
// kqueue requires opening a file descriptor for every file that's being watched;
// so if you're watching a directory with five files then that's six file
// descriptors. You will run into your system's "max open files" limit faster on
// these platforms.
//
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # Windows notes
//
// Paths can be added as "C:\\path\\to\\dir", but forward slashes
// ("C:/path/to/dir") will also work.
//
// When a watched directory is removed it will always send an event for the
// directory itself, but may not send events for all files in that directory.
// Sometimes it will send events for all files, sometimes it will send no
// events, and often only for some files.
//
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
// value that is guaranteed to work with SMB filesystems. If you have many
// events in quick succession this may not be enough, and you will have to use
// [WithBufferSize] to increase the value.
EOF
)

new=$(<<EOF
// NewWatcher creates a new Watcher.
EOF
)

newbuffered=$(<<EOF
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
// channel.
//
// The main use case for this is situations with a very large number of events
// where the kernel buffer size can't be increased (e.g. due to lack of
// permissions). An unbuffered Watcher will perform better for almost all use
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
EOF
)

add=$(<<EOF
// Add starts monitoring the path for changes.
//
// A path can only be watched once; watching it more than once is a no-op and will
// not return an error. Paths that do not yet exist on the filesystem cannot be
// watched.
//
// A watch will be automatically removed if the watched path is deleted or
// renamed. The exception is the Windows backend, which doesn't remove the
// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// Returns [ErrClosed] if [Watcher.Close] was called.
//
// See [Watcher.AddWith] for a version that allows adding options.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
// after the watcher is started. Subdirectories are not watched (i.e. it's
// non-recursive).
//
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many programs (especially editors) update files atomically: it
// will write to a temporary file which is then moved to the destination,
// overwriting the original (or some variant thereof). The watcher on the
// original file is now lost, as that no longer exists.
//
// The upshot of this is that a power failure or crash won't leave a
// half-written file.
//
// Watch the parent directory and use Event.Name to filter out files you're not
// interested in. There is an example of this in cmd/fsnotify/file.go.
EOF
)

addwith=$(<<EOF
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
// the defaults described below are used.
//
// Possible options are:
//
//   - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
//     other platforms. The default is 64K (65536 bytes).
EOF
)

remove=$(<<EOF
// Remove stops monitoring the path for changes.
//
// Directories are always removed non-recursively. For example, if you added
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
//
// Returns nil if [Watcher.Close] was called.
EOF
)

close=$(<<EOF
// Close removes all watches and closes the Events channel.
EOF
)

watchlist=$(<<EOF
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
// yet removed).
//
// Returns nil if [Watcher.Close] was called.
EOF
)

events=$(<<EOF
// Events sends the filesystem change events.
//
// fsnotify can send the following events; a "path" here can refer to a
// file, directory, symbolic link, or special file like a FIFO.
//
//   fsnotify.Create    A new path was created; this may be followed by one
//                      or more Write events if data also gets written to a
//                      file.
//
//   fsnotify.Remove    A path was removed.
//
//   fsnotify.Rename    A path was renamed. A rename is always sent with the
//                      old path as Event.Name, and a Create event will be
//                      sent with the new name. Renames are only sent for
//                      paths that are currently watched; e.g. moving an
//                      unmonitored file into a monitored directory will
//                      show up as just a Create. Similarly, renaming a file
//                      to outside a monitored directory will show up as
//                      only a Rename.
//
//   fsnotify.Write     A file or named pipe was written to. A Truncate will
//                      also trigger a Write. A single "write action"
//                      initiated by the user may show up as one or multiple
//                      writes, depending on when the system syncs things to
//                      disk. For example when compiling a large Go program
//                      you may get hundreds of Write events, and you may
//                      want to wait until you've stopped receiving them
//                      (see the dedup example in cmd/fsnotify).
//
//                      Some systems may send a Write event for directories
//                      when the directory content changes.
//
//   fsnotify.Chmod     Attributes were changed. On Linux this is also sent
//                      when a file is removed (or more accurately, when a
//                      link to an inode is removed). On kqueue it's sent
//                      when a file is truncated. On Windows it's never
//                      sent.
EOF
)

errors=$(<<EOF
// Errors sends any errors.
//
// ErrEventOverflow is used to indicate there are too many events:
//
//   - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
//   - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
//   - kqueue, fen: Not used.
EOF
)

set-cmt() {
	local pat=$1
	local cmt=$2

	IFS=$'\n' local files=($(grep -n $pat backend_*~*_test.go))
	for f in $files; do
		IFS=':' local fields=($=f)
		local file=$fields[1]
		local end=$(( $fields[2] - 1 ))

		# Find start of comment.
		local start=0
		IFS=$'\n' local lines=($(head -n$end $file))
		for (( i = 1; i <= $#lines; i++ )); do
			local line=$lines[-$i]
			if ! grep -q '^[[:space:]]*//' <<<$line; then
				start=$(( end - (i - 2) ))
				break
			fi
		done

		head -n $(( start - 1 )) $file >/tmp/x
		print -r -- $cmt >>/tmp/x
		tail -n+$(( end + 1 )) $file >>/tmp/x
		mv /tmp/x $file
	done
}

set-cmt '^type Watcher struct '             $watcher
set-cmt '^func NewWatcher('                 $new
set-cmt '^func NewBufferedWatcher('         $newbuffered
set-cmt '^func (w \*Watcher) Add('          $add
set-cmt '^func (w \*Watcher) AddWith('      $addwith
set-cmt '^func (w \*Watcher) Remove('       $remove
set-cmt '^func (w \*Watcher) Close('        $close
set-cmt '^func (w \*Watcher) WatchList('    $watchlist
set-cmt '^[[:space:]]*Events *chan Event$'  $events
set-cmt '^[[:space:]]*Errors *chan error$'  $errors
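The Write semantics described above (one user-level save can surface as many Write events) are why the comment points at the dedup example in cmd/fsnotify. A minimal standalone debounce sketch, not the upstream example verbatim; the 100ms window and the path are illustrative:

package main

import (
	"log"
	"time"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	// Per-path timers: (re)arm on every Write, act only after 100ms of quiet.
	timers := map[string]*time.Timer{}
	for ev := range w.Events {
		if !ev.Has(fsnotify.Write) {
			continue
		}
		ev := ev // capture for the closure below
		if t, ok := timers[ev.Name]; ok {
			t.Reset(100 * time.Millisecond)
			continue
		}
		timers[ev.Name] = time.AfterFunc(100*time.Millisecond, func() {
			log.Println("settled:", ev)
		})
	}
}
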
1 vendor/github.com/fsnotify/fsnotify/system_bsd.go generated vendored
@@ -1,5 +1,4 @@
//go:build freebsd || openbsd || netbsd || dragonfly
// +build freebsd openbsd netbsd dragonfly

package fsnotify

1 vendor/github.com/fsnotify/fsnotify/system_darwin.go generated vendored
@@ -1,5 +1,4 @@
//go:build darwin
// +build darwin

package fsnotify

964 vendor/github.com/open-policy-agent/opa/ast/annotations.go generated vendored
@@ -5,973 +5,29 @@
package ast

import (
	"encoding/json"
	"fmt"
	"net/url"
	"sort"
	"strings"

	astJSON "github.com/open-policy-agent/opa/ast/json"
	"github.com/open-policy-agent/opa/internal/deepcopy"
	"github.com/open-policy-agent/opa/util"
)

const (
	annotationScopePackage     = "package"
	annotationScopeImport      = "import"
	annotationScopeRule        = "rule"
	annotationScopeDocument    = "document"
	annotationScopeSubpackages = "subpackages"
	v1 "github.com/open-policy-agent/opa/v1/ast"
)

type (
	// Annotations represents metadata attached to other AST nodes such as rules.
	Annotations struct {
		Scope            string                       `json:"scope"`
		Title            string                       `json:"title,omitempty"`
		Entrypoint       bool                         `json:"entrypoint,omitempty"`
		Description      string                       `json:"description,omitempty"`
		Organizations    []string                     `json:"organizations,omitempty"`
		RelatedResources []*RelatedResourceAnnotation `json:"related_resources,omitempty"`
		Authors          []*AuthorAnnotation          `json:"authors,omitempty"`
		Schemas          []*SchemaAnnotation          `json:"schemas,omitempty"`
		Custom           map[string]interface{}       `json:"custom,omitempty"`
		Location         *Location                    `json:"location,omitempty"`

		comments    []*Comment
		node        Node
		jsonOptions astJSON.Options
	}
	Annotations = v1.Annotations

	// SchemaAnnotation contains a schema declaration for the document identified by the path.
	SchemaAnnotation struct {
		Path       Ref          `json:"path"`
		Schema     Ref          `json:"schema,omitempty"`
		Definition *interface{} `json:"definition,omitempty"`
	}
	SchemaAnnotation = v1.SchemaAnnotation

	AuthorAnnotation struct {
		Name  string `json:"name"`
		Email string `json:"email,omitempty"`
	}
	AuthorAnnotation = v1.AuthorAnnotation

	RelatedResourceAnnotation struct {
		Ref         url.URL `json:"ref"`
		Description string  `json:"description,omitempty"`
	}
	RelatedResourceAnnotation = v1.RelatedResourceAnnotation

	AnnotationSet struct {
		byRule    map[*Rule][]*Annotations
		byPackage map[int]*Annotations
		byPath    *annotationTreeNode
		modules   []*Module // Modules this set was constructed from
	}
	AnnotationSet = v1.AnnotationSet

	annotationTreeNode struct {
		Value    *Annotations
		Children map[Value]*annotationTreeNode // we assume key elements are hashable (vars and strings only!)
	}
	AnnotationsRef = v1.AnnotationsRef

	AnnotationsRef struct {
		Path        Ref          `json:"path"` // The path of the node the annotations are applied to
		Annotations *Annotations `json:"annotations,omitempty"`
		Location    *Location    `json:"location,omitempty"` // The location of the node the annotations are applied to
		AnnotationsRefSet = v1.AnnotationsRefSet

		jsonOptions astJSON.Options

		node Node // The node the annotations are applied to
	}

	AnnotationsRefSet []*AnnotationsRef

	FlatAnnotationsRefSet AnnotationsRefSet
	FlatAnnotationsRefSet = v1.FlatAnnotationsRefSet
)

func (a *Annotations) String() string {
	bs, _ := a.MarshalJSON()
	return string(bs)
}

// Loc returns the location of this annotation.
func (a *Annotations) Loc() *Location {
	return a.Location
}

// SetLoc updates the location of this annotation.
func (a *Annotations) SetLoc(l *Location) {
	a.Location = l
}

// EndLoc returns the location of this annotation's last comment line.
func (a *Annotations) EndLoc() *Location {
	count := len(a.comments)
	if count == 0 {
		return a.Location
	}
	return a.comments[count-1].Location
}

// Compare returns an integer indicating if a is less than, equal to, or greater
// than other.
func (a *Annotations) Compare(other *Annotations) int {

	if a == nil && other == nil {
		return 0
	}

	if a == nil {
		return -1
	}

	if other == nil {
		return 1
	}

	if cmp := scopeCompare(a.Scope, other.Scope); cmp != 0 {
		return cmp
	}

	if cmp := strings.Compare(a.Title, other.Title); cmp != 0 {
		return cmp
	}

	if cmp := strings.Compare(a.Description, other.Description); cmp != 0 {
		return cmp
	}

	if cmp := compareStringLists(a.Organizations, other.Organizations); cmp != 0 {
		return cmp
	}

	if cmp := compareRelatedResources(a.RelatedResources, other.RelatedResources); cmp != 0 {
		return cmp
	}

	if cmp := compareAuthors(a.Authors, other.Authors); cmp != 0 {
		return cmp
	}

	if cmp := compareSchemas(a.Schemas, other.Schemas); cmp != 0 {
		return cmp
	}

	if a.Entrypoint != other.Entrypoint {
		if a.Entrypoint {
			return 1
		}
		return -1
	}

	if cmp := util.Compare(a.Custom, other.Custom); cmp != 0 {
		return cmp
	}

	return 0
}

// GetTargetPath returns the path of the node these Annotations are applied to (the target)
func (a *Annotations) GetTargetPath() Ref {
	switch n := a.node.(type) {
	case *Package:
		return n.Path
	case *Rule:
		return n.Ref().GroundPrefix()
	default:
		return nil
	}
}

func (a *Annotations) setJSONOptions(opts astJSON.Options) {
	a.jsonOptions = opts
	if a.Location != nil {
		a.Location.JSONOptions = opts
	}
}

func (a *Annotations) MarshalJSON() ([]byte, error) {
	if a == nil {
		return []byte(`{"scope":""}`), nil
	}

	data := map[string]interface{}{
		"scope": a.Scope,
	}

	if a.Title != "" {
		data["title"] = a.Title
	}

	if a.Description != "" {
		data["description"] = a.Description
	}

	if a.Entrypoint {
		data["entrypoint"] = a.Entrypoint
	}

	if len(a.Organizations) > 0 {
		data["organizations"] = a.Organizations
	}

	if len(a.RelatedResources) > 0 {
		data["related_resources"] = a.RelatedResources
	}

	if len(a.Authors) > 0 {
		data["authors"] = a.Authors
	}

	if len(a.Schemas) > 0 {
		data["schemas"] = a.Schemas
	}

	if len(a.Custom) > 0 {
		data["custom"] = a.Custom
	}

	if a.jsonOptions.MarshalOptions.IncludeLocation.Annotations {
		if a.Location != nil {
			data["location"] = a.Location
		}
	}

	return json.Marshal(data)
}

func NewAnnotationsRef(a *Annotations) *AnnotationsRef {
	var loc *Location
	if a.node != nil {
		loc = a.node.Loc()
	}

	return &AnnotationsRef{
		Location:    loc,
		Path:        a.GetTargetPath(),
		Annotations: a,
		node:        a.node,
		jsonOptions: a.jsonOptions,
	}
}

func (ar *AnnotationsRef) GetPackage() *Package {
	switch n := ar.node.(type) {
	case *Package:
		return n
	case *Rule:
		return n.Module.Package
	default:
		return nil
	}
}

func (ar *AnnotationsRef) GetRule() *Rule {
	switch n := ar.node.(type) {
	case *Rule:
		return n
	default:
		return nil
	}
}

func (ar *AnnotationsRef) MarshalJSON() ([]byte, error) {
	data := map[string]interface{}{
		"path": ar.Path,
	}

	if ar.Annotations != nil {
		data["annotations"] = ar.Annotations
	}

	if ar.jsonOptions.MarshalOptions.IncludeLocation.AnnotationsRef {
		if ar.Location != nil {
			data["location"] = ar.Location
		}
	}

	return json.Marshal(data)
}

func scopeCompare(s1, s2 string) int {

	o1 := scopeOrder(s1)
	o2 := scopeOrder(s2)

	if o2 < o1 {
		return 1
	} else if o2 > o1 {
		return -1
	}

	if s1 < s2 {
		return -1
	} else if s2 < s1 {
		return 1
	}

	return 0
}

func scopeOrder(s string) int {
	switch s {
	case annotationScopeRule:
		return 1
	}
	return 0
}

func compareAuthors(a, b []*AuthorAnnotation) int {
	if len(a) > len(b) {
		return 1
	} else if len(a) < len(b) {
		return -1
	}

	for i := 0; i < len(a); i++ {
		if cmp := a[i].Compare(b[i]); cmp != 0 {
			return cmp
		}
	}

	return 0
}

func compareRelatedResources(a, b []*RelatedResourceAnnotation) int {
	if len(a) > len(b) {
		return 1
	} else if len(a) < len(b) {
		return -1
	}

	for i := 0; i < len(a); i++ {
		if cmp := strings.Compare(a[i].String(), b[i].String()); cmp != 0 {
			return cmp
		}
	}

	return 0
}

func compareSchemas(a, b []*SchemaAnnotation) int {
	maxLen := len(a)
	if len(b) < maxLen {
		maxLen = len(b)
	}

	for i := 0; i < maxLen; i++ {
		if cmp := a[i].Compare(b[i]); cmp != 0 {
			return cmp
		}
	}

	if len(a) > len(b) {
		return 1
	} else if len(a) < len(b) {
		return -1
	}

	return 0
}

func compareStringLists(a, b []string) int {
	if len(a) > len(b) {
		return 1
	} else if len(a) < len(b) {
		return -1
	}

	for i := 0; i < len(a); i++ {
		if cmp := strings.Compare(a[i], b[i]); cmp != 0 {
			return cmp
		}
	}

	return 0
}

// Copy returns a deep copy of a.
func (a *Annotations) Copy(node Node) *Annotations {
	cpy := *a

	cpy.Organizations = make([]string, len(a.Organizations))
	copy(cpy.Organizations, a.Organizations)

	cpy.RelatedResources = make([]*RelatedResourceAnnotation, len(a.RelatedResources))
	for i := range a.RelatedResources {
		cpy.RelatedResources[i] = a.RelatedResources[i].Copy()
	}

	cpy.Authors = make([]*AuthorAnnotation, len(a.Authors))
	for i := range a.Authors {
		cpy.Authors[i] = a.Authors[i].Copy()
	}

	cpy.Schemas = make([]*SchemaAnnotation, len(a.Schemas))
	for i := range a.Schemas {
		cpy.Schemas[i] = a.Schemas[i].Copy()
	}

	cpy.Custom = deepcopy.Map(a.Custom)

	cpy.node = node

	return &cpy
}

// toObject constructs an AST Object from the annotation.
func (a *Annotations) toObject() (*Object, *Error) {
	obj := NewObject()

	if a == nil {
		return &obj, nil
	}

	if len(a.Scope) > 0 {
		obj.Insert(StringTerm("scope"), StringTerm(a.Scope))
	}

	if len(a.Title) > 0 {
		obj.Insert(StringTerm("title"), StringTerm(a.Title))
	}

	if a.Entrypoint {
		obj.Insert(StringTerm("entrypoint"), BooleanTerm(true))
	}

	if len(a.Description) > 0 {
		obj.Insert(StringTerm("description"), StringTerm(a.Description))
	}

	if len(a.Organizations) > 0 {
		orgs := make([]*Term, 0, len(a.Organizations))
		for _, org := range a.Organizations {
			orgs = append(orgs, StringTerm(org))
		}
		obj.Insert(StringTerm("organizations"), ArrayTerm(orgs...))
	}

	if len(a.RelatedResources) > 0 {
		rrs := make([]*Term, 0, len(a.RelatedResources))
		for _, rr := range a.RelatedResources {
			rrObj := NewObject(Item(StringTerm("ref"), StringTerm(rr.Ref.String())))
			if len(rr.Description) > 0 {
				rrObj.Insert(StringTerm("description"), StringTerm(rr.Description))
			}
			rrs = append(rrs, NewTerm(rrObj))
		}
		obj.Insert(StringTerm("related_resources"), ArrayTerm(rrs...))
	}

	if len(a.Authors) > 0 {
		as := make([]*Term, 0, len(a.Authors))
		for _, author := range a.Authors {
			aObj := NewObject()
			if len(author.Name) > 0 {
				aObj.Insert(StringTerm("name"), StringTerm(author.Name))
			}
			if len(author.Email) > 0 {
				aObj.Insert(StringTerm("email"), StringTerm(author.Email))
			}
			as = append(as, NewTerm(aObj))
		}
		obj.Insert(StringTerm("authors"), ArrayTerm(as...))
	}

	if len(a.Schemas) > 0 {
		ss := make([]*Term, 0, len(a.Schemas))
		for _, s := range a.Schemas {
			sObj := NewObject()
			if len(s.Path) > 0 {
				sObj.Insert(StringTerm("path"), NewTerm(s.Path.toArray()))
			}
			if len(s.Schema) > 0 {
				sObj.Insert(StringTerm("schema"), NewTerm(s.Schema.toArray()))
			}
			if s.Definition != nil {
				def, err := InterfaceToValue(s.Definition)
				if err != nil {
					return nil, NewError(CompileErr, a.Location, "invalid definition in schema annotation: %s", err.Error())
				}
				sObj.Insert(StringTerm("definition"), NewTerm(def))
			}
			ss = append(ss, NewTerm(sObj))
		}
		obj.Insert(StringTerm("schemas"), ArrayTerm(ss...))
	}

	if len(a.Custom) > 0 {
		c, err := InterfaceToValue(a.Custom)
		if err != nil {
			return nil, NewError(CompileErr, a.Location, "invalid custom annotation %s", err.Error())
		}
		obj.Insert(StringTerm("custom"), NewTerm(c))
	}

	return &obj, nil
}

func attachRuleAnnotations(mod *Module) {
	// make a copy of the annotations
	cpy := make([]*Annotations, len(mod.Annotations))
	for i, a := range mod.Annotations {
		cpy[i] = a.Copy(a.node)
	}

	for _, rule := range mod.Rules {
		var j int
		var found bool
		for i, a := range cpy {
			if rule.Ref().GroundPrefix().Equal(a.GetTargetPath()) {
				if a.Scope == annotationScopeDocument {
					rule.Annotations = append(rule.Annotations, a)
				} else if a.Scope == annotationScopeRule && rule.Loc().Row > a.Location.Row {
					j = i
					found = true
					rule.Annotations = append(rule.Annotations, a)
				}
			}
		}

		if found && j < len(cpy) {
			cpy = append(cpy[:j], cpy[j+1:]...)
		}
	}
}

func attachAnnotationsNodes(mod *Module) Errors {
	var errs Errors

	// Find first non-annotation statement following each annotation and attach
	// the annotation to that statement.
	for _, a := range mod.Annotations {
		for _, stmt := range mod.stmts {
			_, ok := stmt.(*Annotations)
			if !ok {
				if stmt.Loc().Row > a.Location.Row {
					a.node = stmt
					break
				}
			}
		}

		if a.Scope == "" {
			switch a.node.(type) {
			case *Rule:
				if a.Entrypoint {
					a.Scope = annotationScopeDocument
				} else {
					a.Scope = annotationScopeRule
				}
			case *Package:
				a.Scope = annotationScopePackage
			case *Import:
				a.Scope = annotationScopeImport
			}
		}

		if err := validateAnnotationScopeAttachment(a); err != nil {
			errs = append(errs, err)
		}

		if err := validateAnnotationEntrypointAttachment(a); err != nil {
			errs = append(errs, err)
		}
	}

	return errs
}

func validateAnnotationScopeAttachment(a *Annotations) *Error {

	switch a.Scope {
	case annotationScopeRule, annotationScopeDocument:
		if _, ok := a.node.(*Rule); ok {
			return nil
		}
		return newScopeAttachmentErr(a, "rule")
	case annotationScopePackage, annotationScopeSubpackages:
		if _, ok := a.node.(*Package); ok {
			return nil
		}
		return newScopeAttachmentErr(a, "package")
	}

	return NewError(ParseErr, a.Loc(), "invalid annotation scope '%v'. Use one of '%s', '%s', '%s', or '%s'",
		a.Scope, annotationScopeRule, annotationScopeDocument, annotationScopePackage, annotationScopeSubpackages)
}

func validateAnnotationEntrypointAttachment(a *Annotations) *Error {
	if a.Entrypoint && !(a.Scope == annotationScopeDocument || a.Scope == annotationScopePackage) {
		return NewError(
			ParseErr, a.Loc(), "annotation entrypoint applied to non-document or package scope '%v'", a.Scope)
	}
	return nil
}

// Copy returns a deep copy of a.
func (a *AuthorAnnotation) Copy() *AuthorAnnotation {
	cpy := *a
	return &cpy
}

// Compare returns an integer indicating if a is less than, equal to, or greater
// than other.
func (a *AuthorAnnotation) Compare(other *AuthorAnnotation) int {
	if cmp := strings.Compare(a.Name, other.Name); cmp != 0 {
		return cmp
	}

	if cmp := strings.Compare(a.Email, other.Email); cmp != 0 {
		return cmp
	}

	return 0
}

func (a *AuthorAnnotation) String() string {
	if len(a.Email) == 0 {
		return a.Name
	} else if len(a.Name) == 0 {
		return fmt.Sprintf("<%s>", a.Email)
	}
	return fmt.Sprintf("%s <%s>", a.Name, a.Email)
}

// Copy returns a deep copy of rr.
func (rr *RelatedResourceAnnotation) Copy() *RelatedResourceAnnotation {
	cpy := *rr
	return &cpy
}

// Compare returns an integer indicating if rr is less than, equal to, or greater
// than other.
func (rr *RelatedResourceAnnotation) Compare(other *RelatedResourceAnnotation) int {
	if cmp := strings.Compare(rr.Description, other.Description); cmp != 0 {
		return cmp
	}

	if cmp := strings.Compare(rr.Ref.String(), other.Ref.String()); cmp != 0 {
		return cmp
	}

	return 0
}

func (rr *RelatedResourceAnnotation) String() string {
	bs, _ := json.Marshal(rr)
	return string(bs)
}

func (rr *RelatedResourceAnnotation) MarshalJSON() ([]byte, error) {
	d := map[string]interface{}{
		"ref": rr.Ref.String(),
	}

	if len(rr.Description) > 0 {
		d["description"] = rr.Description
	}

	return json.Marshal(d)
}

// Copy returns a deep copy of s.
func (s *SchemaAnnotation) Copy() *SchemaAnnotation {
	cpy := *s
	return &cpy
}

// Compare returns an integer indicating if s is less than, equal to, or greater
// than other.
func (s *SchemaAnnotation) Compare(other *SchemaAnnotation) int {

	if cmp := s.Path.Compare(other.Path); cmp != 0 {
		return cmp
	}

	if cmp := s.Schema.Compare(other.Schema); cmp != 0 {
		return cmp
	}

	if s.Definition != nil && other.Definition == nil {
		return -1
	} else if s.Definition == nil && other.Definition != nil {
		return 1
	} else if s.Definition != nil && other.Definition != nil {
		return util.Compare(*s.Definition, *other.Definition)
	}

	return 0
}

func (s *SchemaAnnotation) String() string {
	bs, _ := json.Marshal(s)
	return string(bs)
}

func newAnnotationSet() *AnnotationSet {
	return &AnnotationSet{
		byRule:    map[*Rule][]*Annotations{},
		byPackage: map[int]*Annotations{},
		byPath:    newAnnotationTree(),
	}
}

func BuildAnnotationSet(modules []*Module) (*AnnotationSet, Errors) {
	as := newAnnotationSet()
	var errs Errors
	for _, m := range modules {
		for _, a := range m.Annotations {
			if err := as.add(a); err != nil {
				errs = append(errs, err)
			}
		}
	}
	if len(errs) > 0 {
		return nil, errs
	}
	as.modules = modules
	return as, nil
}

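A caller-side sketch of BuildAnnotationSet (hedged: the API names follow the OPA v1 ast package as this editor understands it, and the module source is illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	src := `# METADATA
# title: Example
# description: Deny by default
package example

default allow := false
`
	// ProcessAnnotation makes the parser turn METADATA blocks into Annotations.
	m, err := ast.ParseModuleWithOpts("example.rego", src,
		ast.ParserOptions{ProcessAnnotation: true})
	if err != nil {
		log.Fatal(err)
	}
	as, errs := ast.BuildAnnotationSet([]*ast.Module{m})
	if len(errs) > 0 {
		log.Fatal(errs)
	}
	for _, ref := range as.Flatten() {
		fmt.Println(ref.Path, "→", ref.Annotations.Title)
	}
}
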
||||
// NOTE(philipc): During copy propagation, the underlying Nodes can be
|
||||
// stripped away from the annotations, leading to nil deref panics. We
|
||||
// silently ignore these cases for now, as a workaround.
|
||||
func (as *AnnotationSet) add(a *Annotations) *Error {
|
||||
switch a.Scope {
|
||||
case annotationScopeRule:
|
||||
if rule, ok := a.node.(*Rule); ok {
|
||||
as.byRule[rule] = append(as.byRule[rule], a)
|
||||
}
|
||||
case annotationScopePackage:
|
||||
if pkg, ok := a.node.(*Package); ok {
|
||||
hash := pkg.Path.Hash()
|
||||
if exist, ok := as.byPackage[hash]; ok {
|
||||
return errAnnotationRedeclared(a, exist.Location)
|
||||
}
|
||||
as.byPackage[hash] = a
|
||||
}
|
||||
case annotationScopeDocument:
|
||||
if rule, ok := a.node.(*Rule); ok {
|
||||
path := rule.Ref().GroundPrefix()
|
||||
x := as.byPath.get(path)
|
||||
if x != nil {
|
||||
return errAnnotationRedeclared(a, x.Value.Location)
|
||||
}
|
||||
as.byPath.insert(path, a)
|
||||
}
|
||||
case annotationScopeSubpackages:
|
||||
if pkg, ok := a.node.(*Package); ok {
|
||||
x := as.byPath.get(pkg.Path)
|
||||
if x != nil && x.Value != nil {
|
||||
return errAnnotationRedeclared(a, x.Value.Location)
|
||||
}
|
||||
as.byPath.insert(pkg.Path, a)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (as *AnnotationSet) GetRuleScope(r *Rule) []*Annotations {
|
||||
if as == nil {
|
||||
return nil
|
||||
}
|
||||
return as.byRule[r]
|
||||
}
|
||||
|
||||
func (as *AnnotationSet) GetSubpackagesScope(path Ref) []*Annotations {
|
||||
if as == nil {
|
||||
return nil
|
||||
}
|
||||
return as.byPath.ancestors(path)
|
||||
}
|
||||
|
||||
func (as *AnnotationSet) GetDocumentScope(path Ref) *Annotations {
|
||||
if as == nil {
|
||||
return nil
|
||||
}
|
||||
if node := as.byPath.get(path); node != nil {
|
||||
return node.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (as *AnnotationSet) GetPackageScope(pkg *Package) *Annotations {
|
||||
if as == nil {
|
||||
return nil
|
||||
}
|
||||
return as.byPackage[pkg.Path.Hash()]
|
||||
}
|
||||
|
||||
// Flatten returns a flattened list view of this AnnotationSet.
|
||||
// The returned slice is sorted, first by the annotations' target path, then by their target location
|
||||
func (as *AnnotationSet) Flatten() FlatAnnotationsRefSet {
|
||||
// This preallocation often won't be optimal, but it's superior to starting with a nil slice.
|
||||
refs := make([]*AnnotationsRef, 0, len(as.byPath.Children)+len(as.byRule)+len(as.byPackage))
|
||||
|
||||
refs = as.byPath.flatten(refs)
|
||||
|
||||
for _, a := range as.byPackage {
|
||||
refs = append(refs, NewAnnotationsRef(a))
|
||||
}
|
||||
|
||||
for _, as := range as.byRule {
|
||||
for _, a := range as {
|
||||
refs = append(refs, NewAnnotationsRef(a))
|
||||
}
|
||||
}
|
||||
|
||||
// Sort by path, then annotation location, for stable output
|
||||
sort.SliceStable(refs, func(i, j int) bool {
|
||||
return refs[i].Compare(refs[j]) < 0
|
||||
})
|
||||
|
||||
return refs
|
||||
}
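Sketch (editorial addition, not part of the upstream diff; assumes `ast` and `fmt` imports): enumerating every annotation in the stable path/location order that Flatten guarantees.

	func listAnnotations(as *ast.AnnotationSet) {
		for _, entry := range as.Flatten() {
			fmt.Println(entry.Path.String(), "-", entry.Annotations.Title)
		}
	}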
// Chain returns the chain of annotations leading up to the given rule.
// The returned slice is ordered as follows:
// 0. Entries for the given rule, ordered from the METADATA block declared immediately above the rule, to the block declared farthest away (always at least one entry)
// 1. The 'document' scope entry, if any
// 2. The 'package' scope entry, if any
// 3. Entries for the 'subpackages' scope, if any; ordered from the closest package path to the farthest. E.g.: 'do.re.mi', 'do.re', 'do'
// The returned slice is guaranteed to always contain at least one entry, corresponding to the given rule.
func (as *AnnotationSet) Chain(rule *Rule) AnnotationsRefSet {
	var refs []*AnnotationsRef

	ruleAnnots := as.GetRuleScope(rule)

	if len(ruleAnnots) >= 1 {
		for _, a := range ruleAnnots {
			refs = append(refs, NewAnnotationsRef(a))
		}
	} else {
		// Make sure there is always a leading entry representing the passed rule, even if it has no annotations.
		refs = append(refs, &AnnotationsRef{
			Location: rule.Location,
			Path:     rule.Ref().GroundPrefix(),
			node:     rule,
		})
	}

	if len(refs) > 1 {
		// Sort by annotation location; the chain must start with the annotations declared closest to the rule, then go outward.
		sort.SliceStable(refs, func(i, j int) bool {
			return refs[i].Annotations.Location.Compare(refs[j].Annotations.Location) > 0
		})
	}

	docAnnots := as.GetDocumentScope(rule.Ref().GroundPrefix())
	if docAnnots != nil {
		refs = append(refs, NewAnnotationsRef(docAnnots))
	}

	pkg := rule.Module.Package
	pkgAnnots := as.GetPackageScope(pkg)
	if pkgAnnots != nil {
		refs = append(refs, NewAnnotationsRef(pkgAnnots))
	}

	subPkgAnnots := as.GetSubpackagesScope(pkg.Path)
	// We need to reverse the order, as subPkgAnnots ordering starts at the root,
	// whereas we want to end at the root.
	for i := len(subPkgAnnots) - 1; i >= 0; i-- {
		refs = append(refs, NewAnnotationsRef(subPkgAnnots[i]))
	}

	return refs
}
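Sketch (editorial addition, not part of the upstream diff; assumes `ast` and `fmt` imports): walking a rule's annotation chain from the nearest METADATA block out to the root package.

	func printChain(as *ast.AnnotationSet, rule *ast.Rule) {
		// The first entry always represents the rule itself; outer scopes follow.
		for _, entry := range as.Chain(rule) {
			if entry.Annotations != nil {
				fmt.Println(entry.Annotations.Scope, "at", entry.Path.String())
			}
		}
	}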
func (ars FlatAnnotationsRefSet) Insert(ar *AnnotationsRef) FlatAnnotationsRefSet {
	result := make(FlatAnnotationsRefSet, 0, len(ars)+1)

	// Insertion sort, first by path, then location.
	for i, current := range ars {
		if ar.Compare(current) < 0 {
			result = append(result, ar)
			result = append(result, ars[i:]...)
			break
		}
		result = append(result, current)
	}

	if len(result) < len(ars)+1 {
		result = append(result, ar)
	}

	return result
}

func newAnnotationTree() *annotationTreeNode {
	return &annotationTreeNode{
		Value:    nil,
		Children: map[Value]*annotationTreeNode{},
	}
}

func (t *annotationTreeNode) insert(path Ref, value *Annotations) {
	node := t
	for _, k := range path {
		child, ok := node.Children[k.Value]
		if !ok {
			child = newAnnotationTree()
			node.Children[k.Value] = child
		}
		node = child
	}
	node.Value = value
}

func (t *annotationTreeNode) get(path Ref) *annotationTreeNode {
	node := t
	for _, k := range path {
		if node == nil {
			return nil
		}
		child, ok := node.Children[k.Value]
		if !ok {
			return nil
		}
		node = child
	}
	return node
}

// ancestors returns a slice of annotations in ascending order, starting with the root of ref; e.g.: 'root', 'root.foo', 'root.foo.bar'.
func (t *annotationTreeNode) ancestors(path Ref) (result []*Annotations) {
	node := t
	for _, k := range path {
		if node == nil {
			return result
		}
		child, ok := node.Children[k.Value]
		if !ok {
			return result
		}
		if child.Value != nil {
			result = append(result, child.Value)
		}
		node = child
	}
	return result
}

func (t *annotationTreeNode) flatten(refs []*AnnotationsRef) []*AnnotationsRef {
	if a := t.Value; a != nil {
		refs = append(refs, NewAnnotationsRef(a))
	}
	for _, c := range t.Children {
		refs = c.flatten(refs)
	}
	return refs
}

func (ar *AnnotationsRef) Compare(other *AnnotationsRef) int {
	if c := ar.Path.Compare(other.Path); c != 0 {
		return c
	}

	if c := ar.Annotations.Location.Compare(other.Annotations.Location); c != 0 {
		return c
	}

	return ar.Annotations.Compare(other.Annotations)
	return v1.NewAnnotationsRef(a)
}
3153 vendor/github.com/open-policy-agent/opa/ast/builtins.go (generated, vendored)
File diff suppressed because it is too large

200 vendor/github.com/open-policy-agent/opa/ast/capabilities.go (generated, vendored)
@@ -5,228 +5,54 @@
package ast

import (
	"bytes"
	_ "embed"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"sort"
	"strings"

	caps "github.com/open-policy-agent/opa/capabilities"
	"github.com/open-policy-agent/opa/internal/semver"
	"github.com/open-policy-agent/opa/internal/wasm/sdk/opa/capabilities"
	"github.com/open-policy-agent/opa/util"
	v1 "github.com/open-policy-agent/opa/v1/ast"
)

// VersionIndex contains an index from built-in function name, language feature,
// and future rego keyword to version number. During the build, this is used to
// create an index of the minimum version required for the built-in/feature/kw.
type VersionIndex struct {
	Builtins map[string]semver.Version `json:"builtins"`
	Features map[string]semver.Version `json:"features"`
	Keywords map[string]semver.Version `json:"keywords"`
}

// NOTE(tsandall): this file is generated by internal/cmd/genversionindex/main.go
// and run as part of go:generate. We generate the version index as part of the
// build process because it's relatively expensive to build (it takes ~500ms on
// my machine) and never changes.
//
//go:embed version_index.json
var versionIndexBs []byte

var minVersionIndex = func() VersionIndex {
	var vi VersionIndex
	err := json.Unmarshal(versionIndexBs, &vi)
	if err != nil {
		panic(err)
	}
	return vi
}()
type VersionIndex = v1.VersionIndex

// In the compiler, we used this to check that we're OK working with ref heads.
// If this isn't present, we'll fail. This is to ensure that older versions of
// OPA can work with policies that we're compiling -- if they don't know ref
// heads, they wouldn't be able to parse them.
const FeatureRefHeadStringPrefixes = "rule_head_ref_string_prefixes"
const FeatureRefHeads = "rule_head_refs"
const FeatureRegoV1Import = "rego_v1_import"
const FeatureRefHeadStringPrefixes = v1.FeatureRefHeadStringPrefixes
const FeatureRefHeads = v1.FeatureRefHeads
const FeatureRegoV1 = v1.FeatureRegoV1
const FeatureRegoV1Import = v1.FeatureRegoV1Import

// Capabilities defines a structure containing data that describes the capabilities
// or features supported by a particular version of OPA.
type Capabilities struct {
	Builtins        []*Builtin       `json:"builtins,omitempty"`
	FutureKeywords  []string         `json:"future_keywords,omitempty"`
	WasmABIVersions []WasmABIVersion `json:"wasm_abi_versions,omitempty"`

	// Features is a bit of a mixed bag for checking that an older version of OPA
	// is able to do what needs to be done.
	// TODO(sr): find better words ^^
	Features []string `json:"features,omitempty"`

	// allow_net is an array of hostnames or IP addresses, that an OPA instance is
	// allowed to connect to.
	// If omitted, ANY host can be connected to. If empty, NO host can be connected to.
	// As of now, this only controls fetching remote refs for using JSON Schemas in
	// the type checker.
	// TODO(sr): support ports to further restrict connection peers
	// TODO(sr): support restricting `http.send` using the same mechanism (see https://github.com/open-policy-agent/opa/issues/3665)
	AllowNet []string `json:"allow_net,omitempty"`
}
type Capabilities = v1.Capabilities

// WasmABIVersion captures the Wasm ABI version. Its `Minor` version indicates
// backwards-compatible changes.
type WasmABIVersion struct {
	Version int `json:"version"`
	Minor   int `json:"minor_version"`
}
type WasmABIVersion = v1.WasmABIVersion

// CapabilitiesForThisVersion returns the capabilities of this version of OPA.
func CapabilitiesForThisVersion() *Capabilities {
	f := &Capabilities{}

	for _, vers := range capabilities.ABIVersions() {
		f.WasmABIVersions = append(f.WasmABIVersions, WasmABIVersion{Version: vers[0], Minor: vers[1]})
	}

	f.Builtins = make([]*Builtin, len(Builtins))
	copy(f.Builtins, Builtins)
	sort.Slice(f.Builtins, func(i, j int) bool {
		return f.Builtins[i].Name < f.Builtins[j].Name
	})

	for kw := range futureKeywords {
		f.FutureKeywords = append(f.FutureKeywords, kw)
	}
	sort.Strings(f.FutureKeywords)

	f.Features = []string{
		FeatureRefHeadStringPrefixes,
		FeatureRefHeads,
		FeatureRegoV1Import,
	}

	return f
	return v1.CapabilitiesForThisVersion(v1.CapabilitiesRegoVersion(DefaultRegoVersion))
}
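Sketch (editorial addition, not part of the upstream diff; assumes `ast` and `fmt` imports): inspecting the running engine's capabilities.

	func printCapabilities() {
		c := ast.CapabilitiesForThisVersion()
		fmt.Println("builtins:", len(c.Builtins))
		fmt.Println("features:", c.Features)
		fmt.Println("future keywords:", c.FutureKeywords)
	}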
// LoadCapabilitiesJSON loads a JSON serialized capabilities structure from the reader r.
func LoadCapabilitiesJSON(r io.Reader) (*Capabilities, error) {
	d := util.NewJSONDecoder(r)
	var c Capabilities
	return &c, d.Decode(&c)
	return v1.LoadCapabilitiesJSON(r)
}

// LoadCapabilitiesVersion loads a JSON serialized capabilities structure for the specific version.
func LoadCapabilitiesVersion(version string) (*Capabilities, error) {
	cvs, err := LoadCapabilitiesVersions()
	if err != nil {
		return nil, err
	}

	for _, cv := range cvs {
		if cv == version {
			cont, err := caps.FS.ReadFile(cv + ".json")
			if err != nil {
				return nil, err
			}

			return LoadCapabilitiesJSON(bytes.NewReader(cont))
		}
	}
	return nil, fmt.Errorf("no capabilities version found %v", version)
	return v1.LoadCapabilitiesVersion(version)
}

// LoadCapabilitiesFile loads a JSON serialized capabilities structure from a file.
func LoadCapabilitiesFile(file string) (*Capabilities, error) {
	fd, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer fd.Close()
	return LoadCapabilitiesJSON(fd)
	return v1.LoadCapabilitiesFile(file)
}

// LoadCapabilitiesVersions loads all capabilities versions.
func LoadCapabilitiesVersions() ([]string, error) {
	ents, err := caps.FS.ReadDir(".")
	if err != nil {
		return nil, err
	}

	capabilitiesVersions := make([]string, 0, len(ents))
	for _, ent := range ents {
		capabilitiesVersions = append(capabilitiesVersions, strings.Replace(ent.Name(), ".json", "", 1))
	}
	return capabilitiesVersions, nil
}
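Sketch (editorial addition, not part of the upstream diff; the version string is illustrative): the embedded capabilities FS ships one JSON file per released version, so pinning to a past release is a single call.

	func capsFor(version string) (*ast.Capabilities, error) {
		return ast.LoadCapabilitiesVersion(version) // e.g. "v0.70.0"
	}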
// MinimumCompatibleVersion returns the minimum compatible OPA version based on
// the built-ins, features, and keywords in c.
func (c *Capabilities) MinimumCompatibleVersion() (string, bool) {

	var maxVersion semver.Version

	// this is the oldest OPA release that includes capabilities
	if err := maxVersion.Set("0.17.0"); err != nil {
		panic("unreachable")
	}

	for _, bi := range c.Builtins {
		v, ok := minVersionIndex.Builtins[bi.Name]
		if !ok {
			return "", false
		}
		if v.Compare(maxVersion) > 0 {
			maxVersion = v
		}
	}

	for _, kw := range c.FutureKeywords {
		v, ok := minVersionIndex.Keywords[kw]
		if !ok {
			return "", false
		}
		if v.Compare(maxVersion) > 0 {
			maxVersion = v
		}
	}

	for _, feat := range c.Features {
		v, ok := minVersionIndex.Features[feat]
		if !ok {
			return "", false
		}
		if v.Compare(maxVersion) > 0 {
			maxVersion = v
		}
	}

	return maxVersion.String(), true
}
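Sketch (editorial addition, not part of the upstream diff): the result is simply the maximum over the per-builtin, per-keyword, and per-feature minimum versions; the boolean is false when something isn't in the version index at all.

	func minVersion(c *ast.Capabilities) {
		if v, ok := c.MinimumCompatibleVersion(); ok {
			fmt.Println("requires OPA >=", v)
		} else {
			fmt.Println("capabilities include an unknown builtin/keyword/feature")
		}
	}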
func (c *Capabilities) ContainsFeature(feature string) bool {
	for _, f := range c.Features {
		if f == feature {
			return true
		}
	}
	return false
}

// addBuiltinSorted inserts a built-in into c in sorted order. An existing built-in with the same name
// will be overwritten.
func (c *Capabilities) addBuiltinSorted(bi *Builtin) {
	i := sort.Search(len(c.Builtins), func(x int) bool {
		return c.Builtins[x].Name >= bi.Name
	})
	if i < len(c.Builtins) && bi.Name == c.Builtins[i].Name {
		c.Builtins[i] = bi
		return
	}
	c.Builtins = append(c.Builtins, nil)
	copy(c.Builtins[i+1:], c.Builtins[i:])
	c.Builtins[i] = bi
	return v1.LoadCapabilitiesVersions()
}
1307 vendor/github.com/open-policy-agent/opa/ast/check.go (generated, vendored)
File diff suppressed because it is too large

361 vendor/github.com/open-policy-agent/opa/ast/compare.go (generated, vendored)
@@ -5,9 +5,7 @@
package ast

import (
	"encoding/json"
	"fmt"
	"math/big"
	v1 "github.com/open-policy-agent/opa/v1/ast"
)

// Compare returns an integer indicating whether two AST values are less than,
@@ -37,360 +35,5 @@ import (
// is empty.
// Other comparisons are consistent but not defined.
func Compare(a, b interface{}) int {

	if t, ok := a.(*Term); ok {
		if t == nil {
			a = nil
		} else {
			a = t.Value
		}
	}

	if t, ok := b.(*Term); ok {
		if t == nil {
			b = nil
		} else {
			b = t.Value
		}
	}

	if a == nil {
		if b == nil {
			return 0
		}
		return -1
	}
	if b == nil {
		return 1
	}

	sortA := sortOrder(a)
	sortB := sortOrder(b)

	if sortA < sortB {
		return -1
	} else if sortB < sortA {
		return 1
	}

	switch a := a.(type) {
	case Null:
		return 0
	case Boolean:
		b := b.(Boolean)
		if a.Equal(b) {
			return 0
		}
		if !a {
			return -1
		}
		return 1
	case Number:
		if ai, err := json.Number(a).Int64(); err == nil {
			if bi, err := json.Number(b.(Number)).Int64(); err == nil {
				if ai == bi {
					return 0
				}
				if ai < bi {
					return -1
				}
				return 1
			}
		}

		// We use big.Rat for comparing big numbers.
		// It replaces big.Float due to the following reason:
		// big.Float comes with a default precision of 64, and setting a
		// larger precision results in more memory being allocated
		// (regardless of the actual number we are parsing with SetString).
		//
		// Note: If we're so close to zero that big.Float says we are zero, do
		// *not* call (*big.Rat).SetString on the original string; it'll
		// potentially take very long.
		var bigA, bigB *big.Rat
		fa, ok := new(big.Float).SetString(string(a))
		if !ok {
			panic("illegal value")
		}
		if fa.IsInt() {
			if i, _ := fa.Int64(); i == 0 {
				bigA = new(big.Rat).SetInt64(0)
			}
		}
		if bigA == nil {
			bigA, ok = new(big.Rat).SetString(string(a))
			if !ok {
				panic("illegal value")
			}
		}

		fb, ok := new(big.Float).SetString(string(b.(Number)))
		if !ok {
			panic("illegal value")
		}
		if fb.IsInt() {
			if i, _ := fb.Int64(); i == 0 {
				bigB = new(big.Rat).SetInt64(0)
			}
		}
		if bigB == nil {
			bigB, ok = new(big.Rat).SetString(string(b.(Number)))
			if !ok {
				panic("illegal value")
			}
		}

		return bigA.Cmp(bigB)
	case String:
		b := b.(String)
		if a.Equal(b) {
			return 0
		}
		if a < b {
			return -1
		}
		return 1
	case Var:
		b := b.(Var)
		if a.Equal(b) {
			return 0
		}
		if a < b {
			return -1
		}
		return 1
	case Ref:
		b := b.(Ref)
		return termSliceCompare(a, b)
	case *Array:
		b := b.(*Array)
		return termSliceCompare(a.elems, b.elems)
	case *lazyObj:
		return Compare(a.force(), b)
	case *object:
		if x, ok := b.(*lazyObj); ok {
			b = x.force()
		}
		b := b.(*object)
		return a.Compare(b)
	case Set:
		b := b.(Set)
		return a.Compare(b)
	case *ArrayComprehension:
		b := b.(*ArrayComprehension)
		if cmp := Compare(a.Term, b.Term); cmp != 0 {
			return cmp
		}
		return Compare(a.Body, b.Body)
	case *ObjectComprehension:
		b := b.(*ObjectComprehension)
		if cmp := Compare(a.Key, b.Key); cmp != 0 {
			return cmp
		}
		if cmp := Compare(a.Value, b.Value); cmp != 0 {
			return cmp
		}
		return Compare(a.Body, b.Body)
	case *SetComprehension:
		b := b.(*SetComprehension)
		if cmp := Compare(a.Term, b.Term); cmp != 0 {
			return cmp
		}
		return Compare(a.Body, b.Body)
	case Call:
		b := b.(Call)
		return termSliceCompare(a, b)
	case *Expr:
		b := b.(*Expr)
		return a.Compare(b)
	case *SomeDecl:
		b := b.(*SomeDecl)
		return a.Compare(b)
	case *Every:
		b := b.(*Every)
		return a.Compare(b)
	case *With:
		b := b.(*With)
		return a.Compare(b)
	case Body:
		b := b.(Body)
		return a.Compare(b)
	case *Head:
		b := b.(*Head)
		return a.Compare(b)
	case *Rule:
		b := b.(*Rule)
		return a.Compare(b)
	case Args:
		b := b.(Args)
		return termSliceCompare(a, b)
	case *Import:
		b := b.(*Import)
		return a.Compare(b)
	case *Package:
		b := b.(*Package)
		return a.Compare(b)
	case *Annotations:
		b := b.(*Annotations)
		return a.Compare(b)
	case *Module:
		b := b.(*Module)
		return a.Compare(b)
	}
	panic(fmt.Sprintf("illegal value: %T", a))
}
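Sketch (editorial addition, not part of the upstream diff; assumes `ast` and `fmt` imports): Compare orders values by type first (null < boolean < number < string < var < ref < ...), then by value within a type.

	func compareExamples() {
		fmt.Println(ast.Compare(ast.Boolean(true), ast.Number("1"))) // -1: booleans sort before numbers
		fmt.Println(ast.Compare(ast.String("a"), ast.String("b")))   // -1: strings compare lexically
		fmt.Println(ast.Compare(nil, ast.Null{}))                    // -1: nil sorts before everything
	}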
type termSlice []*Term

func (s termSlice) Less(i, j int) bool { return Compare(s[i].Value, s[j].Value) < 0 }
func (s termSlice) Swap(i, j int)      { x := s[i]; s[i] = s[j]; s[j] = x }
func (s termSlice) Len() int           { return len(s) }

func sortOrder(x interface{}) int {
	switch x.(type) {
	case Null:
		return 0
	case Boolean:
		return 1
	case Number:
		return 2
	case String:
		return 3
	case Var:
		return 4
	case Ref:
		return 5
	case *Array:
		return 6
	case Object:
		return 7
	case Set:
		return 8
	case *ArrayComprehension:
		return 9
	case *ObjectComprehension:
		return 10
	case *SetComprehension:
		return 11
	case Call:
		return 12
	case Args:
		return 13
	case *Expr:
		return 100
	case *SomeDecl:
		return 101
	case *Every:
		return 102
	case *With:
		return 110
	case *Head:
		return 120
	case Body:
		return 200
	case *Rule:
		return 1000
	case *Import:
		return 1001
	case *Package:
		return 1002
	case *Annotations:
		return 1003
	case *Module:
		return 10000
	}
	panic(fmt.Sprintf("illegal value: %T", x))
}
func importsCompare(a, b []*Import) int {
	minLen := len(a)
	if len(b) < minLen {
		minLen = len(b)
	}
	for i := 0; i < minLen; i++ {
		if cmp := a[i].Compare(b[i]); cmp != 0 {
			return cmp
		}
	}
	if len(a) < len(b) {
		return -1
	}
	if len(b) < len(a) {
		return 1
	}
	return 0
}

func annotationsCompare(a, b []*Annotations) int {
	minLen := len(a)
	if len(b) < minLen {
		minLen = len(b)
	}
	for i := 0; i < minLen; i++ {
		if cmp := a[i].Compare(b[i]); cmp != 0 {
			return cmp
		}
	}
	if len(a) < len(b) {
		return -1
	}
	if len(b) < len(a) {
		return 1
	}
	return 0
}

func rulesCompare(a, b []*Rule) int {
	minLen := len(a)
	if len(b) < minLen {
		minLen = len(b)
	}
	for i := 0; i < minLen; i++ {
		if cmp := a[i].Compare(b[i]); cmp != 0 {
			return cmp
		}
	}
	if len(a) < len(b) {
		return -1
	}
	if len(b) < len(a) {
		return 1
	}
	return 0
}

func termSliceCompare(a, b []*Term) int {
	minLen := len(a)
	if len(b) < minLen {
		minLen = len(b)
	}
	for i := 0; i < minLen; i++ {
		if cmp := Compare(a[i], b[i]); cmp != 0 {
			return cmp
		}
	}
	if len(a) < len(b) {
		return -1
	} else if len(b) < len(a) {
		return 1
	}
	return 0
}

func withSliceCompare(a, b []*With) int {
	minLen := len(a)
	if len(b) < minLen {
		minLen = len(b)
	}
	for i := 0; i < minLen; i++ {
		if cmp := Compare(a[i], b[i]); cmp != 0 {
			return cmp
		}
	}
	if len(a) < len(b) {
		return -1
	} else if len(b) < len(a) {
		return 1
	}
	return 0
	return v1.Compare(a, b)
}
5811 vendor/github.com/open-policy-agent/opa/ast/compile.go (generated, vendored)
File diff suppressed because it is too large

34 vendor/github.com/open-policy-agent/opa/ast/compilehelper.go (generated, vendored)
@@ -4,41 +4,29 @@
package ast

import v1 "github.com/open-policy-agent/opa/v1/ast"

// CompileModules takes a set of Rego modules represented as strings and
// compiles them for evaluation. The keys of the map are used as filenames.
func CompileModules(modules map[string]string) (*Compiler, error) {
	return CompileModulesWithOpt(modules, CompileOpts{})
	return CompileModulesWithOpt(modules, CompileOpts{
		ParserOptions: ParserOptions{
			RegoVersion: DefaultRegoVersion,
		},
	})
}

// CompileOpts defines a set of options for the compiler.
type CompileOpts struct {
	EnablePrintStatements bool
	ParserOptions         ParserOptions
}
type CompileOpts = v1.CompileOpts

// CompileModulesWithOpt takes a set of Rego modules represented as strings and
// compiles them for evaluation. The keys of the map are used as filenames.
func CompileModulesWithOpt(modules map[string]string, opts CompileOpts) (*Compiler, error) {

	parsed := make(map[string]*Module, len(modules))

	for f, module := range modules {
		var pm *Module
		var err error
		if pm, err = ParseModuleWithOpts(f, module, opts.ParserOptions); err != nil {
			return nil, err
		}
		parsed[f] = pm
	if opts.ParserOptions.RegoVersion == RegoUndefined {
		opts.ParserOptions.RegoVersion = DefaultRegoVersion
	}

	compiler := NewCompiler().WithEnablePrintStatements(opts.EnablePrintStatements)
	compiler.Compile(parsed)

	if compiler.Failed() {
		return nil, compiler.Errors
	}

	return compiler, nil
	return v1.CompileModulesWithOpt(modules, opts)
}
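Sketch (editorial addition, not part of the upstream diff; the module source is illustrative): compiling an in-memory module; the map key becomes the filename in error messages.

	func compileExample() (*ast.Compiler, error) {
		return ast.CompileModules(map[string]string{
			"example.rego": "package example\n\nallow := true\n",
		})
	}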
// MustCompileModules compiles a set of Rego modules represented as strings. If
42 vendor/github.com/open-policy-agent/opa/ast/conflicts.go (generated, vendored)
@@ -5,49 +5,11 @@
package ast

import (
	"strings"
	v1 "github.com/open-policy-agent/opa/v1/ast"
)

// CheckPathConflicts returns a set of errors indicating paths that
// are in conflict with the result of the provided callable.
func CheckPathConflicts(c *Compiler, exists func([]string) (bool, error)) Errors {
	var errs Errors

	root := c.RuleTree.Child(DefaultRootDocument.Value)
	if root == nil {
		return nil
	}

	for _, node := range root.Children {
		errs = append(errs, checkDocumentConflicts(node, exists, nil)...)
	}

	return errs
}

func checkDocumentConflicts(node *TreeNode, exists func([]string) (bool, error), path []string) Errors {

	switch key := node.Key.(type) {
	case String:
		path = append(path, string(key))
	default: // other key types cannot conflict with data
		return nil
	}

	if len(node.Values) > 0 {
		s := strings.Join(path, "/")
		if ok, err := exists(path); err != nil {
			return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflict check for data path %v: %v", s, err.Error())}
		} else if ok {
			return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflicting rule for data path %v found", s)}
		}
	}

	var errs Errors

	for _, child := range node.Children {
		errs = append(errs, checkDocumentConflicts(child, exists, path)...)
	}

	return errs
	return v1.CheckPathConflicts(c, exists)
}
36 vendor/github.com/open-policy-agent/opa/ast/doc.go (generated, vendored)
@@ -1,36 +1,8 @@
// Copyright 2016 The OPA Authors. All rights reserved.
// Copyright 2024 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.

// Package ast declares Rego syntax tree types and also includes a parser and compiler for preparing policies for execution in the policy engine.
//
// Rego policies are defined using a relatively small set of types: modules, package and import declarations, rules, expressions, and terms. At their core, policies consist of rules that are defined by one or more expressions over documents available to the policy engine. The expressions are defined by intrinsic values (terms) such as strings, objects, variables, etc.
//
// Rego policies are typically defined in text files and then parsed and compiled by the policy engine at runtime. The parsing stage takes the text or string representation of the policy and converts it into an abstract syntax tree (AST) that consists of the types mentioned above. The AST is organized as follows:
//
//	Module
//	 |
//	 +--- Package (Reference)
//	 |
//	 +--- Imports
//	 |     |
//	 |     +--- Import (Term)
//	 |
//	 +--- Rules
//	       |
//	       +--- Rule
//	             |
//	             +--- Head
//	             |     |
//	             |     +--- Name (Variable)
//	             |     |
//	             |     +--- Key (Term)
//	             |     |
//	             |     +--- Value (Term)
//	             |
//	             +--- Body
//	                   |
//	                   +--- Expression (Term | Terms | Variable Declaration)
//
// At query time, the policy engine expects policies to have been compiled. The compilation stage takes one or more modules and compiles them into a format that the policy engine supports.
// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended.
// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead.
// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information.
package ast
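Editorial note, not part of the upstream diff: given the deprecation notice above, new code would import the v1 package directly; a minimal sketch of the migration:

	// Before (v0 shim, this package):
	//
	//	import "github.com/open-policy-agent/opa/ast"
	//
	// After (recommended by the deprecation note):
	//
	//	import ast "github.com/open-policy-agent/opa/v1/ast"
	//
	//	module := ast.MustParseModule("package example\n\nallow := true\n") // parses as Rego v1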
518 vendor/github.com/open-policy-agent/opa/ast/env.go (generated, vendored)
@@ -5,522 +5,8 @@
package ast

import (
	"fmt"
	"strings"

	"github.com/open-policy-agent/opa/types"
	"github.com/open-policy-agent/opa/util"
	v1 "github.com/open-policy-agent/opa/v1/ast"
)

// TypeEnv contains type info for static analysis such as type checking.
type TypeEnv struct {
	tree       *typeTreeNode
	next       *TypeEnv
	newChecker func() *typeChecker
}

// newTypeEnv returns an empty TypeEnv. The constructor is not exported because
// type environments should only be created by the type checker.
func newTypeEnv(f func() *typeChecker) *TypeEnv {
	return &TypeEnv{
		tree:       newTypeTree(),
		newChecker: f,
	}
}

// Get returns the type of x.
func (env *TypeEnv) Get(x interface{}) types.Type {

	if term, ok := x.(*Term); ok {
		x = term.Value
	}

	switch x := x.(type) {

	// Scalars.
	case Null:
		return types.NewNull()
	case Boolean:
		return types.NewBoolean()
	case Number:
		return types.NewNumber()
	case String:
		return types.NewString()

	// Composites.
	case *Array:
		static := make([]types.Type, x.Len())
		for i := range static {
			tpe := env.Get(x.Elem(i).Value)
			static[i] = tpe
		}

		var dynamic types.Type
		if len(static) == 0 {
			dynamic = types.A
		}

		return types.NewArray(static, dynamic)

	case *lazyObj:
		return env.Get(x.force())
	case *object:
		static := []*types.StaticProperty{}
		var dynamic *types.DynamicProperty

		x.Foreach(func(k, v *Term) {
			if IsConstant(k.Value) {
				kjson, err := JSON(k.Value)
				if err == nil {
					tpe := env.Get(v)
					static = append(static, types.NewStaticProperty(kjson, tpe))
					return
				}
			}
			// Can't handle it as a static property, fall back to dynamic.
			typeK := env.Get(k.Value)
			typeV := env.Get(v.Value)
			dynamic = types.NewDynamicProperty(typeK, typeV)
		})

		if len(static) == 0 && dynamic == nil {
			dynamic = types.NewDynamicProperty(types.A, types.A)
		}

		return types.NewObject(static, dynamic)

	case Set:
		var tpe types.Type
		x.Foreach(func(elem *Term) {
			other := env.Get(elem.Value)
			tpe = types.Or(tpe, other)
		})
		if tpe == nil {
			tpe = types.A
		}
		return types.NewSet(tpe)

	// Comprehensions.
	case *ArrayComprehension:
		cpy, errs := env.newChecker().CheckBody(env, x.Body)
		if len(errs) == 0 {
			return types.NewArray(nil, cpy.Get(x.Term))
		}
		return nil
	case *ObjectComprehension:
		cpy, errs := env.newChecker().CheckBody(env, x.Body)
		if len(errs) == 0 {
			return types.NewObject(nil, types.NewDynamicProperty(cpy.Get(x.Key), cpy.Get(x.Value)))
		}
		return nil
	case *SetComprehension:
		cpy, errs := env.newChecker().CheckBody(env, x.Body)
		if len(errs) == 0 {
			return types.NewSet(cpy.Get(x.Term))
		}
		return nil

	// Refs.
	case Ref:
		return env.getRef(x)

	// Vars.
	case Var:
		if node := env.tree.Child(x); node != nil {
			return node.Value()
		}
		if env.next != nil {
			return env.next.Get(x)
		}
		return nil

	// Calls.
	case Call:
		return nil

	default:
		panic("unreachable")
	}
}

func (env *TypeEnv) getRef(ref Ref) types.Type {

	node := env.tree.Child(ref[0].Value)
	if node == nil {
		return env.getRefFallback(ref)
	}

	return env.getRefRec(node, ref, ref[1:])
}

func (env *TypeEnv) getRefFallback(ref Ref) types.Type {

	if env.next != nil {
		return env.next.Get(ref)
	}

	if RootDocumentNames.Contains(ref[0]) {
		return types.A
	}

	return nil
}

func (env *TypeEnv) getRefRec(node *typeTreeNode, ref, tail Ref) types.Type {
	if len(tail) == 0 {
		return env.getRefRecExtent(node)
	}

	if node.Leaf() {
		if node.children.Len() > 0 {
			if child := node.Child(tail[0].Value); child != nil {
				return env.getRefRec(child, ref, tail[1:])
			}
		}
		return selectRef(node.Value(), tail)
	}

	if !IsConstant(tail[0].Value) {
		return selectRef(env.getRefRecExtent(node), tail)
	}

	child := node.Child(tail[0].Value)
	if child == nil {
		return env.getRefFallback(ref)
	}

	return env.getRefRec(child, ref, tail[1:])
}

func (env *TypeEnv) getRefRecExtent(node *typeTreeNode) types.Type {

	if node.Leaf() {
		return node.Value()
	}

	children := []*types.StaticProperty{}

	node.Children().Iter(func(k, v util.T) bool {
		key := k.(Value)
		child := v.(*typeTreeNode)

		tpe := env.getRefRecExtent(child)

		// NOTE(sr): Converting to Golang-native types here is an extension of what we did
		// before -- only supporting strings. But since we cannot differentiate sets and arrays
		// that way, we could reconsider.
		switch key.(type) {
		case String, Number, Boolean: // skip anything else
			propKey, err := JSON(key)
			if err != nil {
				panic(fmt.Errorf("unreachable, ValueToInterface: %w", err))
			}
			children = append(children, types.NewStaticProperty(propKey, tpe))
		}
		return false
	})

	// TODO(tsandall): for now, these objects can have any dynamic properties
	// because we don't have schema for base docs. Once schemas are supported
	// we can improve this.
	return types.NewObject(children, types.NewDynamicProperty(types.S, types.A))
}

func (env *TypeEnv) wrap() *TypeEnv {
	cpy := *env
	cpy.next = env
	cpy.tree = newTypeTree()
	return &cpy
}

// typeTreeNode is used to store type information in a tree.
type typeTreeNode struct {
	key      Value
	value    types.Type
	children *util.HashMap
}

func newTypeTree() *typeTreeNode {
	return &typeTreeNode{
		key:      nil,
		value:    nil,
		children: util.NewHashMap(valueEq, valueHash),
	}
}

func (n *typeTreeNode) Child(key Value) *typeTreeNode {
	value, ok := n.children.Get(key)
	if !ok {
		return nil
	}
	return value.(*typeTreeNode)
}

func (n *typeTreeNode) Children() *util.HashMap {
	return n.children
}

func (n *typeTreeNode) Get(path Ref) types.Type {
	curr := n
	for _, term := range path {
		child, ok := curr.children.Get(term.Value)
		if !ok {
			return nil
		}
		curr = child.(*typeTreeNode)
	}
	return curr.Value()
}

func (n *typeTreeNode) Leaf() bool {
	return n.value != nil
}

func (n *typeTreeNode) PutOne(key Value, tpe types.Type) {
	c, ok := n.children.Get(key)

	var child *typeTreeNode
	if !ok {
		child = newTypeTree()
		child.key = key
		n.children.Put(key, child)
	} else {
		child = c.(*typeTreeNode)
	}

	child.value = tpe
}

func (n *typeTreeNode) Put(path Ref, tpe types.Type) {
	curr := n
	for _, term := range path {
		c, ok := curr.children.Get(term.Value)

		var child *typeTreeNode
		if !ok {
			child = newTypeTree()
			child.key = term.Value
			curr.children.Put(child.key, child)
		} else {
			child = c.(*typeTreeNode)
		}

		curr = child
	}
	curr.value = tpe
}

// Insert inserts tpe at path in the tree, but also merges the value into any types.Object present along that path.
// If a types.Object is inserted, any leafs already present further down the tree are merged into the inserted object.
// path must be ground.
func (n *typeTreeNode) Insert(path Ref, tpe types.Type, env *TypeEnv) {
	curr := n
	for i, term := range path {
		c, ok := curr.children.Get(term.Value)

		var child *typeTreeNode
		if !ok {
			child = newTypeTree()
			child.key = term.Value
			curr.children.Put(child.key, child)
		} else {
			child = c.(*typeTreeNode)

			if child.value != nil && i+1 < len(path) {
				// If child has an object value, merge the new value into it.
				if o, ok := child.value.(*types.Object); ok {
					var err error
					child.value, err = insertIntoObject(o, path[i+1:], tpe, env)
					if err != nil {
						panic(fmt.Errorf("unreachable, insertIntoObject: %w", err))
					}
				}
			}
		}

		curr = child
	}

	curr.value = mergeTypes(curr.value, tpe)

	if _, ok := tpe.(*types.Object); ok && curr.children.Len() > 0 {
		// Merge all leafs into the inserted object.
		leafs := curr.Leafs()
		for p, t := range leafs {
			var err error
			curr.value, err = insertIntoObject(curr.value.(*types.Object), *p, t, env)
			if err != nil {
				panic(fmt.Errorf("unreachable, insertIntoObject: %w", err))
			}
		}
	}
}

// mergeTypes merges the types of 'a' and 'b'. If both are sets, their 'of' types are joined with a types.Or.
// If both are objects, the key types of their dynamic properties are joined with types.Or:s, and their value types
// are recursively merged (using mergeTypes).
// If 'a' and 'b' are both objects, and at least one of them has static properties, they are joined
// with a types.Or, instead of being merged.
// If 'a' is an Any containing an Object, and 'b' is an Object (or vice versa); AND both objects have no
// static properties, they are merged.
// If 'a' and 'b' are different types, they are joined with a types.Or.
func mergeTypes(a, b types.Type) types.Type {
	if a == nil {
		return b
	}

	if b == nil {
		return a
	}

	switch a := a.(type) {
	case *types.Object:
		if bObj, ok := b.(*types.Object); ok && len(a.StaticProperties()) == 0 && len(bObj.StaticProperties()) == 0 {
			if len(a.StaticProperties()) > 0 || len(bObj.StaticProperties()) > 0 {
				return types.Or(a, bObj)
			}

			aDynProps := a.DynamicProperties()
			bDynProps := bObj.DynamicProperties()
			dynProps := types.NewDynamicProperty(
				types.Or(aDynProps.Key, bDynProps.Key),
				mergeTypes(aDynProps.Value, bDynProps.Value))
			return types.NewObject(nil, dynProps)
		} else if bAny, ok := b.(types.Any); ok && len(a.StaticProperties()) == 0 {
			// If a is an object type with no static components ...
			for _, t := range bAny {
				if tObj, ok := t.(*types.Object); ok && len(tObj.StaticProperties()) == 0 {
					// ... and b is a types.Any containing an object with no static components, we merge them.
					aDynProps := a.DynamicProperties()
					tDynProps := tObj.DynamicProperties()
					tDynProps.Key = types.Or(tDynProps.Key, aDynProps.Key)
					tDynProps.Value = types.Or(tDynProps.Value, aDynProps.Value)
					return bAny
				}
			}
		}
	case *types.Set:
		if bSet, ok := b.(*types.Set); ok {
			return types.NewSet(types.Or(a.Of(), bSet.Of()))
		}
	case types.Any:
		if _, ok := b.(types.Any); !ok {
			return mergeTypes(b, a)
		}
	}

	return types.Or(a, b)
}
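Sketch (editorial addition, not part of the upstream diff): mergeTypes is unexported, so this illustration is in-package; it shows two dynamic-only object types collapsing into one object whose value types are unioned.

	func mergeTypesExample() types.Type {
		a := types.NewObject(nil, types.NewDynamicProperty(types.S, types.N)) // object[string: number]
		b := types.NewObject(nil, types.NewDynamicProperty(types.S, types.B)) // object[string: boolean]
		return mergeTypes(a, b) // object[string: number|boolean]
	}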
func (n *typeTreeNode) String() string {
	b := strings.Builder{}

	if k := n.key; k != nil {
		b.WriteString(k.String())
	} else {
		b.WriteString("-")
	}

	if v := n.value; v != nil {
		b.WriteString(": ")
		b.WriteString(v.String())
	}

	n.children.Iter(func(_, v util.T) bool {
		if child, ok := v.(*typeTreeNode); ok {
			b.WriteString("\n\t+ ")
			s := child.String()
			s = strings.ReplaceAll(s, "\n", "\n\t")
			b.WriteString(s)
		}
		return false
	})

	return b.String()
}

func insertIntoObject(o *types.Object, path Ref, tpe types.Type, env *TypeEnv) (*types.Object, error) {
	if len(path) == 0 {
		return o, nil
	}

	key := env.Get(path[0].Value)

	if len(path) == 1 {
		var dynamicProps *types.DynamicProperty
		if dp := o.DynamicProperties(); dp != nil {
			dynamicProps = types.NewDynamicProperty(types.Or(o.DynamicProperties().Key, key), types.Or(o.DynamicProperties().Value, tpe))
		} else {
			dynamicProps = types.NewDynamicProperty(key, tpe)
		}
		return types.NewObject(o.StaticProperties(), dynamicProps), nil
	}

	child, err := insertIntoObject(types.NewObject(nil, nil), path[1:], tpe, env)
	if err != nil {
		return nil, err
	}

	var dynamicProps *types.DynamicProperty
	if dp := o.DynamicProperties(); dp != nil {
		dynamicProps = types.NewDynamicProperty(types.Or(o.DynamicProperties().Key, key), types.Or(o.DynamicProperties().Value, child))
	} else {
		dynamicProps = types.NewDynamicProperty(key, child)
	}
	return types.NewObject(o.StaticProperties(), dynamicProps), nil
}

func (n *typeTreeNode) Leafs() map[*Ref]types.Type {
	leafs := map[*Ref]types.Type{}
	n.children.Iter(func(_, v util.T) bool {
		collectLeafs(v.(*typeTreeNode), nil, leafs)
		return false
	})
	return leafs
}

func collectLeafs(n *typeTreeNode, path Ref, leafs map[*Ref]types.Type) {
	nPath := append(path, NewTerm(n.key))
	if n.Leaf() {
		leafs[&nPath] = n.Value()
		return
	}
	n.children.Iter(func(_, v util.T) bool {
		collectLeafs(v.(*typeTreeNode), nPath, leafs)
		return false
	})
}

func (n *typeTreeNode) Value() types.Type {
	return n.value
}

// selectConstant returns the attribute of the type referred to by the term. If
// the attribute type cannot be determined, nil is returned.
func selectConstant(tpe types.Type, term *Term) types.Type {
	x, err := JSON(term.Value)
	if err == nil {
		return types.Select(tpe, x)
	}
	return nil
}

// selectRef returns the type of the nested attribute referred to by ref. If
// the attribute type cannot be determined, nil is returned. If the ref
// contains vars or refs, then the returned type will be a union of the
// possible types.
func selectRef(tpe types.Type, ref Ref) types.Type {

	if tpe == nil || len(ref) == 0 {
		return tpe
	}

	head, tail := ref[0], ref[1:]

	switch head.Value.(type) {
	case Var, Ref, *Array, Object, Set:
		return selectRef(types.Values(tpe), tail)
	default:
		return selectRef(selectConstant(tpe, head), tail)
	}
}
type TypeEnv = v1.TypeEnv
99 vendor/github.com/open-policy-agent/opa/ast/errors.go (generated, vendored)
@@ -5,119 +5,42 @@
package ast

import (
	"fmt"
	"sort"
	"strings"
	v1 "github.com/open-policy-agent/opa/v1/ast"
)

// Errors represents a series of errors encountered during parsing, compiling,
// etc.
type Errors []*Error

func (e Errors) Error() string {

	if len(e) == 0 {
		return "no error(s)"
	}

	if len(e) == 1 {
		return fmt.Sprintf("1 error occurred: %v", e[0].Error())
	}

	s := make([]string, len(e))
	for i, err := range e {
		s[i] = err.Error()
	}

	return fmt.Sprintf("%d errors occurred:\n%s", len(e), strings.Join(s, "\n"))
}

// Sort sorts the error slice by location. If the locations are equal then the
// error message is compared.
func (e Errors) Sort() {
	sort.Slice(e, func(i, j int) bool {
		a := e[i]
		b := e[j]

		if cmp := a.Location.Compare(b.Location); cmp != 0 {
			return cmp < 0
		}

		return a.Error() < b.Error()
	})
}
type Errors = v1.Errors

const (
	// ParseErr indicates an unclassified parse error occurred.
	ParseErr = "rego_parse_error"
	ParseErr = v1.ParseErr

	// CompileErr indicates an unclassified compile error occurred.
	CompileErr = "rego_compile_error"
	CompileErr = v1.CompileErr

	// TypeErr indicates a type error was caught.
	TypeErr = "rego_type_error"
	TypeErr = v1.TypeErr

	// UnsafeVarErr indicates an unsafe variable was found during compilation.
	UnsafeVarErr = "rego_unsafe_var_error"
	UnsafeVarErr = v1.UnsafeVarErr

	// RecursionErr indicates recursion was found during compilation.
	RecursionErr = "rego_recursion_error"
	RecursionErr = v1.RecursionErr
)

// IsError returns true if err is an AST error with code.
func IsError(code string, err error) bool {
	if err, ok := err.(*Error); ok {
		return err.Code == code
	}
	return false
	return v1.IsError(code, err)
}
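Sketch (editorial addition, not part of the upstream diff): classifying an error by its code. IsError matches a single *ast.Error; compile runs usually return ast.Errors, whose entries can be checked individually.

	func isParseError(err error) bool {
		return ast.IsError(ast.ParseErr, err)
	}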
// ErrorDetails defines the interface for detailed error messages.
type ErrorDetails interface {
	Lines() []string
}
type ErrorDetails = v1.ErrorDetails

// Error represents a single error caught during parsing, compiling, etc.
type Error struct {
	Code     string       `json:"code"`
	Message  string       `json:"message"`
	Location *Location    `json:"location,omitempty"`
	Details  ErrorDetails `json:"details,omitempty"`
}

func (e *Error) Error() string {

	var prefix string

	if e.Location != nil {

		if len(e.Location.File) > 0 {
			prefix += e.Location.File + ":" + fmt.Sprint(e.Location.Row)
		} else {
			prefix += fmt.Sprint(e.Location.Row) + ":" + fmt.Sprint(e.Location.Col)
		}
	}

	msg := fmt.Sprintf("%v: %v", e.Code, e.Message)

	if len(prefix) > 0 {
		msg = prefix + ": " + msg
	}

	if e.Details != nil {
		for _, line := range e.Details.Lines() {
			msg += "\n\t" + line
		}
	}

	return msg
}
type Error = v1.Error

// NewError returns a new Error object.
func NewError(code string, loc *Location, f string, a ...interface{}) *Error {
	return &Error{
		Code:     code,
		Location: loc,
		Message:  fmt.Sprintf(f, a...),
	}
	return v1.NewError(code, loc, f, a...)
}
896 vendor/github.com/open-policy-agent/opa/ast/index.go (generated, vendored)
@@ -5,904 +5,16 @@
package ast

import (
	"fmt"
	"sort"
	"strings"

	"github.com/open-policy-agent/opa/util"
	v1 "github.com/open-policy-agent/opa/v1/ast"
)

// RuleIndex defines the interface for rule indices.
type RuleIndex interface {

	// Build tries to construct an index for the given rules. If the index was
	// constructed, it returns true, otherwise false.
	Build(rules []*Rule) bool

	// Lookup searches the index for rules that will match the provided
	// resolver. If the resolver returns an error, it is returned via err.
	Lookup(resolver ValueResolver) (*IndexResult, error)

	// AllRules traverses the index and returns all rules that will match
	// the provided resolver without any optimizations (effectively with
	// indexing disabled). If the resolver returns an error, it is returned
	// via err.
	AllRules(resolver ValueResolver) (*IndexResult, error)
}
type RuleIndex v1.RuleIndex

// IndexResult contains the result of an index lookup.
type IndexResult struct {
	Kind           RuleKind
	Rules          []*Rule
	Else           map[*Rule][]*Rule
	Default        *Rule
	EarlyExit      bool
	OnlyGroundRefs bool
}
type IndexResult = v1.IndexResult

// NewIndexResult returns a new IndexResult object.
func NewIndexResult(kind RuleKind) *IndexResult {
	return &IndexResult{
		Kind: kind,
		Else: map[*Rule][]*Rule{},
	}
}

// Empty returns true if there are no rules to evaluate.
func (ir *IndexResult) Empty() bool {
	return len(ir.Rules) == 0 && ir.Default == nil
}

type baseDocEqIndex struct {
	skipIndexing   Set
	isVirtual      func(Ref) bool
	root           *trieNode
	defaultRule    *Rule
	kind           RuleKind
	onlyGroundRefs bool
}

func newBaseDocEqIndex(isVirtual func(Ref) bool) *baseDocEqIndex {
	return &baseDocEqIndex{
		skipIndexing:   NewSet(NewTerm(InternalPrint.Ref())),
		isVirtual:      isVirtual,
		root:           newTrieNodeImpl(),
		onlyGroundRefs: true,
	}
}

func (i *baseDocEqIndex) Build(rules []*Rule) bool {
	if len(rules) == 0 {
		return false
	}

	i.kind = rules[0].Head.RuleKind()
	indices := newrefindices(i.isVirtual)

	// Build indices for each rule.
	for idx := range rules {
		WalkRules(rules[idx], func(rule *Rule) bool {
			if rule.Default {
				i.defaultRule = rule
				return false
			}
			if i.onlyGroundRefs {
				i.onlyGroundRefs = rule.Head.Reference.IsGround()
			}
			var skip bool
			for _, expr := range rule.Body {
				if op := expr.OperatorTerm(); op != nil && i.skipIndexing.Contains(op) {
					skip = true
					break
				}
			}
			if !skip {
				for _, expr := range rule.Body {
					indices.Update(rule, expr)
				}
			}
			return false
		})
	}

	// Build trie out of indices.
	for idx := range rules {
		var prio int
		WalkRules(rules[idx], func(rule *Rule) bool {
			if rule.Default {
				return false
			}
			node := i.root
			if indices.Indexed(rule) {
				for _, ref := range indices.Sorted() {
					node = node.Insert(ref, indices.Value(rule, ref), indices.Mapper(rule, ref))
				}
			}
			// Insert rule into trie with (insertion order, priority order)
			// tuple. Retaining the insertion order allows us to return rules
			// in the order they were passed to this function.
			node.append([...]int{idx, prio}, rule)
			prio++
			return false
		})
	}
	return true
}

func (i *baseDocEqIndex) Lookup(resolver ValueResolver) (*IndexResult, error) {

	tr := newTrieTraversalResult()

	err := i.root.Traverse(resolver, tr)
	if err != nil {
		return nil, err
	}

	result := NewIndexResult(i.kind)
	result.Default = i.defaultRule
	result.OnlyGroundRefs = i.onlyGroundRefs
	result.Rules = make([]*Rule, 0, len(tr.ordering))

	for _, pos := range tr.ordering {
		sort.Slice(tr.unordered[pos], func(i, j int) bool {
			return tr.unordered[pos][i].prio[1] < tr.unordered[pos][j].prio[1]
		})
		nodes := tr.unordered[pos]
		root := nodes[0].rule

		result.Rules = append(result.Rules, root)
		if len(nodes) > 1 {
			result.Else[root] = make([]*Rule, len(nodes)-1)
			for i := 1; i < len(nodes); i++ {
				result.Else[root][i-1] = nodes[i].rule
			}
		}
	}

	result.EarlyExit = tr.values.Len() == 1 && tr.values.Slice()[0].IsGround()

	return result, nil
}

func (i *baseDocEqIndex) AllRules(_ ValueResolver) (*IndexResult, error) {
	tr := newTrieTraversalResult()

	// Walk over the rule trie and accumulate _all_ rules.
	rw := &ruleWalker{result: tr}
	i.root.Do(rw)

	result := NewIndexResult(i.kind)
	result.Default = i.defaultRule
	result.OnlyGroundRefs = i.onlyGroundRefs
	result.Rules = make([]*Rule, 0, len(tr.ordering))

	for _, pos := range tr.ordering {
		sort.Slice(tr.unordered[pos], func(i, j int) bool {
			return tr.unordered[pos][i].prio[1] < tr.unordered[pos][j].prio[1]
		})
		nodes := tr.unordered[pos]
		root := nodes[0].rule
		result.Rules = append(result.Rules, root)
		if len(nodes) > 1 {
			result.Else[root] = make([]*Rule, len(nodes)-1)
			for i := 1; i < len(nodes); i++ {
				result.Else[root][i-1] = nodes[i].rule
			}
		}
	}

	result.EarlyExit = tr.values.Len() == 1 && tr.values.Slice()[0].IsGround()

	return result, nil
}

type ruleWalker struct {
	result *trieTraversalResult
}

func (r *ruleWalker) Do(x interface{}) trieWalker {
	tn := x.(*trieNode)
	r.result.Add(tn)
	return r
}

type valueMapper struct {
	Key      string
	MapValue func(Value) Value
}

type refindex struct {
	Ref    Ref
	Value  Value
	Mapper *valueMapper
}

type refindices struct {
	isVirtual func(Ref) bool
	rules     map[*Rule][]*refindex
	frequency *util.HashMap
	sorted    []Ref
}

func newrefindices(isVirtual func(Ref) bool) *refindices {
	return &refindices{
		isVirtual: isVirtual,
		rules:     map[*Rule][]*refindex{},
		frequency: util.NewHashMap(func(a, b util.T) bool {
			r1, r2 := a.(Ref), b.(Ref)
			return r1.Equal(r2)
		}, func(x util.T) int {
			return x.(Ref).Hash()
		}),
	}
}

// Update attempts to update the refindices for the given expression in the
// given rule. If the expression cannot be indexed the update does not affect
// the indices.
func (i *refindices) Update(rule *Rule, expr *Expr) {

	if expr.Negated {
		return
	}

	if len(expr.With) > 0 {
		// NOTE(tsandall): In the future, we may need to consider expressions
		// that have with statements applied to them.
		return
	}

	op := expr.Operator()

	switch {
	case op.Equal(Equality.Ref()):
		i.updateEq(rule, expr)

	case op.Equal(Equal.Ref()) && len(expr.Operands()) == 2:
		// NOTE(tsandall): if equal() is called with more than two arguments the
		// output value is being captured in which case the indexer cannot
		// exclude the rule if the equal() call would return false (because the
		// false value must still be produced.)
		i.updateEq(rule, expr)

	case op.Equal(GlobMatch.Ref()) && len(expr.Operands()) == 3:
		// NOTE(sr): Same as with equal() above -- 4 operands means the output
		// of `glob.match` is captured and the rule can thus not be excluded.
		i.updateGlobMatch(rule, expr)
	}
}

// Sorted returns a sorted list of references that the indices were built from.
// References that appear more frequently in the indexed rules are ordered
// before less frequently appearing references.
func (i *refindices) Sorted() []Ref {

	if i.sorted == nil {
		counts := make([]int, 0, i.frequency.Len())
		i.sorted = make([]Ref, 0, i.frequency.Len())

		i.frequency.Iter(func(k, v util.T) bool {
			counts = append(counts, v.(int))
			i.sorted = append(i.sorted, k.(Ref))
			return false
		})

		sort.Slice(i.sorted, func(a, b int) bool {
			if counts[a] > counts[b] {
				return true
			} else if counts[b] > counts[a] {
				return false
			}
			return i.sorted[a][0].Loc().Compare(i.sorted[b][0].Loc()) < 0
		})
	}

	return i.sorted
}
func (i *refindices) Indexed(rule *Rule) bool {
|
||||
return len(i.rules[rule]) > 0
|
||||
}
|
||||
|
||||
func (i *refindices) Value(rule *Rule, ref Ref) Value {
|
||||
if index := i.index(rule, ref); index != nil {
|
||||
return index.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *refindices) Mapper(rule *Rule, ref Ref) *valueMapper {
|
||||
if index := i.index(rule, ref); index != nil {
|
||||
return index.Mapper
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *refindices) updateEq(rule *Rule, expr *Expr) {
|
||||
a, b := expr.Operand(0), expr.Operand(1)
|
||||
args := rule.Head.Args
|
||||
if idx, ok := eqOperandsToRefAndValue(i.isVirtual, args, a, b); ok {
|
||||
i.insert(rule, idx)
|
||||
return
|
||||
}
|
||||
if idx, ok := eqOperandsToRefAndValue(i.isVirtual, args, b, a); ok {
|
||||
i.insert(rule, idx)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (i *refindices) updateGlobMatch(rule *Rule, expr *Expr) {
|
||||
args := rule.Head.Args
|
||||
|
||||
delim, ok := globDelimiterToString(expr.Operand(1))
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if arr := globPatternToArray(expr.Operand(0), delim); arr != nil {
|
||||
// The 3rd operand of glob.match is the value to match. We assume the
|
||||
// 3rd operand was a reference that has been rewritten and bound to a
|
||||
// variable earlier in the query OR a function argument variable.
|
||||
match := expr.Operand(2)
|
||||
if _, ok := match.Value.(Var); ok {
|
||||
var ref Ref
|
||||
for _, other := range i.rules[rule] {
|
||||
if _, ok := other.Value.(Var); ok && other.Value.Compare(match.Value) == 0 {
|
||||
ref = other.Ref
|
||||
}
|
||||
}
|
||||
if ref == nil {
|
||||
for j, arg := range args {
|
||||
if arg.Equal(match) {
|
||||
ref = Ref{FunctionArgRootDocument, IntNumberTerm(j)}
|
||||
}
|
||||
}
|
||||
}
|
||||
if ref != nil {
|
||||
i.insert(rule, &refindex{
|
||||
Ref: ref,
|
||||
Value: arr.Value,
|
||||
Mapper: &valueMapper{
|
||||
Key: delim,
|
||||
MapValue: func(v Value) Value {
|
||||
if s, ok := v.(String); ok {
|
||||
return stringSliceToArray(splitStringEscaped(string(s), delim))
|
||||
}
|
||||
return v
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (i *refindices) insert(rule *Rule, index *refindex) {
|
||||
|
||||
count, ok := i.frequency.Get(index.Ref)
|
||||
if !ok {
|
||||
count = 0
|
||||
}
|
||||
|
||||
i.frequency.Put(index.Ref, count.(int)+1)
|
||||
|
||||
for pos, other := range i.rules[rule] {
|
||||
if other.Ref.Equal(index.Ref) {
|
||||
i.rules[rule][pos] = index
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
i.rules[rule] = append(i.rules[rule], index)
|
||||
}
|
||||
|
||||
func (i *refindices) index(rule *Rule, ref Ref) *refindex {
|
||||
for _, index := range i.rules[rule] {
|
||||
if index.Ref.Equal(ref) {
|
||||
return index
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type trieWalker interface {
|
||||
Do(x interface{}) trieWalker
|
||||
}
|
||||
|
||||
type trieTraversalResult struct {
|
||||
unordered map[int][]*ruleNode
|
||||
ordering []int
|
||||
values Set
|
||||
}
|
||||
|
||||
func newTrieTraversalResult() *trieTraversalResult {
|
||||
return &trieTraversalResult{
|
||||
unordered: map[int][]*ruleNode{},
|
||||
values: NewSet(),
|
||||
}
|
||||
}
|
||||
|
||||
func (tr *trieTraversalResult) Add(t *trieNode) {
|
||||
for _, node := range t.rules {
|
||||
root := node.prio[0]
|
||||
nodes, ok := tr.unordered[root]
|
||||
if !ok {
|
||||
tr.ordering = append(tr.ordering, root)
|
||||
}
|
||||
tr.unordered[root] = append(nodes, node)
|
||||
}
|
||||
if t.values != nil {
|
||||
t.values.Foreach(func(v *Term) { tr.values.Add(v) })
|
||||
}
|
||||
}
|
||||
|
||||
type trieNode struct {
|
||||
ref Ref
|
||||
values Set
|
||||
mappers []*valueMapper
|
||||
next *trieNode
|
||||
any *trieNode
|
||||
undefined *trieNode
|
||||
scalars *util.HashMap
|
||||
array *trieNode
|
||||
rules []*ruleNode
|
||||
}
|
||||
|
||||
func (node *trieNode) String() string {
|
||||
var flags []string
|
||||
flags = append(flags, fmt.Sprintf("self:%p", node))
|
||||
if len(node.ref) > 0 {
|
||||
flags = append(flags, node.ref.String())
|
||||
}
|
||||
if node.next != nil {
|
||||
flags = append(flags, fmt.Sprintf("next:%p", node.next))
|
||||
}
|
||||
if node.any != nil {
|
||||
flags = append(flags, fmt.Sprintf("any:%p", node.any))
|
||||
}
|
||||
if node.undefined != nil {
|
||||
flags = append(flags, fmt.Sprintf("undefined:%p", node.undefined))
|
||||
}
|
||||
if node.array != nil {
|
||||
flags = append(flags, fmt.Sprintf("array:%p", node.array))
|
||||
}
|
||||
if node.scalars.Len() > 0 {
|
||||
buf := make([]string, 0, node.scalars.Len())
|
||||
node.scalars.Iter(func(k, v util.T) bool {
|
||||
key := k.(Value)
|
||||
val := v.(*trieNode)
|
||||
buf = append(buf, fmt.Sprintf("scalar(%v):%p", key, val))
|
||||
return false
|
||||
})
|
||||
sort.Strings(buf)
|
||||
flags = append(flags, strings.Join(buf, " "))
|
||||
}
|
||||
if len(node.rules) > 0 {
|
||||
flags = append(flags, fmt.Sprintf("%d rule(s)", len(node.rules)))
|
||||
}
|
||||
if len(node.mappers) > 0 {
|
||||
flags = append(flags, fmt.Sprintf("%d mapper(s)", len(node.mappers)))
|
||||
}
|
||||
if node.values != nil {
|
||||
if l := node.values.Len(); l > 0 {
|
||||
flags = append(flags, fmt.Sprintf("%d value(s)", l))
|
||||
}
|
||||
}
|
||||
return strings.Join(flags, " ")
|
||||
}
|
||||
|
||||
func (node *trieNode) append(prio [2]int, rule *Rule) {
|
||||
node.rules = append(node.rules, &ruleNode{prio, rule})
|
||||
|
||||
if node.values != nil && rule.Head.Value != nil {
|
||||
node.values.Add(rule.Head.Value)
|
||||
return
|
||||
}
|
||||
|
||||
if node.values == nil && rule.Head.DocKind() == CompleteDoc {
|
||||
node.values = NewSet(rule.Head.Value)
|
||||
}
|
||||
}
|
||||
|
||||
type ruleNode struct {
|
||||
prio [2]int
|
||||
rule *Rule
|
||||
}
|
||||
|
||||
func newTrieNodeImpl() *trieNode {
|
||||
return &trieNode{
|
||||
scalars: util.NewHashMap(valueEq, valueHash),
|
||||
}
|
||||
}
|
||||
|
||||
func (node *trieNode) Do(walker trieWalker) {
|
||||
next := walker.Do(node)
|
||||
if next == nil {
|
||||
return
|
||||
}
|
||||
if node.any != nil {
|
||||
node.any.Do(next)
|
||||
}
|
||||
if node.undefined != nil {
|
||||
node.undefined.Do(next)
|
||||
}
|
||||
|
||||
node.scalars.Iter(func(_, v util.T) bool {
|
||||
child := v.(*trieNode)
|
||||
child.Do(next)
|
||||
return false
|
||||
})
|
||||
|
||||
if node.array != nil {
|
||||
node.array.Do(next)
|
||||
}
|
||||
if node.next != nil {
|
||||
node.next.Do(next)
|
||||
}
|
||||
}
|
||||
|
||||
func (node *trieNode) Insert(ref Ref, value Value, mapper *valueMapper) *trieNode {
|
||||
|
||||
if node.next == nil {
|
||||
node.next = newTrieNodeImpl()
|
||||
node.next.ref = ref
|
||||
}
|
||||
|
||||
if mapper != nil {
|
||||
node.next.addMapper(mapper)
|
||||
}
|
||||
|
||||
return node.next.insertValue(value)
|
||||
}
|
||||
|
||||
func (node *trieNode) Traverse(resolver ValueResolver, tr *trieTraversalResult) error {
|
||||
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
tr.Add(node)
|
||||
|
||||
return node.next.traverse(resolver, tr)
|
||||
}
|
||||
|
||||
func (node *trieNode) addMapper(mapper *valueMapper) {
|
||||
for i := range node.mappers {
|
||||
if node.mappers[i].Key == mapper.Key {
|
||||
return
|
||||
}
|
||||
}
|
||||
node.mappers = append(node.mappers, mapper)
|
||||
}
|
||||
|
||||
func (node *trieNode) insertValue(value Value) *trieNode {
|
||||
|
||||
switch value := value.(type) {
|
||||
case nil:
|
||||
if node.undefined == nil {
|
||||
node.undefined = newTrieNodeImpl()
|
||||
}
|
||||
return node.undefined
|
||||
case Var:
|
||||
if node.any == nil {
|
||||
node.any = newTrieNodeImpl()
|
||||
}
|
||||
return node.any
|
||||
case Null, Boolean, Number, String:
|
||||
child, ok := node.scalars.Get(value)
|
||||
if !ok {
|
||||
child = newTrieNodeImpl()
|
||||
node.scalars.Put(value, child)
|
||||
}
|
||||
return child.(*trieNode)
|
||||
case *Array:
|
||||
if node.array == nil {
|
||||
node.array = newTrieNodeImpl()
|
||||
}
|
||||
return node.array.insertArray(value)
|
||||
}
|
||||
|
||||
panic("illegal value")
|
||||
}
|
||||
|
||||
func (node *trieNode) insertArray(arr *Array) *trieNode {
|
||||
|
||||
if arr.Len() == 0 {
|
||||
return node
|
||||
}
|
||||
|
||||
switch head := arr.Elem(0).Value.(type) {
|
||||
case Var:
|
||||
if node.any == nil {
|
||||
node.any = newTrieNodeImpl()
|
||||
}
|
||||
return node.any.insertArray(arr.Slice(1, -1))
|
||||
case Null, Boolean, Number, String:
|
||||
child, ok := node.scalars.Get(head)
|
||||
if !ok {
|
||||
child = newTrieNodeImpl()
|
||||
node.scalars.Put(head, child)
|
||||
}
|
||||
return child.(*trieNode).insertArray(arr.Slice(1, -1))
|
||||
}
|
||||
|
||||
panic("illegal value")
|
||||
}
|
||||
|
||||
func (node *trieNode) traverse(resolver ValueResolver, tr *trieTraversalResult) error {
|
||||
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
v, err := resolver.Resolve(node.ref)
|
||||
if err != nil {
|
||||
if IsUnknownValueErr(err) {
|
||||
return node.traverseUnknown(resolver, tr)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if node.undefined != nil {
|
||||
err = node.undefined.Traverse(resolver, tr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if v == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if node.any != nil {
|
||||
err = node.any.Traverse(resolver, tr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := node.traverseValue(resolver, tr, v); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i := range node.mappers {
|
||||
if err := node.traverseValue(resolver, tr, node.mappers[i].MapValue(v)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (node *trieNode) traverseValue(resolver ValueResolver, tr *trieTraversalResult, value Value) error {
|
||||
|
||||
switch value := value.(type) {
|
||||
case *Array:
|
||||
if node.array == nil {
|
||||
return nil
|
||||
}
|
||||
return node.array.traverseArray(resolver, tr, value)
|
||||
|
||||
case Null, Boolean, Number, String:
|
||||
child, ok := node.scalars.Get(value)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return child.(*trieNode).Traverse(resolver, tr)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (node *trieNode) traverseArray(resolver ValueResolver, tr *trieTraversalResult, arr *Array) error {
|
||||
|
||||
if arr.Len() == 0 {
|
||||
return node.Traverse(resolver, tr)
|
||||
}
|
||||
|
||||
if node.any != nil {
|
||||
err := node.any.traverseArray(resolver, tr, arr.Slice(1, -1))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
head := arr.Elem(0).Value
|
||||
|
||||
if !IsScalar(head) {
|
||||
return nil
|
||||
}
|
||||
|
||||
child, ok := node.scalars.Get(head)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return child.(*trieNode).traverseArray(resolver, tr, arr.Slice(1, -1))
|
||||
}
|
||||
|
||||
func (node *trieNode) traverseUnknown(resolver ValueResolver, tr *trieTraversalResult) error {
|
||||
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := node.Traverse(resolver, tr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := node.undefined.traverseUnknown(resolver, tr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := node.any.traverseUnknown(resolver, tr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := node.array.traverseUnknown(resolver, tr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var iterErr error
|
||||
node.scalars.Iter(func(_, v util.T) bool {
|
||||
child := v.(*trieNode)
|
||||
if iterErr = child.traverseUnknown(resolver, tr); iterErr != nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
return iterErr
|
||||
}
|
||||
|
||||
// If term `a` is one of the function's operands, we store a Ref: `args[0]`
|
||||
// for the argument number. So for `f(x, y) { x = 10; y = 12 }`, we'll
|
||||
// bind `args[0]` and `args[1]` to this rule when called for (x=10) and
|
||||
// (y=12) respectively.
|
||||
func eqOperandsToRefAndValue(isVirtual func(Ref) bool, args []*Term, a, b *Term) (*refindex, bool) {
|
||||
switch v := a.Value.(type) {
|
||||
case Var:
|
||||
for i, arg := range args {
|
||||
if arg.Value.Compare(v) == 0 {
|
||||
if bval, ok := indexValue(b); ok {
|
||||
return &refindex{Ref: Ref{FunctionArgRootDocument, IntNumberTerm(i)}, Value: bval}, true
|
||||
}
|
||||
}
|
||||
}
|
||||
case Ref:
|
||||
if !RootDocumentNames.Contains(v[0]) {
|
||||
return nil, false
|
||||
}
|
||||
if isVirtual(v) {
|
||||
return nil, false
|
||||
}
|
||||
if v.IsNested() || !v.IsGround() {
|
||||
return nil, false
|
||||
}
|
||||
if bval, ok := indexValue(b); ok {
|
||||
return &refindex{Ref: v, Value: bval}, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func indexValue(b *Term) (Value, bool) {
|
||||
switch b := b.Value.(type) {
|
||||
case Null, Boolean, Number, String, Var:
|
||||
return b, true
|
||||
case *Array:
|
||||
stop := false
|
||||
first := true
|
||||
vis := NewGenericVisitor(func(x interface{}) bool {
|
||||
if first {
|
||||
first = false
|
||||
return false
|
||||
}
|
||||
switch x.(type) {
|
||||
// No nested structures or values that require evaluation (other than var).
|
||||
case *Array, Object, Set, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Ref:
|
||||
stop = true
|
||||
}
|
||||
return stop
|
||||
})
|
||||
vis.Walk(b)
|
||||
if !stop {
|
||||
return b, true
|
||||
}
|
||||
}
|
||||
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func globDelimiterToString(delim *Term) (string, bool) {
|
||||
|
||||
arr, ok := delim.Value.(*Array)
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
|
||||
var result string
|
||||
|
||||
if arr.Len() == 0 {
|
||||
result = "."
|
||||
} else {
|
||||
for i := 0; i < arr.Len(); i++ {
|
||||
term := arr.Elem(i)
|
||||
s, ok := term.Value.(String)
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
result += string(s)
|
||||
}
|
||||
}
|
||||
|
||||
return result, true
|
||||
}
|
||||
|
||||
func globPatternToArray(pattern *Term, delim string) *Term {
|
||||
|
||||
s, ok := pattern.Value.(String)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
parts := splitStringEscaped(string(s), delim)
|
||||
arr := make([]*Term, len(parts))
|
||||
|
||||
for i := range parts {
|
||||
if parts[i] == "*" {
|
||||
arr[i] = VarTerm("$globwildcard")
|
||||
} else {
|
||||
var escaped bool
|
||||
for _, c := range parts[i] {
|
||||
if c == '\\' {
|
||||
escaped = !escaped
|
||||
continue
|
||||
}
|
||||
if !escaped {
|
||||
switch c {
|
||||
case '[', '?', '{', '*':
|
||||
// TODO(tsandall): super glob and character pattern
|
||||
// matching not supported yet.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
escaped = false
|
||||
}
|
||||
arr[i] = StringTerm(parts[i])
|
||||
}
|
||||
}
|
||||
|
||||
return NewTerm(NewArray(arr...))
|
||||
}
|
||||
|
||||
// splits s on characters in delim except if delim characters have been escaped
|
||||
// with reverse solidus.
|
||||
func splitStringEscaped(s string, delim string) []string {
|
||||
|
||||
var last, curr int
|
||||
var escaped bool
|
||||
var result []string
|
||||
|
||||
for ; curr < len(s); curr++ {
|
||||
if s[curr] == '\\' || escaped {
|
||||
escaped = !escaped
|
||||
continue
|
||||
}
|
||||
if strings.ContainsRune(delim, rune(s[curr])) {
|
||||
result = append(result, s[last:curr])
|
||||
last = curr + 1
|
||||
}
|
||||
}
|
||||
|
||||
result = append(result, s[last:])
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func stringSliceToArray(s []string) *Array {
|
||||
arr := make([]*Term, len(s))
|
||||
for i, v := range s {
|
||||
arr[i] = StringTerm(v)
|
||||
}
|
||||
return NewArray(arr...)
|
||||
return v1.NewIndexResult(kind)
|
||||
}
|
||||
|
||||
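The glob indexing above hinges on the escaped-split behaviour: both the pattern (in globPatternToArray) and, via the valueMapper, the matched value are split on the delimiter while honouring backslash escapes. The following is a standalone re-implementation for illustration only, since the vendored splitStringEscaped is unexported; names here are ours, not OPA's.

package main

import (
    "fmt"
    "strings"
)

// splitEscaped splits s on any rune in delim, unless the delimiter is
// preceded by an unescaped reverse solidus (mirrors splitStringEscaped above).
func splitEscaped(s, delim string) []string {
    var last int
    var escaped bool
    var result []string
    for curr := 0; curr < len(s); curr++ {
        if s[curr] == '\\' || escaped {
            escaped = !escaped
            continue
        }
        if strings.ContainsRune(delim, rune(s[curr])) {
            result = append(result, s[last:curr])
            last = curr + 1
        }
    }
    return append(result, s[last:])
}

func main() {
    fmt.Println(splitEscaped(`foo.*.bar`, "."))  // [foo * bar]
    fmt.Println(splitEscaped(`foo\.a.bar`, ".")) // [foo\.a bar] -- escaped dot is kept
}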
24
vendor/github.com/open-policy-agent/opa/ast/interning.go
generated
vendored
Normal file
24
vendor/github.com/open-policy-agent/opa/ast/interning.go
generated
vendored
Normal file
@@ -0,0 +1,24 @@
// Copyright 2024 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.

package ast

import (
    v1 "github.com/open-policy-agent/opa/v1/ast"
)

func InternedBooleanTerm(b bool) *Term {
    return v1.InternedBooleanTerm(b)
}

// InternedIntNumberTerm returns a term with the given integer value. The term is
// cached between -1 to 512, and for values outside of that range, this function
// is equivalent to ast.IntNumberTerm.
func InternedIntNumberTerm(i int) *Term {
    return v1.InternedIntNumberTerm(i)
}

func HasInternedIntNumberTerm(i int) bool {
    return v1.HasInternedIntNumberTerm(i)
}
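The interning helpers return shared *Term values for small integers, so hot paths can reuse cached terms instead of allocating. A minimal usage sketch, assuming the vendored OPA module is importable; the cached range (-1..512) is taken from the doc comment above.

package main

import (
    "fmt"

    "github.com/open-policy-agent/opa/ast"
)

func main() {
    a := ast.InternedIntNumberTerm(42)
    b := ast.InternedIntNumberTerm(42)
    fmt.Println(a == b)                              // true: both point at the cached term
    fmt.Println(ast.HasInternedIntNumberTerm(42))    // true
    fmt.Println(ast.HasInternedIntNumberTerm(10000)) // false: outside the cached range
}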
8
vendor/github.com/open-policy-agent/opa/ast/json/doc.go
generated
vendored
Normal file
8
vendor/github.com/open-policy-agent/opa/ast/json/doc.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@
// Copyright 2024 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.

// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended.
// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead.
// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information.
package json
31
vendor/github.com/open-policy-agent/opa/ast/json/json.go
generated
vendored
31
vendor/github.com/open-policy-agent/opa/ast/json/json.go
generated
vendored
@@ -1,36 +1,15 @@
package json

import v1 "github.com/open-policy-agent/opa/v1/ast/json"

// Options defines the options for JSON operations,
// currently only marshaling can be configured
type Options struct {
    MarshalOptions MarshalOptions
}
type Options = v1.Options

// MarshalOptions defines the options for JSON marshaling,
// currently only toggling the marshaling of location information is supported
type MarshalOptions struct {
    // IncludeLocation toggles the marshaling of location information
    IncludeLocation NodeToggle
    // IncludeLocationText additionally/optionally includes the text of the location
    IncludeLocationText bool
    // ExcludeLocationFile additionally/optionally excludes the file of the location
    // Note that this is inverted (i.e. not "include" as the default needs to remain false)
    ExcludeLocationFile bool
}
type MarshalOptions = v1.MarshalOptions

// NodeToggle is a generic struct to allow the toggling of
// settings for different ast node types
type NodeToggle struct {
    Term           bool
    Package        bool
    Comment        bool
    Import         bool
    Rule           bool
    Head           bool
    Expr           bool
    SomeDecl       bool
    Every          bool
    With           bool
    Annotations    bool
    AnnotationsRef bool
}
type NodeToggle = v1.NodeToggle
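Note the pattern in this diff: each v0 struct definition is replaced by a type alias into the v1 package, so existing callers keep compiling while both packages share one set of types. A sketch of what the alias enables, using only fields shown above:

package main

import (
    astJSON "github.com/open-policy-agent/opa/ast/json"
    v1json "github.com/open-policy-agent/opa/v1/ast/json"
)

func main() {
    opts := astJSON.Options{
        MarshalOptions: astJSON.MarshalOptions{
            IncludeLocation: astJSON.NodeToggle{Term: true, Rule: true},
        },
    }
    // Legal without conversion: the v0 name is an alias, not a distinct type.
    var v1opts v1json.Options = opts
    _ = v1opts
}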
121
vendor/github.com/open-policy-agent/opa/ast/map.go
generated
vendored
121
vendor/github.com/open-policy-agent/opa/ast/map.go
generated
vendored
@@ -5,129 +5,14 @@
package ast

import (
    "encoding/json"

    "github.com/open-policy-agent/opa/util"
    v1 "github.com/open-policy-agent/opa/v1/ast"
)

// ValueMap represents a key/value map between AST term values. Any type of term
// can be used as a key in the map.
type ValueMap struct {
    hashMap *util.HashMap
}
type ValueMap = v1.ValueMap

// NewValueMap returns a new ValueMap.
func NewValueMap() *ValueMap {
    vs := &ValueMap{
        hashMap: util.NewHashMap(valueEq, valueHash),
    }
    return vs
}

// MarshalJSON provides a custom marshaller for the ValueMap which
// will include the key, value, and value type.
func (vs *ValueMap) MarshalJSON() ([]byte, error) {
    var tmp []map[string]interface{}
    vs.Iter(func(k Value, v Value) bool {
        tmp = append(tmp, map[string]interface{}{
            "name":  k.String(),
            "type":  TypeName(v),
            "value": v,
        })
        return false
    })
    return json.Marshal(tmp)
}

// Copy returns a shallow copy of the ValueMap.
func (vs *ValueMap) Copy() *ValueMap {
    if vs == nil {
        return nil
    }
    cpy := NewValueMap()
    cpy.hashMap = vs.hashMap.Copy()
    return cpy
}

// Equal returns true if this ValueMap equals the other.
func (vs *ValueMap) Equal(other *ValueMap) bool {
    if vs == nil {
        return other == nil || other.Len() == 0
    }
    if other == nil {
        return vs == nil || vs.Len() == 0
    }
    return vs.hashMap.Equal(other.hashMap)
}

// Len returns the number of elements in the map.
func (vs *ValueMap) Len() int {
    if vs == nil {
        return 0
    }
    return vs.hashMap.Len()
}

// Get returns the value in the map for k.
func (vs *ValueMap) Get(k Value) Value {
    if vs != nil {
        if v, ok := vs.hashMap.Get(k); ok {
            return v.(Value)
        }
    }
    return nil
}

// Hash returns a hash code for this ValueMap.
func (vs *ValueMap) Hash() int {
    if vs == nil {
        return 0
    }
    return vs.hashMap.Hash()
}

// Iter calls the iter function for each key/value pair in the map. If the iter
// function returns true, iteration stops.
func (vs *ValueMap) Iter(iter func(Value, Value) bool) bool {
    if vs == nil {
        return false
    }
    return vs.hashMap.Iter(func(kt, vt util.T) bool {
        k := kt.(Value)
        v := vt.(Value)
        return iter(k, v)
    })
}

// Put inserts a key k into the map with value v.
func (vs *ValueMap) Put(k, v Value) {
    if vs == nil {
        panic("put on nil value map")
    }
    vs.hashMap.Put(k, v)
}

// Delete removes a key k from the map.
func (vs *ValueMap) Delete(k Value) {
    if vs == nil {
        return
    }
    vs.hashMap.Delete(k)
}

func (vs *ValueMap) String() string {
    if vs == nil {
        return "{}"
    }
    return vs.hashMap.String()
}

func valueHash(v util.T) int {
    return v.(Value).Hash()
}

func valueEq(a, b util.T) bool {
    av := a.(Value)
    bv := b.(Value)
    return av.Compare(bv) == 0
    return v1.NewValueMap()
}
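ValueMap follows the same alias pattern: the constructor now delegates to v1.NewValueMap, but the calling convention is unchanged. A short usage sketch; these calls match the signatures in the removed v0 code above.

package main

import (
    "fmt"

    "github.com/open-policy-agent/opa/ast"
)

func main() {
    vs := ast.NewValueMap()
    vs.Put(ast.String("region"), ast.String("eu-west-1"))
    vs.Put(ast.Number("1"), ast.Boolean(true))

    fmt.Println(vs.Get(ast.String("region"))) // "eu-west-1"
    fmt.Println(vs.Len())                     // 2
}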
11
vendor/github.com/open-policy-agent/opa/ast/marshal.go
generated
vendored
11
vendor/github.com/open-policy-agent/opa/ast/marshal.go
generated
vendored
@@ -1,11 +0,0 @@
package ast

import (
    astJSON "github.com/open-policy-agent/opa/ast/json"
)

// customJSON is an interface that can be implemented by AST nodes that
// allows the parser to set options for JSON operations on that node.
type customJSON interface {
    setJSONOptions(astJSON.Options)
}
2712
vendor/github.com/open-policy-agent/opa/ast/parser.go
generated
vendored
2712
vendor/github.com/open-policy-agent/opa/ast/parser.go
generated
vendored
File diff suppressed because it is too large
Load Diff
597
vendor/github.com/open-policy-agent/opa/ast/parser_ext.go
generated
vendored
597
vendor/github.com/open-policy-agent/opa/ast/parser_ext.go
generated
vendored
@@ -1,24 +1,13 @@
// Copyright 2016 The OPA Authors. All rights reserved.
// Copyright 2024 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.

// This file contains extra functions for parsing Rego.
// Most of the parsing is handled by the code in parser.go,
// however, there are additional utilities that are
// helpful for dealing with Rego source inputs (e.g., REPL
// statements, source files, etc.)

package ast

import (
    "bytes"
    "errors"
    "fmt"
    "strings"
    "unicode"

    "github.com/open-policy-agent/opa/ast/internal/tokens"
    astJSON "github.com/open-policy-agent/opa/ast/json"
    v1 "github.com/open-policy-agent/opa/v1/ast"
)

// MustParseBody returns a parsed body.
@@ -30,11 +19,7 @@ func MustParseBody(input string) Body {
// MustParseBodyWithOpts returns a parsed body.
// If an error occurs during parsing, panic.
func MustParseBodyWithOpts(input string, opts ParserOptions) Body {
    parsed, err := ParseBodyWithOpts(input, opts)
    if err != nil {
        panic(err)
    }
    return parsed
    return v1.MustParseBodyWithOpts(input, setDefaultRegoVersion(opts))
}

// MustParseExpr returns a parsed expression.
@@ -66,11 +51,7 @@ func MustParseModule(input string) *Module {
// MustParseModuleWithOpts returns a parsed module.
// If an error occurs during parsing, panic.
func MustParseModuleWithOpts(input string, opts ParserOptions) *Module {
    parsed, err := ParseModuleWithOpts("", input, opts)
    if err != nil {
        panic(err)
    }
    return parsed
    return v1.MustParseModuleWithOpts(input, setDefaultRegoVersion(opts))
}

// MustParsePackage returns a Package.
@@ -104,11 +85,7 @@ func MustParseStatement(input string) Statement {
}

func MustParseStatementWithOpts(input string, popts ParserOptions) Statement {
    parsed, err := ParseStatementWithOpts(input, popts)
    if err != nil {
        panic(err)
    }
    return parsed
    return v1.MustParseStatementWithOpts(input, setDefaultRegoVersion(popts))
}

// MustParseRef returns a parsed reference.
@@ -134,11 +111,7 @@ func MustParseRule(input string) *Rule {
// MustParseRuleWithOpts returns a parsed rule.
// If an error occurs during parsing, panic.
func MustParseRuleWithOpts(input string, opts ParserOptions) *Rule {
    parsed, err := ParseRuleWithOpts(input, opts)
    if err != nil {
        panic(err)
    }
    return parsed
    return v1.MustParseRuleWithOpts(input, setDefaultRegoVersion(opts))
}

// MustParseTerm returns a parsed term.
@@ -154,331 +127,59 @@ func MustParseTerm(input string) *Term {
// ParseRuleFromBody returns a rule if the body can be interpreted as a rule
// definition. Otherwise, an error is returned.
func ParseRuleFromBody(module *Module, body Body) (*Rule, error) {

    if len(body) != 1 {
        return nil, fmt.Errorf("multiple expressions cannot be used for rule head")
    }

    return ParseRuleFromExpr(module, body[0])
    return v1.ParseRuleFromBody(module, body)
}

// ParseRuleFromExpr returns a rule if the expression can be interpreted as a
// rule definition.
func ParseRuleFromExpr(module *Module, expr *Expr) (*Rule, error) {

    if len(expr.With) > 0 {
        return nil, fmt.Errorf("expressions using with keyword cannot be used for rule head")
    }

    if expr.Negated {
        return nil, fmt.Errorf("negated expressions cannot be used for rule head")
    }

    if _, ok := expr.Terms.(*SomeDecl); ok {
        return nil, errors.New("'some' declarations cannot be used for rule head")
    }

    if term, ok := expr.Terms.(*Term); ok {
        switch v := term.Value.(type) {
        case Ref:
            if len(v) > 2 { // 2+ dots
                return ParseCompleteDocRuleWithDotsFromTerm(module, term)
            }
            return ParsePartialSetDocRuleFromTerm(module, term)
        default:
            return nil, fmt.Errorf("%v cannot be used for rule name", TypeName(v))
        }
    }

    if _, ok := expr.Terms.([]*Term); !ok {
        // This is a defensive check in case other kinds of expression terms are
        // introduced in the future.
        return nil, errors.New("expression cannot be used for rule head")
    }

    if expr.IsEquality() {
        return parseCompleteRuleFromEq(module, expr)
    } else if expr.IsAssignment() {
        rule, err := parseCompleteRuleFromEq(module, expr)
        if err != nil {
            return nil, err
        }
        rule.Head.Assign = true
        return rule, nil
    }

    if _, ok := BuiltinMap[expr.Operator().String()]; ok {
        return nil, fmt.Errorf("rule name conflicts with built-in function")
    }

    return ParseRuleFromCallExpr(module, expr.Terms.([]*Term))
}

func parseCompleteRuleFromEq(module *Module, expr *Expr) (rule *Rule, err error) {

    // ensure the rule location is set to the expr location
    // the helper functions called below try to set the location based
    // on the terms they've been provided but that is not as accurate.
    defer func() {
        if rule != nil {
            rule.Location = expr.Location
            rule.Head.Location = expr.Location
        }
    }()

    lhs, rhs := expr.Operand(0), expr.Operand(1)
    if lhs == nil || rhs == nil {
        return nil, errors.New("assignment requires two operands")
    }

    rule, err = ParseRuleFromCallEqExpr(module, lhs, rhs)
    if err == nil {
        return rule, nil
    }

    rule, err = ParsePartialObjectDocRuleFromEqExpr(module, lhs, rhs)
    if err == nil {
        return rule, nil
    }

    return ParseCompleteDocRuleFromEqExpr(module, lhs, rhs)
    return v1.ParseRuleFromExpr(module, expr)
}

// ParseCompleteDocRuleFromAssignmentExpr returns a rule if the expression can
// be interpreted as a complete document definition declared with the assignment
// operator.
func ParseCompleteDocRuleFromAssignmentExpr(module *Module, lhs, rhs *Term) (*Rule, error) {

    rule, err := ParseCompleteDocRuleFromEqExpr(module, lhs, rhs)
    if err != nil {
        return nil, err
    }

    rule.Head.Assign = true

    return rule, nil
    return v1.ParseCompleteDocRuleFromAssignmentExpr(module, lhs, rhs)
}

// ParseCompleteDocRuleFromEqExpr returns a rule if the expression can be
// interpreted as a complete document definition.
func ParseCompleteDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) {
    var head *Head

    if v, ok := lhs.Value.(Var); ok {
        // Modify the code to add the location to the head ref
        // and set the head ref's jsonOptions.
        head = VarHead(v, lhs.Location, &lhs.jsonOptions)
    } else if r, ok := lhs.Value.(Ref); ok { // groundness ?
        if _, ok := r[0].Value.(Var); !ok {
            return nil, fmt.Errorf("invalid rule head: %v", r)
        }
        head = RefHead(r)
        if len(r) > 1 && !r[len(r)-1].IsGround() {
            return nil, fmt.Errorf("ref not ground")
        }
    } else {
        return nil, fmt.Errorf("%v cannot be used for rule name", TypeName(lhs.Value))
    }
    head.Value = rhs
    head.Location = lhs.Location
    head.setJSONOptions(lhs.jsonOptions)

    body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location))
    setJSONOptions(body, &rhs.jsonOptions)

    return &Rule{
        Location:      lhs.Location,
        Head:          head,
        Body:          body,
        Module:        module,
        jsonOptions:   lhs.jsonOptions,
        generatedBody: true,
    }, nil
    return v1.ParseCompleteDocRuleFromEqExpr(module, lhs, rhs)
}

func ParseCompleteDocRuleWithDotsFromTerm(module *Module, term *Term) (*Rule, error) {
    ref, ok := term.Value.(Ref)
    if !ok {
        return nil, fmt.Errorf("%v cannot be used for rule name", TypeName(term.Value))
    }

    if _, ok := ref[0].Value.(Var); !ok {
        return nil, fmt.Errorf("invalid rule head: %v", ref)
    }
    head := RefHead(ref, BooleanTerm(true).SetLocation(term.Location))
    head.generatedValue = true
    head.Location = term.Location
    head.jsonOptions = term.jsonOptions

    body := NewBody(NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location))
    setJSONOptions(body, &term.jsonOptions)

    return &Rule{
        Location: term.Location,
        Head:     head,
        Body:     body,
        Module:   module,

        jsonOptions: term.jsonOptions,
    }, nil
    return v1.ParseCompleteDocRuleWithDotsFromTerm(module, term)
}

// ParsePartialObjectDocRuleFromEqExpr returns a rule if the expression can be
// interpreted as a partial object document definition.
func ParsePartialObjectDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) {
    ref, ok := lhs.Value.(Ref)
    if !ok {
        return nil, fmt.Errorf("%v cannot be used as rule name", TypeName(lhs.Value))
    }

    if _, ok := ref[0].Value.(Var); !ok {
        return nil, fmt.Errorf("invalid rule head: %v", ref)
    }

    head := RefHead(ref, rhs)
    if len(ref) == 2 { // backcompat for naked `foo.bar = "baz"` statements
        head.Name = ref[0].Value.(Var)
        head.Key = ref[1]
    }
    head.Location = rhs.Location
    head.jsonOptions = rhs.jsonOptions

    body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location))
    setJSONOptions(body, &rhs.jsonOptions)

    rule := &Rule{
        Location:    rhs.Location,
        Head:        head,
        Body:        body,
        Module:      module,
        jsonOptions: rhs.jsonOptions,
    }

    return rule, nil
    return v1.ParsePartialObjectDocRuleFromEqExpr(module, lhs, rhs)
}

// ParsePartialSetDocRuleFromTerm returns a rule if the term can be interpreted
// as a partial set document definition.
func ParsePartialSetDocRuleFromTerm(module *Module, term *Term) (*Rule, error) {

    ref, ok := term.Value.(Ref)
    if !ok || len(ref) == 1 {
        return nil, fmt.Errorf("%vs cannot be used for rule head", TypeName(term.Value))
    }
    if _, ok := ref[0].Value.(Var); !ok {
        return nil, fmt.Errorf("invalid rule head: %v", ref)
    }

    head := RefHead(ref)
    if len(ref) == 2 {
        v, ok := ref[0].Value.(Var)
        if !ok {
            return nil, fmt.Errorf("%vs cannot be used for rule head", TypeName(term.Value))
        }
        // Modify the code to add the location to the head ref
        // and set the head ref's jsonOptions.
        head = VarHead(v, ref[0].Location, &ref[0].jsonOptions)
        head.Key = ref[1]
    }
    head.Location = term.Location
    head.jsonOptions = term.jsonOptions

    body := NewBody(NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location))
    setJSONOptions(body, &term.jsonOptions)

    rule := &Rule{
        Location:    term.Location,
        Head:        head,
        Body:        body,
        Module:      module,
        jsonOptions: term.jsonOptions,
    }

    return rule, nil
    return v1.ParsePartialSetDocRuleFromTerm(module, term)
}

// ParseRuleFromCallEqExpr returns a rule if the term can be interpreted as a
// function definition (e.g., f(x) = y => f(x) = y { true }).
func ParseRuleFromCallEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) {

    call, ok := lhs.Value.(Call)
    if !ok {
        return nil, fmt.Errorf("must be call")
    }

    ref, ok := call[0].Value.(Ref)
    if !ok {
        return nil, fmt.Errorf("%vs cannot be used in function signature", TypeName(call[0].Value))
    }
    if _, ok := ref[0].Value.(Var); !ok {
        return nil, fmt.Errorf("invalid rule head: %v", ref)
    }

    head := RefHead(ref, rhs)
    head.Location = lhs.Location
    head.Args = Args(call[1:])
    head.jsonOptions = lhs.jsonOptions

    body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location))
    setJSONOptions(body, &rhs.jsonOptions)

    rule := &Rule{
        Location:    lhs.Location,
        Head:        head,
        Body:        body,
        Module:      module,
        jsonOptions: lhs.jsonOptions,
    }

    return rule, nil
    return v1.ParseRuleFromCallEqExpr(module, lhs, rhs)
}

// ParseRuleFromCallExpr returns a rule if the terms can be interpreted as a
// function returning true or some value (e.g., f(x) => f(x) = true { true }).
func ParseRuleFromCallExpr(module *Module, terms []*Term) (*Rule, error) {

    if len(terms) <= 1 {
        return nil, fmt.Errorf("rule argument list must take at least one argument")
    }

    loc := terms[0].Location
    ref := terms[0].Value.(Ref)
    if _, ok := ref[0].Value.(Var); !ok {
        return nil, fmt.Errorf("invalid rule head: %v", ref)
    }
    head := RefHead(ref, BooleanTerm(true).SetLocation(loc))
    head.Location = loc
    head.Args = terms[1:]
    head.jsonOptions = terms[0].jsonOptions

    body := NewBody(NewExpr(BooleanTerm(true).SetLocation(loc)).SetLocation(loc))
    setJSONOptions(body, &terms[0].jsonOptions)

    rule := &Rule{
        Location:    loc,
        Head:        head,
        Module:      module,
        Body:        body,
        jsonOptions: terms[0].jsonOptions,
    }
    return rule, nil
    return v1.ParseRuleFromCallExpr(module, terms)
}

// ParseImports returns a slice of Import objects.
func ParseImports(input string) ([]*Import, error) {
    stmts, _, err := ParseStatements("", input)
    if err != nil {
        return nil, err
    }
    result := []*Import{}
    for _, stmt := range stmts {
        if imp, ok := stmt.(*Import); ok {
            result = append(result, imp)
        } else {
            return nil, fmt.Errorf("expected import but got %T", stmt)
        }
    }
    return result, nil
    return v1.ParseImports(input)
}

// ParseModule returns a parsed Module object.
@@ -492,11 +193,7 @@ func ParseModule(filename, input string) (*Module, error) {
// For details on Module objects and their fields, see policy.go.
// Empty input will return nil, nil.
func ParseModuleWithOpts(filename, input string, popts ParserOptions) (*Module, error) {
    stmts, comments, err := ParseStatementsWithOpts(filename, input, popts)
    if err != nil {
        return nil, err
    }
    return parseModule(filename, stmts, comments, popts.RegoVersion)
    return v1.ParseModuleWithOpts(filename, input, setDefaultRegoVersion(popts))
}

// ParseBody returns exactly one body.
@@ -508,28 +205,7 @@ func ParseBody(input string) (Body, error) {
// ParseBodyWithOpts returns exactly one body. It does _not_ set SkipRules: true on its own,
// but respects whatever ParserOptions it's been given.
func ParseBodyWithOpts(input string, popts ParserOptions) (Body, error) {

    stmts, _, err := ParseStatementsWithOpts("", input, popts)
    if err != nil {
        return nil, err
    }

    result := Body{}

    for _, stmt := range stmts {
        switch stmt := stmt.(type) {
        case Body:
            for i := range stmt {
                result.Append(stmt[i])
            }
        case *Comment:
            // skip
        default:
            return nil, fmt.Errorf("expected body but got %T", stmt)
        }
    }

    return result, nil
    return v1.ParseBodyWithOpts(input, setDefaultRegoVersion(popts))
}

// ParseExpr returns exactly one expression.
@@ -548,15 +224,7 @@ func ParseExpr(input string) (*Expr, error) {
// ParsePackage returns exactly one Package.
// If multiple statements are parsed, an error is returned.
func ParsePackage(input string) (*Package, error) {
    stmt, err := ParseStatement(input)
    if err != nil {
        return nil, err
    }
    pkg, ok := stmt.(*Package)
    if !ok {
        return nil, fmt.Errorf("expected package but got %T", stmt)
    }
    return pkg, nil
    return v1.ParsePackage(input)
}

// ParseTerm returns exactly one term.
@@ -592,18 +260,7 @@ func ParseRef(input string) (Ref, error) {
// ParseRuleWithOpts returns exactly one rule.
// If multiple rules are parsed, an error is returned.
func ParseRuleWithOpts(input string, opts ParserOptions) (*Rule, error) {
    stmts, _, err := ParseStatementsWithOpts("", input, opts)
    if err != nil {
        return nil, err
    }
    if len(stmts) != 1 {
        return nil, fmt.Errorf("expected exactly one statement (rule), got %v = %T, %T", stmts, stmts[0], stmts[1])
    }
    rule, ok := stmts[0].(*Rule)
    if !ok {
        return nil, fmt.Errorf("expected rule but got %T", stmts[0])
    }
    return rule, nil
    return v1.ParseRuleWithOpts(input, setDefaultRegoVersion(opts))
}

// ParseRule returns exactly one rule.
@@ -628,14 +285,7 @@ func ParseStatement(input string) (Statement, error) {
}

func ParseStatementWithOpts(input string, popts ParserOptions) (Statement, error) {
    stmts, _, err := ParseStatementsWithOpts("", input, popts)
    if err != nil {
        return nil, err
    }
    if len(stmts) != 1 {
        return nil, fmt.Errorf("expected exactly one statement")
    }
    return stmts[0], nil
    return v1.ParseStatementWithOpts(input, setDefaultRegoVersion(popts))
}

// ParseStatements is deprecated. Use ParseStatementWithOpts instead.
@@ -646,204 +296,15 @@ func ParseStatements(filename, input string) ([]Statement, []*Comment, error) {
// ParseStatementsWithOpts returns a slice of parsed statements. This is the
// default return value from the parser.
func ParseStatementsWithOpts(filename, input string, popts ParserOptions) ([]Statement, []*Comment, error) {

    parser := NewParser().
        WithFilename(filename).
        WithReader(bytes.NewBufferString(input)).
        WithProcessAnnotation(popts.ProcessAnnotation).
        WithFutureKeywords(popts.FutureKeywords...).
        WithAllFutureKeywords(popts.AllFutureKeywords).
        WithCapabilities(popts.Capabilities).
        WithSkipRules(popts.SkipRules).
        WithJSONOptions(popts.JSONOptions).
        WithRegoVersion(popts.RegoVersion).
        withUnreleasedKeywords(popts.unreleasedKeywords)

    stmts, comments, errs := parser.Parse()

    if len(errs) > 0 {
        return nil, nil, errs
    }

    return stmts, comments, nil
}

func parseModule(filename string, stmts []Statement, comments []*Comment, regoCompatibilityMode RegoVersion) (*Module, error) {

    if len(stmts) == 0 {
        return nil, NewError(ParseErr, &Location{File: filename}, "empty module")
    }

    var errs Errors

    pkg, ok := stmts[0].(*Package)
    if !ok {
        loc := stmts[0].Loc()
        errs = append(errs, NewError(ParseErr, loc, "package expected"))
    }

    mod := &Module{
        Package: pkg,
        stmts:   stmts,
    }

    // The comments slice only holds comments that were not their own statements.
    mod.Comments = append(mod.Comments, comments...)
    mod.regoVersion = regoCompatibilityMode

    for i, stmt := range stmts[1:] {
        switch stmt := stmt.(type) {
        case *Import:
            mod.Imports = append(mod.Imports, stmt)
            if mod.regoVersion == RegoV0 && Compare(stmt.Path.Value, RegoV1CompatibleRef) == 0 {
                mod.regoVersion = RegoV0CompatV1
            }
        case *Rule:
            setRuleModule(stmt, mod)
            mod.Rules = append(mod.Rules, stmt)
        case Body:
            rule, err := ParseRuleFromBody(mod, stmt)
            if err != nil {
                errs = append(errs, NewError(ParseErr, stmt[0].Location, err.Error()))
                continue
            }
            rule.generatedBody = true
            mod.Rules = append(mod.Rules, rule)

            // NOTE(tsandall): the statement should now be interpreted as a
            // rule so update the statement list. This is important for the
            // logic below that associates annotations with statements.
            stmts[i+1] = rule
        case *Package:
            errs = append(errs, NewError(ParseErr, stmt.Loc(), "unexpected package"))
        case *Annotations:
            mod.Annotations = append(mod.Annotations, stmt)
        case *Comment:
            // Ignore comments, they're handled above.
        default:
            panic("illegal value") // Indicates grammar is out-of-sync with code.
        }
    }

    if mod.regoVersion == RegoV0CompatV1 || mod.regoVersion == RegoV1 {
        for _, rule := range mod.Rules {
            for r := rule; r != nil; r = r.Else {
                errs = append(errs, CheckRegoV1(r)...)
            }
        }
    }

    if len(errs) > 0 {
        return nil, errs
    }

    errs = append(errs, attachAnnotationsNodes(mod)...)

    if len(errs) > 0 {
        return nil, errs
    }

    attachRuleAnnotations(mod)

    return mod, nil
}

func ruleDeclarationHasKeyword(rule *Rule, keyword tokens.Token) bool {
    for _, kw := range rule.Head.keywords {
        if kw == keyword {
            return true
        }
    }
    return false
}

func newScopeAttachmentErr(a *Annotations, want string) *Error {
    var have string
    if a.node != nil {
        have = fmt.Sprintf(" (have %v)", TypeName(a.node))
    }
    return NewError(ParseErr, a.Loc(), "annotation scope '%v' must be applied to %v%v", a.Scope, want, have)
}

func setRuleModule(rule *Rule, module *Module) {
    rule.Module = module
    if rule.Else != nil {
        setRuleModule(rule.Else, module)
    }
}

func setJSONOptions(x interface{}, jsonOptions *astJSON.Options) {
    vis := NewGenericVisitor(func(x interface{}) bool {
        if x, ok := x.(customJSON); ok {
            x.setJSONOptions(*jsonOptions)
        }
        return false
    })
    vis.Walk(x)
    return v1.ParseStatementsWithOpts(filename, input, setDefaultRegoVersion(popts))
}

// ParserErrorDetail holds additional details for parser errors.
type ParserErrorDetail struct {
    Line string `json:"line"`
    Idx  int    `json:"idx"`
}

func newParserErrorDetail(bs []byte, offset int) *ParserErrorDetail {

    // Find first non-space character at or before offset position.
    if offset >= len(bs) {
        offset = len(bs) - 1
    } else if offset < 0 {
        offset = 0
    }

    for offset > 0 && unicode.IsSpace(rune(bs[offset])) {
        offset--
    }

    // Find beginning of line containing offset.
    begin := offset

    for begin > 0 && !isNewLineChar(bs[begin]) {
        begin--
    }

    if isNewLineChar(bs[begin]) {
        begin++
    }

    // Find end of line containing offset.
    end := offset

    for end < len(bs) && !isNewLineChar(bs[end]) {
        end++
    }

    if begin > end {
        begin = end
    }

    // Extract line and compute index of offset byte in line.
    line := bs[begin:end]
    index := offset - begin

    return &ParserErrorDetail{
        Line: string(line),
        Idx:  index,
    }
}

// Lines returns the pretty formatted line output for the error details.
func (d ParserErrorDetail) Lines() []string {
    line := strings.TrimLeft(d.Line, "\t") // remove leading tabs
    tabCount := len(d.Line) - len(line)
    indent := d.Idx - tabCount
    if indent < 0 {
        indent = 0
    }
    return []string{line, strings.Repeat(" ", indent) + "^"}
}

func isNewLineChar(b byte) bool {
    return b == '\r' || b == '\n'
type ParserErrorDetail = v1.ParserErrorDetail

func setDefaultRegoVersion(opts ParserOptions) ParserOptions {
    if opts.RegoVersion == RegoUndefined {
        opts.RegoVersion = DefaultRegoVersion
    }
    return opts
}
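All parser entry points now funnel through setDefaultRegoVersion before delegating to v1, so callers that leave ParserOptions.RegoVersion unset get this package's DefaultRegoVersion rather than v1's. A hedged sketch of a call through the shim; the module source is chosen to be valid under either Rego version, so it parses whichever default applies.

package main

import (
    "fmt"

    "github.com/open-policy-agent/opa/ast"
)

func main() {
    module, err := ast.ParseModuleWithOpts("example.rego",
        "package example\n\nallow := true\n",
        ast.ParserOptions{}) // RegoVersion unset: setDefaultRegoVersion fills it in
    if err != nil {
        fmt.Println("parse error:", err)
        return
    }
    fmt.Println(module.Package) // package example
}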
1970
vendor/github.com/open-policy-agent/opa/ast/policy.go
generated
vendored
1970
vendor/github.com/open-policy-agent/opa/ast/policy.go
generated
vendored
File diff suppressed because it is too large
Load Diff
70
vendor/github.com/open-policy-agent/opa/ast/pretty.go
generated
vendored
70
vendor/github.com/open-policy-agent/opa/ast/pretty.go
generated
vendored
@@ -5,78 +5,14 @@
package ast

import (
    "fmt"
    "io"
    "strings"

    v1 "github.com/open-policy-agent/opa/v1/ast"
)

// Pretty writes a pretty representation of the AST rooted at x to w.
//
// This is function is intended for debug purposes when inspecting ASTs.
func Pretty(w io.Writer, x interface{}) {
    pp := &prettyPrinter{
        depth: -1,
        w:     w,
    }
    NewBeforeAfterVisitor(pp.Before, pp.After).Walk(x)
}

type prettyPrinter struct {
    depth int
    w     io.Writer
}

func (pp *prettyPrinter) Before(x interface{}) bool {
    switch x.(type) {
    case *Term:
    default:
        pp.depth++
    }

    switch x := x.(type) {
    case *Term:
        return false
    case Args:
        if len(x) == 0 {
            return false
        }
        pp.writeType(x)
    case *Expr:
        extras := []string{}
        if x.Negated {
            extras = append(extras, "negated")
        }
        extras = append(extras, fmt.Sprintf("index=%d", x.Index))
        pp.writeIndent("%v %v", TypeName(x), strings.Join(extras, " "))
    case Null, Boolean, Number, String, Var:
        pp.writeValue(x)
    default:
        pp.writeType(x)
    }
    return false
}

func (pp *prettyPrinter) After(x interface{}) {
    switch x.(type) {
    case *Term:
    default:
        pp.depth--
    }
}

func (pp *prettyPrinter) writeValue(x interface{}) {
    pp.writeIndent(fmt.Sprint(x))
}

func (pp *prettyPrinter) writeType(x interface{}) {
    pp.writeIndent(TypeName(x))
}

func (pp *prettyPrinter) writeIndent(f string, a ...interface{}) {
    pad := strings.Repeat(" ", pp.depth)
    pp.write(pad+f, a...)
}

func (pp *prettyPrinter) write(f string, a ...interface{}) {
    fmt.Fprintf(pp.w, f+"\n", a...)
    v1.Pretty(w, x)
}
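Pretty keeps its public signature and now simply forwards to v1.Pretty. A minimal sketch of the debug helper, assuming the vendored module is importable:

package main

import (
    "os"

    "github.com/open-policy-agent/opa/ast"
)

func main() {
    module := ast.MustParseModule("package demo\n\np := 7\n")
    ast.Pretty(os.Stdout, module) // prints an indented dump of the module AST
}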
52 vendor/github.com/open-policy-agent/opa/ast/schema.go generated vendored
@@ -5,59 +5,13 @@
package ast

import (
    "fmt"

    "github.com/open-policy-agent/opa/types"
    "github.com/open-policy-agent/opa/util"
    v1 "github.com/open-policy-agent/opa/v1/ast"
)

// SchemaSet holds a map from a path to a schema.
type SchemaSet struct {
    m *util.HashMap
}
type SchemaSet = v1.SchemaSet

// NewSchemaSet returns an empty SchemaSet.
func NewSchemaSet() *SchemaSet {

    eqFunc := func(a, b util.T) bool {
        return a.(Ref).Equal(b.(Ref))
    }

    hashFunc := func(x util.T) int { return x.(Ref).Hash() }

    return &SchemaSet{
        m: util.NewHashMap(eqFunc, hashFunc),
    }
}

// Put inserts a raw schema into the set.
func (ss *SchemaSet) Put(path Ref, raw interface{}) {
    ss.m.Put(path, raw)
}

// Get returns the raw schema identified by the path.
func (ss *SchemaSet) Get(path Ref) interface{} {
    if ss == nil {
        return nil
    }
    x, ok := ss.m.Get(path)
    if !ok {
        return nil
    }
    return x
}

func loadSchema(raw interface{}, allowNet []string) (types.Type, error) {

    jsonSchema, err := compileSchema(raw, allowNet)
    if err != nil {
        return nil, err
    }

    tpe, err := newSchemaParser().parseSchema(jsonSchema.RootSchema)
    if err != nil {
        return nil, fmt.Errorf("type checking: %w", err)
    }

    return tpe, nil
    return v1.NewSchemaSet()
}
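SchemaSet becomes a type alias, so construction and lookup are unchanged for callers. A small sketch, assuming ast.SchemaRootRef as the conventional path for the input schema (the schema document itself is a made-up placeholder):

package main

import (
    "fmt"

    "github.com/open-policy-agent/opa/v1/ast"
)

func main() {
    ss := ast.NewSchemaSet()

    // Any JSON-decoded schema document can be stored against a path.
    schema := map[string]interface{}{"type": "object"}
    ss.Put(ast.SchemaRootRef, schema)

    fmt.Println(ss.Get(ast.SchemaRootRef)) // map[type:object]
}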
8 vendor/github.com/open-policy-agent/opa/ast/strings.go generated vendored
@@ -5,14 +5,10 @@
package ast

import (
    "reflect"
    "strings"

    v1 "github.com/open-policy-agent/opa/v1/ast"
)

// TypeName returns a human readable name for the AST element type.
func TypeName(x interface{}) string {
    if _, ok := x.(*lazyObj); ok {
        return "object"
    }
    return strings.ToLower(reflect.Indirect(reflect.ValueOf(x)).Type().Name())
    return v1.TypeName(x)
}
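TypeName likewise forwards to v1 and still just lowercases the element's Go type name. A quick sketch:

package main

import (
    "fmt"

    "github.com/open-policy-agent/opa/v1/ast"
)

func main() {
    fmt.Println(ast.TypeName(ast.String("hello")))               // string
    fmt.Println(ast.TypeName(ast.Boolean(true)))                 // boolean
    fmt.Println(ast.TypeName(ast.MustParseTerm("[1, 2]").Value)) // array
}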
3098 vendor/github.com/open-policy-agent/opa/ast/term.go generated vendored
File diff suppressed because it is too large
401 vendor/github.com/open-policy-agent/opa/ast/transform.go generated vendored
@@ -5,427 +5,42 @@
package ast

import (
    "fmt"

    v1 "github.com/open-policy-agent/opa/v1/ast"
)

// Transformer defines the interface for transforming AST elements. If the
// transformer returns nil and does not indicate an error, the AST element will
// be set to nil and no transformations will be applied to children of the
// element.
type Transformer interface {
    Transform(interface{}) (interface{}, error)
}
type Transformer = v1.Transformer

// Transform iterates the AST and calls the Transform function on the
// Transformer t for x before recursing.
func Transform(t Transformer, x interface{}) (interface{}, error) {

    if term, ok := x.(*Term); ok {
        return Transform(t, term.Value)
    }

    y, err := t.Transform(x)
    if err != nil {
        return x, err
    }

    if y == nil {
        return nil, nil
    }

    var ok bool
    switch y := y.(type) {
    case *Module:
        p, err := Transform(t, y.Package)
        if err != nil {
            return nil, err
        }
        if y.Package, ok = p.(*Package); !ok {
            return nil, fmt.Errorf("illegal transform: %T != %T", y.Package, p)
        }
        for i := range y.Imports {
            imp, err := Transform(t, y.Imports[i])
            if err != nil {
                return nil, err
            }
            if y.Imports[i], ok = imp.(*Import); !ok {
                return nil, fmt.Errorf("illegal transform: %T != %T", y.Imports[i], imp)
            }
        }
        for i := range y.Rules {
            rule, err := Transform(t, y.Rules[i])
            if err != nil {
                return nil, err
            }
            if y.Rules[i], ok = rule.(*Rule); !ok {
                return nil, fmt.Errorf("illegal transform: %T != %T", y.Rules[i], rule)
            }
        }
        for i := range y.Annotations {
            a, err := Transform(t, y.Annotations[i])
            if err != nil {
                return nil, err
            }
            if y.Annotations[i], ok = a.(*Annotations); !ok {
                return nil, fmt.Errorf("illegal transform: %T != %T", y.Annotations[i], a)
            }
        }
        for i := range y.Comments {
            comment, err := Transform(t, y.Comments[i])
            if err != nil {
                return nil, err
            }
            if y.Comments[i], ok = comment.(*Comment); !ok {
                return nil, fmt.Errorf("illegal transform: %T != %T", y.Comments[i], comment)
            }
        }
        return y, nil
    case *Package:
        ref, err := Transform(t, y.Path)
        if err != nil {
            return nil, err
        }
        if y.Path, ok = ref.(Ref); !ok {
            return nil, fmt.Errorf("illegal transform: %T != %T", y.Path, ref)
        }
        return y, nil
    case *Import:
        y.Path, err = transformTerm(t, y.Path)
        if err != nil {
            return nil, err
        }
        if y.Alias, err = transformVar(t, y.Alias); err != nil {
            return nil, err
        }
        return y, nil
    case *Rule:
        if y.Head, err = transformHead(t, y.Head); err != nil {
            return nil, err
        }
        if y.Body, err = transformBody(t, y.Body); err != nil {
            return nil, err
        }
        if y.Else != nil {
            rule, err := Transform(t, y.Else)
            if err != nil {
                return nil, err
            }
            if y.Else, ok = rule.(*Rule); !ok {
                return nil, fmt.Errorf("illegal transform: %T != %T", y.Else, rule)
            }
        }
        return y, nil
    case *Head:
        if y.Reference, err = transformRef(t, y.Reference); err != nil {
            return nil, err
        }
        if y.Name, err = transformVar(t, y.Name); err != nil {
            return nil, err
        }
        if y.Args, err = transformArgs(t, y.Args); err != nil {
            return nil, err
        }
        if y.Key != nil {
            if y.Key, err = transformTerm(t, y.Key); err != nil {
                return nil, err
            }
        }
        if y.Value != nil {
            if y.Value, err = transformTerm(t, y.Value); err != nil {
                return nil, err
            }
        }
        return y, nil
    case Args:
        for i := range y {
            if y[i], err = transformTerm(t, y[i]); err != nil {
                return nil, err
            }
        }
        return y, nil
    case Body:
        for i, e := range y {
            e, err := Transform(t, e)
            if err != nil {
                return nil, err
            }
            if y[i], ok = e.(*Expr); !ok {
                return nil, fmt.Errorf("illegal transform: %T != %T", y[i], e)
            }
        }
        return y, nil
    case *Expr:
        switch ts := y.Terms.(type) {
        case *SomeDecl:
            decl, err := Transform(t, ts)
            if err != nil {
                return nil, err
            }
            if y.Terms, ok = decl.(*SomeDecl); !ok {
                return nil, fmt.Errorf("illegal transform: %T != %T", y, decl)
            }
            return y, nil
        case []*Term:
            for i := range ts {
                if ts[i], err = transformTerm(t, ts[i]); err != nil {
                    return nil, err
                }
            }
        case *Term:
            if y.Terms, err = transformTerm(t, ts); err != nil {
                return nil, err
            }
        case *Every:
            if ts.Key != nil {
                ts.Key, err = transformTerm(t, ts.Key)
                if err != nil {
                    return nil, err
                }
            }
            ts.Value, err = transformTerm(t, ts.Value)
            if err != nil {
                return nil, err
            }
            ts.Domain, err = transformTerm(t, ts.Domain)
            if err != nil {
                return nil, err
            }
            ts.Body, err = transformBody(t, ts.Body)
            if err != nil {
                return nil, err
            }
            y.Terms = ts
        }
        for i, w := range y.With {
            w, err := Transform(t, w)
            if err != nil {
                return nil, err
            }
            if y.With[i], ok = w.(*With); !ok {
                return nil, fmt.Errorf("illegal transform: %T != %T", y.With[i], w)
            }
        }
        return y, nil
    case *With:
        if y.Target, err = transformTerm(t, y.Target); err != nil {
            return nil, err
        }
        if y.Value, err = transformTerm(t, y.Value); err != nil {
            return nil, err
        }
        return y, nil
    case Ref:
        for i, term := range y {
            if y[i], err = transformTerm(t, term); err != nil {
                return nil, err
            }
        }
        return y, nil
    case *object:
        return y.Map(func(k, v *Term) (*Term, *Term, error) {
            k, err := transformTerm(t, k)
            if err != nil {
                return nil, nil, err
            }
            v, err = transformTerm(t, v)
            if err != nil {
                return nil, nil, err
            }
            return k, v, nil
        })
    case *Array:
        for i := 0; i < y.Len(); i++ {
            v, err := transformTerm(t, y.Elem(i))
            if err != nil {
                return nil, err
            }
            y.set(i, v)
        }
        return y, nil
    case Set:
        y, err = y.Map(func(term *Term) (*Term, error) {
            return transformTerm(t, term)
        })
        if err != nil {
            return nil, err
        }
        return y, nil
    case *ArrayComprehension:
        if y.Term, err = transformTerm(t, y.Term); err != nil {
            return nil, err
        }
        if y.Body, err = transformBody(t, y.Body); err != nil {
            return nil, err
        }
        return y, nil
    case *ObjectComprehension:
        if y.Key, err = transformTerm(t, y.Key); err != nil {
            return nil, err
        }
        if y.Value, err = transformTerm(t, y.Value); err != nil {
            return nil, err
        }
        if y.Body, err = transformBody(t, y.Body); err != nil {
            return nil, err
        }
        return y, nil
    case *SetComprehension:
        if y.Term, err = transformTerm(t, y.Term); err != nil {
            return nil, err
        }
        if y.Body, err = transformBody(t, y.Body); err != nil {
            return nil, err
        }
        return y, nil
    case Call:
        for i := range y {
            if y[i], err = transformTerm(t, y[i]); err != nil {
                return nil, err
            }
        }
        return y, nil
    default:
        return y, nil
    }
    return v1.Transform(t, x)
}

// TransformRefs calls the function f on all references under x.
func TransformRefs(x interface{}, f func(Ref) (Value, error)) (interface{}, error) {
    t := &GenericTransformer{func(x interface{}) (interface{}, error) {
        if r, ok := x.(Ref); ok {
            return f(r)
        }
        return x, nil
    }}
    return Transform(t, x)
    return v1.TransformRefs(x, f)
}

// TransformVars calls the function f on all vars under x.
func TransformVars(x interface{}, f func(Var) (Value, error)) (interface{}, error) {
    t := &GenericTransformer{func(x interface{}) (interface{}, error) {
        if v, ok := x.(Var); ok {
            return f(v)
        }
        return x, nil
    }}
    return Transform(t, x)
    return v1.TransformVars(x, f)
}

// TransformComprehensions calls the function f on all comprehensions under x.
func TransformComprehensions(x interface{}, f func(interface{}) (Value, error)) (interface{}, error) {
    t := &GenericTransformer{func(x interface{}) (interface{}, error) {
        switch x := x.(type) {
        case *ArrayComprehension:
            return f(x)
        case *SetComprehension:
            return f(x)
        case *ObjectComprehension:
            return f(x)
        }
        return x, nil
    }}
    return Transform(t, x)
    return v1.TransformComprehensions(x, f)
}

// GenericTransformer implements the Transformer interface to provide a utility
// to transform AST nodes using a closure.
type GenericTransformer struct {
    f func(interface{}) (interface{}, error)
}
type GenericTransformer = v1.GenericTransformer

// NewGenericTransformer returns a new GenericTransformer that will transform
// AST nodes using the function f.
func NewGenericTransformer(f func(x interface{}) (interface{}, error)) *GenericTransformer {
    return &GenericTransformer{
        f: f,
    }
}

// Transform calls the function f on the GenericTransformer.
func (t *GenericTransformer) Transform(x interface{}) (interface{}, error) {
    return t.f(x)
}

func transformHead(t Transformer, head *Head) (*Head, error) {
    y, err := Transform(t, head)
    if err != nil {
        return nil, err
    }
    h, ok := y.(*Head)
    if !ok {
        return nil, fmt.Errorf("illegal transform: %T != %T", head, y)
    }
    return h, nil
}

func transformArgs(t Transformer, args Args) (Args, error) {
    y, err := Transform(t, args)
    if err != nil {
        return nil, err
    }
    a, ok := y.(Args)
    if !ok {
        return nil, fmt.Errorf("illegal transform: %T != %T", args, y)
    }
    return a, nil
}

func transformBody(t Transformer, body Body) (Body, error) {
    y, err := Transform(t, body)
    if err != nil {
        return nil, err
    }
    r, ok := y.(Body)
    if !ok {
        return nil, fmt.Errorf("illegal transform: %T != %T", body, y)
    }
    return r, nil
}

func transformTerm(t Transformer, term *Term) (*Term, error) {
    v, err := transformValue(t, term.Value)
    if err != nil {
        return nil, err
    }
    r := &Term{
        Value:    v,
        Location: term.Location,
    }
    return r, nil
}

func transformValue(t Transformer, v Value) (Value, error) {
    v1, err := Transform(t, v)
    if err != nil {
        return nil, err
    }
    r, ok := v1.(Value)
    if !ok {
        return nil, fmt.Errorf("illegal transform: %T != %T", v, v1)
    }
    return r, nil
}

func transformVar(t Transformer, v Var) (Var, error) {
    v1, err := Transform(t, v)
    if err != nil {
        return "", err
    }
    r, ok := v1.(Var)
    if !ok {
        return "", fmt.Errorf("illegal transform: %T != %T", v, v1)
    }
    return r, nil
}

func transformRef(t Transformer, r Ref) (Ref, error) {
    r1, err := Transform(t, r)
    if err != nil {
        return nil, err
    }
    r2, ok := r1.(Ref)
    if !ok {
        return nil, fmt.Errorf("illegal transform: %T != %T", r, r2)
    }
    return r2, nil
    return v1.NewGenericTransformer(f)
}
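All of the transform entry points now delegate to v1 while keeping their signatures, so existing rewriting code compiles unchanged. A small sketch that renames every variable under a term, assuming the v1 package:

package main

import (
    "fmt"

    "github.com/open-policy-agent/opa/v1/ast"
)

func main() {
    term := ast.MustParseTerm(`[x, {"k": y}]`)

    // TransformVars wraps the closure in a GenericTransformer and
    // rewrites each Var it encounters while walking the AST.
    result, err := ast.TransformVars(term, func(v ast.Var) (ast.Value, error) {
        return ast.Var("renamed_" + string(v)), nil
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(result) // [renamed_x, {"k": renamed_y}]
}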
225 vendor/github.com/open-policy-agent/opa/ast/unify.go generated vendored
@@ -4,232 +4,11 @@

package ast

func isRefSafe(ref Ref, safe VarSet) bool {
    switch head := ref[0].Value.(type) {
    case Var:
        return safe.Contains(head)
    case Call:
        return isCallSafe(head, safe)
    default:
        for v := range ref[0].Vars() {
            if !safe.Contains(v) {
                return false
            }
        }
        return true
    }
}

func isCallSafe(call Call, safe VarSet) bool {
    vis := NewVarVisitor().WithParams(SafetyCheckVisitorParams)
    vis.Walk(call)
    unsafe := vis.Vars().Diff(safe)
    return len(unsafe) == 0
}
import v1 "github.com/open-policy-agent/opa/v1/ast"

// Unify returns a set of variables that will be unified when the equality expression defined by
// terms a and b is evaluated. The unifier assumes that variables in the VarSet safe are already
// unified.
func Unify(safe VarSet, a *Term, b *Term) VarSet {
    u := &unifier{
        safe:    safe,
        unified: VarSet{},
        unknown: map[Var]VarSet{},
    }
    u.unify(a, b)
    return u.unified
}

type unifier struct {
    safe    VarSet
    unified VarSet
    unknown map[Var]VarSet
}

func (u *unifier) isSafe(x Var) bool {
    return u.safe.Contains(x) || u.unified.Contains(x)
}

func (u *unifier) unify(a *Term, b *Term) {

    switch a := a.Value.(type) {

    case Var:
        switch b := b.Value.(type) {
        case Var:
            if u.isSafe(b) {
                u.markSafe(a)
            } else if u.isSafe(a) {
                u.markSafe(b)
            } else {
                u.markUnknown(a, b)
                u.markUnknown(b, a)
            }
        case *Array, Object:
            u.unifyAll(a, b)
        case Ref:
            if isRefSafe(b, u.safe) {
                u.markSafe(a)
            }
        case Call:
            if isCallSafe(b, u.safe) {
                u.markSafe(a)
            }
        default:
            u.markSafe(a)
        }

    case Ref:
        if isRefSafe(a, u.safe) {
            switch b := b.Value.(type) {
            case Var:
                u.markSafe(b)
            case *Array, Object:
                u.markAllSafe(b)
            }
        }

    case Call:
        if isCallSafe(a, u.safe) {
            switch b := b.Value.(type) {
            case Var:
                u.markSafe(b)
            case *Array, Object:
                u.markAllSafe(b)
            }
        }

    case *ArrayComprehension:
        switch b := b.Value.(type) {
        case Var:
            u.markSafe(b)
        case *Array:
            u.markAllSafe(b)
        }
    case *ObjectComprehension:
        switch b := b.Value.(type) {
        case Var:
            u.markSafe(b)
        case *object:
            u.markAllSafe(b)
        }
    case *SetComprehension:
        switch b := b.Value.(type) {
        case Var:
            u.markSafe(b)
        }

    case *Array:
        switch b := b.Value.(type) {
        case Var:
            u.unifyAll(b, a)
        case *ArrayComprehension, *ObjectComprehension, *SetComprehension:
            u.markAllSafe(a)
        case Ref:
            if isRefSafe(b, u.safe) {
                u.markAllSafe(a)
            }
        case Call:
            if isCallSafe(b, u.safe) {
                u.markAllSafe(a)
            }
        case *Array:
            if a.Len() == b.Len() {
                for i := 0; i < a.Len(); i++ {
                    u.unify(a.Elem(i), b.Elem(i))
                }
            }
        }

    case *object:
        switch b := b.Value.(type) {
        case Var:
            u.unifyAll(b, a)
        case Ref:
            if isRefSafe(b, u.safe) {
                u.markAllSafe(a)
            }
        case Call:
            if isCallSafe(b, u.safe) {
                u.markAllSafe(a)
            }
        case *object:
            if a.Len() == b.Len() {
                _ = a.Iter(func(k, v *Term) error {
                    if v2 := b.Get(k); v2 != nil {
                        u.unify(v, v2)
                    }
                    return nil
                }) // impossible to return error
            }
        }

    default:
        switch b := b.Value.(type) {
        case Var:
            u.markSafe(b)
        }
    }
}

func (u *unifier) markAllSafe(x Value) {
    vis := u.varVisitor()
    vis.Walk(x)
    for v := range vis.Vars() {
        u.markSafe(v)
    }
}

func (u *unifier) markSafe(x Var) {
    u.unified.Add(x)

    // Add dependencies of 'x' to safe set
    vs := u.unknown[x]
    delete(u.unknown, x)
    for v := range vs {
        u.markSafe(v)
    }

    // Add dependants of 'x' to safe set if they have no more
    // dependencies.
    for v, deps := range u.unknown {
        if deps.Contains(x) {
            delete(deps, x)
            if len(deps) == 0 {
                u.markSafe(v)
            }
        }
    }
}

func (u *unifier) markUnknown(a, b Var) {
    if _, ok := u.unknown[a]; !ok {
        u.unknown[a] = NewVarSet()
    }
    u.unknown[a].Add(b)
}

func (u *unifier) unifyAll(a Var, b Value) {
    if u.isSafe(a) {
        u.markAllSafe(b)
    } else {
        vis := u.varVisitor()
        vis.Walk(b)
        unsafe := vis.Vars().Diff(u.safe).Diff(u.unified)
        if len(unsafe) == 0 {
            u.markSafe(a)
        } else {
            for v := range unsafe {
                u.markUnknown(a, v)
            }
        }
    }
}

func (u *unifier) varVisitor() *VarVisitor {
    return NewVarVisitor().WithParams(VarVisitorParams{
        SkipRefHead:    true,
        SkipObjectKeys: true,
        SkipClosures:   true,
    })
    return v1.Unify(safe, a, b)
}
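Unify's contract is easiest to see with a tiny example; a sketch assuming the v1 package:

package main

import (
    "fmt"

    "github.com/open-policy-agent/opa/v1/ast"
)

func main() {
    // With input already safe, unifying [a, b] = [input.x, 1]
    // makes both a and b safe.
    a := ast.MustParseTerm("[a, b]")
    b := ast.MustParseTerm("[input.x, 1]")

    safe := ast.NewVarSet(ast.Var("input"))
    unified := ast.Unify(safe, a, b)
    fmt.Println(unified.Sorted()) // [a b]
}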
89 vendor/github.com/open-policy-agent/opa/ast/varset.go generated vendored
@@ -5,96 +5,13 @@
package ast

import (
    "fmt"
    "sort"

    v1 "github.com/open-policy-agent/opa/v1/ast"
)

// VarSet represents a set of variables.
type VarSet map[Var]struct{}
type VarSet = v1.VarSet

// NewVarSet returns a new VarSet containing the specified variables.
func NewVarSet(vs ...Var) VarSet {
    s := VarSet{}
    for _, v := range vs {
        s.Add(v)
    }
    return s
}

// Add updates the set to include the variable "v".
func (s VarSet) Add(v Var) {
    s[v] = struct{}{}
}

// Contains returns true if the set contains the variable "v".
func (s VarSet) Contains(v Var) bool {
    _, ok := s[v]
    return ok
}

// Copy returns a shallow copy of the VarSet.
func (s VarSet) Copy() VarSet {
    cpy := VarSet{}
    for v := range s {
        cpy.Add(v)
    }
    return cpy
}

// Diff returns a VarSet containing variables in s that are not in vs.
func (s VarSet) Diff(vs VarSet) VarSet {
    r := VarSet{}
    for v := range s {
        if !vs.Contains(v) {
            r.Add(v)
        }
    }
    return r
}

// Equal returns true if s contains exactly the same elements as vs.
func (s VarSet) Equal(vs VarSet) bool {
    if len(s.Diff(vs)) > 0 {
        return false
    }
    return len(vs.Diff(s)) == 0
}

// Intersect returns a VarSet containing variables in s that are in vs.
func (s VarSet) Intersect(vs VarSet) VarSet {
    r := VarSet{}
    for v := range s {
        if vs.Contains(v) {
            r.Add(v)
        }
    }
    return r
}

// Sorted returns a sorted slice of vars from s.
func (s VarSet) Sorted() []Var {
    sorted := make([]Var, 0, len(s))
    for v := range s {
        sorted = append(sorted, v)
    }
    sort.Slice(sorted, func(i, j int) bool {
        return sorted[i].Compare(sorted[j]) < 0
    })
    return sorted
}

// Update merges the other VarSet into this VarSet.
func (s VarSet) Update(vs VarSet) {
    for v := range vs {
        s.Add(v)
    }
}

func (s VarSet) String() string {
    tmp := make([]string, 0, len(s))
    for v := range s {
        tmp = append(tmp, string(v))
    }
    sort.Strings(tmp)
    return fmt.Sprintf("%v", tmp)
    return v1.NewVarSet(vs...)
}
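The VarSet alias keeps the familiar set operations; a short sketch:

package main

import (
    "fmt"

    "github.com/open-policy-agent/opa/v1/ast"
)

func main() {
    s := ast.NewVarSet(ast.Var("x"), ast.Var("y"))
    t := ast.NewVarSet(ast.Var("y"), ast.Var("z"))

    fmt.Println(s.Intersect(t)) // [y]
    fmt.Println(s.Diff(t))      // [x]

    s.Update(t) // s now contains x, y, z
    fmt.Println(s.Contains(ast.Var("z"))) // true
}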
704 vendor/github.com/open-policy-agent/opa/ast/visit.go generated vendored
@@ -4,780 +4,120 @@

package ast

import v1 "github.com/open-policy-agent/opa/v1/ast"

// Visitor defines the interface for iterating AST elements. The Visit function
// can return a Visitor w which will be used to visit the children of the AST
// element v. If the Visit function returns nil, the children will not be
// visited.
// Deprecated: use GenericVisitor or another visitor implementation
type Visitor interface {
    Visit(v interface{}) (w Visitor)
}
type Visitor = v1.Visitor

// BeforeAndAfterVisitor wraps Visitor to provide hooks for being called before
// and after the AST has been visited.
// Deprecated: use GenericVisitor or another visitor implementation
type BeforeAndAfterVisitor interface {
    Visitor
    Before(x interface{})
    After(x interface{})
}
type BeforeAndAfterVisitor = v1.BeforeAndAfterVisitor

// Walk iterates the AST by calling the Visit function on the Visitor
// v for x before recursing.
// Deprecated: use GenericVisitor.Walk
func Walk(v Visitor, x interface{}) {
    if bav, ok := v.(BeforeAndAfterVisitor); !ok {
        walk(v, x)
    } else {
        bav.Before(x)
        defer bav.After(x)
        walk(bav, x)
    }
    v1.Walk(v, x)
}

// WalkBeforeAndAfter iterates the AST by calling the Visit function on the
// Visitor v for x before recursing.
// Deprecated: use GenericVisitor.Walk
func WalkBeforeAndAfter(v BeforeAndAfterVisitor, x interface{}) {
    Walk(v, x)
}

func walk(v Visitor, x interface{}) {
    w := v.Visit(x)
    if w == nil {
        return
    }
    switch x := x.(type) {
    case *Module:
        Walk(w, x.Package)
        for i := range x.Imports {
            Walk(w, x.Imports[i])
        }
        for i := range x.Rules {
            Walk(w, x.Rules[i])
        }
        for i := range x.Annotations {
            Walk(w, x.Annotations[i])
        }
        for i := range x.Comments {
            Walk(w, x.Comments[i])
        }
    case *Package:
        Walk(w, x.Path)
    case *Import:
        Walk(w, x.Path)
        Walk(w, x.Alias)
    case *Rule:
        Walk(w, x.Head)
        Walk(w, x.Body)
        if x.Else != nil {
            Walk(w, x.Else)
        }
    case *Head:
        Walk(w, x.Name)
        Walk(w, x.Args)
        if x.Key != nil {
            Walk(w, x.Key)
        }
        if x.Value != nil {
            Walk(w, x.Value)
        }
    case Body:
        for i := range x {
            Walk(w, x[i])
        }
    case Args:
        for i := range x {
            Walk(w, x[i])
        }
    case *Expr:
        switch ts := x.Terms.(type) {
        case *Term, *SomeDecl, *Every:
            Walk(w, ts)
        case []*Term:
            for i := range ts {
                Walk(w, ts[i])
            }
        }
        for i := range x.With {
            Walk(w, x.With[i])
        }
    case *With:
        Walk(w, x.Target)
        Walk(w, x.Value)
    case *Term:
        Walk(w, x.Value)
    case Ref:
        for i := range x {
            Walk(w, x[i])
        }
    case *object:
        x.Foreach(func(k, vv *Term) {
            Walk(w, k)
            Walk(w, vv)
        })
    case *Array:
        x.Foreach(func(t *Term) {
            Walk(w, t)
        })
    case Set:
        x.Foreach(func(t *Term) {
            Walk(w, t)
        })
    case *ArrayComprehension:
        Walk(w, x.Term)
        Walk(w, x.Body)
    case *ObjectComprehension:
        Walk(w, x.Key)
        Walk(w, x.Value)
        Walk(w, x.Body)
    case *SetComprehension:
        Walk(w, x.Term)
        Walk(w, x.Body)
    case Call:
        for i := range x {
            Walk(w, x[i])
        }
    case *Every:
        if x.Key != nil {
            Walk(w, x.Key)
        }
        Walk(w, x.Value)
        Walk(w, x.Domain)
        Walk(w, x.Body)
    case *SomeDecl:
        for i := range x.Symbols {
            Walk(w, x.Symbols[i])
        }
    }
    v1.WalkBeforeAndAfter(v, x)
}

// WalkVars calls the function f on all vars under x. If the function f
// returns true, AST nodes under the last node will not be visited.
func WalkVars(x interface{}, f func(Var) bool) {
    vis := &GenericVisitor{func(x interface{}) bool {
        if v, ok := x.(Var); ok {
            return f(v)
        }
        return false
    }}
    vis.Walk(x)
    v1.WalkVars(x, f)
}

// WalkClosures calls the function f on all closures under x. If the function f
// returns true, AST nodes under the last node will not be visited.
func WalkClosures(x interface{}, f func(interface{}) bool) {
    vis := &GenericVisitor{func(x interface{}) bool {
        switch x := x.(type) {
        case *ArrayComprehension, *ObjectComprehension, *SetComprehension, *Every:
            return f(x)
        }
        return false
    }}
    vis.Walk(x)
    v1.WalkClosures(x, f)
}

// WalkRefs calls the function f on all references under x. If the function f
// returns true, AST nodes under the last node will not be visited.
func WalkRefs(x interface{}, f func(Ref) bool) {
    vis := &GenericVisitor{func(x interface{}) bool {
        if r, ok := x.(Ref); ok {
            return f(r)
        }
        return false
    }}
    vis.Walk(x)
    v1.WalkRefs(x, f)
}

// WalkTerms calls the function f on all terms under x. If the function f
// returns true, AST nodes under the last node will not be visited.
func WalkTerms(x interface{}, f func(*Term) bool) {
    vis := &GenericVisitor{func(x interface{}) bool {
        if term, ok := x.(*Term); ok {
            return f(term)
        }
        return false
    }}
    vis.Walk(x)
    v1.WalkTerms(x, f)
}

// WalkWiths calls the function f on all with modifiers under x. If the function f
// returns true, AST nodes under the last node will not be visited.
func WalkWiths(x interface{}, f func(*With) bool) {
    vis := &GenericVisitor{func(x interface{}) bool {
        if w, ok := x.(*With); ok {
            return f(w)
        }
        return false
    }}
    vis.Walk(x)
    v1.WalkWiths(x, f)
}

// WalkExprs calls the function f on all expressions under x. If the function f
// returns true, AST nodes under the last node will not be visited.
func WalkExprs(x interface{}, f func(*Expr) bool) {
    vis := &GenericVisitor{func(x interface{}) bool {
        if r, ok := x.(*Expr); ok {
            return f(r)
        }
        return false
    }}
    vis.Walk(x)
    v1.WalkExprs(x, f)
}

// WalkBodies calls the function f on all bodies under x. If the function f
// returns true, AST nodes under the last node will not be visited.
func WalkBodies(x interface{}, f func(Body) bool) {
    vis := &GenericVisitor{func(x interface{}) bool {
        if b, ok := x.(Body); ok {
            return f(b)
        }
        return false
    }}
    vis.Walk(x)
    v1.WalkBodies(x, f)
}

// WalkRules calls the function f on all rules under x. If the function f
// returns true, AST nodes under the last node will not be visited.
func WalkRules(x interface{}, f func(*Rule) bool) {
    vis := &GenericVisitor{func(x interface{}) bool {
        if r, ok := x.(*Rule); ok {
            stop := f(r)
            // NOTE(tsandall): since rules cannot be embedded inside of queries
            // we can stop early if there is no else block.
            if stop || r.Else == nil {
                return true
            }
        }
        return false
    }}
    vis.Walk(x)
    v1.WalkRules(x, f)
}

// WalkNodes calls the function f on all nodes under x. If the function f
// returns true, AST nodes under the last node will not be visited.
func WalkNodes(x interface{}, f func(Node) bool) {
    vis := &GenericVisitor{func(x interface{}) bool {
        if n, ok := x.(Node); ok {
            return f(n)
        }
        return false
    }}
    vis.Walk(x)
    v1.WalkNodes(x, f)
}

// GenericVisitor provides a utility to walk over AST nodes using a
// closure. If the closure returns true, the visitor will not walk
// over AST nodes under x.
type GenericVisitor struct {
    f func(x interface{}) bool
}
type GenericVisitor = v1.GenericVisitor

// NewGenericVisitor returns a new GenericVisitor that will invoke the function
// f on AST nodes.
func NewGenericVisitor(f func(x interface{}) bool) *GenericVisitor {
    return &GenericVisitor{f}
}

// Walk iterates the AST by calling the function f on the
// GenericVisitor before recursing. Contrary to the generic Walk, this
// does not require allocating the visitor from heap.
func (vis *GenericVisitor) Walk(x interface{}) {
    if vis.f(x) {
        return
    }

    switch x := x.(type) {
    case *Module:
        vis.Walk(x.Package)
        for i := range x.Imports {
            vis.Walk(x.Imports[i])
        }
        for i := range x.Rules {
            vis.Walk(x.Rules[i])
        }
        for i := range x.Annotations {
            vis.Walk(x.Annotations[i])
        }
        for i := range x.Comments {
            vis.Walk(x.Comments[i])
        }
    case *Package:
        vis.Walk(x.Path)
    case *Import:
        vis.Walk(x.Path)
        vis.Walk(x.Alias)
    case *Rule:
        vis.Walk(x.Head)
        vis.Walk(x.Body)
        if x.Else != nil {
            vis.Walk(x.Else)
        }
    case *Head:
        vis.Walk(x.Name)
        vis.Walk(x.Args)
        if x.Key != nil {
            vis.Walk(x.Key)
        }
        if x.Value != nil {
            vis.Walk(x.Value)
        }
    case Body:
        for i := range x {
            vis.Walk(x[i])
        }
    case Args:
        for i := range x {
            vis.Walk(x[i])
        }
    case *Expr:
        switch ts := x.Terms.(type) {
        case *Term, *SomeDecl, *Every:
            vis.Walk(ts)
        case []*Term:
            for i := range ts {
                vis.Walk(ts[i])
            }
        }
        for i := range x.With {
            vis.Walk(x.With[i])
        }
    case *With:
        vis.Walk(x.Target)
        vis.Walk(x.Value)
    case *Term:
        vis.Walk(x.Value)
    case Ref:
        for i := range x {
            vis.Walk(x[i])
        }
    case *object:
        x.Foreach(func(k, _ *Term) {
            vis.Walk(k)
            vis.Walk(x.Get(k))
        })
    case Object:
        x.Foreach(func(k, _ *Term) {
            vis.Walk(k)
            vis.Walk(x.Get(k))
        })
    case *Array:
        x.Foreach(func(t *Term) {
            vis.Walk(t)
        })
    case Set:
        xSlice := x.Slice()
        for i := range xSlice {
            vis.Walk(xSlice[i])
        }
    case *ArrayComprehension:
        vis.Walk(x.Term)
        vis.Walk(x.Body)
    case *ObjectComprehension:
        vis.Walk(x.Key)
        vis.Walk(x.Value)
        vis.Walk(x.Body)
    case *SetComprehension:
        vis.Walk(x.Term)
        vis.Walk(x.Body)
    case Call:
        for i := range x {
            vis.Walk(x[i])
        }
    case *Every:
        if x.Key != nil {
            vis.Walk(x.Key)
        }
        vis.Walk(x.Value)
        vis.Walk(x.Domain)
        vis.Walk(x.Body)
    case *SomeDecl:
        for i := range x.Symbols {
            vis.Walk(x.Symbols[i])
        }
    }
    return v1.NewGenericVisitor(f)
}

// BeforeAfterVisitor provides a utility to walk over AST nodes using
// closures. If the before closure returns true, the visitor will not
// walk over AST nodes under x. The after closure is invoked always
// after visiting a node.
type BeforeAfterVisitor struct {
    before func(x interface{}) bool
    after  func(x interface{})
}
type BeforeAfterVisitor = v1.BeforeAfterVisitor

// NewBeforeAfterVisitor returns a new BeforeAndAfterVisitor that
// will invoke the functions before and after AST nodes.
func NewBeforeAfterVisitor(before func(x interface{}) bool, after func(x interface{})) *BeforeAfterVisitor {
    return &BeforeAfterVisitor{before, after}
}

// Walk iterates the AST by calling the functions on the
// BeforeAndAfterVisitor before and after recursing. Contrary to the
// generic Walk, this does not require allocating the visitor from
// heap.
func (vis *BeforeAfterVisitor) Walk(x interface{}) {
    defer vis.after(x)
    if vis.before(x) {
        return
    }

    switch x := x.(type) {
    case *Module:
        vis.Walk(x.Package)
        for i := range x.Imports {
            vis.Walk(x.Imports[i])
        }
        for i := range x.Rules {
            vis.Walk(x.Rules[i])
        }
        for i := range x.Annotations {
            vis.Walk(x.Annotations[i])
        }
        for i := range x.Comments {
            vis.Walk(x.Comments[i])
        }
    case *Package:
        vis.Walk(x.Path)
    case *Import:
        vis.Walk(x.Path)
        vis.Walk(x.Alias)
    case *Rule:
        vis.Walk(x.Head)
        vis.Walk(x.Body)
        if x.Else != nil {
            vis.Walk(x.Else)
        }
    case *Head:
        if len(x.Reference) > 0 {
            vis.Walk(x.Reference)
        } else {
            vis.Walk(x.Name)
            if x.Key != nil {
                vis.Walk(x.Key)
            }
        }
        vis.Walk(x.Args)
        if x.Value != nil {
            vis.Walk(x.Value)
        }
    case Body:
        for i := range x {
            vis.Walk(x[i])
        }
    case Args:
        for i := range x {
            vis.Walk(x[i])
        }
    case *Expr:
        switch ts := x.Terms.(type) {
        case *Term, *SomeDecl, *Every:
            vis.Walk(ts)
        case []*Term:
            for i := range ts {
                vis.Walk(ts[i])
            }
        }
        for i := range x.With {
            vis.Walk(x.With[i])
        }
    case *With:
        vis.Walk(x.Target)
        vis.Walk(x.Value)
    case *Term:
        vis.Walk(x.Value)
    case Ref:
        for i := range x {
            vis.Walk(x[i])
        }
    case *object:
        x.Foreach(func(k, _ *Term) {
            vis.Walk(k)
            vis.Walk(x.Get(k))
        })
    case Object:
        x.Foreach(func(k, _ *Term) {
            vis.Walk(k)
            vis.Walk(x.Get(k))
        })
    case *Array:
        x.Foreach(func(t *Term) {
            vis.Walk(t)
        })
    case Set:
        xSlice := x.Slice()
        for i := range xSlice {
            vis.Walk(xSlice[i])
        }
    case *ArrayComprehension:
        vis.Walk(x.Term)
        vis.Walk(x.Body)
    case *ObjectComprehension:
        vis.Walk(x.Key)
        vis.Walk(x.Value)
        vis.Walk(x.Body)
    case *SetComprehension:
        vis.Walk(x.Term)
        vis.Walk(x.Body)
    case Call:
        for i := range x {
            vis.Walk(x[i])
        }
    case *Every:
        if x.Key != nil {
            vis.Walk(x.Key)
        }
        vis.Walk(x.Value)
        vis.Walk(x.Domain)
        vis.Walk(x.Body)
    case *SomeDecl:
        for i := range x.Symbols {
            vis.Walk(x.Symbols[i])
        }
    }
    return v1.NewBeforeAfterVisitor(before, after)
}

// VarVisitor walks AST nodes under a given node and collects all encountered
// variables. The collected variables can be controlled by specifying
// VarVisitorParams when creating the visitor.
type VarVisitor struct {
    params VarVisitorParams
    vars   VarSet
}
type VarVisitor = v1.VarVisitor

// VarVisitorParams contains settings for a VarVisitor.
type VarVisitorParams struct {
    SkipRefHead     bool
    SkipRefCallHead bool
    SkipObjectKeys  bool
    SkipClosures    bool
    SkipWithTarget  bool
    SkipSets        bool
}
type VarVisitorParams = v1.VarVisitorParams

// NewVarVisitor returns a new VarVisitor object.
func NewVarVisitor() *VarVisitor {
    return &VarVisitor{
        vars: NewVarSet(),
    }
}

// WithParams sets the parameters in params on vis.
func (vis *VarVisitor) WithParams(params VarVisitorParams) *VarVisitor {
    vis.params = params
    return vis
}

// Vars returns a VarSet that contains collected vars.
func (vis *VarVisitor) Vars() VarSet {
    return vis.vars
}

// visit determines if the VarVisitor will recurse into x: if it returns `true`,
// the visitor will _skip_ that branch of the AST
func (vis *VarVisitor) visit(v interface{}) bool {
    if vis.params.SkipObjectKeys {
        if o, ok := v.(Object); ok {
            o.Foreach(func(_, v *Term) {
                vis.Walk(v)
            })
            return true
        }
    }
    if vis.params.SkipRefHead {
        if r, ok := v.(Ref); ok {
            rSlice := r[1:]
            for i := range rSlice {
                vis.Walk(rSlice[i])
            }
            return true
        }
    }
    if vis.params.SkipClosures {
        switch v := v.(type) {
        case *ArrayComprehension, *ObjectComprehension, *SetComprehension:
            return true
        case *Expr:
            if ev, ok := v.Terms.(*Every); ok {
                vis.Walk(ev.Domain)
                // We're _not_ walking ev.Body -- that's the closure here
                return true
            }
        }
    }
    if vis.params.SkipWithTarget {
        if v, ok := v.(*With); ok {
            vis.Walk(v.Value)
            return true
        }
    }
    if vis.params.SkipSets {
        if _, ok := v.(Set); ok {
            return true
        }
    }
    if vis.params.SkipRefCallHead {
        switch v := v.(type) {
        case *Expr:
            if terms, ok := v.Terms.([]*Term); ok {
                termSlice := terms[0].Value.(Ref)[1:]
                for i := range termSlice {
                    vis.Walk(termSlice[i])
                }
                for i := 1; i < len(terms); i++ {
                    vis.Walk(terms[i])
                }
                for i := range v.With {
                    vis.Walk(v.With[i])
                }
                return true
            }
        case Call:
            operator := v[0].Value.(Ref)
            for i := 1; i < len(operator); i++ {
                vis.Walk(operator[i])
            }
            for i := 1; i < len(v); i++ {
                vis.Walk(v[i])
            }
            return true
        case *With:
            if ref, ok := v.Target.Value.(Ref); ok {
                refSlice := ref[1:]
                for i := range refSlice {
                    vis.Walk(refSlice[i])
                }
            }
            if ref, ok := v.Value.Value.(Ref); ok {
                refSlice := ref[1:]
                for i := range refSlice {
                    vis.Walk(refSlice[i])
                }
            } else {
                vis.Walk(v.Value)
            }
            return true
        }
    }
    if v, ok := v.(Var); ok {
        vis.vars.Add(v)
    }
    return false
}

// Walk iterates the AST by calling the function f on the
// GenericVisitor before recursing. Contrary to the generic Walk, this
// does not require allocating the visitor from heap.
func (vis *VarVisitor) Walk(x interface{}) {
    if vis.visit(x) {
        return
    }

    switch x := x.(type) {
    case *Module:
        vis.Walk(x.Package)
        for i := range x.Imports {
            vis.Walk(x.Imports[i])
        }
        for i := range x.Rules {
            vis.Walk(x.Rules[i])
        }
        for i := range x.Comments {
            vis.Walk(x.Comments[i])
        }
    case *Package:
        vis.Walk(x.Path)
    case *Import:
        vis.Walk(x.Path)
        vis.Walk(x.Alias)
    case *Rule:
        vis.Walk(x.Head)
        vis.Walk(x.Body)
        if x.Else != nil {
            vis.Walk(x.Else)
        }
    case *Head:
        if len(x.Reference) > 0 {
            vis.Walk(x.Reference)
        } else {
            vis.Walk(x.Name)
            if x.Key != nil {
                vis.Walk(x.Key)
            }
        }
        vis.Walk(x.Args)

        if x.Value != nil {
            vis.Walk(x.Value)
        }
    case Body:
        for i := range x {
            vis.Walk(x[i])
        }
    case Args:
        for i := range x {
            vis.Walk(x[i])
        }
    case *Expr:
        switch ts := x.Terms.(type) {
        case *Term, *SomeDecl, *Every:
            vis.Walk(ts)
        case []*Term:
            for i := range ts {
                vis.Walk(ts[i])
            }
        }
        for i := range x.With {
            vis.Walk(x.With[i])
        }
    case *With:
        vis.Walk(x.Target)
        vis.Walk(x.Value)
    case *Term:
        vis.Walk(x.Value)
    case Ref:
        for i := range x {
            vis.Walk(x[i])
        }
    case *object:
        x.Foreach(func(k, _ *Term) {
            vis.Walk(k)
            vis.Walk(x.Get(k))
        })
    case *Array:
        x.Foreach(func(t *Term) {
            vis.Walk(t)
        })
    case Set:
        xSlice := x.Slice()
        for i := range xSlice {
            vis.Walk(xSlice[i])
        }
    case *ArrayComprehension:
        vis.Walk(x.Term)
        vis.Walk(x.Body)
    case *ObjectComprehension:
        vis.Walk(x.Key)
        vis.Walk(x.Value)
        vis.Walk(x.Body)
    case *SetComprehension:
        vis.Walk(x.Term)
        vis.Walk(x.Body)
    case Call:
        for i := range x {
            vis.Walk(x[i])
        }
    case *Every:
        if x.Key != nil {
            vis.Walk(x.Key)
        }
        vis.Walk(x.Value)
        vis.Walk(x.Domain)
        vis.Walk(x.Body)
    case *SomeDecl:
        for i := range x.Symbols {
            vis.Walk(x.Symbols[i])
        }
    }
    return v1.NewVarVisitor()
}
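The visitor entry points all forward to v1 now, so usage is unchanged. A sketch that collects every reference in a body with WalkRefs:

package main

import (
    "fmt"

    "github.com/open-policy-agent/opa/v1/ast"
)

func main() {
    body := ast.MustParseBody(`data.admins[input.user]`)

    // WalkRefs visits every Ref; returning false keeps recursing
    // into the children of the visited node, so the nested
    // input.user ref is visited too.
    ast.WalkRefs(body, func(r ast.Ref) bool {
        fmt.Println(r) // data.admins[input.user], then input.user
        return false
    })
}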
1688 vendor/github.com/open-policy-agent/opa/bundle/bundle.go generated vendored
File diff suppressed because it is too large
8 vendor/github.com/open-policy-agent/opa/bundle/doc.go generated vendored Normal file
@@ -0,0 +1,8 @@
// Copyright 2024 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.

// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended.
// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead.
// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information.
package bundle
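Per the deprecation note above, new code should import the v1 path directly. A small sketch that reads a bundle archive through the v1 package (the bundle.tar.gz path is a hypothetical file, e.g. one produced by `opa build`):

package main

import (
    "fmt"
    "os"

    "github.com/open-policy-agent/opa/v1/bundle"
)

func main() {
    f, err := os.Open("bundle.tar.gz") // hypothetical bundle archive
    if err != nil {
        panic(err)
    }
    defer f.Close()

    // NewReader consumes a gzipped tarball and returns the parsed bundle.
    b, err := bundle.NewReader(f).Read()
    if err != nil {
        panic(err)
    }
    fmt.Println(len(b.Modules), "modules")
}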
482
vendor/github.com/open-policy-agent/opa/bundle/file.go
generated
vendored
482
vendor/github.com/open-policy-agent/opa/bundle/file.go
generated
vendored
@@ -1,508 +1,50 @@
|
||||
package bundle
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/open-policy-agent/opa/loader/filter"
|
||||
|
||||
"github.com/open-policy-agent/opa/storage"
|
||||
v1 "github.com/open-policy-agent/opa/v1/bundle"
|
||||
)
|
||||
|
||||
const maxSizeLimitBytesErrMsg = "bundle file %s size (%d bytes) exceeds configured size_limit_bytes (%d bytes)"
|
||||
|
||||
// Descriptor contains information about a file and
|
||||
// can be used to read the file contents.
|
||||
type Descriptor struct {
|
||||
url string
|
||||
path string
|
||||
reader io.Reader
|
||||
closer io.Closer
|
||||
closeOnce *sync.Once
|
||||
}
|
||||
|
||||
// lazyFile defers reading the file until the first call of Read
|
||||
type lazyFile struct {
|
||||
path string
|
||||
file *os.File
|
||||
}
|
||||
|
||||
// newLazyFile creates a new instance of lazyFile
|
||||
func newLazyFile(path string) *lazyFile {
|
||||
return &lazyFile{path: path}
|
||||
}
|
||||
|
||||
// Read implements io.Reader. It will check if the file has been opened
|
||||
// and open it if it has not before attempting to read using the file's
|
||||
// read method
|
||||
func (f *lazyFile) Read(b []byte) (int, error) {
|
||||
var err error
|
||||
|
||||
if f.file == nil {
|
||||
if f.file, err = os.Open(f.path); err != nil {
|
||||
return 0, fmt.Errorf("failed to open file %s: %w", f.path, err)
|
||||
}
|
||||
}
|
||||
|
||||
return f.file.Read(b)
|
||||
}
|
||||
|
||||
// Close closes the lazy file if it has been opened using the file's
|
||||
// close method
|
||||
func (f *lazyFile) Close() error {
|
||||
if f.file != nil {
|
||||
return f.file.Close()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
type Descriptor = v1.Descriptor
|
||||
|
||||
func NewDescriptor(url, path string, reader io.Reader) *Descriptor {
|
||||
return &Descriptor{
|
||||
url: url,
|
||||
path: path,
|
||||
reader: reader,
|
||||
}
|
||||
return v1.NewDescriptor(url, path, reader)
|
||||
}
|
||||
|
||||
func (d *Descriptor) WithCloser(closer io.Closer) *Descriptor {
|
||||
d.closer = closer
|
||||
d.closeOnce = new(sync.Once)
|
||||
return d
|
||||
}
|
||||
|
||||
// Path returns the path of the file.
|
||||
func (d *Descriptor) Path() string {
|
||||
return d.path
|
||||
}
|
||||
|
||||
// URL returns the url of the file.
|
||||
func (d *Descriptor) URL() string {
|
||||
return d.url
|
||||
}
|
||||
|
||||
// Read will read all the contents from the file the Descriptor refers to
|
||||
// into the dest writer up n bytes. Will return an io.EOF error
|
||||
// if EOF is encountered before n bytes are read.
|
||||
func (d *Descriptor) Read(dest io.Writer, n int64) (int64, error) {
|
||||
n, err := io.CopyN(dest, d.reader, n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Close the file, on some Loader implementations this might be a no-op.
|
||||
// It should *always* be called regardless of file.
|
||||
func (d *Descriptor) Close() error {
|
||||
var err error
|
||||
if d.closer != nil {
|
||||
d.closeOnce.Do(func() {
|
||||
err = d.closer.Close()
|
||||
})
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type PathFormat int64
|
||||
type PathFormat = v1.PathFormat
|
||||
|
||||
const (
|
||||
Chrooted PathFormat = iota
|
||||
SlashRooted
|
||||
Passthrough
|
||||
Chrooted = v1.Chrooted
|
||||
SlashRooted = v1.SlashRooted
|
||||
Passthrough = v1.Passthrough
|
||||
)
|
||||
|
||||
// DirectoryLoader defines an interface which can be used to load
|
||||
// files from a directory by iterating over each one in the tree.
|
||||
type DirectoryLoader interface {
|
||||
// NextFile must return io.EOF if there is no next value. The returned
|
||||
// descriptor should *always* be closed when no longer needed.
|
||||
NextFile() (*Descriptor, error)
|
||||
WithFilter(filter filter.LoaderFilter) DirectoryLoader
|
||||
WithPathFormat(PathFormat) DirectoryLoader
|
||||
WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader
|
||||
WithFollowSymlinks(followSymlinks bool) DirectoryLoader
|
||||
}
|
||||
|
||||
type dirLoader struct {
|
||||
root string
|
||||
files []string
|
||||
idx int
|
||||
filter filter.LoaderFilter
|
||||
pathFormat PathFormat
|
||||
maxSizeLimitBytes int64
|
||||
followSymlinks bool
|
||||
}
|
||||
|
||||
// Normalize root directory, ex "./src/bundle" -> "src/bundle"
|
||||
// We don't need an absolute path, but this makes the joined/trimmed
|
||||
// paths more uniform.
|
||||
func normalizeRootDirectory(root string) string {
|
||||
if len(root) > 1 {
|
||||
if root[0] == '.' && root[1] == filepath.Separator {
|
||||
if len(root) == 2 {
|
||||
root = root[:1] // "./" -> "."
|
||||
} else {
|
||||
root = root[2:] // remove leading "./"
|
||||
}
|
||||
}
|
||||
}
|
||||
return root
|
||||
}
|
||||
type DirectoryLoader = v1.DirectoryLoader
|
||||
|
||||
// NewDirectoryLoader returns a basic DirectoryLoader implementation
|
||||
// that will load files from a given root directory path.
|
||||
func NewDirectoryLoader(root string) DirectoryLoader {
|
||||
d := dirLoader{
|
||||
root: normalizeRootDirectory(root),
|
||||
pathFormat: Chrooted,
|
||||
}
|
||||
return &d
|
||||
}
|
||||
|
||||
// WithFilter specifies the filter object to use to filter files while loading bundles
|
||||
func (d *dirLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader {
|
||||
d.filter = filter
|
||||
return d
|
||||
}
|
||||
|
||||
// WithPathFormat specifies how a path is formatted in a Descriptor
|
||||
func (d *dirLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader {
|
||||
d.pathFormat = pathFormat
|
||||
return d
|
||||
}
|
||||
|
||||
// WithSizeLimitBytes specifies the maximum size of any file in the directory to read
|
||||
func (d *dirLoader) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader {
|
||||
d.maxSizeLimitBytes = sizeLimitBytes
|
||||
return d
|
||||
}
|
||||
|
||||
// WithFollowSymlinks specifies whether to follow symlinks when loading files from the directory
|
||||
func (d *dirLoader) WithFollowSymlinks(followSymlinks bool) DirectoryLoader {
|
||||
d.followSymlinks = followSymlinks
|
||||
return d
|
||||
}
|
||||
|
||||
func formatPath(fileName string, root string, pathFormat PathFormat) string {
|
||||
switch pathFormat {
|
||||
case SlashRooted:
|
||||
if !strings.HasPrefix(fileName, string(filepath.Separator)) {
|
||||
return string(filepath.Separator) + fileName
|
||||
}
|
||||
return fileName
|
||||
case Chrooted:
|
||||
// Trim off the root directory and return path as if chrooted
|
||||
result := strings.TrimPrefix(fileName, filepath.FromSlash(root))
|
||||
if root == "." && filepath.Base(fileName) == ManifestExt {
|
||||
result = fileName
|
||||
}
|
||||
if !strings.HasPrefix(result, string(filepath.Separator)) {
|
||||
result = string(filepath.Separator) + result
|
||||
}
|
||||
return result
|
||||
case Passthrough:
|
||||
fallthrough
|
||||
default:
|
||||
return fileName
|
||||
}
|
||||
}
|
||||
|
||||
// NextFile iterates to the next file in the directory tree
|
||||
// and returns a file Descriptor for the file.
|
||||
func (d *dirLoader) NextFile() (*Descriptor, error) {
|
||||
// build a list of all files we will iterate over and read, but only one time
|
||||
if d.files == nil {
|
||||
d.files = []string{}
|
||||
err := filepath.Walk(d.root, func(path string, info os.FileInfo, _ error) error {
|
||||
if info == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if info.Mode().IsRegular() {
|
||||
if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) {
|
||||
return nil
|
||||
}
|
||||
if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes {
|
||||
return fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(path, "/"), info.Size(), d.maxSizeLimitBytes)
|
||||
}
|
||||
d.files = append(d.files, path)
|
||||
} else if d.followSymlinks && info.Mode().Type()&fs.ModeSymlink == fs.ModeSymlink {
|
||||
if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) {
|
||||
return nil
|
||||
}
|
||||
if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes {
|
||||
return fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(path, "/"), info.Size(), d.maxSizeLimitBytes)
|
||||
}
|
||||
d.files = append(d.files, path)
|
||||
} else if info.Mode().IsDir() {
|
||||
if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list files: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// If done reading files then just return io.EOF
|
||||
// errors for each NextFile() call
|
||||
if d.idx >= len(d.files) {
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
fileName := d.files[d.idx]
|
||||
d.idx++
|
||||
fh := newLazyFile(fileName)
|
||||
|
||||
cleanedPath := formatPath(fileName, d.root, d.pathFormat)
|
||||
f := NewDescriptor(filepath.Join(d.root, cleanedPath), cleanedPath, fh).WithCloser(fh)
|
||||
return f, nil
|
||||
}
|
||||
|
||||
type tarballLoader struct {
|
||||
baseURL string
|
||||
r io.Reader
|
||||
tr *tar.Reader
|
||||
files []file
|
||||
idx int
|
||||
filter filter.LoaderFilter
|
||||
skipDir map[string]struct{}
|
||||
pathFormat PathFormat
|
||||
maxSizeLimitBytes int64
|
||||
}
|
||||
|
||||
type file struct {
|
||||
name string
|
||||
reader io.Reader
|
||||
path storage.Path
|
||||
raw []byte
|
||||
return v1.NewDirectoryLoader(root)
|
||||
}
|
||||
|
||||
// NewTarballLoader is deprecated. Use NewTarballLoaderWithBaseURL instead.
|
||||
func NewTarballLoader(r io.Reader) DirectoryLoader {
|
||||
l := tarballLoader{
|
||||
r: r,
|
||||
pathFormat: Passthrough,
|
||||
}
|
||||
return &l
|
||||
return v1.NewTarballLoader(r)
|
||||
}
|
||||
|
||||
// NewTarballLoaderWithBaseURL returns a new DirectoryLoader that reads
|
||||
// files out of a gzipped tar archive. The file URLs will be prefixed
|
||||
// with the baseURL.
|
||||
func NewTarballLoaderWithBaseURL(r io.Reader, baseURL string) DirectoryLoader {
|
||||
l := tarballLoader{
|
||||
baseURL: strings.TrimSuffix(baseURL, "/"),
|
||||
r: r,
|
||||
pathFormat: Passthrough,
|
||||
}
|
||||
return &l
|
||||
}
|
||||
|
||||
// WithFilter specifies the filter object to use to filter files while loading bundles
|
||||
func (t *tarballLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader {
|
||||
t.filter = filter
|
||||
return t
|
||||
}
|
||||
|
||||
// WithPathFormat specifies how a path is formatted in a Descriptor
|
||||
func (t *tarballLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader {
|
||||
t.pathFormat = pathFormat
|
||||
return t
|
||||
}
|
||||
|
||||
// WithSizeLimitBytes specifies the maximum size of any file in the tarball to read
|
||||
func (t *tarballLoader) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader {
|
||||
t.maxSizeLimitBytes = sizeLimitBytes
|
||||
return t
|
||||
}
|
||||
|
||||
// WithFollowSymlinks is a no-op for tarballLoader
|
||||
func (t *tarballLoader) WithFollowSymlinks(_ bool) DirectoryLoader {
|
||||
return t
|
||||
}

// NextFile iterates to the next file in the directory tree
// and returns a file Descriptor for the file.
func (t *tarballLoader) NextFile() (*Descriptor, error) {
	if t.tr == nil {
		gr, err := gzip.NewReader(t.r)
		if err != nil {
			return nil, fmt.Errorf("archive read failed: %w", err)
		}

		t.tr = tar.NewReader(gr)
	}

	if t.files == nil {
		t.files = []file{}

		if t.skipDir == nil {
			t.skipDir = map[string]struct{}{}
		}

		for {
			header, err := t.tr.Next()

			if err == io.EOF {
				break
			}

			if err != nil {
				return nil, err
			}

			// Keep iterating on the archive until we find a normal file
			if header.Typeflag == tar.TypeReg {

				if t.filter != nil {

					if t.filter(filepath.ToSlash(header.Name), header.FileInfo(), getdepth(header.Name, false)) {
						continue
					}

					basePath := strings.Trim(filepath.Dir(filepath.ToSlash(header.Name)), "/")

					// check if the directory is to be skipped
					if _, ok := t.skipDir[basePath]; ok {
						continue
					}

					match := false
					for p := range t.skipDir {
						if strings.HasPrefix(basePath, p) {
							match = true
							break
						}
					}

					if match {
						continue
					}
				}

				if t.maxSizeLimitBytes > 0 && header.Size > t.maxSizeLimitBytes {
					return nil, fmt.Errorf(maxSizeLimitBytesErrMsg, header.Name, header.Size, t.maxSizeLimitBytes)
				}

				f := file{name: header.Name}

				// Note(philipc): We rely on the previous size check in this loop for safety.
				buf := bytes.NewBuffer(make([]byte, 0, header.Size))
				if _, err := io.Copy(buf, t.tr); err != nil {
					return nil, fmt.Errorf("failed to copy file %s: %w", header.Name, err)
				}

				f.reader = buf

				t.files = append(t.files, f)
			} else if header.Typeflag == tar.TypeDir {
				cleanedPath := filepath.ToSlash(header.Name)
				if t.filter != nil && t.filter(cleanedPath, header.FileInfo(), getdepth(header.Name, true)) {
					t.skipDir[strings.Trim(cleanedPath, "/")] = struct{}{}
				}
			}
		}
	}

	// If done reading files then just return io.EOF
	// errors for each NextFile() call
	if t.idx >= len(t.files) {
		return nil, io.EOF
	}

	f := t.files[t.idx]
	t.idx++

	cleanedPath := formatPath(f.name, "", t.pathFormat)
	d := NewDescriptor(filepath.Join(t.baseURL, cleanedPath), cleanedPath, f.reader)
	return d, nil
}
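
Usage sketch (not part of the diff): the v0 wrapper keeps this loader's exported surface unchanged after the bump, so callers still terminate iteration on io.EOF exactly as the code above does. The archive path and base URL below are illustrative.

package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/open-policy-agent/opa/bundle"
)

func main() {
	// Hypothetical gzipped tar archive containing policies and data files.
	f, err := os.Open("bundle.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Descriptor URLs are prefixed with the base URL, per the doc comment above.
	loader := bundle.NewTarballLoaderWithBaseURL(f, "file:///bundles/example")

	for {
		d, err := loader.NextFile()
		if err == io.EOF {
			break // the loader signals completion with io.EOF
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(d.Path())
	}
}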

// Next implements the storage.Iterator interface.
// It iterates to the next policy or data file in the directory tree
// and returns a storage.Update for the file.
func (it *iterator) Next() (*storage.Update, error) {
	if it.files == nil {
		it.files = []file{}

		for _, item := range it.raw {
			f := file{name: item.Path}

			fpath := strings.TrimLeft(normalizePath(filepath.Dir(f.name)), "/.")
			if strings.HasSuffix(f.name, RegoExt) {
				fpath = strings.Trim(normalizePath(f.name), "/")
			}

			p, ok := storage.ParsePathEscaped("/" + fpath)
			if !ok {
				return nil, fmt.Errorf("storage path invalid: %v", f.name)
			}
			f.path = p

			f.raw = item.Value

			it.files = append(it.files, f)
		}

		sortFilePathAscend(it.files)
	}

	// If done reading files then just return io.EOF
	// errors for each NextFile() call
	if it.idx >= len(it.files) {
		return nil, io.EOF
	}

	f := it.files[it.idx]
	it.idx++

	isPolicy := false
	if strings.HasSuffix(f.name, RegoExt) {
		isPolicy = true
	}

	return &storage.Update{
		Path:     f.path,
		Value:    f.raw,
		IsPolicy: isPolicy,
	}, nil
}

type iterator struct {
	raw   []Raw
	files []file
	idx   int
	return v1.NewTarballLoaderWithBaseURL(r, baseURL)
}

func NewIterator(raw []Raw) storage.Iterator {
	it := iterator{
		raw: raw,
	}
	return &it
}

func sortFilePathAscend(files []file) {
	sort.Slice(files, func(i, j int) bool {
		return len(files[i].path) < len(files[j].path)
	})
}
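
For orientation, a minimal sketch (mine, not the diff's) of driving the iterator above through NewIterator; the raw paths and values are illustrative.

package main

import (
	"fmt"
	"io"
	"log"

	"github.com/open-policy-agent/opa/bundle"
)

func main() {
	// Raw bundle content: one policy and one data document.
	raw := []bundle.Raw{
		{Path: "example/example.rego", Value: []byte("package example\n")},
		{Path: "data.json", Value: []byte(`{"a": 1}`)},
	}

	it := bundle.NewIterator(raw)
	for {
		u, err := it.Next() // yields a *storage.Update per file, io.EOF when done
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(u.Path, "policy:", u.IsPolicy)
	}
}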

func getdepth(path string, isDir bool) int {
	if isDir {
		cleanedPath := strings.Trim(filepath.ToSlash(path), "/")
		return len(strings.Split(cleanedPath, "/"))
	}

	basePath := strings.Trim(filepath.Dir(filepath.ToSlash(path)), "/")
	return len(strings.Split(basePath, "/"))
	return v1.NewIterator(raw)
}
127
vendor/github.com/open-policy-agent/opa/bundle/filefs.go
generated
vendored
127
vendor/github.com/open-policy-agent/opa/bundle/filefs.go
generated
vendored
@@ -4,140 +4,19 @@

package bundle

import (
	"fmt"
	"io"
	"io/fs"
	"path/filepath"
	"sync"

	"github.com/open-policy-agent/opa/loader/filter"
	v1 "github.com/open-policy-agent/opa/v1/bundle"
)

const (
	defaultFSLoaderRoot = "."
)

type dirLoaderFS struct {
	sync.Mutex
	filesystem        fs.FS
	files             []string
	idx               int
	filter            filter.LoaderFilter
	root              string
	pathFormat        PathFormat
	maxSizeLimitBytes int64
	followSymlinks    bool
}

// NewFSLoader returns a basic DirectoryLoader implementation
// that will load files from a fs.FS interface
func NewFSLoader(filesystem fs.FS) (DirectoryLoader, error) {
	return NewFSLoaderWithRoot(filesystem, defaultFSLoaderRoot), nil
	return v1.NewFSLoader(filesystem)
}

// NewFSLoaderWithRoot returns a basic DirectoryLoader implementation
// that will load files from a fs.FS interface at the supplied root
func NewFSLoaderWithRoot(filesystem fs.FS, root string) DirectoryLoader {
	d := dirLoaderFS{
		filesystem: filesystem,
		root:       normalizeRootDirectory(root),
		pathFormat: Chrooted,
	}

	return &d
}

func (d *dirLoaderFS) walkDir(path string, dirEntry fs.DirEntry, err error) error {
	if err != nil {
		return err
	}

	if dirEntry != nil {
		info, err := dirEntry.Info()
		if err != nil {
			return err
		}

		if dirEntry.Type().IsRegular() {
			if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) {
				return nil
			}

			if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes {
				return fmt.Errorf("file %s size %d exceeds limit of %d", path, info.Size(), d.maxSizeLimitBytes)
			}

			d.files = append(d.files, path)
		} else if dirEntry.Type()&fs.ModeSymlink != 0 && d.followSymlinks {
			if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) {
				return nil
			}

			if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes {
				return fmt.Errorf("file %s size %d exceeds limit of %d", path, info.Size(), d.maxSizeLimitBytes)
			}

			d.files = append(d.files, path)
		} else if dirEntry.Type().IsDir() {
			if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) {
				return fs.SkipDir
			}
		}
	}
	return nil
}

// WithFilter specifies the filter object to use to filter files while loading bundles
func (d *dirLoaderFS) WithFilter(filter filter.LoaderFilter) DirectoryLoader {
	d.filter = filter
	return d
}

// WithPathFormat specifies how a path is formatted in a Descriptor
func (d *dirLoaderFS) WithPathFormat(pathFormat PathFormat) DirectoryLoader {
	d.pathFormat = pathFormat
	return d
}

// WithSizeLimitBytes specifies the maximum size of any file in the filesystem directory to read
func (d *dirLoaderFS) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader {
	d.maxSizeLimitBytes = sizeLimitBytes
	return d
}

func (d *dirLoaderFS) WithFollowSymlinks(followSymlinks bool) DirectoryLoader {
	d.followSymlinks = followSymlinks
	return d
}

// NextFile iterates to the next file in the directory tree
// and returns a file Descriptor for the file.
func (d *dirLoaderFS) NextFile() (*Descriptor, error) {
	d.Lock()
	defer d.Unlock()

	if d.files == nil {
		err := fs.WalkDir(d.filesystem, d.root, d.walkDir)
		if err != nil {
			return nil, fmt.Errorf("failed to list files: %w", err)
		}
	}

	// If done reading files then just return io.EOF
	// errors for each NextFile() call
	if d.idx >= len(d.files) {
		return nil, io.EOF
	}

	fileName := d.files[d.idx]
	d.idx++

	fh, err := d.filesystem.Open(fileName)
	if err != nil {
		return nil, fmt.Errorf("failed to open file %s: %w", fileName, err)
	}

	cleanedPath := formatPath(fileName, d.root, d.pathFormat)
	f := NewDescriptor(cleanedPath, cleanedPath, fh).WithCloser(fh)
	return f, nil
	return v1.NewFSLoaderWithRoot(filesystem, root)
}
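
A minimal sketch (not from the diff) of the fs.FS-backed loader above, using an in-memory filesystem so it is self-contained; os.DirFS on a real directory works the same way.

package main

import (
	"fmt"
	"io"
	"log"
	"testing/fstest"

	"github.com/open-policy-agent/opa/bundle"
)

func main() {
	// In-memory fs.FS with one policy and one data document (contents illustrative).
	fsys := fstest.MapFS{
		"policies/authz.rego": &fstest.MapFile{Data: []byte("package authz\n")},
		"data.json":           &fstest.MapFile{Data: []byte(`{"roles": ["admin"]}`)},
	}

	loader, err := bundle.NewFSLoader(fsys)
	if err != nil {
		log.Fatal(err)
	}

	for {
		d, err := loader.NextFile()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(d.Path())
	}
}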
133
vendor/github.com/open-policy-agent/opa/bundle/hash.go
generated
vendored
133
vendor/github.com/open-policy-agent/opa/bundle/hash.go
generated
vendored
@@ -5,137 +5,28 @@

package bundle

import (
	"bytes"
	"crypto/md5"
	"crypto/sha1"
	"crypto/sha256"
	"crypto/sha512"
	"encoding/json"
	"fmt"
	"hash"
	"io"
	"sort"
	"strings"
	v1 "github.com/open-policy-agent/opa/v1/bundle"
)

// HashingAlgorithm represents a subset of hashing algorithms implemented in Go
type HashingAlgorithm string
type HashingAlgorithm = v1.HashingAlgorithm

// Supported values for HashingAlgorithm
const (
	MD5       HashingAlgorithm = "MD5"
	SHA1      HashingAlgorithm = "SHA-1"
	SHA224    HashingAlgorithm = "SHA-224"
	SHA256    HashingAlgorithm = "SHA-256"
	SHA384    HashingAlgorithm = "SHA-384"
	SHA512    HashingAlgorithm = "SHA-512"
	SHA512224 HashingAlgorithm = "SHA-512-224"
	SHA512256 HashingAlgorithm = "SHA-512-256"
	MD5       = v1.MD5
	SHA1      = v1.SHA1
	SHA224    = v1.SHA224
	SHA256    = v1.SHA256
	SHA384    = v1.SHA384
	SHA512    = v1.SHA512
	SHA512224 = v1.SHA512224
	SHA512256 = v1.SHA512256
)

// String returns the string representation of a HashingAlgorithm
func (alg HashingAlgorithm) String() string {
	return string(alg)
}

// SignatureHasher computes a signature digest for a file with (structured or unstructured) data and policy
type SignatureHasher interface {
	HashFile(v interface{}) ([]byte, error)
}

type hasher struct {
	h func() hash.Hash // hash function factory
}
type SignatureHasher = v1.SignatureHasher

// NewSignatureHasher returns a signature hasher suitable for a particular hashing algorithm
func NewSignatureHasher(alg HashingAlgorithm) (SignatureHasher, error) {
	h := &hasher{}

	switch alg {
	case MD5:
		h.h = md5.New
	case SHA1:
		h.h = sha1.New
	case SHA224:
		h.h = sha256.New224
	case SHA256:
		h.h = sha256.New
	case SHA384:
		h.h = sha512.New384
	case SHA512:
		h.h = sha512.New
	case SHA512224:
		h.h = sha512.New512_224
	case SHA512256:
		h.h = sha512.New512_256
	default:
		return nil, fmt.Errorf("unsupported hashing algorithm: %s", alg)
	}

	return h, nil
}

// HashFile hashes the file content, JSON or binary, both in golang native format.
func (h *hasher) HashFile(v interface{}) ([]byte, error) {
	hf := h.h()
	walk(v, hf)
	return hf.Sum(nil), nil
}

// walk hashes the file content, JSON or binary, both in golang native format.
//
// Computation for unstructured documents is a hash of the document.
//
// Computation for the types of structured JSON document is as follows:
//
// object: Hash {, then each key (in alphabetical order) and digest of the value, then comma (between items) and finally }.
//
// array: Hash [, then digest of the value, then comma (between items) and finally ].
func walk(v interface{}, h io.Writer) {

	switch x := v.(type) {
	case map[string]interface{}:
		_, _ = h.Write([]byte("{"))

		var keys []string
		for k := range x {
			keys = append(keys, k)
		}
		sort.Strings(keys)

		for i, key := range keys {
			if i > 0 {
				_, _ = h.Write([]byte(","))
			}

			_, _ = h.Write(encodePrimitive(key))
			_, _ = h.Write([]byte(":"))
			walk(x[key], h)
		}

		_, _ = h.Write([]byte("}"))
	case []interface{}:
		_, _ = h.Write([]byte("["))

		for i, e := range x {
			if i > 0 {
				_, _ = h.Write([]byte(","))
			}
			walk(e, h)
		}

		_, _ = h.Write([]byte("]"))
	case []byte:
		_, _ = h.Write(x)
	default:
		_, _ = h.Write(encodePrimitive(x))
	}
}

func encodePrimitive(v interface{}) []byte {
	var buf bytes.Buffer
	encoder := json.NewEncoder(&buf)
	encoder.SetEscapeHTML(false)
	_ = encoder.Encode(v)
	return []byte(strings.Trim(buf.String(), "\n"))
	return v1.NewSignatureHasher(alg)
}
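
The alphabetical key ordering in walk() is what makes the digest independent of JSON whitespace and map construction order. A small sketch (mine, using only the exported API above) demonstrating that property:

package main

import (
	"fmt"
	"log"

	"github.com/open-policy-agent/opa/bundle"
)

func main() {
	hasher, err := bundle.NewSignatureHasher(bundle.SHA256)
	if err != nil {
		log.Fatal(err)
	}

	// walk() serializes object keys alphabetically, so two maps with the same
	// contents hash identically regardless of how they were built.
	a := map[string]interface{}{"b": 2, "a": 1}
	b := map[string]interface{}{"a": 1, "b": 2}

	da, _ := hasher.HashFile(a)
	db, _ := hasher.HashFile(b)
	fmt.Printf("%x\n%x\n", da, db) // identical digests
}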
126
vendor/github.com/open-policy-agent/opa/bundle/keys.go
generated
vendored
126
vendor/github.com/open-policy-agent/opa/bundle/keys.go
generated
vendored
@@ -6,139 +6,25 @@

package bundle

import (
	"encoding/pem"
	"fmt"
	"os"

	"github.com/open-policy-agent/opa/internal/jwx/jwa"
	"github.com/open-policy-agent/opa/internal/jwx/jws/sign"
	"github.com/open-policy-agent/opa/keys"

	"github.com/open-policy-agent/opa/util"
)

const (
	defaultTokenSigningAlg = "RS256"
	v1 "github.com/open-policy-agent/opa/v1/bundle"
)

// KeyConfig holds the keys used to sign or verify bundles and tokens
// Moved to own package, alias kept for backwards compatibility
type KeyConfig = keys.Config
type KeyConfig = v1.KeyConfig

// VerificationConfig represents the key configuration used to verify a signed bundle
type VerificationConfig struct {
	PublicKeys map[string]*KeyConfig
	KeyID      string   `json:"keyid"`
	Scope      string   `json:"scope"`
	Exclude    []string `json:"exclude_files"`
}
type VerificationConfig = v1.VerificationConfig

// NewVerificationConfig return a new VerificationConfig
func NewVerificationConfig(keys map[string]*KeyConfig, id, scope string, exclude []string) *VerificationConfig {
	return &VerificationConfig{
		PublicKeys: keys,
		KeyID:      id,
		Scope:      scope,
		Exclude:    exclude,
	}
}

// ValidateAndInjectDefaults validates the config and inserts default values
func (vc *VerificationConfig) ValidateAndInjectDefaults(keys map[string]*KeyConfig) error {
	vc.PublicKeys = keys

	if vc.KeyID != "" {
		found := false
		for key := range keys {
			if key == vc.KeyID {
				found = true
				break
			}
		}

		if !found {
			return fmt.Errorf("key id %s not found", vc.KeyID)
		}
	}
	return nil
}

// GetPublicKey returns the public key corresponding to the given key id
func (vc *VerificationConfig) GetPublicKey(id string) (*KeyConfig, error) {
	var kc *KeyConfig
	var ok bool

	if kc, ok = vc.PublicKeys[id]; !ok {
		return nil, fmt.Errorf("verification key corresponding to ID %v not found", id)
	}
	return kc, nil
	return v1.NewVerificationConfig(keys, id, scope, exclude)
}

// SigningConfig represents the key configuration used to generate a signed bundle
type SigningConfig struct {
	Plugin     string
	Key        string
	Algorithm  string
	ClaimsPath string
}
type SigningConfig = v1.SigningConfig

// NewSigningConfig return a new SigningConfig
func NewSigningConfig(key, alg, claimsPath string) *SigningConfig {
	if alg == "" {
		alg = defaultTokenSigningAlg
	}

	return &SigningConfig{
		Plugin:     defaultSignerID,
		Key:        key,
		Algorithm:  alg,
		ClaimsPath: claimsPath,
	}
}

// WithPlugin sets the signing plugin in the signing config
func (s *SigningConfig) WithPlugin(plugin string) *SigningConfig {
	if plugin != "" {
		s.Plugin = plugin
	}
	return s
}

// GetPrivateKey returns the private key or secret from the signing config
func (s *SigningConfig) GetPrivateKey() (interface{}, error) {

	block, _ := pem.Decode([]byte(s.Key))
	if block != nil {
		return sign.GetSigningKey(s.Key, jwa.SignatureAlgorithm(s.Algorithm))
	}

	var priv string
	if _, err := os.Stat(s.Key); err == nil {
		bs, err := os.ReadFile(s.Key)
		if err != nil {
			return nil, err
		}
		priv = string(bs)
	} else if os.IsNotExist(err) {
		priv = s.Key
	} else {
		return nil, err
	}

	return sign.GetSigningKey(priv, jwa.SignatureAlgorithm(s.Algorithm))
}

// GetClaims returns the claims by reading the file specified in the signing config
func (s *SigningConfig) GetClaims() (map[string]interface{}, error) {
	var claims map[string]interface{}

	bs, err := os.ReadFile(s.ClaimsPath)
	if err != nil {
		return claims, err
	}

	if err := util.UnmarshalJSON(bs, &claims); err != nil {
		return claims, err
	}
	return claims, nil
	return v1.NewSigningConfig(key, alg, claimsPath)
}
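
A small sketch (not from the diff) of the two constructors above; the key file paths are placeholders. Note that an empty algorithm falls back to the RS256 default shown in NewSigningConfig, and that behavior is preserved by the v1 delegation.

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/bundle"
)

func main() {
	// Signing side: empty algorithm selects the RS256 default noted above.
	sc := bundle.NewSigningConfig("/path/to/private_key.pem", "", "")
	fmt.Println(sc.Algorithm) // "RS256"

	// Verification side: a single named key, no scope, no excluded files.
	vc := bundle.NewVerificationConfig(
		map[string]*bundle.KeyConfig{
			"global": {Key: "/path/to/public_key.pem", Algorithm: "RS256"},
		},
		"global", "", nil,
	)
	fmt.Println(vc.KeyID)
}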
112
vendor/github.com/open-policy-agent/opa/bundle/sign.go
generated
vendored
112
vendor/github.com/open-policy-agent/opa/bundle/sign.go
generated
vendored
@@ -6,130 +6,30 @@

package bundle

import (
	"crypto/rand"
	"encoding/json"
	"fmt"

	"github.com/open-policy-agent/opa/internal/jwx/jwa"
	"github.com/open-policy-agent/opa/internal/jwx/jws"
	v1 "github.com/open-policy-agent/opa/v1/bundle"
)

const defaultSignerID = "_default"

var signers map[string]Signer

// Signer is the interface expected for implementations that generate bundle signatures.
type Signer interface {
	GenerateSignedToken([]FileInfo, *SigningConfig, string) (string, error)
}
type Signer v1.Signer

// GenerateSignedToken will retrieve the Signer implementation based on the Plugin specified
// in SigningConfig, and call its implementation of GenerateSignedToken. The signer generates
// a signed token given the list of files to be included in the payload and the bundle
// signing config. The keyID if non-empty, represents the value for the "keyid" claim in the token.
func GenerateSignedToken(files []FileInfo, sc *SigningConfig, keyID string) (string, error) {
	var plugin string
	// for backwards compatibility, check if there is no plugin specified, and use default
	if sc.Plugin == "" {
		plugin = defaultSignerID
	} else {
		plugin = sc.Plugin
	}
	signer, err := GetSigner(plugin)
	if err != nil {
		return "", err
	}
	return signer.GenerateSignedToken(files, sc, keyID)
	return v1.GenerateSignedToken(files, sc, keyID)
}

// DefaultSigner is the default bundle signing implementation. It signs bundles by generating
// a JWT and signing it using a locally-accessible private key.
type DefaultSigner struct{}

// GenerateSignedToken generates a signed token given the list of files to be
// included in the payload and the bundle signing config. The keyID if non-empty,
// represents the value for the "keyid" claim in the token
func (*DefaultSigner) GenerateSignedToken(files []FileInfo, sc *SigningConfig, keyID string) (string, error) {
	payload, err := generatePayload(files, sc, keyID)
	if err != nil {
		return "", err
	}

	privateKey, err := sc.GetPrivateKey()
	if err != nil {
		return "", err
	}

	var headers jws.StandardHeaders

	if err := headers.Set(jws.AlgorithmKey, jwa.SignatureAlgorithm(sc.Algorithm)); err != nil {
		return "", err
	}

	if keyID != "" {
		if err := headers.Set(jws.KeyIDKey, keyID); err != nil {
			return "", err
		}
	}

	hdr, err := json.Marshal(headers)
	if err != nil {
		return "", err
	}

	token, err := jws.SignLiteral(payload,
		jwa.SignatureAlgorithm(sc.Algorithm),
		privateKey,
		hdr,
		rand.Reader)
	if err != nil {
		return "", err
	}
	return string(token), nil
}

func generatePayload(files []FileInfo, sc *SigningConfig, keyID string) ([]byte, error) {
	payload := make(map[string]interface{})
	payload["files"] = files

	if sc.ClaimsPath != "" {
		claims, err := sc.GetClaims()
		if err != nil {
			return nil, err
		}

		for claim, value := range claims {
			payload[claim] = value
		}
	} else {
		if keyID != "" {
			// keyid claim is deprecated but include it for backwards compatibility.
			payload["keyid"] = keyID
		}
	}
	return json.Marshal(payload)
}
type DefaultSigner v1.DefaultSigner

// GetSigner returns the Signer registered under the given id
func GetSigner(id string) (Signer, error) {
	signer, ok := signers[id]
	if !ok {
		return nil, fmt.Errorf("no signer exists under id %s", id)
	}
	return signer, nil
	return v1.GetSigner(id)
}

// RegisterSigner registers a Signer under the given id
func RegisterSigner(id string, s Signer) error {
	if id == defaultSignerID {
		return fmt.Errorf("signer id %s is reserved, use a different id", id)
	}
	signers[id] = s
	return nil
}

func init() {
	signers = map[string]Signer{
		defaultSignerID: &DefaultSigner{},
	}
	return v1.RegisterSigner(id, s)
}
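
A usage sketch (mine, not part of the diff) of GenerateSignedToken, assuming the exported NewFile helper for building the FileInfo list; the secret, file name, and hash are illustrative. HS256 with a shared secret keeps the sketch self-contained, while a PEM key or key file path also works, as GetPrivateKey above shows.

package main

import (
	"fmt"
	"log"

	"github.com/open-policy-agent/opa/bundle"
)

func main() {
	sc := bundle.NewSigningConfig("mysecret", "HS256", "")

	// File list embedded in the token payload (name/hash values illustrative).
	files := []bundle.FileInfo{
		bundle.NewFile("/example/data.json", "0f12ab34", "SHA-256"),
	}

	token, err := bundle.GenerateSignedToken(files, sc, "key1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(token) // compact JWS: header.payload.signature
}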
955
vendor/github.com/open-policy-agent/opa/bundle/store.go
generated
vendored
955
vendor/github.com/open-policy-agent/opa/bundle/store.go
generated
vendored
File diff suppressed because it is too large
207
vendor/github.com/open-policy-agent/opa/bundle/verify.go
generated
vendored
207
vendor/github.com/open-policy-agent/opa/bundle/verify.go
generated
vendored
@@ -6,26 +6,11 @@

package bundle

import (
	"bytes"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"fmt"

	"github.com/open-policy-agent/opa/internal/jwx/jwa"
	"github.com/open-policy-agent/opa/internal/jwx/jws"
	"github.com/open-policy-agent/opa/internal/jwx/jws/verify"
	"github.com/open-policy-agent/opa/util"
	v1 "github.com/open-policy-agent/opa/v1/bundle"
)

const defaultVerifierID = "_default"

var verifiers map[string]Verifier

// Verifier is the interface expected for implementations that verify bundle signatures.
type Verifier interface {
	VerifyBundleSignature(SignaturesConfig, *VerificationConfig) (map[string]FileInfo, error)
}
type Verifier v1.Verifier

// VerifyBundleSignature will retrieve the Verifier implementation based
// on the Plugin specified in SignaturesConfig, and call its implementation
@@ -33,199 +18,19 @@ type Verifier interface {
// using the given public keys or secret. If a signature is verified, it keeps
// track of the files specified in the JWT payload
func VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) {
	// default implementation does not return a nil for map, so don't
	// do it here either
	files := make(map[string]FileInfo)
	var plugin string
	// for backwards compatibility, check if there is no plugin specified, and use default
	if sc.Plugin == "" {
		plugin = defaultVerifierID
	} else {
		plugin = sc.Plugin
	}
	verifier, err := GetVerifier(plugin)
	if err != nil {
		return files, err
	}
	return verifier.VerifyBundleSignature(sc, bvc)
	return v1.VerifyBundleSignature(sc, bvc)
}

// DefaultVerifier is the default bundle verification implementation. It verifies bundles by checking
// the JWT signature using a locally-accessible public key.
type DefaultVerifier struct{}

// VerifyBundleSignature verifies the bundle signature using the given public keys or secret.
// If a signature is verified, it keeps track of the files specified in the JWT payload
func (*DefaultVerifier) VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) {
	files := make(map[string]FileInfo)

	if len(sc.Signatures) == 0 {
		return files, fmt.Errorf(".signatures.json: missing JWT (expected exactly one)")
	}

	if len(sc.Signatures) > 1 {
		return files, fmt.Errorf(".signatures.json: multiple JWTs not supported (expected exactly one)")
	}

	for _, token := range sc.Signatures {
		payload, err := verifyJWTSignature(token, bvc)
		if err != nil {
			return files, err
		}

		for _, file := range payload.Files {
			files[file.Name] = file
		}
	}
	return files, nil
}

func verifyJWTSignature(token string, bvc *VerificationConfig) (*DecodedSignature, error) {
	// decode JWT to check if the header specifies the key to use and/or if claims have the scope.

	parts, err := jws.SplitCompact(token)
	if err != nil {
		return nil, err
	}

	var decodedHeader []byte
	if decodedHeader, err = base64.RawURLEncoding.DecodeString(parts[0]); err != nil {
		return nil, fmt.Errorf("failed to base64 decode JWT headers: %w", err)
	}

	var hdr jws.StandardHeaders
	if err := json.Unmarshal(decodedHeader, &hdr); err != nil {
		return nil, fmt.Errorf("failed to parse JWT headers: %w", err)
	}

	payload, err := base64.RawURLEncoding.DecodeString(parts[1])
	if err != nil {
		return nil, err
	}

	var ds DecodedSignature
	if err := json.Unmarshal(payload, &ds); err != nil {
		return nil, err
	}

	// check for the id of the key to use for JWT signature verification
	// first in the OPA config. If not found, then check the JWT kid.
	keyID := bvc.KeyID
	if keyID == "" {
		keyID = hdr.KeyID
	}
	if keyID == "" {
		// If header has no key id, check the deprecated key claim.
		keyID = ds.KeyID
	}

	if keyID == "" {
		return nil, fmt.Errorf("verification key ID is empty")
	}

	// now that we have the keyID, fetch the actual key
	keyConfig, err := bvc.GetPublicKey(keyID)
	if err != nil {
		return nil, err
	}

	// verify JWT signature
	alg := jwa.SignatureAlgorithm(keyConfig.Algorithm)
	key, err := verify.GetSigningKey(keyConfig.Key, alg)
	if err != nil {
		return nil, err
	}

	_, err = jws.Verify([]byte(token), alg, key)
	if err != nil {
		return nil, err
	}

	// verify the scope
	scope := bvc.Scope
	if scope == "" {
		scope = keyConfig.Scope
	}

	if ds.Scope != scope {
		return nil, fmt.Errorf("scope mismatch")
	}
	return &ds, nil
}

// VerifyBundleFile verifies the hash of a file in the bundle matches to that provided in the bundle's signature
func VerifyBundleFile(path string, data bytes.Buffer, files map[string]FileInfo) error {
	var file FileInfo
	var ok bool

	if file, ok = files[path]; !ok {
		return fmt.Errorf("file %v not included in bundle signature", path)
	}

	if file.Algorithm == "" {
		return fmt.Errorf("no hashing algorithm provided for file %v", path)
	}

	hash, err := NewSignatureHasher(HashingAlgorithm(file.Algorithm))
	if err != nil {
		return err
	}

	// hash the file content
	// For unstructured files, hash the byte stream of the file
	// For structured files, read the byte stream and parse into a JSON structure;
	// then recursively order the fields of all objects alphabetically and then apply
	// the hash function to result to compute the hash. This ensures that the digital signature is
	// independent of whitespace and other non-semantic JSON features.
	var value interface{}
	if IsStructuredDoc(path) {
		err := util.Unmarshal(data.Bytes(), &value)
		if err != nil {
			return err
		}
	} else {
		value = data.Bytes()
	}

	bs, err := hash.HashFile(value)
	if err != nil {
		return err
	}

	// compare file hash with same file in the JWT payloads
	fb, err := hex.DecodeString(file.Hash)
	if err != nil {
		return err
	}

	if !bytes.Equal(fb, bs) {
		return fmt.Errorf("%v: digest mismatch (want: %x, got: %x)", path, fb, bs)
	}

	delete(files, path)
	return nil
}
type DefaultVerifier = v1.DefaultVerifier

// GetVerifier returns the Verifier registered under the given id
func GetVerifier(id string) (Verifier, error) {
	verifier, ok := verifiers[id]
	if !ok {
		return nil, fmt.Errorf("no verifier exists under id %s", id)
	}
	return verifier, nil
	return v1.GetVerifier(id)
}

// RegisterVerifier registers a Verifier under the given id
func RegisterVerifier(id string, v Verifier) error {
	if id == defaultVerifierID {
		return fmt.Errorf("verifier id %s is reserved, use a different id", id)
	}
	verifiers[id] = v
	return nil
}

func init() {
	verifiers = map[string]Verifier{
		defaultVerifierID: &DefaultVerifier{},
	}
	return v1.RegisterVerifier(id, v)
}
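
A verification-side sketch (mine, not the diff's) mirroring the signing example earlier: the token below is a placeholder, so this shows only the call shape; with a real JWS produced at signing time, VerifyBundleSignature returns the file list embedded in the payload.

package main

import (
	"fmt"
	"log"

	"github.com/open-policy-agent/opa/bundle"
)

func main() {
	token := "eyJhbGciOiJIUzI1NiJ9.placeholder.placeholder" // illustrative JWS

	sc := bundle.SignaturesConfig{Signatures: []string{token}}
	bvc := bundle.NewVerificationConfig(
		map[string]*bundle.KeyConfig{
			"key1": {Key: "mysecret", Algorithm: "HS256"},
		},
		"key1", // key id, resolved against the config/JWT header as shown above
		"",     // scope
		nil,    // no excluded files
	)

	files, err := bundle.VerifyBundleSignature(sc, bvc)
	if err != nil {
		log.Fatal(err)
	}
	for name, info := range files {
		fmt.Println(name, info.Hash)
	}
}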
8
vendor/github.com/open-policy-agent/opa/capabilities/doc.go
generated
vendored
Normal file
8
vendor/github.com/open-policy-agent/opa/capabilities/doc.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@

// Copyright 2024 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.

// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended.
// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead.
// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information.
package capabilities
4835
vendor/github.com/open-policy-agent/opa/capabilities/v1.0.0.json
generated
vendored
Normal file
4835
vendor/github.com/open-policy-agent/opa/capabilities/v1.0.0.json
generated
vendored
Normal file
File diff suppressed because it is too large
4835
vendor/github.com/open-policy-agent/opa/capabilities/v1.0.1.json
generated
vendored
Normal file
4835
vendor/github.com/open-policy-agent/opa/capabilities/v1.0.1.json
generated
vendored
Normal file
File diff suppressed because it is too large
4835
vendor/github.com/open-policy-agent/opa/capabilities/v1.1.0.json
generated
vendored
Normal file
4835
vendor/github.com/open-policy-agent/opa/capabilities/v1.1.0.json
generated
vendored
Normal file
File diff suppressed because it is too large
8
vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go
generated
vendored
8
vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go
generated
vendored
@@ -11,10 +11,10 @@ import (

	"os"
	"path/filepath"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/bundle"
	"github.com/open-policy-agent/opa/resolver/wasm"
	"github.com/open-policy-agent/opa/storage"
	"github.com/open-policy-agent/opa/v1/ast"
	"github.com/open-policy-agent/opa/v1/bundle"
	"github.com/open-policy-agent/opa/v1/resolver/wasm"
	"github.com/open-policy-agent/opa/v1/storage"
)

// LoadWasmResolversFromStore will lookup all Wasm modules from the store along with the
11
vendor/github.com/open-policy-agent/opa/internal/compiler/utils.go
generated
vendored
11
vendor/github.com/open-policy-agent/opa/internal/compiler/utils.go
generated
vendored
@@ -5,9 +5,9 @@

package compiler

import (
	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/schemas"
	"github.com/open-policy-agent/opa/util"
	"github.com/open-policy-agent/opa/v1/ast"
	"github.com/open-policy-agent/opa/v1/schemas"
	"github.com/open-policy-agent/opa/v1/util"
)

type SchemaFile string
@@ -32,7 +32,10 @@ func VerifyAuthorizationPolicySchema(compiler *ast.Compiler, ref ast.Ref) error

	schemaSet := ast.NewSchemaSet()
	schemaSet.Put(ast.SchemaRootRef, schemaDefinitions[AuthorizationPolicySchema])

	errs := ast.NewCompiler().WithSchemas(schemaSet).PassesTypeCheckRules(rules)
	errs := ast.NewCompiler().
		WithDefaultRegoVersion(compiler.DefaultRegoVersion()).
		WithSchemas(schemaSet).
		PassesTypeCheckRules(rules)

	if len(errs) > 0 {
		return errs
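
The change above makes the type-checking compiler inherit the caller's default Rego version rather than assuming one. A minimal sketch (mine, assuming the v1 ast API behaves as shown in the hunk: NewCompiler, WithDefaultRegoVersion, RegoV1, MustParseModule):

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	// v1 modules parse with Rego v1 syntax by default ("if" keyword required).
	module := ast.MustParseModule("package example\n\nallow if input.admin == true\n")

	// Propagate the default Rego version explicitly, mirroring the diff above.
	compiler := ast.NewCompiler().WithDefaultRegoVersion(ast.RegoV1)
	compiler.Compile(map[string]*ast.Module{"example.rego": module})

	fmt.Println("compile failed:", compiler.Failed())
}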
6
vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go
generated
vendored
6
vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go
generated
vendored
@@ -12,7 +12,6 @@ import (

	"fmt"
	"io"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/internal/compiler/wasm/opa"
	"github.com/open-policy-agent/opa/internal/debug"
	"github.com/open-policy-agent/opa/internal/wasm/encoding"
@@ -20,8 +19,9 @@ import (

	"github.com/open-policy-agent/opa/internal/wasm/module"
	"github.com/open-policy-agent/opa/internal/wasm/types"
	"github.com/open-policy-agent/opa/internal/wasm/util"
	"github.com/open-policy-agent/opa/ir"
	opatypes "github.com/open-policy-agent/opa/types"
	"github.com/open-policy-agent/opa/v1/ast"
	"github.com/open-policy-agent/opa/v1/ir"
	opatypes "github.com/open-policy-agent/opa/v1/types"
)

// Record Wasm ABI version in exported global variable
10
vendor/github.com/open-policy-agent/opa/internal/config/config.go
generated
vendored
10
vendor/github.com/open-policy-agent/opa/internal/config/config.go
generated
vendored
@@ -15,11 +15,11 @@ import (

	"sigs.k8s.io/yaml"

	"github.com/open-policy-agent/opa/internal/strvals"
	"github.com/open-policy-agent/opa/keys"
	"github.com/open-policy-agent/opa/logging"
	"github.com/open-policy-agent/opa/plugins/rest"
	"github.com/open-policy-agent/opa/tracing"
	"github.com/open-policy-agent/opa/util"
	"github.com/open-policy-agent/opa/v1/keys"
	"github.com/open-policy-agent/opa/v1/logging"
	"github.com/open-policy-agent/opa/v1/plugins/rest"
	"github.com/open-policy-agent/opa/v1/tracing"
	"github.com/open-policy-agent/opa/v1/util"
)

// ServiceOptions stores the options passed to ParseServicesConfig
14
vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go
generated
vendored
14
vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go
generated
vendored
@@ -146,14 +146,13 @@

package edittree

import (
	"encoding/json"
	"fmt"
	"math/big"
	"sort"
	"strings"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/internal/edittree/bitvector"
	"github.com/open-policy-agent/opa/v1/ast"
)

// Deletions are encoded with a nil value pointer.
@@ -213,10 +212,10 @@ func (e *EditTree) getKeyHash(key *ast.Term) (int, bool) {

	case ast.Null, ast.Boolean, ast.String, ast.Var:
		equal = func(y ast.Value) bool { return x == y }
	case ast.Number:
		if xi, err := json.Number(x).Int64(); err == nil {
		if xi, ok := x.Int64(); ok {
			equal = func(y ast.Value) bool {
				if y, ok := y.(ast.Number); ok {
					if yi, err := json.Number(y).Int64(); err == nil {
					if yi, ok := y.Int64(); ok {
						return xi == yi
					}
				}
@@ -725,9 +724,9 @@ func (e *EditTree) Unfold(path ast.Ref) (*EditTree, error) {

	// Fall back to looking up the key in e.value.
	// Extend the tree if key is present. Error otherwise.
	if v, err := x.Find(ast.Ref{ast.IntNumberTerm(idx)}); err == nil {
	if v, err := x.Find(ast.Ref{ast.InternedIntNumberTerm(idx)}); err == nil {
		// TODO: Consider a more efficient "Replace" function that special-cases this for arrays instead?
		_, err := e.Delete(ast.IntNumberTerm(idx))
		_, err := e.Delete(ast.InternedIntNumberTerm(idx))
		if err != nil {
			return nil, err
		}
@@ -1026,8 +1025,7 @@ func (e *EditTree) Exists(path ast.Ref) bool {

	}
	// Fallback if child lookup failed.
	// We have to ensure that the lookup term is a number here, or Find will fail.
	k := ast.Ref{ast.IntNumberTerm(idx)}.Concat(path[1:])
	_, err = x.Find(k)
	_, err = x.Find(ast.Ref{ast.InternedIntNumberTerm(idx)}.Concat(path[1:]))
	return err == nil
default:
	// Catch all primitive types.
2
vendor/github.com/open-policy-agent/opa/internal/future/filter_imports.go
generated
vendored
2
vendor/github.com/open-policy-agent/opa/internal/future/filter_imports.go
generated
vendored
@@ -4,7 +4,7 @@

package future

import "github.com/open-policy-agent/opa/ast"
import "github.com/open-policy-agent/opa/v1/ast"

// FilterFutureImports filters OUT any future imports from the passed slice of
// `*ast.Import`s.
2
vendor/github.com/open-policy-agent/opa/internal/future/parser_opts.go
generated
vendored
2
vendor/github.com/open-policy-agent/opa/internal/future/parser_opts.go
generated
vendored
@@ -7,7 +7,7 @@ package future

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/v1/ast"
)

// ParserOptionsFromFutureImports transforms a slice of `ast.Import`s into the
8
vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaReferencePool.go
generated
vendored
8
vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaReferencePool.go
generated
vendored
@@ -25,10 +25,6 @@

package gojsonschema

import (
	"fmt"
)

type schemaReferencePool struct {
	documents map[string]*SubSchema
}
@@ -44,7 +40,7 @@ func newSchemaReferencePool() *schemaReferencePool {

func (p *schemaReferencePool) Get(ref string) (r *SubSchema, o bool) {

	if internalLogEnabled {
		internalLog(fmt.Sprintf("Schema Reference ( %s )", ref))
		internalLog("Schema Reference ( %s )", ref)
	}

	if sch, ok := p.documents[ref]; ok {
@@ -60,7 +56,7 @@ func (p *schemaReferencePool) Get(ref string) (r *SubSchema, o bool) {

func (p *schemaReferencePool) Add(ref string, sch *SubSchema) {

	if internalLogEnabled {
		internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref))
		internalLog("Add Schema Reference %s to pool", ref)
	}
	if _, ok := p.documents[ref]; !ok {
		p.documents[ref] = sch
4
vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go
generated
vendored
4
vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go
generated
vendored
@@ -348,7 +348,7 @@ func (v *SubSchema) validateSchema(currentSubSchema *SubSchema, currentNode inte

		}
	}

	if currentSubSchema.dependencies != nil && len(currentSubSchema.dependencies) > 0 {
	if len(currentSubSchema.dependencies) > 0 {
		if currentNodeMap, ok := currentNode.(map[string]interface{}); ok {
			for elementKey := range currentNodeMap {
				if dependency, ok := currentSubSchema.dependencies[elementKey]; ok {
@@ -469,7 +469,7 @@ func (v *SubSchema) validateArray(currentSubSchema *SubSchema, value []interface

			result.mergeErrors(validationResult)
		}
	} else {
		if currentSubSchema.ItemsChildren != nil && len(currentSubSchema.ItemsChildren) > 0 {
		if len(currentSubSchema.ItemsChildren) > 0 {

			nbItems := len(currentSubSchema.ItemsChildren)
@@ -27,7 +27,7 @@ func init() {

	}

	addError(
		Message(message),
		Message(message), //nolint:govet
		At(field.Position),
	)
})
@@ -20,7 +20,7 @@ func init() {

	message := fmt.Sprintf(`Fragment cannot condition on non composite type "%s".`, inlineFragment.TypeCondition)

	addError(
		Message(message),
		Message(message), //nolint:govet
		At(inlineFragment.Position),
	)
})
@@ -33,7 +33,7 @@ func init() {

	message := fmt.Sprintf(`Fragment "%s" cannot condition on non composite type "%s".`, fragment.Name, fragment.TypeCondition)

	addError(
		Message(message),
		Message(message), //nolint:govet
		At(fragment.Position),
	)
})
2
vendor/github.com/open-policy-agent/opa/internal/json/patch/patch.go
generated
vendored
2
vendor/github.com/open-policy-agent/opa/internal/json/patch/patch.go
generated
vendored
@@ -7,7 +7,7 @@ package patch

import (
	"strings"

	"github.com/open-policy-agent/opa/storage"
	"github.com/open-policy-agent/opa/v1/storage"
)

// ParsePatchPathEscaped returns a new path for the given escaped str.
10
vendor/github.com/open-policy-agent/opa/internal/planner/planner.go
generated
vendored
10
vendor/github.com/open-policy-agent/opa/internal/planner/planner.go
generated
vendored
@@ -11,10 +11,10 @@ import (

	"io"
	"sort"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/ast/location"
	"github.com/open-policy-agent/opa/internal/debug"
	"github.com/open-policy-agent/opa/ir"
	"github.com/open-policy-agent/opa/v1/ast"
	"github.com/open-policy-agent/opa/v1/ast/location"
	"github.com/open-policy-agent/opa/v1/ir"
)

// QuerySet represents the input to the planner.
@@ -1037,7 +1037,7 @@ func (p *Planner) planExprCall(e *ast.Expr, iter planiter) error {

		args = p.defaultOperands()
	} else if decl, ok := p.decls[operator]; ok {
		relation = decl.Relation
		arity = len(decl.Decl.Args())
		arity = decl.Decl.Arity()
		void = decl.Decl.Result() == nil
		name = operator
		p.externs[operator] = decl
@@ -1519,7 +1519,7 @@ func (p *Planner) planValue(t ast.Value, loc *ast.Location, iter planiter) error

		p.loc = loc
		return p.planObjectComprehension(v, iter)
	default:
		return fmt.Errorf("%v term not implemented", ast.TypeName(v))
		return fmt.Errorf("%v term not implemented", ast.ValueName(v))
	}
}
2
vendor/github.com/open-policy-agent/opa/internal/planner/rules.go
generated
vendored
2
vendor/github.com/open-policy-agent/opa/internal/planner/rules.go
generated
vendored
@@ -4,7 +4,7 @@ import (

	"fmt"
	"sort"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/v1/ast"
)

// funcstack implements a simple map structure used to keep track of virtual
4
vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go
generated
vendored
4
vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go
generated
vendored
@@ -5,8 +5,8 @@

package planner

import (
	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/ir"
	"github.com/open-policy-agent/opa/v1/ast"
	"github.com/open-policy-agent/opa/v1/ir"
)

type varstack []map[ast.Var]ir.Local
2
vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go
generated
vendored
2
vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go
generated
vendored
@@ -11,7 +11,7 @@ import (

	"time"

	"github.com/open-policy-agent/opa/internal/version"
	"github.com/open-policy-agent/opa/logging"
	"github.com/open-policy-agent/opa/v1/logging"
)

// Values taken from
2
vendor/github.com/open-policy-agent/opa/internal/providers/aws/kms.go
generated
vendored
2
vendor/github.com/open-policy-agent/opa/internal/providers/aws/kms.go
generated
vendored
@@ -10,7 +10,7 @@ import (

	"time"

	"github.com/open-policy-agent/opa/internal/version"
	"github.com/open-policy-agent/opa/logging"
	"github.com/open-policy-agent/opa/v1/logging"
)

// Values taken from
19
vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go
generated
vendored
19
vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go
generated
vendored
@@ -13,13 +13,13 @@ import (

	"io"
	"net/http"
	"net/url"
	"sort"
	"strings"
	"time"

	v4 "github.com/open-policy-agent/opa/internal/providers/aws/v4"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/v1/ast"
	"github.com/open-policy-agent/opa/v1/util"
)

func stringFromTerm(t *ast.Term) string {
@@ -67,19 +67,6 @@ func sha256MAC(message string, key []byte) []byte {

	return mac.Sum(nil)
}

func sortKeys(strMap map[string][]string) []string {
	keys := make([]string, len(strMap))

	i := 0
	for k := range strMap {
		keys[i] = k
		i++
	}
	sort.Strings(keys)

	return keys
}

// SignRequest modifies an http.Request to include an AWS V4 signature based on the provided credentials.
func SignRequest(req *http.Request, service string, creds Credentials, theTime time.Time, sigVersion string) error {
	// General ref. https://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html
@@ -168,7 +155,7 @@ func SignV4(headers map[string][]string, method string, theURL *url.URL, body []
	canonicalReq += theURL.RawQuery + "\n" // RAW Query String

	// include the values for the signed headers
	orderedKeys := sortKeys(headersToSign)
	orderedKeys := util.KeysSorted(headersToSign)
	for _, k := range orderedKeys {
		canonicalReq += k + ":" + strings.Join(headersToSign[k], ",") + "\n"
	}
2
vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go
generated
vendored
2
vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go
generated
vendored
@@ -5,7 +5,7 @@ import (

	"io"
	"net/http"

	"github.com/open-policy-agent/opa/logging"
	"github.com/open-policy-agent/opa/v1/logging"
)

// DoRequestWithClient is a convenience function to get the body of an HTTP response with
4
vendor/github.com/open-policy-agent/opa/internal/ref/ref.go
generated
vendored
4
vendor/github.com/open-policy-agent/opa/internal/ref/ref.go
generated
vendored
@@ -9,8 +9,8 @@ import (

	"errors"
	"strings"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/storage"
	"github.com/open-policy-agent/opa/v1/ast"
	"github.com/open-policy-agent/opa/v1/storage"
)

// ParseDataPath returns a ref from the slash separated path s rooted at data.
10
vendor/github.com/open-policy-agent/opa/internal/rego/opa/options.go
generated
vendored
10
vendor/github.com/open-policy-agent/opa/internal/rego/opa/options.go
generated
vendored
@@ -4,11 +4,11 @@ import (

	"io"
	"time"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/metrics"
	"github.com/open-policy-agent/opa/topdown/builtins"
	"github.com/open-policy-agent/opa/topdown/cache"
	"github.com/open-policy-agent/opa/topdown/print"
	"github.com/open-policy-agent/opa/v1/ast"
	"github.com/open-policy-agent/opa/v1/metrics"
	"github.com/open-policy-agent/opa/v1/topdown/builtins"
	"github.com/open-policy-agent/opa/v1/topdown/cache"
	"github.com/open-policy-agent/opa/v1/topdown/print"
)

// Result holds the evaluation result.
10
vendor/github.com/open-policy-agent/opa/internal/report/report.go
generated
vendored
10
vendor/github.com/open-policy-agent/opa/internal/report/report.go
generated
vendored
@@ -17,12 +17,12 @@ import (

	"sync"
	"time"

	"github.com/open-policy-agent/opa/keys"
	"github.com/open-policy-agent/opa/logging"
	"github.com/open-policy-agent/opa/v1/keys"
	"github.com/open-policy-agent/opa/v1/logging"
	"github.com/open-policy-agent/opa/v1/version"

	"github.com/open-policy-agent/opa/plugins/rest"
	"github.com/open-policy-agent/opa/util"
	"github.com/open-policy-agent/opa/version"
	"github.com/open-policy-agent/opa/v1/plugins/rest"
	"github.com/open-policy-agent/opa/v1/util"
)

// ExternalServiceURL is the base HTTP URL for a telemetry service.
11
vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go
generated
vendored
11
vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go
generated
vendored
@@ -12,12 +12,12 @@ import (

	"path/filepath"
	"strings"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/bundle"
	storedversion "github.com/open-policy-agent/opa/internal/version"
	"github.com/open-policy-agent/opa/loader"
	"github.com/open-policy-agent/opa/metrics"
	"github.com/open-policy-agent/opa/storage"
	"github.com/open-policy-agent/opa/v1/ast"
	"github.com/open-policy-agent/opa/v1/bundle"
	"github.com/open-policy-agent/opa/v1/loader"
	"github.com/open-policy-agent/opa/v1/metrics"
	"github.com/open-policy-agent/opa/v1/storage"
)

// InsertAndCompileOptions contains the input for the operation.
@@ -53,6 +53,7 @@ func InsertAndCompile(ctx context.Context, opts InsertAndCompileOptions) (*Inser

	}

	compiler := ast.NewCompiler().
		WithDefaultRegoVersion(opts.ParserOptions.RegoVersion).
		SetErrorLimit(opts.MaxErrors).
		WithPathConflictsCheck(storage.NonEmpty(ctx, opts.Store, opts.Txn)).
		WithEnablePrintStatements(opts.EnablePrintStatements)
2
vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go
generated
vendored
2
vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go
generated
vendored
@@ -31,7 +31,7 @@ var ErrNotList = errors.New("not a list")

// MaxIndex is the maximum index that will be allowed by setIndex.
// The default value 65536 = 1024 * 64
var MaxIndex = 65536
const MaxIndex = 65536

// ToYAML takes a string of arguments and converts to a YAML document.
func ToYAML(s string) (string, error) {
4
vendor/github.com/open-policy-agent/opa/internal/version/version.go
generated
vendored
4
vendor/github.com/open-policy-agent/opa/internal/version/version.go
generated
vendored
@@ -10,8 +10,8 @@ import (

	"fmt"
	"runtime"

	"github.com/open-policy-agent/opa/storage"
	"github.com/open-policy-agent/opa/version"
	"github.com/open-policy-agent/opa/v1/storage"
	"github.com/open-policy-agent/opa/v1/version"
)

var versionPath = storage.MustParsePath("/system/version")
8
vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go
generated
vendored
8
vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go
generated
vendored
@@ -809,19 +809,19 @@ func readLimits(r io.Reader, l *module.Limit) error {

		return err
	}

	min, err := leb128.ReadVarUint32(r)
	minLim, err := leb128.ReadVarUint32(r)
	if err != nil {
		return err
	}

	l.Min = min
	l.Min = minLim

	if b == 1 {
		max, err := leb128.ReadVarUint32(r)
		maxLim, err := leb128.ReadVarUint32(r)
		if err != nil {
			return err
		}
		l.Max = &max
		l.Max = &maxLim
	} else if b != 0 {
		return fmt.Errorf("illegal limit flag")
	}
8
vendor/github.com/open-policy-agent/opa/loader/doc.go
generated
vendored
Normal file
8
vendor/github.com/open-policy-agent/opa/loader/doc.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@

// Copyright 2024 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.

// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended.
// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead.
// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information.
package loader
Some files were not shown because too many files have changed in this diff