build(deps): bump github.com/open-policy-agent/opa from 1.4.2 to 1.5.0

Bumps [github.com/open-policy-agent/opa](https://github.com/open-policy-agent/opa) from 1.4.2 to 1.5.0.
- [Release notes](https://github.com/open-policy-agent/opa/releases)
- [Changelog](https://github.com/open-policy-agent/opa/blob/main/CHANGELOG.md)
- [Commits](https://github.com/open-policy-agent/opa/compare/v1.4.2...v1.5.0)

---
updated-dependencies:
- dependency-name: github.com/open-policy-agent/opa
  dependency-version: 1.5.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
This commit is contained in:
dependabot[bot]
2025-06-02 15:18:43 +00:00
committed by GitHub
parent 524e13ae89
commit 51805e710d
255 changed files with 10086 additions and 3162 deletions

7
go.mod
View File

@@ -62,7 +62,7 @@ require (
github.com/onsi/ginkgo v1.16.5
github.com/onsi/ginkgo/v2 v2.23.4
github.com/onsi/gomega v1.37.0
github.com/open-policy-agent/opa v1.4.2
github.com/open-policy-agent/opa v1.5.0
github.com/opencloud-eu/libre-graph-api-go v1.0.6
github.com/opencloud-eu/reva/v2 v2.33.1-0.20250520152851-d33c49bb52b9
github.com/orcaman/concurrent-map v1.0.0
@@ -177,7 +177,7 @@ require (
github.com/evanphx/json-patch/v5 v5.5.0 // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.8.0 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/gdexlab/go-render v1.0.1 // indirect
github.com/go-acme/lego/v4 v4.4.0 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect
@@ -277,7 +277,7 @@ require (
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/pquerna/cachecontrol v0.2.0 // indirect
github.com/prometheus/alertmanager v0.28.1 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/prometheus/statsd_exporter v0.22.8 // indirect
@@ -305,6 +305,7 @@ require (
github.com/tidwall/pretty v1.2.1 // indirect
github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208 // indirect
github.com/trustelem/zxcvbn v1.0.1 // indirect
github.com/vektah/gqlparser/v2 v2.5.26 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/wk8/go-ordered-map v1.0.0 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect

16
go.sum
View File

@@ -109,6 +109,8 @@ github.com/alexedwards/argon2id v1.0.0/go.mod h1:tYKkqIjzXvZdzPvADMWOEZ+l6+BD6Ct
github.com/aliyun/alibaba-cloud-sdk-go v1.61.976/go.mod h1:pUKYbK5JQ+1Dfxk80P0qxGqe5dkxDoabbZS7zOcouyA=
github.com/amoghe/go-crypt v0.0.0-20220222110647-20eada5f5964 h1:I9YN9WMo3SUh7p/4wKeNvD/IQla3U3SUa61U7ul+xM4=
github.com/amoghe/go-crypt v0.0.0-20220222110647-20eada5f5964/go.mod h1:eFiR01PwTcpbzXtdMces7zxg6utvFM5puiWHpWB8D/k=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/antithesishq/antithesis-sdk-go v0.4.3-default-no-op h1:+OSa/t11TFhqfrX0EOSqQBDJ0YlpmK0rDSiB19dg9M0=
@@ -318,8 +320,8 @@ github.com/fschade/icap-client v0.0.0-20240802074440-aade4a234387 h1:Y3wZgTr29sL
github.com/fschade/icap-client v0.0.0-20240802074440-aade4a234387/go.mod h1:HpntrRsQA6RKNXy2Nbr4kVj+NO3OYWpAQUVxeya+3sU=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
github.com/gdexlab/go-render v1.0.1 h1:rxqB3vo5s4n1kF0ySmoNeSPRYkEsyHgln4jFIQY7v0U=
@@ -861,8 +863,8 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
github.com/open-policy-agent/opa v1.4.2 h1:ag4upP7zMsa4WE2p1pwAFeG4Pn3mNwfAx9DLhhJfbjU=
github.com/open-policy-agent/opa v1.4.2/go.mod h1:DNzZPKqKh4U0n0ANxcCVlw8lCSv2c+h5G/3QvSYdWZ8=
github.com/open-policy-agent/opa v1.5.0 h1:npsQMUZvafCLYHofoNrZ0cSWbvoDpasvWtrHXdEvSuM=
github.com/open-policy-agent/opa v1.5.0/go.mod h1:bYbS7u+uhTI+cxHQIpzvr5hxX0hV7urWtY+38ZtjMgk=
github.com/opencloud-eu/go-micro-plugins/v4/store/nats-js-kv v0.0.0-20250512152754-23325793059a h1:Sakl76blJAaM6NxylVkgSzktjo2dS504iDotEFJsh3M=
github.com/opencloud-eu/go-micro-plugins/v4/store/nats-js-kv v0.0.0-20250512152754-23325793059a/go.mod h1:pjcozWijkNPbEtX5SIQaxEW/h8VAVZYTLx+70bmB3LY=
github.com/opencloud-eu/libre-graph-api-go v1.0.6 h1:bUQq0tfaLboZZmPuI6C1rr/wFIVOIM9IsE1WqI5QsDA=
@@ -933,8 +935,8 @@ github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.0.0-20170706130215-fb369f752a7f/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
@@ -1107,6 +1109,8 @@ github.com/urfave/cli/v2 v2.27.6/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.1.0/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/vektah/gqlparser/v2 v2.5.26 h1:REqqFkO8+SOEgZHR/eHScjjVjGS8Nk3RMO/juiTobN4=
github.com/vektah/gqlparser/v2 v2.5.26/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo=
github.com/vinyldns/go-vinyldns v0.0.0-20200917153823-148a5f6b8f14/go.mod h1:RWc47jtnVuQv6+lY3c768WtXCas/Xi+U5UFc5xULmYg=
github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8=
github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=

View File

@@ -1,7 +1,7 @@
freebsd_task:
name: 'FreeBSD'
freebsd_instance:
image_family: freebsd-14-1
image_family: freebsd-14-2
install_script:
- pkg update -f
- pkg install -y go

View File

@@ -1,6 +1,39 @@
# Changelog
1.8.0 2023-10-31
1.9.0 2024-04-04
----------------
### Changes and fixes
- all: make BufferedWatcher buffered again ([#657])
- inotify: fix race when adding/removing watches while a watched path is being
deleted ([#678], [#686])
- inotify: don't send empty event if a watched path is unmounted ([#655])
- inotify: don't register duplicate watches when watching both a symlink and its
target; previously that would get "half-added" and removing the second would
panic ([#679])
- kqueue: fix watching relative symlinks ([#681])
- kqueue: correctly mark pre-existing entries when watching a link to a dir on
kqueue ([#682])
- illumos: don't send error if changed file is deleted while processing the
event ([#678])
[#657]: https://github.com/fsnotify/fsnotify/pull/657
[#678]: https://github.com/fsnotify/fsnotify/pull/678
[#686]: https://github.com/fsnotify/fsnotify/pull/686
[#655]: https://github.com/fsnotify/fsnotify/pull/655
[#681]: https://github.com/fsnotify/fsnotify/pull/681
[#679]: https://github.com/fsnotify/fsnotify/pull/679
[#682]: https://github.com/fsnotify/fsnotify/pull/682
1.8.0 2024-10-31
----------------
### Additions

View File

@@ -77,6 +77,7 @@ End-of-line escapes with `\` are not supported.
debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in
parallel by default, so -parallel=1 is probably a good
idea).
print [any strings] # Print text to stdout; for debugging.
touch path
mkdir [-p] dir

View File

@@ -15,7 +15,6 @@ Platform support:
| ReadDirectoryChangesW | Windows | Supported |
| FEN | illumos | Supported |
| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) |
| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment |
| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] |
| USN Journals | Windows | [Needs support in x/sys/windows][usn] |
| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) |
@@ -25,7 +24,6 @@ untested.
[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120
[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847
[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129
Usage
-----

View File

@@ -9,6 +9,7 @@ package fsnotify
import (
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
"sync"
@@ -19,27 +20,25 @@ import (
)
type fen struct {
*shared
Events chan Event
Errors chan error
mu sync.Mutex
port *unix.EventPort
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
dirs map[string]Op // Explicitly watched directories
watches map[string]Op // Explicitly watched non-directories
}
func newBackend(ev chan Event, errs chan error) (backend, error) {
return newBufferedBackend(0, ev, errs)
}
var defaultBufferSize = 0
func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
func newBackend(ev chan Event, errs chan error) (backend, error) {
w := &fen{
shared: newShared(ev, errs),
Events: ev,
Errors: errs,
dirs: make(map[string]Op),
watches: make(map[string]Op),
done: make(chan struct{}),
}
var err error
@@ -52,49 +51,10 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error
return w, nil
}
// sendEvent attempts to send an event to the user, returning true if the event
// was put in the channel successfully and false if the watcher has been closed.
func (w *fen) sendEvent(name string, op Op) (sent bool) {
select {
case <-w.done:
return false
case w.Events <- Event{Name: name, Op: op}:
return true
}
}
// sendError attempts to send an error to the user, returning true if the error
// was put in the channel successfully and false if the watcher has been closed.
func (w *fen) sendError(err error) (sent bool) {
if err == nil {
return true
}
select {
case <-w.done:
return false
case w.Errors <- err:
return true
}
}
func (w *fen) isClosed() bool {
select {
case <-w.done:
return true
default:
return false
}
}
func (w *fen) Close() error {
// Take the lock used by associateFile to prevent lingering events from
// being processed after the close
w.mu.Lock()
defer w.mu.Unlock()
if w.isClosed() {
if w.shared.close() {
return nil
}
close(w.done)
return w.port.Close()
}
@@ -209,7 +169,7 @@ func (w *fen) readEvents() {
return
}
// There was an error not caused by calling w.Close()
if !w.sendError(err) {
if !w.sendError(fmt.Errorf("port.Get: %w", err)) {
return
}
}
@@ -277,13 +237,13 @@ func (w *fen) handleEvent(event *unix.PortEvent) error {
isWatched := watchedDir || watchedPath
if events&unix.FILE_DELETE != 0 {
if !w.sendEvent(path, Remove) {
if !w.sendEvent(Event{Name: path, Op: Remove}) {
return nil
}
reRegister = false
}
if events&unix.FILE_RENAME_FROM != 0 {
if !w.sendEvent(path, Rename) {
if !w.sendEvent(Event{Name: path, Op: Rename}) {
return nil
}
// Don't keep watching the new file name
@@ -297,7 +257,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error {
// inotify reports a Remove event in this case, so we simulate this
// here.
if !w.sendEvent(path, Remove) {
if !w.sendEvent(Event{Name: path, Op: Remove}) {
return nil
}
// Don't keep watching the file that was removed
@@ -331,7 +291,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error {
// get here, the subdirectory is already gone. Clearly we were watching
// this path but now it is gone. Let's tell the user that it was
// removed.
if !w.sendEvent(path, Remove) {
if !w.sendEvent(Event{Name: path, Op: Remove}) {
return nil
}
// Suppress extra write events on removed directories; they are not
@@ -346,7 +306,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error {
if err != nil {
// The symlink still exists, but the target is gone. Report the
// Remove similar to above.
if !w.sendEvent(path, Remove) {
if !w.sendEvent(Event{Name: path, Op: Remove}) {
return nil
}
// Don't return the error
@@ -359,7 +319,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error {
return err
}
} else {
if !w.sendEvent(path, Write) {
if !w.sendEvent(Event{Name: path, Op: Write}) {
return nil
}
}
@@ -367,7 +327,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error {
if events&unix.FILE_ATTRIB != 0 && stat != nil {
// Only send Chmod if perms changed
if stat.Mode().Perm() != fmode.Perm() {
if !w.sendEvent(path, Chmod) {
if !w.sendEvent(Event{Name: path, Op: Chmod}) {
return nil
}
}
@@ -376,17 +336,27 @@ func (w *fen) handleEvent(event *unix.PortEvent) error {
if stat != nil {
// If we get here, it means we've hit an event above that requires us to
// continue watching the file or directory
return w.associateFile(path, stat, isWatched)
err := w.associateFile(path, stat, isWatched)
if errors.Is(err, fs.ErrNotExist) {
// Path may have been removed since the stat.
err = nil
}
return err
}
return nil
}
// The directory was modified, so we must find unwatched entities and watch
// them. If something was removed from the directory, nothing will happen, as
// everything else should still be watched.
func (w *fen) updateDirectory(path string) error {
// The directory was modified, so we must find unwatched entities and watch
// them. If something was removed from the directory, nothing will happen,
// as everything else should still be watched.
files, err := os.ReadDir(path)
if err != nil {
// Directory no longer exists: probably just deleted since we got the
// event.
if errors.Is(err, fs.ErrNotExist) {
return nil
}
return err
}
@@ -401,10 +371,15 @@ func (w *fen) updateDirectory(path string) error {
return err
}
err = w.associateFile(path, finfo, false)
if errors.Is(err, fs.ErrNotExist) {
// File may have disappeared between getting the dir listing and
// adding the port: that's okay to ignore.
continue
}
if !w.sendError(err) {
return nil
}
if !w.sendEvent(path, Create) {
if !w.sendEvent(Event{Name: path, Op: Create}) {
return nil
}
}
@@ -430,7 +405,7 @@ func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error {
// has fired but we haven't processed it yet.
err := w.port.DissociatePath(path)
if err != nil && !errors.Is(err, unix.ENOENT) {
return err
return fmt.Errorf("port.DissociatePath(%q): %w", path, err)
}
}
@@ -446,14 +421,22 @@ func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error {
if true {
events |= unix.FILE_ATTRIB
}
return w.port.AssociatePath(path, stat, events, stat.Mode())
err := w.port.AssociatePath(path, stat, events, stat.Mode())
if err != nil {
return fmt.Errorf("port.AssociatePath(%q): %w", path, err)
}
return nil
}
func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error {
if !w.port.PathIsWatched(path) {
return nil
}
return w.port.DissociatePath(path)
err := w.port.DissociatePath(path)
if err != nil {
return fmt.Errorf("port.DissociatePath(%q): %w", path, err)
}
return nil
}
func (w *fen) WatchList() []string {

View File

@@ -19,6 +19,7 @@ import (
)
type inotify struct {
*shared
Events chan Event
Errors chan error
@@ -27,8 +28,6 @@ type inotify struct {
fd int
inotifyFile *os.File
watches *watches
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
doneMu sync.Mutex
doneResp chan struct{} // Channel to respond to Close
// Store rename cookies in an array, with the index wrapping to 0. Almost
@@ -52,7 +51,6 @@ type inotify struct {
type (
watches struct {
mu sync.RWMutex
wd map[uint32]*watch // wd → watch
path map[string]uint32 // pathname → wd
}
@@ -75,34 +73,13 @@ func newWatches() *watches {
}
}
func (w *watches) len() int {
w.mu.RLock()
defer w.mu.RUnlock()
return len(w.wd)
}
func (w *watches) add(ww *watch) {
w.mu.Lock()
defer w.mu.Unlock()
w.wd[ww.wd] = ww
w.path[ww.path] = ww.wd
}
func (w *watches) remove(wd uint32) {
w.mu.Lock()
defer w.mu.Unlock()
watch := w.wd[wd] // Could have had Remove() called. See #616.
if watch == nil {
return
}
delete(w.path, watch.path)
delete(w.wd, wd)
}
func (w *watches) byPath(path string) *watch { return w.wd[w.path[path]] }
func (w *watches) byWd(wd uint32) *watch { return w.wd[wd] }
func (w *watches) len() int { return len(w.wd) }
func (w *watches) add(ww *watch) { w.wd[ww.wd] = ww; w.path[ww.path] = ww.wd }
func (w *watches) remove(watch *watch) { delete(w.path, watch.path); delete(w.wd, watch.wd) }
func (w *watches) removePath(path string) ([]uint32, error) {
w.mu.Lock()
defer w.mu.Unlock()
path, recurse := recursivePath(path)
wd, ok := w.path[path]
if !ok {
@@ -123,7 +100,7 @@ func (w *watches) removePath(path string) ([]uint32, error) {
wds := make([]uint32, 0, 8)
wds = append(wds, wd)
for p, rwd := range w.path {
if filepath.HasPrefix(p, path) {
if strings.HasPrefix(p, path) {
delete(w.path, p)
delete(w.wd, rwd)
wds = append(wds, rwd)
@@ -132,22 +109,7 @@ func (w *watches) removePath(path string) ([]uint32, error) {
return wds, nil
}
func (w *watches) byPath(path string) *watch {
w.mu.RLock()
defer w.mu.RUnlock()
return w.wd[w.path[path]]
}
func (w *watches) byWd(wd uint32) *watch {
w.mu.RLock()
defer w.mu.RUnlock()
return w.wd[wd]
}
func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error {
w.mu.Lock()
defer w.mu.Unlock()
var existing *watch
wd, ok := w.path[path]
if ok {
@@ -170,11 +132,9 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error
return nil
}
func newBackend(ev chan Event, errs chan error) (backend, error) {
return newBufferedBackend(0, ev, errs)
}
var defaultBufferSize = 0
func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
func newBackend(ev chan Event, errs chan error) (backend, error) {
// Need to set nonblocking mode for SetDeadline to work, otherwise blocking
// I/O operations won't terminate on close.
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
@@ -183,12 +143,12 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error
}
w := &inotify{
shared: newShared(ev, errs),
Events: ev,
Errors: errs,
fd: fd,
inotifyFile: os.NewFile(uintptr(fd), ""),
watches: newWatches(),
done: make(chan struct{}),
doneResp: make(chan struct{}),
}
@@ -196,46 +156,10 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error
return w, nil
}
// Returns true if the event was sent, or false if watcher is closed.
func (w *inotify) sendEvent(e Event) bool {
select {
case <-w.done:
return false
case w.Events <- e:
return true
}
}
// Returns true if the error was sent, or false if watcher is closed.
func (w *inotify) sendError(err error) bool {
if err == nil {
return true
}
select {
case <-w.done:
return false
case w.Errors <- err:
return true
}
}
func (w *inotify) isClosed() bool {
select {
case <-w.done:
return true
default:
return false
}
}
func (w *inotify) Close() error {
w.doneMu.Lock()
if w.isClosed() {
w.doneMu.Unlock()
if w.shared.close() {
return nil
}
close(w.done)
w.doneMu.Unlock()
// Causes any blocking reads to return with an error, provided the file
// still supports deadline operations.
@@ -244,9 +168,7 @@ func (w *inotify) Close() error {
return err
}
// Wait for goroutine to close
<-w.doneResp
<-w.doneResp // Wait for readEvents() to finish.
return nil
}
@@ -266,6 +188,43 @@ func (w *inotify) AddWith(path string, opts ...addOpt) error {
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
}
add := func(path string, with withOpts, recurse bool) error {
var flags uint32
if with.noFollow {
flags |= unix.IN_DONT_FOLLOW
}
if with.op.Has(Create) {
flags |= unix.IN_CREATE
}
if with.op.Has(Write) {
flags |= unix.IN_MODIFY
}
if with.op.Has(Remove) {
flags |= unix.IN_DELETE | unix.IN_DELETE_SELF
}
if with.op.Has(Rename) {
flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF
}
if with.op.Has(Chmod) {
flags |= unix.IN_ATTRIB
}
if with.op.Has(xUnportableOpen) {
flags |= unix.IN_OPEN
}
if with.op.Has(xUnportableRead) {
flags |= unix.IN_ACCESS
}
if with.op.Has(xUnportableCloseWrite) {
flags |= unix.IN_CLOSE_WRITE
}
if with.op.Has(xUnportableCloseRead) {
flags |= unix.IN_CLOSE_NOWRITE
}
return w.register(path, flags, recurse)
}
w.mu.Lock()
defer w.mu.Unlock()
path, recurse := recursivePath(path)
if recurse {
return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error {
@@ -289,46 +248,11 @@ func (w *inotify) AddWith(path string, opts ...addOpt) error {
w.sendEvent(Event{Name: root, Op: Create})
}
return w.add(root, with, true)
return add(root, with, true)
})
}
return w.add(path, with, false)
}
func (w *inotify) add(path string, with withOpts, recurse bool) error {
var flags uint32
if with.noFollow {
flags |= unix.IN_DONT_FOLLOW
}
if with.op.Has(Create) {
flags |= unix.IN_CREATE
}
if with.op.Has(Write) {
flags |= unix.IN_MODIFY
}
if with.op.Has(Remove) {
flags |= unix.IN_DELETE | unix.IN_DELETE_SELF
}
if with.op.Has(Rename) {
flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF
}
if with.op.Has(Chmod) {
flags |= unix.IN_ATTRIB
}
if with.op.Has(xUnportableOpen) {
flags |= unix.IN_OPEN
}
if with.op.Has(xUnportableRead) {
flags |= unix.IN_ACCESS
}
if with.op.Has(xUnportableCloseWrite) {
flags |= unix.IN_CLOSE_WRITE
}
if with.op.Has(xUnportableCloseRead) {
flags |= unix.IN_CLOSE_NOWRITE
}
return w.register(path, flags, recurse)
return add(path, with, false)
}
func (w *inotify) register(path string, flags uint32, recurse bool) error {
@@ -342,6 +266,10 @@ func (w *inotify) register(path string, flags uint32, recurse bool) error {
return nil, err
}
if e, ok := w.watches.wd[uint32(wd)]; ok {
return e, nil
}
if existing == nil {
return &watch{
wd: uint32(wd),
@@ -365,6 +293,9 @@ func (w *inotify) Remove(name string) error {
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
time.Now().Format("15:04:05.000000000"), name)
}
w.mu.Lock()
defer w.mu.Unlock()
return w.remove(filepath.Clean(name))
}
@@ -399,13 +330,12 @@ func (w *inotify) WatchList() []string {
return nil
}
w.mu.Lock()
defer w.mu.Unlock()
entries := make([]string, 0, w.watches.len())
w.watches.mu.RLock()
for pathname := range w.watches.path {
entries = append(entries, pathname)
}
w.watches.mu.RUnlock()
return entries
}
@@ -418,21 +348,17 @@ func (w *inotify) readEvents() {
close(w.Events)
}()
var (
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
errno error // Syscall errno
)
var buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
for {
// See if we have been closed.
if w.isClosed() {
return
}
n, err := w.inotifyFile.Read(buf[:])
switch {
case errors.Unwrap(err) == os.ErrClosed:
return
case err != nil:
if err != nil {
if errors.Is(err, os.ErrClosed) {
return
}
if !w.sendError(err) {
return
}
@@ -440,13 +366,9 @@ func (w *inotify) readEvents() {
}
if n < unix.SizeofInotifyEvent {
var err error
err := errors.New("notify: short read in readEvents()") // Read was too short.
if n == 0 {
err = io.EOF // If EOF is received. This should really never happen.
} else if n < 0 {
err = errno // If an error occurred while reading.
} else {
err = errors.New("notify: short read in readEvents()") // Read was too short.
}
if !w.sendError(err) {
return
@@ -454,134 +376,137 @@ func (w *inotify) readEvents() {
continue
}
// We don't know how many events we just read into the buffer
// While the offset points to at least one whole event...
// We don't know how many events we just read into the buffer While the
// offset points to at least one whole event.
var offset uint32
for offset <= uint32(n-unix.SizeofInotifyEvent) {
var (
// Point "raw" to the event in the buffer
raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
mask = uint32(raw.Mask)
nameLen = uint32(raw.Len)
// Move to the next event in the buffer
next = func() { offset += unix.SizeofInotifyEvent + nameLen }
)
// Point to the event in the buffer.
inEvent := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
if mask&unix.IN_Q_OVERFLOW != 0 {
if inEvent.Mask&unix.IN_Q_OVERFLOW != 0 {
if !w.sendError(ErrEventOverflow) {
return
}
}
/// If the event happened to the watched directory or the watched
/// file, the kernel doesn't append the filename to the event, but
/// we would like to always fill the "Name" field with a valid
/// filename. We retrieve the path of the watch from the "paths"
/// map.
watch := w.watches.byWd(uint32(raw.Wd))
/// Can be nil if Remove() was called in another goroutine for this
/// path inbetween reading the events from the kernel and reading
/// the internal state. Not much we can do about it, so just skip.
/// See #616.
if watch == nil {
next()
continue
ev, ok := w.handleEvent(inEvent, &buf, offset)
if !ok {
return
}
name := watch.path
if nameLen > 0 {
/// Point "bytes" at the first byte of the filename
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
/// The filename is padded with NULL bytes. TrimRight() gets rid of those.
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
}
if debug {
internal.Debug(name, raw.Mask, raw.Cookie)
}
if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0
next()
continue
}
// inotify will automatically remove the watch on deletes; just need
// to clean our state here.
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
w.watches.remove(watch.wd)
}
// We can't really update the state when a watched path is moved;
// only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove
// the watch.
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
if watch.recurse {
next() // Do nothing
continue
}
err := w.remove(watch.path)
if err != nil && !errors.Is(err, ErrNonExistentWatch) {
if !w.sendError(err) {
return
}
}
}
/// Skip if we're watching both this path and the parent; the parent
/// will already send a delete so no need to do it twice.
if mask&unix.IN_DELETE_SELF != 0 {
if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok {
next()
continue
}
}
ev := w.newEvent(name, mask, raw.Cookie)
// Need to update watch path for recurse.
if watch.recurse {
isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR
/// New directory created: set up watch on it.
if isDir && ev.Has(Create) {
err := w.register(ev.Name, watch.flags, true)
if !w.sendError(err) {
return
}
// This was a directory rename, so we need to update all
// the children.
//
// TODO: this is of course pretty slow; we should use a
// better data structure for storing all of this, e.g. store
// children in the watch. I have some code for this in my
// kqueue refactor we can use in the future. For now I'm
// okay with this as it's not publicly available.
// Correctness first, performance second.
if ev.renamedFrom != "" {
w.watches.mu.Lock()
for k, ww := range w.watches.wd {
if k == watch.wd || ww.path == ev.Name {
continue
}
if strings.HasPrefix(ww.path, ev.renamedFrom) {
ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1)
w.watches.wd[k] = ww
}
}
w.watches.mu.Unlock()
}
}
}
/// Send the events that are not ignored on the events channel
if !w.sendEvent(ev) {
return
}
next()
// Move to the next event in the buffer
offset += unix.SizeofInotifyEvent + inEvent.Len
}
}
}
func (w *inotify) handleEvent(inEvent *unix.InotifyEvent, buf *[65536]byte, offset uint32) (Event, bool) {
w.mu.Lock()
defer w.mu.Unlock()
/// If the event happened to the watched directory or the watched file, the
/// kernel doesn't append the filename to the event, but we would like to
/// always fill the "Name" field with a valid filename. We retrieve the
/// path of the watch from the "paths" map.
///
/// Can be nil if Remove() was called in another goroutine for this path
/// inbetween reading the events from the kernel and reading the internal
/// state. Not much we can do about it, so just skip. See #616.
watch := w.watches.byWd(uint32(inEvent.Wd))
if watch == nil {
return Event{}, true
}
var (
name = watch.path
nameLen = uint32(inEvent.Len)
)
if nameLen > 0 {
/// Point "bytes" at the first byte of the filename
bb := *buf
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&bb[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
/// The filename is padded with NULL bytes. TrimRight() gets rid of those.
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\x00")
}
if debug {
internal.Debug(name, inEvent.Mask, inEvent.Cookie)
}
if inEvent.Mask&unix.IN_IGNORED != 0 || inEvent.Mask&unix.IN_UNMOUNT != 0 {
w.watches.remove(watch)
return Event{}, true
}
// inotify will automatically remove the watch on deletes; just need
// to clean our state here.
if inEvent.Mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
w.watches.remove(watch)
}
// We can't really update the state when a watched path is moved; only
// IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove the watch.
if inEvent.Mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
if watch.recurse { // Do nothing
return Event{}, true
}
err := w.remove(watch.path)
if err != nil && !errors.Is(err, ErrNonExistentWatch) {
if !w.sendError(err) {
return Event{}, false
}
}
}
/// Skip if we're watching both this path and the parent; the parent will
/// already send a delete so no need to do it twice.
if inEvent.Mask&unix.IN_DELETE_SELF != 0 {
_, ok := w.watches.path[filepath.Dir(watch.path)]
if ok {
return Event{}, true
}
}
ev := w.newEvent(name, inEvent.Mask, inEvent.Cookie)
// Need to update watch path for recurse.
if watch.recurse {
isDir := inEvent.Mask&unix.IN_ISDIR == unix.IN_ISDIR
/// New directory created: set up watch on it.
if isDir && ev.Has(Create) {
err := w.register(ev.Name, watch.flags, true)
if !w.sendError(err) {
return Event{}, false
}
// This was a directory rename, so we need to update all the
// children.
//
// TODO: this is of course pretty slow; we should use a better data
// structure for storing all of this, e.g. store children in the
// watch. I have some code for this in my kqueue refactor we can use
// in the future. For now I'm okay with this as it's not publicly
// available. Correctness first, performance second.
if ev.renamedFrom != "" {
for k, ww := range w.watches.wd {
if k == watch.wd || ww.path == ev.Name {
continue
}
if strings.HasPrefix(ww.path, ev.renamedFrom) {
ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1)
w.watches.wd[k] = ww
}
}
}
}
}
return ev, true
}
func (w *inotify) isRecursive(path string) bool {
ww := w.watches.byPath(path)
if ww == nil { // path could be a file, so also check the Dir.
@@ -650,8 +575,8 @@ func (w *inotify) xSupports(op Op) bool {
}
func (w *inotify) state() {
w.watches.mu.Lock()
defer w.watches.mu.Unlock()
w.mu.Lock()
defer w.mu.Unlock()
for wd, ww := range w.watches.wd {
fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path)
}

View File

@@ -16,14 +16,13 @@ import (
)
type kqueue struct {
*shared
Events chan Event
Errors chan error
kq int // File descriptor (as returned by the kqueue() syscall).
closepipe [2]int // Pipe used for closing kq.
watches *watches
done chan struct{}
doneMu sync.Mutex
}
type (
@@ -132,14 +131,18 @@ func (w *watches) byPath(path string) (watch, bool) {
return info, ok
}
func (w *watches) updateDirFlags(path string, flags uint32) {
func (w *watches) updateDirFlags(path string, flags uint32) bool {
w.mu.Lock()
defer w.mu.Unlock()
fd := w.path[path]
fd, ok := w.path[path]
if !ok { // Already deleted: don't re-set it here.
return false
}
info := w.wd[fd]
info.dirFlags = flags
w.wd[fd] = info
return true
}
func (w *watches) remove(fd int, path string) bool {
@@ -179,22 +182,20 @@ func (w *watches) seenBefore(path string) bool {
return ok
}
func newBackend(ev chan Event, errs chan error) (backend, error) {
return newBufferedBackend(0, ev, errs)
}
var defaultBufferSize = 0
func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
func newBackend(ev chan Event, errs chan error) (backend, error) {
kq, closepipe, err := newKqueue()
if err != nil {
return nil, err
}
w := &kqueue{
shared: newShared(ev, errs),
Events: ev,
Errors: errs,
kq: kq,
closepipe: closepipe,
done: make(chan struct{}),
watches: newWatches(),
}
@@ -210,7 +211,7 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error
// all.
func newKqueue() (kq int, closepipe [2]int, err error) {
kq, err = unix.Kqueue()
if kq == -1 {
if err != nil {
return kq, closepipe, err
}
@@ -239,54 +240,17 @@ func newKqueue() (kq int, closepipe [2]int, err error) {
return kq, closepipe, nil
}
// Returns true if the event was sent, or false if watcher is closed.
func (w *kqueue) sendEvent(e Event) bool {
select {
case <-w.done:
return false
case w.Events <- e:
return true
}
}
// Returns true if the error was sent, or false if watcher is closed.
func (w *kqueue) sendError(err error) bool {
if err == nil {
return true
}
select {
case <-w.done:
return false
case w.Errors <- err:
return true
}
}
func (w *kqueue) isClosed() bool {
select {
case <-w.done:
return true
default:
return false
}
}
func (w *kqueue) Close() error {
w.doneMu.Lock()
if w.isClosed() {
w.doneMu.Unlock()
if w.shared.close() {
return nil
}
close(w.done)
w.doneMu.Unlock()
pathsToRemove := w.watches.listPaths(false)
for _, name := range pathsToRemove {
w.Remove(name)
}
// Send "quit" message to the reader goroutine.
unix.Close(w.closepipe[1])
unix.Close(w.closepipe[1]) // Send "quit" message to readEvents
return nil
}
@@ -303,7 +267,7 @@ func (w *kqueue) AddWith(name string, opts ...addOpt) error {
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
}
_, err := w.addWatch(name, noteAllEvents)
_, err := w.addWatch(name, noteAllEvents, false)
if err != nil {
return err
}
@@ -366,7 +330,7 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un
// described in kevent(2).
//
// Returns the real path to the file which was added, with symlinks resolved.
func (w *kqueue) addWatch(name string, flags uint32) (string, error) {
func (w *kqueue) addWatch(name string, flags uint32, listDir bool) (string, error) {
if w.isClosed() {
return "", ErrClosed
}
@@ -385,15 +349,15 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) {
return "", nil
}
// Follow symlinks.
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
// Follow symlinks, but only for paths added with Add(), and not paths
// we're adding from internalWatch from a listdir.
if !listDir && fi.Mode()&os.ModeSymlink == os.ModeSymlink {
link, err := os.Readlink(name)
if err != nil {
// Return nil because Linux can add unresolvable symlinks to the
// watch list without problems, so maintain consistency with
// that. There will be no file events for broken symlinks.
// TODO: more specific check; returns os.PathError; ENOENT?
return "", nil
return "", err
}
if !filepath.IsAbs(link) {
link = filepath.Join(filepath.Dir(name), link)
}
_, alreadyWatching = w.watches.byPath(link)
@@ -408,7 +372,7 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) {
name = link
fi, err = os.Lstat(name)
if err != nil {
return "", nil
return "", err
}
}
@@ -422,7 +386,6 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) {
if errors.Is(err, unix.EINTR) {
continue
}
return "", err
}
@@ -444,10 +407,16 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) {
if info.isDir {
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
(!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE)
w.watches.updateDirFlags(name, flags)
if !w.watches.updateDirFlags(name, flags) {
return "", nil
}
if watchDir {
if err := w.watchDirectoryFiles(name); err != nil {
d := name
if info.linkName != "" {
d = info.linkName
}
if err := w.watchDirectoryFiles(d); err != nil {
return "", err
}
}
@@ -644,19 +613,22 @@ func (w *kqueue) dirChange(dir string) error {
if errors.Is(err, os.ErrNotExist) {
return nil
}
return fmt.Errorf("fsnotify.dirChange: %w", err)
return fmt.Errorf("fsnotify.dirChange %q: %w", dir, err)
}
for _, f := range files {
fi, err := f.Info()
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil
}
return fmt.Errorf("fsnotify.dirChange: %w", err)
}
err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi)
if err != nil {
// Don't need to send an error if this file isn't readable.
if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) {
if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) || errors.Is(err, os.ErrNotExist) {
return nil
}
return fmt.Errorf("fsnotify.dirChange: %w", err)
@@ -688,11 +660,11 @@ func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) {
// mimic Linux providing delete events for subdirectories, but preserve
// the flags used if currently watching subdirectory
info, _ := w.watches.byPath(name)
return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME)
return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME, true)
}
// watch file to mimic Linux inotify
return w.addWatch(name, noteAllEvents)
// Watch file to mimic Linux inotify.
return w.addWatch(name, noteAllEvents, true)
}
// Register events with the queue.
@@ -722,9 +694,9 @@ func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) {
}
func (w *kqueue) xSupports(op Op) bool {
if runtime.GOOS == "freebsd" {
//return true // Supports everything.
}
//if runtime.GOOS == "freebsd" {
// return true // Supports everything.
//}
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
return false

View File

@@ -9,12 +9,11 @@ type other struct {
Errors chan error
}
var defaultBufferSize = 0
func newBackend(ev chan Event, errs chan error) (backend, error) {
return nil, errors.New("fsnotify not supported on the current platform")
}
func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
return newBackend(ev, errs)
}
func (w *other) Close() error { return nil }
func (w *other) WatchList() []string { return nil }
func (w *other) Add(name string) error { return nil }

View File

@@ -28,18 +28,16 @@ type readDirChangesW struct {
port windows.Handle // Handle to completion port
input chan *input // Inputs to the reader are sent on this channel
quit chan chan<- error
done chan chan<- error
mu sync.Mutex // Protects access to watches, closed
watches watchMap // Map of watches (key: i-number)
closed bool // Set to true when Close() is first called
}
func newBackend(ev chan Event, errs chan error) (backend, error) {
return newBufferedBackend(50, ev, errs)
}
var defaultBufferSize = 50
func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
func newBackend(ev chan Event, errs chan error) (backend, error) {
port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
if err != nil {
return nil, os.NewSyscallError("CreateIoCompletionPort", err)
@@ -50,7 +48,7 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error
port: port,
watches: make(watchMap),
input: make(chan *input, 1),
quit: make(chan chan<- error, 1),
done: make(chan chan<- error, 1),
}
go w.readEvents()
return w, nil
@@ -70,8 +68,8 @@ func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool
event := w.newEvent(name, uint32(mask))
event.renamedFrom = renamedFrom
select {
case ch := <-w.quit:
w.quit <- ch
case ch := <-w.done:
w.done <- ch
case w.Events <- event:
}
return true
@@ -83,10 +81,10 @@ func (w *readDirChangesW) sendError(err error) bool {
return true
}
select {
case <-w.done:
return false
case w.Errors <- err:
return true
case <-w.quit:
return false
}
}
@@ -99,9 +97,9 @@ func (w *readDirChangesW) Close() error {
w.closed = true
w.mu.Unlock()
// Send "quit" message to the reader goroutine
// Send "done" message to the reader goroutine
ch := make(chan error)
w.quit <- ch
w.done <- ch
if err := w.wakeupReader(); err != nil {
return err
}
@@ -495,7 +493,7 @@ func (w *readDirChangesW) readEvents() {
watch := (*watch)(unsafe.Pointer(ov))
if watch == nil {
select {
case ch := <-w.quit:
case ch := <-w.done:
w.mu.Lock()
var indexes []indexMap
for _, index := range w.watches {

View File

@@ -244,12 +244,13 @@ var (
// ErrUnsupported is returned by AddWith() when WithOps() specified an
// Unportable event that's not supported on this platform.
//lint:ignore ST1012 not relevant
xErrUnsupported = errors.New("fsnotify: not supported with this backend")
)
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
ev, errs := make(chan Event), make(chan error)
ev, errs := make(chan Event, defaultBufferSize), make(chan error)
b, err := newBackend(ev, errs)
if err != nil {
return nil, err
@@ -266,8 +267,8 @@ func NewWatcher() (*Watcher, error) {
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
func NewBufferedWatcher(sz uint) (*Watcher, error) {
ev, errs := make(chan Event), make(chan error)
b, err := newBufferedBackend(sz, ev, errs)
ev, errs := make(chan Event, sz), make(chan error)
b, err := newBackend(ev, errs)
if err != nil {
return nil, err
}
@@ -337,7 +338,8 @@ func (w *Watcher) Close() error { return w.b.Close() }
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
// yet removed).
//
// Returns nil if [Watcher.Close] was called.
// The order is undefined, and may differ per call. Returns nil if
// [Watcher.Close] was called.
func (w *Watcher) WatchList() []string { return w.b.WatchList() }
// Supports reports if all the listed operations are supported by this platform.

View File

@@ -9,14 +9,14 @@ import (
)
var (
SyscallEACCES = syscall.EACCES
UnixEACCES = unix.EACCES
ErrSyscallEACCES = syscall.EACCES
ErrUnixEACCES = unix.EACCES
)
var maxfiles uint64
// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
func SetRlimit() {
// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
var l syscall.Rlimit
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
if err == nil && l.Cur != l.Max {

View File

@@ -9,8 +9,8 @@ import (
)
var (
SyscallEACCES = syscall.EACCES
UnixEACCES = unix.EACCES
ErrSyscallEACCES = syscall.EACCES
ErrUnixEACCES = unix.EACCES
)
var maxfiles uint64

View File

@@ -1,4 +1,4 @@
//go:build !windows && !darwin && !freebsd
//go:build !windows && !darwin && !freebsd && !plan9
package internal
@@ -9,8 +9,8 @@ import (
)
var (
SyscallEACCES = syscall.EACCES
UnixEACCES = unix.EACCES
ErrSyscallEACCES = syscall.EACCES
ErrUnixEACCES = unix.EACCES
)
var maxfiles uint64

View File

@@ -10,8 +10,8 @@ import (
// Just a dummy.
var (
SyscallEACCES = errors.New("dummy")
UnixEACCES = errors.New("dummy")
ErrSyscallEACCES = errors.New("dummy")
ErrUnixEACCES = errors.New("dummy")
)
func SetRlimit() {}

64
vendor/github.com/fsnotify/fsnotify/shared.go generated vendored Normal file
View File

@@ -0,0 +1,64 @@
package fsnotify
import "sync"
// shared holds the state common to every fsnotify backend
// implementation: the user-facing Events/Errors channels and the
// bookkeeping needed to close the watcher exactly once.
type shared struct {
	Events chan Event // filesystem events are delivered here
	Errors chan error // errors are delivered here
	done chan struct{} // closed once to signal the watcher is shut down
	mu sync.Mutex // serializes close() so done is closed at most once
}
// newShared constructs the backend-shared state around the given event
// and error channels, with a fresh (open) done channel.
func newShared(ev chan Event, errs chan error) *shared {
	s := &shared{done: make(chan struct{})}
	s.Events = ev
	s.Errors = errs
	return s
}
// sendEvent delivers e on the Events channel. Events with a zero Op are
// silently dropped and reported as sent. Reports whether the event was
// delivered; false means the watcher has been closed.
func (w *shared) sendEvent(e Event) bool {
	if e.Op == 0 {
		return true
	}
	select {
	case w.Events <- e:
		return true
	case <-w.done:
		return false
	}
}
// sendError delivers err on the Errors channel. A nil err is a no-op
// and is reported as sent. Reports whether the error was delivered;
// false means the watcher has been closed.
func (w *shared) sendError(err error) bool {
	if err == nil {
		return true
	}
	select {
	case w.Errors <- err:
		return true
	case <-w.done:
		return false
	}
}
// isClosed reports whether close() has been called, by probing the done
// channel without blocking.
func (w *shared) isClosed() bool {
	closed := false
	select {
	case <-w.done:
		closed = true
	default:
	}
	return closed
}
// close marks the watcher as closed by closing the done channel.
// Reports true when the watcher was already closed (done stays closed),
// false when this call performed the close. The mutex guarantees the
// channel is closed at most once even under concurrent callers.
func (w *shared) close() bool {
	w.mu.Lock()
	defer w.mu.Unlock()
	select {
	case <-w.done: // already closed by an earlier call
		return true
	default:
		close(w.done)
		return false
	}
}

3
vendor/github.com/fsnotify/fsnotify/staticcheck.conf generated vendored Normal file
View File

@@ -0,0 +1,3 @@
checks = ['all',
'-U1000', # Don't complain about unused functions.
]

View File

@@ -34,6 +34,6 @@ import (
// Sets are considered equal if and only if the symmetric difference of a and b
// is empty.
// Other comparisons are consistent but not defined.
func Compare(a, b interface{}) int {
func Compare(a, b any) int {
return v1.Compare(a, b)
}

View File

@@ -41,6 +41,6 @@ type ErrorDetails = v1.ErrorDetails
type Error = v1.Error
// NewError returns a new Error object.
func NewError(code string, loc *Location, f string, a ...interface{}) *Error {
func NewError(code string, loc *Location, f string, a ...any) *Error {
return v1.NewError(code, loc, f, a...)
}

View File

@@ -211,7 +211,7 @@ func NewBody(exprs ...*Expr) Body {
}
// NewExpr returns a new Expr object.
func NewExpr(terms interface{}) *Expr {
func NewExpr(terms any) *Expr {
return v1.NewExpr(terms)
}
@@ -222,7 +222,7 @@ func NewBuiltinExpr(terms ...*Term) *Expr {
}
// Copy returns a deep copy of the AST node x. If x is not an AST node, x is returned unmodified.
func Copy(x interface{}) interface{} {
func Copy(x any) any {
return v1.Copy(x)
}

View File

@@ -13,6 +13,6 @@ import (
// Pretty writes a pretty representation of the AST rooted at x to w.
//
// This is function is intended for debug purposes when inspecting ASTs.
func Pretty(w io.Writer, x interface{}) {
func Pretty(w io.Writer, x any) {
v1.Pretty(w, x)
}

View File

@@ -9,6 +9,6 @@ import (
)
// TypeName returns a human readable name for the AST element type.
func TypeName(x interface{}) string {
func TypeName(x any) string {
return v1.TypeName(x)
}

View File

@@ -30,7 +30,7 @@ func NewLocation(text []byte, file string, row int, col int) *Location {
type Value = v1.Value
// InterfaceToValue converts a native Go value x to a Value.
func InterfaceToValue(x interface{}) (Value, error) {
func InterfaceToValue(x any) (Value, error) {
return v1.InterfaceToValue(x)
}
@@ -40,7 +40,7 @@ func ValueFromReader(r io.Reader) (Value, error) {
}
// As converts v into a Go native type referred to by x.
func As(v Value, x interface{}) error {
func As(v Value, x any) error {
return v1.As(v, x)
}
@@ -62,13 +62,13 @@ func IsUnknownValueErr(err error) bool {
// ValueToInterface returns the Go representation of an AST value. The AST
// value should not contain any values that require evaluation (e.g., vars,
// comprehensions, etc.)
func ValueToInterface(v Value, resolver Resolver) (interface{}, error) {
func ValueToInterface(v Value, resolver Resolver) (any, error) {
return v1.ValueToInterface(v, resolver)
}
// JSON returns the JSON representation of v. The value must not contain any
// refs or terms that require evaluation (e.g., vars, comprehensions, etc.)
func JSON(v Value) (interface{}, error) {
func JSON(v Value) (any, error) {
return v1.JSON(v)
}
@@ -77,7 +77,7 @@ type JSONOpt = v1.JSONOpt
// JSONWithOpt returns the JSON representation of v. The value must not contain any
// refs or terms that require evaluation (e.g., vars, comprehensions, etc.)
func JSONWithOpt(v Value, opt JSONOpt) (interface{}, error) {
func JSONWithOpt(v Value, opt JSONOpt) (any, error) {
return v1.JSONWithOpt(v, opt)
}
@@ -85,14 +85,14 @@ func JSONWithOpt(v Value, opt JSONOpt) (interface{}, error) {
// refs or terms that require evaluation (e.g., vars, comprehensions, etc.) If
// the conversion fails, this function will panic. This function is mostly for
// test purposes.
func MustJSON(v Value) interface{} {
func MustJSON(v Value) any {
return v1.MustJSON(v)
}
// MustInterfaceToValue converts a native Go value x to a Value. If the
// conversion fails, this function will panic. This function is mostly for test
// purposes.
func MustInterfaceToValue(x interface{}) Value {
func MustInterfaceToValue(x any) Value {
return v1.MustInterfaceToValue(x)
}
@@ -115,17 +115,17 @@ func IsComprehension(x Value) bool {
}
// ContainsRefs returns true if the Value v contains refs.
func ContainsRefs(v interface{}) bool {
func ContainsRefs(v any) bool {
return v1.ContainsRefs(v)
}
// ContainsComprehensions returns true if the Value v contains comprehensions.
func ContainsComprehensions(v interface{}) bool {
func ContainsComprehensions(v any) bool {
return v1.ContainsComprehensions(v)
}
// ContainsClosures returns true if the Value v contains closures.
func ContainsClosures(v interface{}) bool {
func ContainsClosures(v any) bool {
return v1.ContainsClosures(v)
}
@@ -256,7 +256,7 @@ func ObjectTerm(o ...[2]*Term) *Term {
return v1.ObjectTerm(o...)
}
func LazyObject(blob map[string]interface{}) Object {
func LazyObject(blob map[string]any) Object {
return v1.LazyObject(blob)
}

View File

@@ -16,22 +16,22 @@ type Transformer = v1.Transformer
// Transform iterates the AST and calls the Transform function on the
// Transformer t for x before recursing.
func Transform(t Transformer, x interface{}) (interface{}, error) {
func Transform(t Transformer, x any) (any, error) {
return v1.Transform(t, x)
}
// TransformRefs calls the function f on all references under x.
func TransformRefs(x interface{}, f func(Ref) (Value, error)) (interface{}, error) {
func TransformRefs(x any, f func(Ref) (Value, error)) (any, error) {
return v1.TransformRefs(x, f)
}
// TransformVars calls the function f on all vars under x.
func TransformVars(x interface{}, f func(Var) (Value, error)) (interface{}, error) {
func TransformVars(x any, f func(Var) (Value, error)) (any, error) {
return v1.TransformVars(x, f)
}
// TransformComprehensions calls the function f on all comprehensions under x.
func TransformComprehensions(x interface{}, f func(interface{}) (Value, error)) (interface{}, error) {
func TransformComprehensions(x any, f func(any) (Value, error)) (any, error) {
return v1.TransformComprehensions(x, f)
}
@@ -41,6 +41,6 @@ type GenericTransformer = v1.GenericTransformer
// NewGenericTransformer returns a new GenericTransformer that will transform
// AST nodes using the function f.
func NewGenericTransformer(f func(x interface{}) (interface{}, error)) *GenericTransformer {
func NewGenericTransformer(f func(x any) (any, error)) *GenericTransformer {
return v1.NewGenericTransformer(f)
}

View File

@@ -21,68 +21,68 @@ type BeforeAndAfterVisitor = v1.BeforeAndAfterVisitor
// Walk iterates the AST by calling the Visit function on the Visitor
// v for x before recursing.
// Deprecated: use GenericVisitor.Walk
func Walk(v Visitor, x interface{}) {
func Walk(v Visitor, x any) {
v1.Walk(v, x)
}
// WalkBeforeAndAfter iterates the AST by calling the Visit function on the
// Visitor v for x before recursing.
// Deprecated: use GenericVisitor.Walk
func WalkBeforeAndAfter(v BeforeAndAfterVisitor, x interface{}) {
func WalkBeforeAndAfter(v BeforeAndAfterVisitor, x any) {
v1.WalkBeforeAndAfter(v, x)
}
// WalkVars calls the function f on all vars under x. If the function f
// returns true, AST nodes under the last node will not be visited.
func WalkVars(x interface{}, f func(Var) bool) {
func WalkVars(x any, f func(Var) bool) {
v1.WalkVars(x, f)
}
// WalkClosures calls the function f on all closures under x. If the function f
// returns true, AST nodes under the last node will not be visited.
func WalkClosures(x interface{}, f func(interface{}) bool) {
func WalkClosures(x any, f func(any) bool) {
v1.WalkClosures(x, f)
}
// WalkRefs calls the function f on all references under x. If the function f
// returns true, AST nodes under the last node will not be visited.
func WalkRefs(x interface{}, f func(Ref) bool) {
func WalkRefs(x any, f func(Ref) bool) {
v1.WalkRefs(x, f)
}
// WalkTerms calls the function f on all terms under x. If the function f
// returns true, AST nodes under the last node will not be visited.
func WalkTerms(x interface{}, f func(*Term) bool) {
func WalkTerms(x any, f func(*Term) bool) {
v1.WalkTerms(x, f)
}
// WalkWiths calls the function f on all with modifiers under x. If the function f
// returns true, AST nodes under the last node will not be visited.
func WalkWiths(x interface{}, f func(*With) bool) {
func WalkWiths(x any, f func(*With) bool) {
v1.WalkWiths(x, f)
}
// WalkExprs calls the function f on all expressions under x. If the function f
// returns true, AST nodes under the last node will not be visited.
func WalkExprs(x interface{}, f func(*Expr) bool) {
func WalkExprs(x any, f func(*Expr) bool) {
v1.WalkExprs(x, f)
}
// WalkBodies calls the function f on all bodies under x. If the function f
// returns true, AST nodes under the last node will not be visited.
func WalkBodies(x interface{}, f func(Body) bool) {
func WalkBodies(x any, f func(Body) bool) {
v1.WalkBodies(x, f)
}
// WalkRules calls the function f on all rules under x. If the function f
// returns true, AST nodes under the last node will not be visited.
func WalkRules(x interface{}, f func(*Rule) bool) {
func WalkRules(x any, f func(*Rule) bool) {
v1.WalkRules(x, f)
}
// WalkNodes calls the function f on all nodes under x. If the function f
// returns true, AST nodes under the last node will not be visited.
func WalkNodes(x interface{}, f func(Node) bool) {
func WalkNodes(x any, f func(Node) bool) {
v1.WalkNodes(x, f)
}
@@ -93,7 +93,7 @@ type GenericVisitor = v1.GenericVisitor
// NewGenericVisitor returns a new GenericVisitor that will invoke the function
// f on AST nodes.
func NewGenericVisitor(f func(x interface{}) bool) *GenericVisitor {
func NewGenericVisitor(f func(x any) bool) *GenericVisitor {
return v1.NewGenericVisitor(f)
}
@@ -105,7 +105,7 @@ type BeforeAfterVisitor = v1.BeforeAfterVisitor
// NewBeforeAfterVisitor returns a new BeforeAndAfterVisitor that
// will invoke the functions before and after AST nodes.
func NewBeforeAfterVisitor(before func(x interface{}) bool, after func(x interface{})) *BeforeAfterVisitor {
func NewBeforeAfterVisitor(before func(x any) bool, after func(x any)) *BeforeAfterVisitor {
return v1.NewBeforeAfterVisitor(before, after)
}

View File

@@ -7,6 +7,7 @@ package bundle
import (
"context"
"github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/storage"
v1 "github.com/open-policy-agent/opa/v1/bundle"
)
@@ -70,7 +71,7 @@ func ReadBundleRevisionFromStore(ctx context.Context, store storage.Store, txn s
// ReadBundleMetadataFromStore returns the metadata in the specified bundle.
// If the bundle is not activated, this function will return
// storage NotFound error.
func ReadBundleMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string]interface{}, error) {
func ReadBundleMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string]any, error) {
return v1.ReadBundleMetadataFromStore(ctx, store, txn, name)
}
@@ -87,7 +88,7 @@ type ActivateOpts = v1.ActivateOpts
// Activate the bundle(s) by loading into the given Store. This will load policies, data, and record
// the manifest in storage. The compiler provided will have had the polices compiled on it.
func Activate(opts *ActivateOpts) error {
return v1.Activate(opts)
return v1.Activate(setActivateDefaultRegoVersion(opts))
}
// DeactivateOpts defines options for the Deactivate API call
@@ -95,7 +96,7 @@ type DeactivateOpts = v1.DeactivateOpts
// Deactivate the bundle(s). This will erase associated data, policies, and the manifest entry from the store.
func Deactivate(opts *DeactivateOpts) error {
return v1.Deactivate(opts)
return v1.Deactivate(setDeactivateDefaultRegoVersion(opts))
}
// LegacyWriteManifestToStore will write the bundle manifest to the older single (unnamed) bundle manifest location.
@@ -121,3 +122,31 @@ func LegacyReadRevisionFromStore(ctx context.Context, store storage.Store, txn s
func ActivateLegacy(opts *ActivateOpts) error {
return v1.ActivateLegacy(opts)
}
// setActivateDefaultRegoVersion returns opts with
// ParserOptions.RegoVersion defaulted to ast.DefaultRegoVersion when it
// is unset (ast.RegoUndefined). In that case a shallow copy is returned
// so the caller's opts value is left untouched; otherwise (including a
// nil opts) the argument is returned as-is.
func setActivateDefaultRegoVersion(opts *ActivateOpts) *ActivateOpts {
	if opts == nil || opts.ParserOptions.RegoVersion != ast.RegoUndefined {
		return opts
	}
	cpy := *opts
	cpy.ParserOptions.RegoVersion = ast.DefaultRegoVersion
	return &cpy
}
// setDeactivateDefaultRegoVersion returns opts with
// ParserOptions.RegoVersion defaulted to ast.DefaultRegoVersion when it
// is unset (ast.RegoUndefined). In that case a shallow copy is returned
// so the caller's opts value is left untouched; otherwise (including a
// nil opts) the argument is returned as-is.
func setDeactivateDefaultRegoVersion(opts *DeactivateOpts) *DeactivateOpts {
	if opts == nil || opts.ParserOptions.RegoVersion != ast.RegoUndefined {
		return opts
	}
	cpy := *opts
	cpy.ParserOptions.RegoVersion = ast.DefaultRegoVersion
	return &cpy
}

File diff suppressed because it is too large Load Diff

View File

@@ -70,7 +70,7 @@ func ParseServicesConfig(opts ServiceOptions) (map[string]rest.Client, error) {
// read from disk (if specified) and overrides will be applied. If no config file is
// specified, the overrides can still be applied to an empty config.
func Load(configFile string, overrides []string, overrideFiles []string) ([]byte, error) {
baseConf := map[string]interface{}{}
baseConf := map[string]any{}
// User specified config file
if configFile != "" {
@@ -88,7 +88,7 @@ func Load(configFile string, overrides []string, overrideFiles []string) ([]byte
}
}
overrideConf := map[string]interface{}{}
overrideConf := map[string]any{}
// User specified a config override via --set
for _, override := range overrides {
@@ -100,7 +100,7 @@ func Load(configFile string, overrides []string, overrideFiles []string) ([]byte
// User specified a config override value via --set-file
for _, override := range overrideFiles {
reader := func(rs []rune) (interface{}, error) {
reader := func(rs []rune) (any, error) {
bytes, err := os.ReadFile(string(rs))
value := strings.TrimSpace(string(bytes))
return value, err
@@ -141,21 +141,21 @@ func subEnvVars(s string) string {
}
// mergeValues will merge source and destination map, preferring values from the source map
func mergeValues(dest map[string]interface{}, src map[string]interface{}) map[string]interface{} {
func mergeValues(dest map[string]any, src map[string]any) map[string]any {
for k, v := range src {
// If the key doesn't exist already, then just set the key to that value
if _, exists := dest[k]; !exists {
dest[k] = v
continue
}
nextMap, ok := v.(map[string]interface{})
nextMap, ok := v.(map[string]any)
// If it isn't another map, overwrite the value
if !ok {
dest[k] = v
continue
}
// Edge case: If the key exists in the destination, but isn't a map
destMap, isMap := dest[k].(map[string]interface{})
destMap, isMap := dest[k].(map[string]any)
// If the source map has a map for this key, prefer it
if !isMap {
dest[k] = v

View File

@@ -8,7 +8,7 @@ import (
// Debug allows printing debug messages.
type Debug interface {
// Printf prints, with a short file:line-number prefix
Printf(format string, args ...interface{})
Printf(format string, args ...any)
// Writer returns the writer being written to, which may be
// `io.Discard` if no debug output is requested.
Writer() io.Writer

View File

@@ -5,25 +5,25 @@
package deepcopy
// DeepCopy performs a recursive deep copy for nested slices/maps and
// returns the copied object. Supports []interface{}
// and map[string]interface{} only
func DeepCopy(val interface{}) interface{} {
// returns the copied object. Supports []any
// and map[string]any only
func DeepCopy(val any) any {
switch val := val.(type) {
case []interface{}:
cpy := make([]interface{}, len(val))
case []any:
cpy := make([]any, len(val))
for i := range cpy {
cpy[i] = DeepCopy(val[i])
}
return cpy
case map[string]interface{}:
case map[string]any:
return Map(val)
default:
return val
}
}
func Map(val map[string]interface{}) map[string]interface{} {
cpy := make(map[string]interface{}, len(val))
func Map(val map[string]any) map[string]any {
cpy := make(map[string]any, len(val))
for k := range val {
cpy[k] = DeepCopy(val[k])
}

View File

@@ -51,7 +51,7 @@ func shiftLower(bit byte, b []byte) byte {
// position of the first byte in the slice.
// This returns the bit that was shifted off the last byte.
func shiftHigher(bit byte, b []byte) byte {
for i := 0; i < len(b); i++ {
for i := range b {
newByte := b[i] << 1
newByte |= bit
bit = (b[i] & 0x80) >> 7

View File

@@ -723,15 +723,17 @@ func (e *EditTree) Unfold(path ast.Ref) (*EditTree, error) {
return child.Unfold(path[1:])
}
idxt := ast.InternedIntNumberTerm(idx)
// Fall back to looking up the key in e.value.
// Extend the tree if key is present. Error otherwise.
if v, err := x.Find(ast.Ref{ast.InternedIntNumberTerm(idx)}); err == nil {
if v, err := x.Find(ast.Ref{idxt}); err == nil {
// TODO: Consider a more efficient "Replace" function that special-cases this for arrays instead?
_, err := e.Delete(ast.InternedIntNumberTerm(idx))
_, err := e.Delete(idxt)
if err != nil {
return nil, err
}
child, err := e.Insert(ast.IntNumberTerm(idx), ast.NewTerm(v))
child, err := e.Insert(idxt, ast.NewTerm(v))
if err != nil {
return nil, err
}

View File

@@ -19,14 +19,12 @@ func FilterFutureImports(imps []*ast.Import) []*ast.Import {
return ret
}
var keywordsTerm = ast.StringTerm("keywords")
// IsAllFutureKeywords returns true if the passed *ast.Import is `future.keywords`
func IsAllFutureKeywords(imp *ast.Import) bool {
path := imp.Path.Value.(ast.Ref)
return len(path) == 2 &&
ast.FutureRootDocument.Equal(path[0]) &&
path[1].Equal(keywordsTerm)
path[1].Equal(ast.InternedStringTerm("keywords"))
}
// IsFutureKeyword returns true if the passed *ast.Import is `future.keywords.{kw}`
@@ -34,7 +32,7 @@ func IsFutureKeyword(imp *ast.Import, kw string) bool {
path := imp.Path.Value.(ast.Ref)
return len(path) == 3 &&
ast.FutureRootDocument.Equal(path[0]) &&
path[1].Equal(keywordsTerm) &&
path[1].Equal(ast.InternedStringTerm("keywords")) &&
path[2].Equal(ast.StringTerm(kw))
}
@@ -42,7 +40,7 @@ func WhichFutureKeyword(imp *ast.Import) (string, bool) {
path := imp.Path.Value.(ast.Ref)
if len(path) == 3 &&
ast.FutureRootDocument.Equal(path[0]) &&
path[1].Equal(keywordsTerm) {
path[1].Equal(ast.InternedStringTerm("keywords")) {
if str, ok := path[2].Value.(ast.String); ok {
return string(str), true
}

View File

@@ -86,12 +86,12 @@ func (dc draftConfigs) GetSchemaURL(draft Draft) string {
return ""
}
func parseSchemaURL(documentNode interface{}) (string, *Draft, error) {
func parseSchemaURL(documentNode any) (string, *Draft, error) {
if _, ok := documentNode.(bool); ok {
return "", nil, nil
}
m, ok := documentNode.(map[string]interface{})
m, ok := documentNode.(map[string]any)
if !ok {
return "", nil, errors.New("schema is invalid")
}

View File

@@ -212,7 +212,7 @@ type (
)
// newError takes a ResultError type and sets the type, context, description, details, value, and field
func newError(err ResultError, context *JSONContext, value interface{}, locale locale, details ErrorDetails) {
func newError(err ResultError, context *JSONContext, value any, locale locale, details ErrorDetails) {
var t string
var d string
switch err.(type) {

View File

@@ -14,7 +14,7 @@ type (
// FormatChecker is the interface all formatters added to FormatCheckerChain must implement
FormatChecker interface {
// IsFormat checks if input has the correct format
IsFormat(input interface{}) bool
IsFormat(input any) bool
}
// FormatCheckerChain holds the formatters
@@ -174,7 +174,7 @@ func (c *FormatCheckerChain) Has(name string) bool {
// IsFormat will check an input against a FormatChecker with the given name
// to see if it is the correct format
func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool {
func (c *FormatCheckerChain) IsFormat(name string, input any) bool {
lock.RLock()
f, ok := c.formatters[name]
lock.RUnlock()
@@ -188,7 +188,7 @@ func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted e-mail address
func (f EmailFormatChecker) IsFormat(input interface{}) bool {
func (f EmailFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -199,7 +199,7 @@ func (f EmailFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted IPv4-address
func (f IPV4FormatChecker) IsFormat(input interface{}) bool {
func (f IPV4FormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -211,7 +211,7 @@ func (f IPV4FormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted IPv6=address
func (f IPV6FormatChecker) IsFormat(input interface{}) bool {
func (f IPV6FormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -223,7 +223,7 @@ func (f IPV6FormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted date/time per RFC3339 5.6
func (f DateTimeFormatChecker) IsFormat(input interface{}) bool {
func (f DateTimeFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -247,7 +247,7 @@ func (f DateTimeFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted date (YYYY-MM-DD)
func (f DateFormatChecker) IsFormat(input interface{}) bool {
func (f DateFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -257,7 +257,7 @@ func (f DateFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input correctly formatted time (HH:MM:SS or HH:MM:SSZ-07:00)
func (f TimeFormatChecker) IsFormat(input interface{}) bool {
func (f TimeFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -272,7 +272,7 @@ func (f TimeFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is correctly formatted URI with a valid Scheme per RFC3986
func (f URIFormatChecker) IsFormat(input interface{}) bool {
func (f URIFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -288,7 +288,7 @@ func (f URIFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted URI or relative-reference per RFC3986
func (f URIReferenceFormatChecker) IsFormat(input interface{}) bool {
func (f URIReferenceFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -299,7 +299,7 @@ func (f URIReferenceFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted URI template per RFC6570
func (f URITemplateFormatChecker) IsFormat(input interface{}) bool {
func (f URITemplateFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -314,7 +314,7 @@ func (f URITemplateFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted hostname
func (f HostnameFormatChecker) IsFormat(input interface{}) bool {
func (f HostnameFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -324,7 +324,7 @@ func (f HostnameFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted UUID
func (f UUIDFormatChecker) IsFormat(input interface{}) bool {
func (f UUIDFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -334,7 +334,7 @@ func (f UUIDFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted regular expression
func (f RegexFormatChecker) IsFormat(input interface{}) bool {
func (f RegexFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -348,7 +348,7 @@ func (f RegexFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted JSON Pointer per RFC6901
func (f JSONPointerFormatChecker) IsFormat(input interface{}) bool {
func (f JSONPointerFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true
@@ -358,7 +358,7 @@ func (f JSONPointerFormatChecker) IsFormat(input interface{}) bool {
}
// IsFormat checks if input is a correctly formatted relative JSON Pointer
func (f RelativeJSONPointerFormatChecker) IsFormat(input interface{}) bool {
func (f RelativeJSONPointerFormatChecker) IsFormat(input any) bool {
asString, ok := input.(string)
if !ok {
return true

View File

@@ -32,6 +32,6 @@ import (
const internalLogEnabled = false
func internalLog(format string, v ...interface{}) {
func internalLog(format string, v ...any) {
log.Printf(format, v...)
}

View File

@@ -77,8 +77,8 @@ var osFS = osFileSystem(os.Open)
// JSONLoader defines the JSON loader interface
type JSONLoader interface {
JSONSource() interface{}
LoadJSON() (interface{}, error)
JSONSource() any
LoadJSON() (any, error)
JSONReference() (gojsonreference.JsonReference, error)
LoaderFactory() JSONLoaderFactory
}
@@ -130,7 +130,7 @@ type jsonReferenceLoader struct {
source string
}
func (l *jsonReferenceLoader) JSONSource() interface{} {
func (l *jsonReferenceLoader) JSONSource() any {
return l.source
}
@@ -160,7 +160,7 @@ func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) JSONLoader
}
}
func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) {
func (l *jsonReferenceLoader) LoadJSON() (any, error) {
var err error
@@ -207,7 +207,7 @@ func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) {
return nil, fmt.Errorf("remote reference loading disabled: %s", reference.String())
}
func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) {
func (l *jsonReferenceLoader) loadFromHTTP(address string) (any, error) {
resp, err := http.Get(address)
if err != nil {
@@ -227,7 +227,7 @@ func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error)
return decodeJSONUsingNumber(bytes.NewReader(bodyBuff))
}
func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) {
func (l *jsonReferenceLoader) loadFromFile(path string) (any, error) {
f, err := l.fs.Open(path)
if err != nil {
return nil, err
@@ -249,7 +249,7 @@ type jsonStringLoader struct {
source string
}
func (l *jsonStringLoader) JSONSource() interface{} {
func (l *jsonStringLoader) JSONSource() any {
return l.source
}
@@ -266,7 +266,7 @@ func NewStringLoader(source string) JSONLoader {
return &jsonStringLoader{source: source}
}
func (l *jsonStringLoader) LoadJSON() (interface{}, error) {
func (l *jsonStringLoader) LoadJSON() (any, error) {
return decodeJSONUsingNumber(strings.NewReader(l.JSONSource().(string)))
@@ -278,7 +278,7 @@ type jsonBytesLoader struct {
source []byte
}
func (l *jsonBytesLoader) JSONSource() interface{} {
func (l *jsonBytesLoader) JSONSource() any {
return l.source
}
@@ -295,18 +295,18 @@ func NewBytesLoader(source []byte) JSONLoader {
return &jsonBytesLoader{source: source}
}
func (l *jsonBytesLoader) LoadJSON() (interface{}, error) {
func (l *jsonBytesLoader) LoadJSON() (any, error) {
return decodeJSONUsingNumber(bytes.NewReader(l.JSONSource().([]byte)))
}
// JSON Go (types) loader
// used to load JSONs from the code as maps, interface{}, structs ...
// used to load JSONs from the code as maps, any, structs ...
type jsonGoLoader struct {
source interface{}
source any
}
func (l *jsonGoLoader) JSONSource() interface{} {
func (l *jsonGoLoader) JSONSource() any {
return l.source
}
@@ -319,11 +319,11 @@ func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory {
}
// NewGoLoader creates a new JSONLoader from a given Go struct
func NewGoLoader(source interface{}) JSONLoader {
func NewGoLoader(source any) JSONLoader {
return &jsonGoLoader{source: source}
}
func (l *jsonGoLoader) LoadJSON() (interface{}, error) {
func (l *jsonGoLoader) LoadJSON() (any, error) {
// convert it to a compliant JSON first to avoid types "mismatches"
@@ -352,11 +352,11 @@ func NewWriterLoader(source io.Writer) (JSONLoader, io.Writer) {
return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf)
}
func (l *jsonIOLoader) JSONSource() interface{} {
func (l *jsonIOLoader) JSONSource() any {
return l.buf.String()
}
func (l *jsonIOLoader) LoadJSON() (interface{}, error) {
func (l *jsonIOLoader) LoadJSON() (any, error) {
return decodeJSONUsingNumber(l.buf)
}
@@ -369,21 +369,21 @@ func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory {
}
// JSON raw loader
// In case the JSON is already marshalled to interface{} use this loader
// In case the JSON is already marshalled to any use this loader
// This is used for testing as otherwise there is no guarantee the JSON is marshalled
// "properly" by using https://golang.org/pkg/encoding/json/#Decoder.UseNumber
type jsonRawLoader struct {
source interface{}
source any
}
// NewRawLoader creates a new JSON raw loader for the given source
func NewRawLoader(source interface{}) JSONLoader {
func NewRawLoader(source any) JSONLoader {
return &jsonRawLoader{source: source}
}
func (l *jsonRawLoader) JSONSource() interface{} {
func (l *jsonRawLoader) JSONSource() any {
return l.source
}
func (l *jsonRawLoader) LoadJSON() (interface{}, error) {
func (l *jsonRawLoader) LoadJSON() (any, error) {
return l.source, nil
}
func (l *jsonRawLoader) JSONReference() (gojsonreference.JsonReference, error) {
@@ -393,9 +393,9 @@ func (l *jsonRawLoader) LoaderFactory() JSONLoaderFactory {
return &DefaultJSONLoaderFactory{}
}
func decodeJSONUsingNumber(r io.Reader) (interface{}, error) {
func decodeJSONUsingNumber(r io.Reader) (any, error) {
var document interface{}
var document any
decoder := json.NewDecoder(r)
decoder.UseNumber()

View File

@@ -33,7 +33,7 @@ import (
type (
// ErrorDetails is a map of details specific to each error.
// While the values will vary, every error will contain a "field" value
ErrorDetails map[string]interface{}
ErrorDetails map[string]any
// ResultError is the interface that library errors must implement
ResultError interface {
@@ -57,9 +57,9 @@ type (
// DescriptionFormat returns the format for the description in the default text/template format
DescriptionFormat() string
// SetValue sets the value related to the error
SetValue(interface{})
SetValue(any)
// Value returns the value related to the error
Value() interface{}
Value() any
// SetDetails sets the details specific to the error
SetDetails(ErrorDetails)
// Details returns details about the error
@@ -76,7 +76,7 @@ type (
context *JSONContext // Tree like notation of the part that failed the validation. ex (root).a.b ...
description string // A human readable error message
descriptionFormat string // A format for human readable error message
value interface{} // Value given by the JSON file that is the source of the error
value any // Value given by the JSON file that is the source of the error
details ErrorDetails
}
@@ -136,12 +136,12 @@ func (v *ResultErrorFields) DescriptionFormat() string {
}
// SetValue sets the value related to the error
func (v *ResultErrorFields) SetValue(value interface{}) {
func (v *ResultErrorFields) SetValue(value any) {
v.value = value
}
// Value returns the value related to the error
func (v *ResultErrorFields) Value() interface{} {
func (v *ResultErrorFields) Value() any {
return v.value
}
@@ -203,7 +203,7 @@ func (v *Result) AddError(err ResultError, details ErrorDetails) {
v.errors = append(v.errors, err)
}
func (v *Result) addInternalError(err ResultError, context *JSONContext, value interface{}, details ErrorDetails) {
func (v *Result) addInternalError(err ResultError, context *JSONContext, value any, details ErrorDetails) {
newError(err, context, value, Locale, details)
v.errors = append(v.errors, err)
v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function

View File

@@ -58,7 +58,7 @@ type Schema struct {
ReferencePool *schemaReferencePool
}
func (d *Schema) parse(document interface{}, draft Draft) error {
func (d *Schema) parse(document any, draft Draft) error {
d.RootSchema = &SubSchema{Property: StringRootSchemaProperty, Draft: &draft}
return d.parseSchema(document, d.RootSchema)
}
@@ -73,7 +73,7 @@ func (d *Schema) SetRootSchemaName(name string) {
// Pretty long function ( sorry :) )... but pretty straight forward, repetitive and boring
// Not much magic involved here, most of the job is to validate the key names and their values,
// then the values are copied into SubSchema struct
func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema) error {
func (d *Schema) parseSchema(documentNode any, currentSchema *SubSchema) error {
if currentSchema.Draft == nil {
if currentSchema.Parent == nil {
@@ -90,7 +90,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
}
}
m, isMap := documentNode.(map[string]interface{})
m, isMap := documentNode.(map[string]any)
if !isMap {
return errors.New(formatErrorDescription(
Locale.ParseError(),
@@ -146,10 +146,10 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
// definitions
if v, ok := m[KeyDefinitions]; ok {
switch mt := v.(type) {
case map[string]interface{}:
case map[string]any:
for _, dv := range mt {
switch dv.(type) {
case bool, map[string]interface{}:
case bool, map[string]any:
newSchema := &SubSchema{Property: KeyDefinitions, Parent: currentSchema}
err := d.parseSchema(dv, newSchema)
if err != nil {
@@ -203,7 +203,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
if err != nil {
return err
}
case []interface{}:
case []any:
for _, typeInArray := range t {
s, isString := typeInArray.(string)
if !isString {
@@ -231,7 +231,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
switch v := additionalProperties.(type) {
case bool:
currentSchema.additionalProperties = v
case map[string]interface{}:
case map[string]any:
newSchema := &SubSchema{Property: KeyAdditionalProperties, Parent: currentSchema, Ref: currentSchema.Ref}
currentSchema.additionalProperties = newSchema
err := d.parseSchema(v, newSchema)
@@ -270,7 +270,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
// propertyNames
if propertyNames, found := m[KeyPropertyNames]; found && *currentSchema.Draft >= Draft6 {
switch propertyNames.(type) {
case bool, map[string]interface{}:
case bool, map[string]any:
newSchema := &SubSchema{Property: KeyPropertyNames, Parent: currentSchema, Ref: currentSchema.Ref}
currentSchema.propertyNames = newSchema
err := d.parseSchema(propertyNames, newSchema)
@@ -299,10 +299,10 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
// items
if items, found := m[KeyItems]; found {
switch i := items.(type) {
case []interface{}:
case []any:
for _, itemElement := range i {
switch itemElement.(type) {
case map[string]interface{}, bool:
case map[string]any, bool:
newSchema := &SubSchema{Parent: currentSchema, Property: KeyItems}
newSchema.Ref = currentSchema.Ref
currentSchema.ItemsChildren = append(currentSchema.ItemsChildren, newSchema)
@@ -315,7 +315,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
}
currentSchema.ItemsChildrenIsSingleSchema = false
}
case map[string]interface{}, bool:
case map[string]any, bool:
newSchema := &SubSchema{Parent: currentSchema, Property: KeyItems}
newSchema.Ref = currentSchema.Ref
currentSchema.ItemsChildren = append(currentSchema.ItemsChildren, newSchema)
@@ -334,7 +334,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
switch i := additionalItems.(type) {
case bool:
currentSchema.additionalItems = i
case map[string]interface{}:
case map[string]any:
newSchema := &SubSchema{Property: KeyAdditionalItems, Parent: currentSchema, Ref: currentSchema.Ref}
currentSchema.additionalItems = newSchema
err := d.parseSchema(additionalItems, newSchema)
@@ -717,7 +717,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
if vNot, found := m[KeyNot]; found {
switch vNot.(type) {
case bool, map[string]interface{}:
case bool, map[string]any:
newSchema := &SubSchema{Property: KeyNot, Parent: currentSchema, Ref: currentSchema.Ref}
currentSchema.not = newSchema
err := d.parseSchema(vNot, newSchema)
@@ -735,7 +735,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
if *currentSchema.Draft >= Draft7 {
if vIf, found := m[KeyIf]; found {
switch vIf.(type) {
case bool, map[string]interface{}:
case bool, map[string]any:
newSchema := &SubSchema{Property: KeyIf, Parent: currentSchema, Ref: currentSchema.Ref}
currentSchema._if = newSchema
err := d.parseSchema(vIf, newSchema)
@@ -752,7 +752,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
if then, found := m[KeyThen]; found {
switch then.(type) {
case bool, map[string]interface{}:
case bool, map[string]any:
newSchema := &SubSchema{Property: KeyThen, Parent: currentSchema, Ref: currentSchema.Ref}
currentSchema._then = newSchema
err := d.parseSchema(then, newSchema)
@@ -769,7 +769,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
if vElse, found := m[KeyElse]; found {
switch vElse.(type) {
case bool, map[string]interface{}:
case bool, map[string]any:
newSchema := &SubSchema{Property: KeyElse, Parent: currentSchema, Ref: currentSchema.Ref}
currentSchema._else = newSchema
err := d.parseSchema(vElse, newSchema)
@@ -788,9 +788,9 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema)
return nil
}
func (d *Schema) parseReference(_ interface{}, currentSchema *SubSchema) error {
func (d *Schema) parseReference(_ any, currentSchema *SubSchema) error {
var (
refdDocumentNode interface{}
refdDocumentNode any
dsp *schemaPoolDocument
err error
)
@@ -809,7 +809,7 @@ func (d *Schema) parseReference(_ interface{}, currentSchema *SubSchema) error {
newSchema.Draft = dsp.Draft
switch refdDocumentNode.(type) {
case bool, map[string]interface{}:
case bool, map[string]any:
// expected
default:
return errors.New(formatErrorDescription(
@@ -829,8 +829,8 @@ func (d *Schema) parseReference(_ interface{}, currentSchema *SubSchema) error {
}
func (d *Schema) parseProperties(documentNode interface{}, currentSchema *SubSchema) error {
m, isMap := documentNode.(map[string]interface{})
func (d *Schema) parseProperties(documentNode any, currentSchema *SubSchema) error {
m, isMap := documentNode.(map[string]any)
if !isMap {
return errors.New(formatErrorDescription(
Locale.MustBeOfType(),
@@ -851,19 +851,19 @@ func (d *Schema) parseProperties(documentNode interface{}, currentSchema *SubSch
return nil
}
func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *SubSchema) error {
m, isMap := documentNode.(map[string]interface{})
func (d *Schema) parseDependencies(documentNode any, currentSchema *SubSchema) error {
m, isMap := documentNode.(map[string]any)
if !isMap {
return errors.New(formatErrorDescription(
Locale.MustBeOfType(),
ErrorDetails{"key": KeyDependencies, "type": TypeObject},
))
}
currentSchema.dependencies = make(map[string]interface{})
currentSchema.dependencies = make(map[string]any)
for k := range m {
switch values := m[k].(type) {
case []interface{}:
case []any:
var valuesToRegister []string
for _, value := range values {
str, isString := value.(string)
@@ -880,7 +880,7 @@ func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *SubS
currentSchema.dependencies[k] = valuesToRegister
}
case bool, map[string]interface{}:
case bool, map[string]any:
depSchema := &SubSchema{Property: k, Parent: currentSchema, Ref: currentSchema.Ref}
err := d.parseSchema(m[k], depSchema)
if err != nil {
@@ -913,7 +913,7 @@ func invalidType(expected, given string) error {
))
}
func getString(m map[string]interface{}, key string) (*string, error) {
func getString(m map[string]any, key string) (*string, error) {
v, found := m[key]
if !found {
// not found
@@ -927,13 +927,13 @@ func getString(m map[string]interface{}, key string) (*string, error) {
return &s, nil
}
func getMap(m map[string]interface{}, key string) (map[string]interface{}, error) {
func getMap(m map[string]any, key string) (map[string]any, error) {
v, found := m[key]
if !found {
// not found
return nil, nil
}
s, isMap := v.(map[string]interface{})
s, isMap := v.(map[string]any)
if !isMap {
// wrong type
return nil, invalidType(StringSchema, key)
@@ -941,12 +941,12 @@ func getMap(m map[string]interface{}, key string) (map[string]interface{}, error
return s, nil
}
func getSlice(m map[string]interface{}, key string) ([]interface{}, error) {
func getSlice(m map[string]any, key string) ([]any, error) {
v, found := m[key]
if !found {
return nil, nil
}
s, isArray := v.([]interface{})
s, isArray := v.([]any)
if !isArray {
return nil, errors.New(formatErrorDescription(
Locale.MustBeOfAn(),

View File

@@ -45,7 +45,7 @@ func NewSchemaLoader() *SchemaLoader {
return ps
}
func (sl *SchemaLoader) validateMetaschema(documentNode interface{}) error {
func (sl *SchemaLoader) validateMetaschema(documentNode any) error {
var (
schema string
@@ -158,7 +158,7 @@ func (sl *SchemaLoader) Compile(rootSchema JSONLoader) (*Schema, error) {
d.DocumentReference = ref
d.ReferencePool = newSchemaReferencePool()
var doc interface{}
var doc any
if ref.String() != "" {
// Get document from schema pool
spd, err := d.Pool.GetDocument(d.DocumentReference)

View File

@@ -34,7 +34,7 @@ import (
)
type schemaPoolDocument struct {
Document interface{}
Document any
Draft *Draft
}
@@ -44,7 +44,7 @@ type schemaPool struct {
autoDetect *bool
}
func (p *schemaPool) parseReferences(document interface{}, ref gojsonreference.JsonReference, pooled bool) error {
func (p *schemaPool) parseReferences(document any, ref gojsonreference.JsonReference, pooled bool) error {
var (
draft *Draft
@@ -72,7 +72,7 @@ func (p *schemaPool) parseReferences(document interface{}, ref gojsonreference.J
return err
}
func (p *schemaPool) parseReferencesRecursive(document interface{}, ref gojsonreference.JsonReference, draft *Draft) error {
func (p *schemaPool) parseReferencesRecursive(document any, ref gojsonreference.JsonReference, draft *Draft) error {
// parseReferencesRecursive parses a JSON document and resolves all $id and $ref references.
// For $ref references it takes into account the $id scope it is in and replaces
// the reference by the absolute resolved reference
@@ -80,14 +80,14 @@ func (p *schemaPool) parseReferencesRecursive(document interface{}, ref gojsonre
// When encountering errors it fails silently. Error handling is done when the schema
// is syntactically parsed and any error encountered here should also come up there.
switch m := document.(type) {
case []interface{}:
case []any:
for _, v := range m {
err := p.parseReferencesRecursive(v, ref, draft)
if err != nil {
return err
}
}
case map[string]interface{}:
case map[string]any:
localRef := &ref
keyID := KeyIDNew
@@ -129,7 +129,7 @@ func (p *schemaPool) parseReferencesRecursive(document interface{}, ref gojsonre
// Something like a property or a dependency is not a valid schema, as it might describe properties named "$ref", "$id" or "const", etc
// Therefore don't treat it like a schema.
if k == KeyProperties || k == KeyDependencies || k == KeyPatternProperties {
if child, ok := v.(map[string]interface{}); ok {
if child, ok := v.(map[string]any); ok {
for _, v := range child {
err := p.parseReferencesRecursive(v, *localRef, draft)
if err != nil {

View File

@@ -28,6 +28,7 @@ package gojsonschema
import (
"errors"
"fmt"
"slices"
"strings"
)
@@ -58,13 +59,7 @@ func (t *jsonSchemaType) Add(etype string) error {
func (t *jsonSchemaType) Contains(etype string) bool {
for _, v := range t.types {
if v == etype {
return true
}
}
return false
return slices.Contains(t.types, etype)
}
func (t *jsonSchemaType) String() string {

View File

@@ -123,8 +123,8 @@ type SubSchema struct {
maxProperties *int
required []string
dependencies map[string]interface{}
additionalProperties interface{}
dependencies map[string]any
additionalProperties any
patternProperties map[string]*SubSchema
propertyNames *SubSchema
@@ -134,7 +134,7 @@ type SubSchema struct {
uniqueItems bool
contains *SubSchema
additionalItems interface{}
additionalItems any
// validation : all
_const *string //const is a golang keyword

View File

@@ -29,18 +29,14 @@ package gojsonschema
import (
"encoding/json"
"math/big"
"slices"
)
func isStringInSlice(s []string, what string) bool {
for i := range s {
if s[i] == what {
return true
}
}
return false
return slices.Contains(s, what)
}
func marshalToJSONString(value interface{}) (*string, error) {
func marshalToJSONString(value any) (*string, error) {
mBytes, err := json.Marshal(value)
if err != nil {
@@ -51,7 +47,7 @@ func marshalToJSONString(value interface{}) (*string, error) {
return &sBytes, nil
}
func marshalWithoutNumber(value interface{}) (*string, error) {
func marshalWithoutNumber(value any) (*string, error) {
// The JSON is decoded using https://golang.org/pkg/encoding/json/#Decoder.UseNumber
// This means the numbers are internally still represented as strings and therefore 1.00 is unequal to 1
@@ -63,7 +59,7 @@ func marshalWithoutNumber(value interface{}) (*string, error) {
return nil, err
}
var document interface{}
var document any
err = json.Unmarshal([]byte(*jsonString), &document)
if err != nil {
@@ -73,7 +69,7 @@ func marshalWithoutNumber(value interface{}) (*string, error) {
return marshalToJSONString(document)
}
func isJSONNumber(what interface{}) bool {
func isJSONNumber(what any) bool {
switch what.(type) {
@@ -84,7 +80,7 @@ func isJSONNumber(what interface{}) bool {
return false
}
func checkJSONInteger(what interface{}) (isInt bool) {
func checkJSONInteger(what any) (isInt bool) {
jsonNumber := what.(json.Number)
@@ -100,7 +96,7 @@ const (
minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1
)
func mustBeInteger(what interface{}) *int {
func mustBeInteger(what any) *int {
number, ok := what.(json.Number)
if !ok {
return nil
@@ -123,7 +119,7 @@ func mustBeInteger(what interface{}) *int {
return &int32Value
}
func mustBeNumber(what interface{}) *big.Rat {
func mustBeNumber(what any) *big.Rat {
number, ok := what.(json.Number)
if !ok {
return nil
@@ -136,11 +132,11 @@ func mustBeNumber(what interface{}) *big.Rat {
return nil
}
func convertDocumentNode(val interface{}) interface{} {
func convertDocumentNode(val any) any {
if lval, ok := val.([]interface{}); ok {
if lval, ok := val.([]any); ok {
res := []interface{}{}
res := []any{}
for _, v := range lval {
res = append(res, convertDocumentNode(v))
}
@@ -149,9 +145,9 @@ func convertDocumentNode(val interface{}) interface{} {
}
if mval, ok := val.(map[interface{}]interface{}); ok {
if mval, ok := val.(map[any]any); ok {
res := map[string]interface{}{}
res := map[string]any{}
for k, v := range mval {
res[k.(string)] = convertDocumentNode(v)

View File

@@ -54,21 +54,21 @@ func (v *Schema) Validate(l JSONLoader) (*Result, error) {
return v.validateDocument(root), nil
}
func (v *Schema) validateDocument(root interface{}) *Result {
func (v *Schema) validateDocument(root any) *Result {
result := &Result{}
context := NewJSONContext(StringContextRoot, nil)
v.RootSchema.validateRecursive(v.RootSchema, root, result, context)
return result
}
func (v *SubSchema) subValidateWithContext(document interface{}, context *JSONContext) *Result {
func (v *SubSchema) subValidateWithContext(document any, context *JSONContext) *Result {
result := &Result{}
v.validateRecursive(v, document, result, context)
return result
}
// Walker function to validate the json recursively against the SubSchema
func (v *SubSchema) validateRecursive(currentSubSchema *SubSchema, currentNode interface{}, result *Result, context *JSONContext) {
func (v *SubSchema) validateRecursive(currentSubSchema *SubSchema, currentNode any, result *Result, context *JSONContext) {
if internalLogEnabled {
internalLog("validateRecursive %s", context.String())
@@ -167,7 +167,7 @@ func (v *SubSchema) validateRecursive(currentSubSchema *SubSchema, currentNode i
return
}
castCurrentNode := currentNode.([]interface{})
castCurrentNode := currentNode.([]any)
currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context)
@@ -190,9 +190,9 @@ func (v *SubSchema) validateRecursive(currentSubSchema *SubSchema, currentNode i
return
}
castCurrentNode, ok := currentNode.(map[string]interface{})
castCurrentNode, ok := currentNode.(map[string]any)
if !ok {
castCurrentNode = convertDocumentNode(currentNode).(map[string]interface{})
castCurrentNode = convertDocumentNode(currentNode).(map[string]any)
}
currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context)
@@ -264,7 +264,7 @@ func (v *SubSchema) validateRecursive(currentSubSchema *SubSchema, currentNode i
}
// Different kinds of validation there, SubSchema / common / array / object / string...
func (v *SubSchema) validateSchema(currentSubSchema *SubSchema, currentNode interface{}, result *Result, context *JSONContext) {
func (v *SubSchema) validateSchema(currentSubSchema *SubSchema, currentNode any, result *Result, context *JSONContext) {
if internalLogEnabled {
internalLog("validateSchema %s", context.String())
@@ -349,14 +349,14 @@ func (v *SubSchema) validateSchema(currentSubSchema *SubSchema, currentNode inte
}
if len(currentSubSchema.dependencies) > 0 {
if currentNodeMap, ok := currentNode.(map[string]interface{}); ok {
if currentNodeMap, ok := currentNode.(map[string]any); ok {
for elementKey := range currentNodeMap {
if dependency, ok := currentSubSchema.dependencies[elementKey]; ok {
switch dependency := dependency.(type) {
case []string:
for _, dependOnKey := range dependency {
if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved {
if _, dependencyResolved := currentNode.(map[string]any)[dependOnKey]; !dependencyResolved {
result.addInternalError(
new(MissingDependencyError),
context,
@@ -395,7 +395,7 @@ func (v *SubSchema) validateSchema(currentSubSchema *SubSchema, currentNode inte
result.incrementScore()
}
func (v *SubSchema) validateCommon(currentSubSchema *SubSchema, value interface{}, result *Result, context *JSONContext) {
func (v *SubSchema) validateCommon(currentSubSchema *SubSchema, value any, result *Result, context *JSONContext) {
if internalLogEnabled {
internalLog("validateCommon %s", context.String())
@@ -452,7 +452,7 @@ func (v *SubSchema) validateCommon(currentSubSchema *SubSchema, value interface{
result.incrementScore()
}
func (v *SubSchema) validateArray(currentSubSchema *SubSchema, value []interface{}, result *Result, context *JSONContext) {
func (v *SubSchema) validateArray(currentSubSchema *SubSchema, value []any, result *Result, context *JSONContext) {
if internalLogEnabled {
internalLog("validateArray %s", context.String())
@@ -578,7 +578,7 @@ func (v *SubSchema) validateArray(currentSubSchema *SubSchema, value []interface
result.incrementScore()
}
func (v *SubSchema) validateObject(currentSubSchema *SubSchema, value map[string]interface{}, result *Result, context *JSONContext) {
func (v *SubSchema) validateObject(currentSubSchema *SubSchema, value map[string]any, result *Result, context *JSONContext) {
if internalLogEnabled {
internalLog("validateObject %s", context.String())
@@ -675,7 +675,7 @@ func (v *SubSchema) validateObject(currentSubSchema *SubSchema, value map[string
result.incrementScore()
}
func (v *SubSchema) validatePatternProperty(currentSubSchema *SubSchema, key string, value interface{}, result *Result, context *JSONContext) bool {
func (v *SubSchema) validatePatternProperty(currentSubSchema *SubSchema, key string, value any, result *Result, context *JSONContext) bool {
if internalLogEnabled {
internalLog("validatePatternProperty %s", context.String())
@@ -701,7 +701,7 @@ func (v *SubSchema) validatePatternProperty(currentSubSchema *SubSchema, key str
return true
}
func (v *SubSchema) validateString(currentSubSchema *SubSchema, value interface{}, result *Result, context *JSONContext) {
func (v *SubSchema) validateString(currentSubSchema *SubSchema, value any, result *Result, context *JSONContext) {
// Ignore JSON numbers
stringValue, isString := value.(string)
@@ -752,7 +752,7 @@ func (v *SubSchema) validateString(currentSubSchema *SubSchema, value interface{
result.incrementScore()
}
func (v *SubSchema) validateNumber(currentSubSchema *SubSchema, value interface{}, result *Result, context *JSONContext) {
func (v *SubSchema) validateNumber(currentSubSchema *SubSchema, value any, result *Result, context *JSONContext) {
// Ignore non numbers
number, isNumber := value.(json.Number)

View File

@@ -1,121 +0,0 @@
# This file defines all the implicitly declared types that are required by the graphql spec. It is implicitly included by calls to LoadSchema
"The `Int` scalar type represents non-fractional signed whole numeric values. Int can represent values between -(2^31) and 2^31 - 1."
scalar Int
"The `Float` scalar type represents signed double-precision fractional values as specified by [IEEE 754](http://en.wikipedia.org/wiki/IEEE_floating_point)."
scalar Float
"The `String`scalar type represents textual data, represented as UTF-8 character sequences. The String type is most often used by GraphQL to represent free-form human-readable text."
scalar String
"The `Boolean` scalar type represents `true` or `false`."
scalar Boolean
"""The `ID` scalar type represents a unique identifier, often used to refetch an object or as key for a cache. The ID type appears in a JSON response as a String; however, it is not intended to be human-readable. When expected as an input type, any string (such as "4") or integer (such as 4) input value will be accepted as an ID."""
scalar ID
"The @include directive may be provided for fields, fragment spreads, and inline fragments, and allows for conditional inclusion during execution as described by the if argument."
directive @include(if: Boolean!) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
"The @skip directive may be provided for fields, fragment spreads, and inline fragments, and allows for conditional exclusion during execution as described by the if argument."
directive @skip(if: Boolean!) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
"The @deprecated built-in directive is used within the type system definition language to indicate deprecated portions of a GraphQL service's schema, such as deprecated fields on a type, arguments on a field, input fields on an input type, or values of an enum type."
directive @deprecated(reason: String = "No longer supported") on FIELD_DEFINITION | ARGUMENT_DEFINITION | INPUT_FIELD_DEFINITION | ENUM_VALUE
"The @specifiedBy built-in directive is used within the type system definition language to provide a scalar specification URL for specifying the behavior of custom scalar types."
directive @specifiedBy(url: String!) on SCALAR
type __Schema {
description: String
types: [__Type!]!
queryType: __Type!
mutationType: __Type
subscriptionType: __Type
directives: [__Directive!]!
}
type __Type {
kind: __TypeKind!
name: String
description: String
# must be non-null for OBJECT and INTERFACE, otherwise null.
fields(includeDeprecated: Boolean = false): [__Field!]
# must be non-null for OBJECT and INTERFACE, otherwise null.
interfaces: [__Type!]
# must be non-null for INTERFACE and UNION, otherwise null.
possibleTypes: [__Type!]
# must be non-null for ENUM, otherwise null.
enumValues(includeDeprecated: Boolean = false): [__EnumValue!]
# must be non-null for INPUT_OBJECT, otherwise null.
inputFields: [__InputValue!]
# must be non-null for NON_NULL and LIST, otherwise null.
ofType: __Type
# may be non-null for custom SCALAR, otherwise null.
specifiedByURL: String
}
type __Field {
name: String!
description: String
args: [__InputValue!]!
type: __Type!
isDeprecated: Boolean!
deprecationReason: String
}
type __InputValue {
name: String!
description: String
type: __Type!
defaultValue: String
}
type __EnumValue {
name: String!
description: String
isDeprecated: Boolean!
deprecationReason: String
}
enum __TypeKind {
SCALAR
OBJECT
INTERFACE
UNION
ENUM
INPUT_OBJECT
LIST
NON_NULL
}
type __Directive {
name: String!
description: String
locations: [__DirectiveLocation!]!
args: [__InputValue!]!
isRepeatable: Boolean!
}
enum __DirectiveLocation {
QUERY
MUTATION
SUBSCRIPTION
FIELD
FRAGMENT_DEFINITION
FRAGMENT_SPREAD
INLINE_FRAGMENT
VARIABLE_DEFINITION
SCHEMA
SCALAR
OBJECT
FIELD_DEFINITION
ARGUMENT_DEFINITION
INTERFACE
UNION
ENUM
ENUM_VALUE
INPUT_OBJECT
INPUT_FIELD_DEFINITION
}

View File

@@ -1,59 +0,0 @@
package validator
import (
"github.com/open-policy-agent/opa/internal/gqlparser/ast"
//nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/open-policy-agent/opa/internal/gqlparser/validator"
)
func init() {
AddRule("KnownArgumentNames", func(observers *Events, addError AddErrFunc) {
// A GraphQL field is only valid if all supplied arguments are defined by that field.
observers.OnField(func(_ *Walker, field *ast.Field) {
if field.Definition == nil || field.ObjectDefinition == nil {
return
}
for _, arg := range field.Arguments {
def := field.Definition.Arguments.ForName(arg.Name)
if def != nil {
continue
}
var suggestions []string
for _, argDef := range field.Definition.Arguments {
suggestions = append(suggestions, argDef.Name)
}
addError(
Message(`Unknown argument "%s" on field "%s.%s".`, arg.Name, field.ObjectDefinition.Name, field.Name),
SuggestListQuoted("Did you mean", arg.Name, suggestions),
At(field.Position),
)
}
})
observers.OnDirective(func(_ *Walker, directive *ast.Directive) {
if directive.Definition == nil {
return
}
for _, arg := range directive.Arguments {
def := directive.Definition.Arguments.ForName(arg.Name)
if def != nil {
continue
}
var suggestions []string
for _, argDef := range directive.Definition.Arguments {
suggestions = append(suggestions, argDef.Name)
}
addError(
Message(`Unknown argument "%s" on directive "@%s".`, arg.Name, directive.Name),
SuggestListQuoted("Did you mean", arg.Name, suggestions),
At(directive.Position),
)
}
})
})
}

View File

@@ -1,21 +0,0 @@
package validator
import (
"github.com/open-policy-agent/opa/internal/gqlparser/ast"
//nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/open-policy-agent/opa/internal/gqlparser/validator"
)
func init() {
AddRule("KnownFragmentNames", func(observers *Events, addError AddErrFunc) {
observers.OnFragmentSpread(func(_ *Walker, fragmentSpread *ast.FragmentSpread) {
if fragmentSpread.Definition == nil {
addError(
Message(`Unknown fragment "%s".`, fragmentSpread.Name),
At(fragmentSpread.Position),
)
}
})
})
}

View File

@@ -1,61 +0,0 @@
package validator
import (
"github.com/open-policy-agent/opa/internal/gqlparser/ast"
//nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/open-policy-agent/opa/internal/gqlparser/validator"
)
func init() {
AddRule("KnownTypeNames", func(observers *Events, addError AddErrFunc) {
observers.OnVariable(func(walker *Walker, variable *ast.VariableDefinition) {
typeName := variable.Type.Name()
typdef := walker.Schema.Types[typeName]
if typdef != nil {
return
}
addError(
Message(`Unknown type "%s".`, typeName),
At(variable.Position),
)
})
observers.OnInlineFragment(func(walker *Walker, inlineFragment *ast.InlineFragment) {
typedName := inlineFragment.TypeCondition
if typedName == "" {
return
}
def := walker.Schema.Types[typedName]
if def != nil {
return
}
addError(
Message(`Unknown type "%s".`, typedName),
At(inlineFragment.Position),
)
})
observers.OnFragment(func(walker *Walker, fragment *ast.FragmentDefinition) {
typeName := fragment.TypeCondition
def := walker.Schema.Types[typeName]
if def != nil {
return
}
var possibleTypes []string
for _, t := range walker.Schema.Types {
possibleTypes = append(possibleTypes, t.Name)
}
addError(
Message(`Unknown type "%s".`, typeName),
SuggestListQuoted("Did you mean", typeName, possibleTypes),
At(fragment.Position),
)
})
})
}

View File

@@ -1,21 +0,0 @@
package validator
import (
"github.com/open-policy-agent/opa/internal/gqlparser/ast"
//nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/open-policy-agent/opa/internal/gqlparser/validator"
)
func init() {
AddRule("LoneAnonymousOperation", func(observers *Events, addError AddErrFunc) {
observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) {
if operation.Name == "" && len(walker.Document.Operations) > 1 {
addError(
Message(`This anonymous operation must be the only defined operation.`),
At(operation.Position),
)
}
})
})
}

View File

@@ -1,32 +0,0 @@
package validator
import (
"github.com/open-policy-agent/opa/internal/gqlparser/ast"
//nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/open-policy-agent/opa/internal/gqlparser/validator"
)
func init() {
AddRule("NoUnusedFragments", func(observers *Events, addError AddErrFunc) {
inFragmentDefinition := false
fragmentNameUsed := make(map[string]bool)
observers.OnFragmentSpread(func(_ *Walker, fragmentSpread *ast.FragmentSpread) {
if !inFragmentDefinition {
fragmentNameUsed[fragmentSpread.Name] = true
}
})
observers.OnFragment(func(_ *Walker, fragment *ast.FragmentDefinition) {
inFragmentDefinition = true
if !fragmentNameUsed[fragment.Name] {
addError(
Message(`Fragment "%s" is never used.`, fragment.Name),
At(fragment.Position),
)
}
})
})
}

View File

@@ -1,32 +0,0 @@
package validator
import (
"github.com/open-policy-agent/opa/internal/gqlparser/ast"
//nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/open-policy-agent/opa/internal/gqlparser/validator"
)
func init() {
AddRule("NoUnusedVariables", func(observers *Events, addError AddErrFunc) {
observers.OnOperation(func(_ *Walker, operation *ast.OperationDefinition) {
for _, varDef := range operation.VariableDefinitions {
if varDef.Used {
continue
}
if operation.Name != "" {
addError(
Message(`Variable "$%s" is never used in operation "%s".`, varDef.Variable, operation.Name),
At(varDef.Position),
)
} else {
addError(
Message(`Variable "$%s" is never used.`, varDef.Variable),
At(varDef.Position),
)
}
}
})
})
}

View File

@@ -1,35 +0,0 @@
package validator
import (
"github.com/open-policy-agent/opa/internal/gqlparser/ast"
//nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/open-policy-agent/opa/internal/gqlparser/validator"
)
func init() {
AddRule("UniqueArgumentNames", func(observers *Events, addError AddErrFunc) {
observers.OnField(func(_ *Walker, field *ast.Field) {
checkUniqueArgs(field.Arguments, addError)
})
observers.OnDirective(func(_ *Walker, directive *ast.Directive) {
checkUniqueArgs(directive.Arguments, addError)
})
})
}
func checkUniqueArgs(args ast.ArgumentList, addError AddErrFunc) {
knownArgNames := map[string]int{}
for _, arg := range args {
if knownArgNames[arg.Name] == 1 {
addError(
Message(`There can be only one argument named "%s".`, arg.Name),
At(arg.Position),
)
}
knownArgNames[arg.Name]++
}
}

View File

@@ -1,26 +0,0 @@
package validator
import (
"github.com/open-policy-agent/opa/internal/gqlparser/ast"
//nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/open-policy-agent/opa/internal/gqlparser/validator"
)
func init() {
AddRule("UniqueDirectivesPerLocation", func(observers *Events, addError AddErrFunc) {
observers.OnDirectiveList(func(_ *Walker, directives []*ast.Directive) {
seen := map[string]bool{}
for _, dir := range directives {
if dir.Name != "repeatable" && seen[dir.Name] {
addError(
Message(`The directive "@%s" can only be used once at this location.`, dir.Name),
At(dir.Position),
)
}
seen[dir.Name] = true
}
})
})
}

View File

@@ -1,24 +0,0 @@
package validator
import (
"github.com/open-policy-agent/opa/internal/gqlparser/ast"
//nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/open-policy-agent/opa/internal/gqlparser/validator"
)
func init() {
AddRule("UniqueFragmentNames", func(observers *Events, addError AddErrFunc) {
seenFragments := map[string]bool{}
observers.OnFragment(func(_ *Walker, fragment *ast.FragmentDefinition) {
if seenFragments[fragment.Name] {
addError(
Message(`There can be only one fragment named "%s".`, fragment.Name),
At(fragment.Position),
)
}
seenFragments[fragment.Name] = true
})
})
}

View File

@@ -1,29 +0,0 @@
package validator
import (
"github.com/open-policy-agent/opa/internal/gqlparser/ast"
//nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/open-policy-agent/opa/internal/gqlparser/validator"
)
func init() {
AddRule("UniqueInputFieldNames", func(observers *Events, addError AddErrFunc) {
observers.OnValue(func(_ *Walker, value *ast.Value) {
if value.Kind != ast.ObjectValue {
return
}
seen := map[string]bool{}
for _, field := range value.Children {
if seen[field.Name] {
addError(
Message(`There can be only one input field named "%s".`, field.Name),
At(field.Position),
)
}
seen[field.Name] = true
}
})
})
}

View File

@@ -1,24 +0,0 @@
package validator
import (
"github.com/open-policy-agent/opa/internal/gqlparser/ast"
//nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/open-policy-agent/opa/internal/gqlparser/validator"
)
func init() {
AddRule("UniqueOperationNames", func(observers *Events, addError AddErrFunc) {
seen := map[string]bool{}
observers.OnOperation(func(_ *Walker, operation *ast.OperationDefinition) {
if seen[operation.Name] {
addError(
Message(`There can be only one operation named "%s".`, operation.Name),
At(operation.Position),
)
}
seen[operation.Name] = true
})
})
}

View File

@@ -1,26 +0,0 @@
package validator
import (
"github.com/open-policy-agent/opa/internal/gqlparser/ast"
//nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/open-policy-agent/opa/internal/gqlparser/validator"
)
func init() {
AddRule("UniqueVariableNames", func(observers *Events, addError AddErrFunc) {
observers.OnOperation(func(_ *Walker, operation *ast.OperationDefinition) {
seen := map[string]int{}
for _, def := range operation.VariableDefinitions {
// add the same error only once per a variable.
if seen[def.Variable] == 1 {
addError(
Message(`There can be only one variable named "$%s".`, def.Variable),
At(def.Position),
)
}
seen[def.Variable]++
}
})
})
}

View File

@@ -1,168 +0,0 @@
package validator
import (
"errors"
"fmt"
"strconv"
"github.com/open-policy-agent/opa/internal/gqlparser/ast"
//nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/open-policy-agent/opa/internal/gqlparser/validator"
)
func init() {
AddRule("ValuesOfCorrectType", func(observers *Events, addError AddErrFunc) {
observers.OnValue(func(_ *Walker, value *ast.Value) {
if value.Definition == nil || value.ExpectedType == nil {
return
}
if value.Kind == ast.NullValue && value.ExpectedType.NonNull {
addError(
Message(`Expected value of type "%s", found %s.`, value.ExpectedType.String(), value.String()),
At(value.Position),
)
}
if value.Definition.Kind == ast.Scalar {
// Skip custom validating scalars
if !value.Definition.OneOf("Int", "Float", "String", "Boolean", "ID") {
return
}
}
var possibleEnums []string
if value.Definition.Kind == ast.Enum {
for _, val := range value.Definition.EnumValues {
possibleEnums = append(possibleEnums, val.Name)
}
}
rawVal, err := value.Value(nil)
if err != nil {
unexpectedTypeMessage(addError, value)
}
switch value.Kind {
case ast.NullValue:
return
case ast.ListValue:
if value.ExpectedType.Elem == nil {
unexpectedTypeMessage(addError, value)
return
}
case ast.IntValue:
if !value.Definition.OneOf("Int", "Float", "ID") {
unexpectedTypeMessage(addError, value)
}
case ast.FloatValue:
if !value.Definition.OneOf("Float") {
unexpectedTypeMessage(addError, value)
}
case ast.StringValue, ast.BlockValue:
if value.Definition.Kind == ast.Enum {
rawValStr := fmt.Sprint(rawVal)
addError(
Message(`Enum "%s" cannot represent non-enum value: %s.`, value.ExpectedType.String(), value.String()),
SuggestListQuoted("Did you mean the enum value", rawValStr, possibleEnums),
At(value.Position),
)
} else if !value.Definition.OneOf("String", "ID") {
unexpectedTypeMessage(addError, value)
}
case ast.EnumValue:
if value.Definition.Kind != ast.Enum {
rawValStr := fmt.Sprint(rawVal)
addError(
unexpectedTypeMessageOnly(value),
SuggestListUnquoted("Did you mean the enum value", rawValStr, possibleEnums),
At(value.Position),
)
} else if value.Definition.EnumValues.ForName(value.Raw) == nil {
rawValStr := fmt.Sprint(rawVal)
addError(
Message(`Value "%s" does not exist in "%s" enum.`, value.String(), value.ExpectedType.String()),
SuggestListQuoted("Did you mean the enum value", rawValStr, possibleEnums),
At(value.Position),
)
}
case ast.BooleanValue:
if !value.Definition.OneOf("Boolean") {
unexpectedTypeMessage(addError, value)
}
case ast.ObjectValue:
for _, field := range value.Definition.Fields {
if field.Type.NonNull {
fieldValue := value.Children.ForName(field.Name)
if fieldValue == nil && field.DefaultValue == nil {
addError(
Message(`Field "%s.%s" of required type "%s" was not provided.`, value.Definition.Name, field.Name, field.Type.String()),
At(value.Position),
)
continue
}
}
}
for _, fieldValue := range value.Children {
if value.Definition.Fields.ForName(fieldValue.Name) == nil {
var suggestions []string
for _, fieldValue := range value.Definition.Fields {
suggestions = append(suggestions, fieldValue.Name)
}
addError(
Message(`Field "%s" is not defined by type "%s".`, fieldValue.Name, value.Definition.Name),
SuggestListQuoted("Did you mean", fieldValue.Name, suggestions),
At(fieldValue.Position),
)
}
}
case ast.Variable:
return
default:
panic(fmt.Errorf("unhandled %T", value))
}
})
})
}
func unexpectedTypeMessage(addError AddErrFunc, v *ast.Value) {
addError(
unexpectedTypeMessageOnly(v),
At(v.Position),
)
}
func unexpectedTypeMessageOnly(v *ast.Value) ErrorOption {
switch v.ExpectedType.String() {
case "Int", "Int!":
if _, err := strconv.ParseInt(v.Raw, 10, 32); err != nil && errors.Is(err, strconv.ErrRange) {
return Message(`Int cannot represent non 32-bit signed integer value: %s`, v.String())
}
return Message(`Int cannot represent non-integer value: %s`, v.String())
case "String", "String!", "[String]":
return Message(`String cannot represent a non string value: %s`, v.String())
case "Boolean", "Boolean!":
return Message(`Boolean cannot represent a non boolean value: %s`, v.String())
case "Float", "Float!":
return Message(`Float cannot represent non numeric value: %s`, v.String())
case "ID", "ID!":
return Message(`ID cannot represent a non-string and non-integer value: %s`, v.String())
default:
if v.Definition.Kind == ast.Enum {
return Message(`Enum "%s" cannot represent non-enum value: %s.`, v.ExpectedType.String(), v.String())
}
return Message(`Expected value of type "%s", found %s.`, v.ExpectedType.String(), v.String())
}
}

View File

@@ -1,30 +0,0 @@
package validator
import (
"github.com/open-policy-agent/opa/internal/gqlparser/ast"
//nolint:revive // Validator rules each use dot imports for convenience.
. "github.com/open-policy-agent/opa/internal/gqlparser/validator"
)
func init() {
AddRule("VariablesAreInputTypes", func(observers *Events, addError AddErrFunc) {
observers.OnOperation(func(_ *Walker, operation *ast.OperationDefinition) {
for _, def := range operation.VariableDefinitions {
if def.Definition == nil {
continue
}
if !def.Definition.IsInputType() {
addError(
Message(
`Variable "$%s" cannot be non-input type "%s".`,
def.Variable,
def.Type.String(),
),
At(def.Position),
)
}
}
})
})
}

View File

@@ -1,45 +0,0 @@
package validator
import (
//nolint:revive
. "github.com/open-policy-agent/opa/internal/gqlparser/ast"
"github.com/open-policy-agent/opa/internal/gqlparser/gqlerror"
)
type AddErrFunc func(options ...ErrorOption)
type ruleFunc func(observers *Events, addError AddErrFunc)
type rule struct {
name string
rule ruleFunc
}
var rules []rule
// addRule to rule set.
// f is called once each time `Validate` is executed.
func AddRule(name string, f ruleFunc) {
rules = append(rules, rule{name: name, rule: f})
}
func Validate(schema *Schema, doc *QueryDocument) gqlerror.List {
var errs gqlerror.List
observers := &Events{}
for i := range rules {
rule := rules[i]
rule.rule(observers, func(options ...ErrorOption) {
err := &gqlerror.Error{
Rule: rule.name,
}
for _, o := range options {
o(err)
}
errs = append(errs, err)
})
}
Walk(schema, doc, observers)
return errs
}

View File

@@ -21,7 +21,7 @@ const (
// Accept is used when conversion from values given by
// outside sources (such as JSON payloads) is required
func (keyType *KeyType) Accept(value interface{}) error {
func (keyType *KeyType) Accept(value any) error {
var tmp KeyType
switch x := value.(type) {
case string:

View File

@@ -32,7 +32,7 @@ const (
// Accept is used when conversion from values given by
// outside sources (such as JSON payloads) is required
func (signature *SignatureAlgorithm) Accept(value interface{}) error {
func (signature *SignatureAlgorithm) Accept(value any) error {
var tmp SignatureAlgorithm
switch x := value.(type) {
case string:

View File

@@ -39,12 +39,12 @@ func newECDSAPrivateKey(key *ecdsa.PrivateKey) (*ECDSAPrivateKey, error) {
}
// Materialize returns the EC-DSA public key represented by this JWK
func (k ECDSAPublicKey) Materialize() (interface{}, error) {
func (k ECDSAPublicKey) Materialize() (any, error) {
return k.key, nil
}
// Materialize returns the EC-DSA private key represented by this JWK
func (k ECDSAPrivateKey) Materialize() (interface{}, error) {
func (k ECDSAPrivateKey) Materialize() (any, error) {
return k.key, nil
}

View File

@@ -18,15 +18,15 @@ const (
// Headers provides a common interface to all future possible headers
type Headers interface {
Get(string) (interface{}, bool)
Set(string, interface{}) error
Walk(func(string, interface{}) error) error
Get(string) (any, bool)
Set(string, any) error
Walk(func(string, any) error) error
GetAlgorithm() jwa.SignatureAlgorithm
GetKeyID() string
GetKeyOps() KeyOperationList
GetKeyType() jwa.KeyType
GetKeyUsage() string
GetPrivateParams() map[string]interface{}
GetPrivateParams() map[string]any
}
// StandardHeaders stores the common JWK parameters
@@ -36,7 +36,7 @@ type StandardHeaders struct {
KeyOps KeyOperationList `json:"key_ops,omitempty"` // https://tools.ietf.org/html/rfc7517#section-4.3
KeyType jwa.KeyType `json:"kty,omitempty"` // https://tools.ietf.org/html/rfc7517#section-4.1
KeyUsage string `json:"use,omitempty"` // https://tools.ietf.org/html/rfc7517#section-4.2
PrivateParams map[string]interface{} `json:"privateParams,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.4
PrivateParams map[string]any `json:"privateParams,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.4
}
// GetAlgorithm is a convenience function to retrieve the corresponding value stored in the StandardHeaders
@@ -68,12 +68,12 @@ func (h *StandardHeaders) GetKeyUsage() string {
}
// GetPrivateParams is a convenience function to retrieve the corresponding value stored in the StandardHeaders
func (h *StandardHeaders) GetPrivateParams() map[string]interface{} {
func (h *StandardHeaders) GetPrivateParams() map[string]any {
return h.PrivateParams
}
// Get is a general getter function for JWK StandardHeaders structure
func (h *StandardHeaders) Get(name string) (interface{}, bool) {
func (h *StandardHeaders) Get(name string) (any, bool) {
switch name {
case AlgorithmKey:
alg := h.GetAlgorithm()
@@ -117,7 +117,7 @@ func (h *StandardHeaders) Get(name string) (interface{}, bool) {
}
// Set is a general getter function for JWK StandardHeaders structure
func (h *StandardHeaders) Set(name string, value interface{}) error {
func (h *StandardHeaders) Set(name string, value any) error {
switch name {
case AlgorithmKey:
var acceptor jwa.SignatureAlgorithm
@@ -149,7 +149,7 @@ func (h *StandardHeaders) Set(name string, value interface{}) error {
}
return fmt.Errorf("invalid value for %s key: %T", KeyUsageKey, value)
case PrivateParamsKey:
if v, ok := value.(map[string]interface{}); ok {
if v, ok := value.(map[string]any); ok {
h.PrivateParams = v
return nil
}
@@ -160,7 +160,7 @@ func (h *StandardHeaders) Set(name string, value interface{}) error {
}
// Walk iterates over all JWK standard headers fields while applying a function to its value.
func (h StandardHeaders) Walk(f func(string, interface{}) error) error {
func (h StandardHeaders) Walk(f func(string, any) error) error {
for _, key := range []string{AlgorithmKey, KeyIDKey, KeyOpsKey, KeyTypeKey, KeyUsageKey, PrivateParamsKey} {
if v, ok := h.Get(key); ok {
if err := f(key, v); err != nil {

View File

@@ -24,7 +24,7 @@ type Key interface {
// RSA types would create *rsa.PublicKey or *rsa.PrivateKey,
// EC types would create *ecdsa.PublicKey or *ecdsa.PrivateKey,
// and OctetSeq types create a []byte key.
Materialize() (interface{}, error)
Materialize() (any, error)
GenerateKey(*RawKeyJSON) error
}

View File

@@ -15,7 +15,7 @@ import (
// For rsa key types *rsa.PublicKey is returned; for ecdsa key types *ecdsa.PublicKey;
// for byte slice (raw) keys, the key itself is returned. If the corresponding
// public key cannot be deduced, an error is returned
func GetPublicKey(key interface{}) (interface{}, error) {
func GetPublicKey(key any) (any, error) {
if key == nil {
return nil, errors.New("jwk.New requires a non-nil key")
}
@@ -23,7 +23,7 @@ func GetPublicKey(key interface{}) (interface{}, error) {
switch v := key.(type) {
// Mental note: although Public() is defined in both types,
// you can not coalesce the clauses for rsa.PrivateKey and
// ecdsa.PrivateKey, as then `v` becomes interface{}
// ecdsa.PrivateKey, as then `v` becomes any
// b/c the compiler cannot deduce the exact type.
case *rsa.PrivateKey:
return v.Public(), nil
@@ -37,7 +37,7 @@ func GetPublicKey(key interface{}) (interface{}, error) {
}
// GetKeyTypeFromKey creates a jwk.Key from the given key.
func GetKeyTypeFromKey(key interface{}) jwa.KeyType {
func GetKeyTypeFromKey(key any) jwa.KeyType {
switch key.(type) {
case *rsa.PrivateKey, *rsa.PublicKey:
@@ -52,7 +52,7 @@ func GetKeyTypeFromKey(key interface{}) jwa.KeyType {
}
// New creates a jwk.Key from the given key.
func New(key interface{}) (Key, error) {
func New(key any) (Key, error) {
if key == nil {
return nil, errors.New("jwk.New requires a non-nil key")
}

View File

@@ -39,7 +39,7 @@ const (
)
// Accept determines if Key Operation is valid
func (keyOperationList *KeyOperationList) Accept(v interface{}) error {
func (keyOperationList *KeyOperationList) Accept(v any) error {
switch x := v.(type) {
case KeyOperationList:
*keyOperationList = x

View File

@@ -65,7 +65,7 @@ func newRSAPrivateKey(key *rsa.PrivateKey) (*RSAPrivateKey, error) {
}
// Materialize returns the standard RSA Public Key representation stored in the internal representation
func (k *RSAPublicKey) Materialize() (interface{}, error) {
func (k *RSAPublicKey) Materialize() (any, error) {
if k.key == nil {
return nil, errors.New("key has no rsa.PublicKey associated with it")
}
@@ -73,7 +73,7 @@ func (k *RSAPublicKey) Materialize() (interface{}, error) {
}
// Materialize returns the standard RSA Private Key representation stored in the internal representation
func (k *RSAPrivateKey) Materialize() (interface{}, error) {
func (k *RSAPrivateKey) Materialize() (any, error) {
if k.key == nil {
return nil, errors.New("key has no rsa.PrivateKey associated with it")
}

View File

@@ -21,7 +21,7 @@ func newSymmetricKey(key []byte) (*SymmetricKey, error) {
// Materialize returns the octets for this symmetric key.
// Since this is a symmetric key, this just calls Octets
func (s SymmetricKey) Materialize() (interface{}, error) {
func (s SymmetricKey) Materialize() (any, error) {
return s.Octets(), nil
}

View File

@@ -20,8 +20,8 @@ const (
// Headers provides a common interface for common header parameters
type Headers interface {
Get(string) (interface{}, bool)
Set(string, interface{}) error
Get(string) (any, bool)
Set(string, any) error
GetAlgorithm() jwa.SignatureAlgorithm
}
@@ -33,7 +33,7 @@ type StandardHeaders struct {
JWK string `json:"jwk,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.3
JWKSetURL string `json:"jku,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.2
KeyID string `json:"kid,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.4
PrivateParams map[string]interface{} `json:"privateParams,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.9
PrivateParams map[string]any `json:"privateParams,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.9
Type string `json:"typ,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.9
}
@@ -43,7 +43,7 @@ func (h *StandardHeaders) GetAlgorithm() jwa.SignatureAlgorithm {
}
// Get is a general getter function for StandardHeaders structure
func (h *StandardHeaders) Get(name string) (interface{}, bool) {
func (h *StandardHeaders) Get(name string) (any, bool) {
switch name {
case AlgorithmKey:
v := h.Algorithm
@@ -99,7 +99,7 @@ func (h *StandardHeaders) Get(name string) (interface{}, bool) {
}
// Set is a general setter function for StandardHeaders structure
func (h *StandardHeaders) Set(name string, value interface{}) error {
func (h *StandardHeaders) Set(name string, value any) error {
switch name {
case AlgorithmKey:
if err := h.Algorithm.Accept(value); err != nil {
@@ -137,7 +137,7 @@ func (h *StandardHeaders) Set(name string, value interface{}) error {
}
return fmt.Errorf("invalid value for %s key: %T", KeyIDKey, value)
case PrivateParamsKey:
if v, ok := value.(map[string]interface{}); ok {
if v, ok := value.(map[string]any); ok {
h.PrivateParams = v
return nil
}

View File

@@ -38,7 +38,7 @@ import (
// SignLiteral generates a Signature for the given Payload and Headers, and serializes
// it in compact serialization format. In this format you may NOT use
// multiple signers.
func SignLiteral(payload []byte, alg jwa.SignatureAlgorithm, key interface{}, hdrBuf []byte, rnd io.Reader) ([]byte, error) {
func SignLiteral(payload []byte, alg jwa.SignatureAlgorithm, key any, hdrBuf []byte, rnd io.Reader) ([]byte, error) {
encodedHdr := base64.RawURLEncoding.EncodeToString(hdrBuf)
encodedPayload := base64.RawURLEncoding.EncodeToString(payload)
signingInput := strings.Join(
@@ -77,7 +77,7 @@ func SignLiteral(payload []byte, alg jwa.SignatureAlgorithm, key interface{}, hd
// multiple signers.
//
// If you would like to pass custom Headers, use the WithHeaders option.
func SignWithOption(payload []byte, alg jwa.SignatureAlgorithm, key interface{}) ([]byte, error) {
func SignWithOption(payload []byte, alg jwa.SignatureAlgorithm, key any) ([]byte, error) {
var headers Headers = &StandardHeaders{}
err := headers.Set(AlgorithmKey, alg)
@@ -99,7 +99,7 @@ func SignWithOption(payload []byte, alg jwa.SignatureAlgorithm, key interface{})
// Payload that was signed is returned. If you need more fine-grained
// control of the verification process, manually call `Parse`, generate a
// verifier, and call `Verify` on the parsed JWS message object.
func Verify(buf []byte, alg jwa.SignatureAlgorithm, key interface{}) (ret []byte, err error) {
func Verify(buf []byte, alg jwa.SignatureAlgorithm, key any) (ret []byte, err error) {
verifier, err := verify.New(alg)
if err != nil {

View File

@@ -72,7 +72,7 @@ func (s ECDSASigner) Algorithm() jwa.SignatureAlgorithm {
// SignWithRand signs payload with a ECDSA private key and a provided randomness
// source (such as `rand.Reader`).
func (s ECDSASigner) SignWithRand(payload []byte, key interface{}, r io.Reader) ([]byte, error) {
func (s ECDSASigner) SignWithRand(payload []byte, key any, r io.Reader) ([]byte, error) {
if key == nil {
return nil, errors.New("missing private key while signing payload")
}
@@ -85,6 +85,6 @@ func (s ECDSASigner) SignWithRand(payload []byte, key interface{}, r io.Reader)
}
// Sign signs payload with a ECDSA private key
func (s ECDSASigner) Sign(payload []byte, key interface{}) ([]byte, error) {
func (s ECDSASigner) Sign(payload []byte, key any) ([]byte, error) {
return s.SignWithRand(payload, key, rand.Reader)
}

View File

@@ -52,7 +52,7 @@ func (s HMACSigner) Algorithm() jwa.SignatureAlgorithm {
}
// Sign signs payload with a Symmetric key
func (s HMACSigner) Sign(payload []byte, key interface{}) ([]byte, error) {
func (s HMACSigner) Sign(payload []byte, key any) ([]byte, error) {
hmackey, ok := key.([]byte)
if !ok {
return nil, fmt.Errorf(`invalid key type %T. []byte is required`, key)

View File

@@ -16,7 +16,7 @@ type Signer interface {
// for `jwa.RSXXX` and `jwa.PSXXX` types, you need to pass the
// `*"crypto/rsa".PrivateKey` type.
// Check the documentation for each signer for details
Sign(payload []byte, key interface{}) ([]byte, error)
Sign(payload []byte, key any) ([]byte, error)
Algorithm() jwa.SignatureAlgorithm
}

View File

@@ -84,7 +84,7 @@ func (s RSASigner) Algorithm() jwa.SignatureAlgorithm {
// Sign creates a signature using crypto/rsa. key must be a non-nil instance of
// `*"crypto/rsa".PrivateKey`.
func (s RSASigner) Sign(payload []byte, key interface{}) ([]byte, error) {
func (s RSASigner) Sign(payload []byte, key any) ([]byte, error) {
if key == nil {
return nil, errors.New(`missing private key while signing payload`)
}

View File

@@ -26,7 +26,7 @@ func New(alg jwa.SignatureAlgorithm) (Signer, error) {
// GetSigningKey returns a *rsa.PrivateKey or *ecdsa.PrivateKey typically encoded in PEM blocks of type "RSA PRIVATE KEY"
// or "EC PRIVATE KEY" for RSA and ECDSA family of algorithms.
// For HMAC family, it return a []byte value
func GetSigningKey(key string, alg jwa.SignatureAlgorithm) (interface{}, error) {
func GetSigningKey(key string, alg jwa.SignatureAlgorithm) (any, error) {
switch alg {
case jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512:
block, _ := pem.Decode([]byte(key))

View File

@@ -54,7 +54,7 @@ func newECDSA(alg jwa.SignatureAlgorithm) (*ECDSAVerifier, error) {
}
// Verify checks whether the signature for a given input and key is correct
func (v ECDSAVerifier) Verify(payload []byte, signature []byte, key interface{}) error {
func (v ECDSAVerifier) Verify(payload []byte, signature []byte, key any) error {
if key == nil {
return errors.New(`missing public key while verifying payload`)
}

View File

@@ -19,7 +19,7 @@ func newHMAC(alg jwa.SignatureAlgorithm) (*HMACVerifier, error) {
}
// Verify checks whether the signature for a given input and key is correct
func (v HMACVerifier) Verify(signingInput, signature []byte, key interface{}) (err error) {
func (v HMACVerifier) Verify(signingInput, signature []byte, key any) (err error) {
expected, err := v.signer.Sign(signingInput, key)
if err != nil {

View File

@@ -16,7 +16,7 @@ type Verifier interface {
// for `jwa.RSXXX` and `jwa.PSXXX` types, you need to pass the
// `*"crypto/rsa".PublicKey` type.
// Check the documentation for each verifier for details
Verify(payload []byte, signature []byte, key interface{}) error
Verify(payload []byte, signature []byte, key any) error
}
type rsaVerifyFunc func([]byte, []byte, *rsa.PublicKey) error

View File

@@ -75,7 +75,7 @@ func newRSA(alg jwa.SignatureAlgorithm) (*RSAVerifier, error) {
}
// Verify checks if a JWS is valid.
func (v RSAVerifier) Verify(payload, signature []byte, key interface{}) error {
func (v RSAVerifier) Verify(payload, signature []byte, key any) error {
if key == nil {
return errors.New(`missing public key while verifying payload`)
}

View File

@@ -29,7 +29,7 @@ func New(alg jwa.SignatureAlgorithm) (Verifier, error) {
// GetSigningKey returns a *rsa.PublicKey or *ecdsa.PublicKey typically encoded in PEM blocks of type "PUBLIC KEY",
// for RSA and ECDSA family of algorithms.
// For HMAC family, it return a []byte value
func GetSigningKey(key string, alg jwa.SignatureAlgorithm) (interface{}, error) {
func GetSigningKey(key string, alg jwa.SignatureAlgorithm) (any, error) {
switch alg {
case jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512, jwa.ES256, jwa.ES384, jwa.ES512:
block, _ := pem.Decode([]byte(key))

View File

@@ -8,7 +8,7 @@ package merge
// InterfaceMaps returns the result of merging a and b. If a and b cannot be
// merged because of conflicting key-value pairs, ok is false.
func InterfaceMaps(a map[string]interface{}, b map[string]interface{}) (map[string]interface{}, bool) {
func InterfaceMaps(a map[string]any, b map[string]any) (map[string]any, bool) {
if a == nil {
return b, true
@@ -21,7 +21,7 @@ func InterfaceMaps(a map[string]interface{}, b map[string]interface{}) (map[stri
return merge(a, b), true
}
func merge(a, b map[string]interface{}) map[string]interface{} {
func merge(a, b map[string]any) map[string]any {
for k := range b {
@@ -32,8 +32,8 @@ func merge(a, b map[string]interface{}) map[string]interface{} {
continue
}
existObj := exist.(map[string]interface{})
addObj := add.(map[string]interface{})
existObj := exist.(map[string]any)
addObj := add.(map[string]any)
a[k] = merge(existObj, addObj)
}
@@ -41,7 +41,7 @@ func merge(a, b map[string]interface{}) map[string]interface{} {
return a
}
func hasConflicts(a, b map[string]interface{}) bool {
func hasConflicts(a, b map[string]any) bool {
for k := range b {
add := b[k]
@@ -50,8 +50,8 @@ func hasConflicts(a, b map[string]interface{}) bool {
continue
}
existObj, existOk := exist.(map[string]interface{})
addObj, addOk := add.(map[string]interface{})
existObj, existOk := exist.(map[string]any)
addObj, addOk := add.(map[string]any)
if !existOk || !addOk {
return true
}

View File

@@ -51,10 +51,10 @@ type Planner struct {
// debugf prepends the planner location. We're passing callstack depth 2 because
// it should still log the file location of p.debugf.
func (p *Planner) debugf(format string, args ...interface{}) {
func (p *Planner) debugf(format string, args ...any) {
var msg string
if p.loc != nil {
msg = fmt.Sprintf("%s: "+format, append([]interface{}{p.loc}, args...)...)
msg = fmt.Sprintf("%s: "+format, append([]any{p.loc}, args...)...)
} else {
msg = fmt.Sprintf(format, args...)
}
@@ -211,13 +211,15 @@ func (p *Planner) planRules(rules []*ast.Rule) (string, error) {
// Set the location to the rule head.
p.loc = rules[0].Head.Loc()
pcount := p.funcs.argVars()
params := make([]ir.Local, 0, pcount+len(rules[0].Head.Args))
for range pcount {
params = append(params, p.newLocal())
}
// Create function definition for rules.
fn := &ir.Func{
Name: fmt.Sprintf("g%d.%s", p.funcs.gen(), path),
Params: []ir.Local{
p.newLocal(), // input document
p.newLocal(), // data document
},
Name: fmt.Sprintf("g%d.%s", p.funcs.gen(), path),
Params: params,
Return: p.newLocal(),
Path: append([]string{fmt.Sprintf("g%d", p.funcs.gen())}, pathPieces...),
}
@@ -227,7 +229,10 @@ func (p *Planner) planRules(rules []*ast.Rule) (string, error) {
fn.Params = append(fn.Params, p.newLocal())
}
params := fn.Params[2:]
// only those added as formal parameters:
// f(x, y) is planned as f(data, input, x, y)
// pcount > 2 means there are vars passed along through with replacements by variables
params = fn.Params[pcount:]
// Initialize return value for partial set/object rules. Complete document
// rules assign directly to `fn.Return`.
@@ -301,10 +306,11 @@ func (p *Planner) planRules(rules []*ast.Rule) (string, error) {
// Setup planner for block.
p.lnext = lnext
p.vars = newVarstack(map[ast.Var]ir.Local{
ast.InputRootDocument.Value.(ast.Var): fn.Params[0],
ast.DefaultRootDocument.Value.(ast.Var): fn.Params[1],
})
vs := make(map[ast.Var]ir.Local, p.funcs.argVars())
for i, v := range p.funcs.vars() {
vs[v] = fn.Params[i]
}
p.vars = newVarstack(vs)
curr := &ir.Block{}
*blocks = append(*blocks, curr)
@@ -672,13 +678,17 @@ func (p *Planner) planWith(e *ast.Expr, iter planiter) error {
values := make([]*ast.Term, 0, len(e.With)) // NOTE(sr): we could be overallocating if there are builtin replacements
targets := make([]ast.Ref, 0, len(e.With))
vars := []ast.Var{}
mocks := frame{}
for _, w := range e.With {
v := w.Target.Value.(ast.Ref)
switch {
case p.isFunction(v): // nothing to do
case p.isFunctionOrBuiltin(v): // track var values
if wvar, ok := w.Value.Value.(ast.Var); ok {
vars = append(vars, wvar)
}
case ast.DefaultRootDocument.Equal(v[0]) ||
ast.InputRootDocument.Equal(v[0]):
@@ -735,7 +745,7 @@ func (p *Planner) planWith(e *ast.Expr, iter planiter) error {
// planning of this expression (transitively).
shadowing := p.dataRefsShadowRuletrie(dataRefs) || len(mocks) > 0
if shadowing {
p.funcs.Push(map[string]string{})
p.funcs.Push(map[string]string{}, vars)
for _, ref := range dataRefs {
p.rules.Push(ref)
}
@@ -756,7 +766,7 @@ func (p *Planner) planWith(e *ast.Expr, iter planiter) error {
p.mocks.PushFrame(mocks)
if shadowing {
p.funcs.Push(map[string]string{})
p.funcs.Push(map[string]string{}, vars)
for _, ref := range dataRefs {
p.rules.Push(ref)
}
@@ -990,6 +1000,15 @@ func (p *Planner) planExprCall(e *ast.Expr, iter planiter) error {
op := e.Operator()
if replacement := p.mocks.Lookup(operator); replacement != nil {
if _, ok := replacement.Value.(ast.Var); ok {
var arity int
if node := p.rules.Lookup(op); node != nil {
arity = node.Arity() // NB(sr): We don't need to plan what isn't called, only lookup arity
} else if bi, ok := p.decls[operator]; ok {
arity = bi.Decl.Arity()
}
return p.planExprCallValue(replacement, arity, operands, iter)
}
if r, ok := replacement.Value.(ast.Ref); ok {
if !r.HasPrefix(ast.DefaultRootRef) && !r.HasPrefix(ast.InputRootRef) {
// replacement is builtin
@@ -1018,7 +1037,7 @@ func (p *Planner) planExprCall(e *ast.Expr, iter planiter) error {
// replacement is a value, or ref
if bi, ok := p.decls[operator]; ok {
return p.planExprCallValue(replacement, len(bi.Decl.FuncArgs().Args), operands, iter)
return p.planExprCallValue(replacement, bi.Decl.Arity(), operands, iter)
}
if node := p.rules.Lookup(op); node != nil {
return p.planExprCallValue(replacement, node.Arity(), operands, iter)
@@ -1562,9 +1581,7 @@ func (p *Planner) planString(str ast.String, iter planiter) error {
}
func (p *Planner) planVar(v ast.Var, iter planiter) error {
p.ltarget = op(p.vars.GetOrElse(v, func() ir.Local {
return p.newLocal()
}))
p.ltarget = op(p.vars.GetOrElse(v, p.newLocal))
return iter()
}
@@ -1922,12 +1939,15 @@ func (p *Planner) planRefData(virtual *ruletrie, base *baseptr, ref ast.Ref, ind
if err != nil {
return err
}
p.appendStmt(&ir.CallStmt{
call := ir.CallStmt{
Func: funcName,
Args: p.defaultOperands(),
Args: make([]ir.Operand, 0, p.funcs.argVars()),
Result: p.ltarget.Value.(ir.Local),
})
}
for _, v := range p.funcs.vars() {
call.Args = append(call.Args, p.vars.GetOpOrEmpty(v))
}
p.appendStmt(&call)
return p.planRefRec(ref, index+1, iter)
}
@@ -2551,17 +2571,20 @@ func (p *Planner) unseenVars(t *ast.Term) bool {
}
func (p *Planner) defaultOperands() []ir.Operand {
return []ir.Operand{
p.vars.GetOpOrEmpty(ast.InputRootDocument.Value.(ast.Var)),
p.vars.GetOpOrEmpty(ast.DefaultRootDocument.Value.(ast.Var)),
pcount := p.funcs.argVars()
operands := make([]ir.Operand, pcount)
for i, v := range p.funcs.vars() {
operands[i] = p.vars.GetOpOrEmpty(v)
}
return operands
}
func (p *Planner) isFunction(r ast.Ref) bool {
func (p *Planner) isFunctionOrBuiltin(r ast.Ref) bool {
if node := p.rules.Lookup(r); node != nil {
return node.Arity() > 0
}
return false
_, ok := p.decls[r.String()]
return ok
}
func op(v ir.Val) ir.Operand {

View File

@@ -20,20 +20,44 @@ type funcstack struct {
}
type taggedPairs struct {
pairs map[string]string
gen int
pairs map[string]string
vars []ast.Var
vcount int
gen int
}
func newFuncstack() *funcstack {
return &funcstack{
stack: []taggedPairs{{pairs: map[string]string{}, gen: 0}},
next: 1}
stack: []taggedPairs{
{
pairs: map[string]string{},
gen: 0,
vars: []ast.Var{
ast.InputRootDocument.Value.(ast.Var),
ast.DefaultRootDocument.Value.(ast.Var),
},
vcount: 2,
},
},
next: 1}
}
func (p funcstack) last() taggedPairs {
return p.stack[len(p.stack)-1]
}
func (p funcstack) argVars() int {
return p.last().vcount
}
func (p funcstack) vars() []ast.Var {
ret := make([]ast.Var, 0, p.last().vcount)
for i := range p.stack {
ret = append(ret, p.stack[i].vars...)
}
return ret
}
func (p funcstack) Add(key, value string) {
p.last().pairs[key] = value
}
@@ -43,8 +67,13 @@ func (p funcstack) Get(key string) (string, bool) {
return value, ok
}
func (p *funcstack) Push(funcs map[string]string) {
p.stack = append(p.stack, taggedPairs{pairs: funcs, gen: p.next})
func (p *funcstack) Push(funcs map[string]string, vars []ast.Var) {
p.stack = append(p.stack, taggedPairs{
pairs: funcs,
gen: p.next,
vars: vars,
vcount: p.last().vcount + len(vars),
})
p.next++
}

View File

@@ -16,7 +16,7 @@ func ConstantTimeByteCompare(x, y []byte) (int, error) {
xLarger, yLarger := 0, 0
for i := 0; i < len(x); i++ {
for i := range x {
xByte, yByte := int(x[i]), int(y[i])
x := ((yByte - xByte) >> 8) & 1

View File

@@ -18,7 +18,7 @@ func DoRequestWithClient(req *http.Request, client *http.Client, desc string, lo
}
defer resp.Body.Close()
logger.WithFields(map[string]interface{}{
logger.WithFields(map[string]any{
"url": req.URL.String(),
"status": resp.Status,
"headers": resp.Header,

View File

@@ -16,10 +16,7 @@ import (
// ParseDataPath returns a ref from the slash separated path s rooted at data.
// All path segments are treated as identifier strings.
func ParseDataPath(s string) (ast.Ref, error) {
s = "/" + strings.TrimPrefix(s, "/")
path, ok := storage.ParsePath(s)
path, ok := storage.ParsePath("/" + strings.TrimPrefix(s, "/"))
if !ok {
return nil, errors.New("invalid path")
}
@@ -29,7 +26,7 @@ func ParseDataPath(s string) (ast.Ref, error) {
// ArrayPath will take an ast.Array and build an ast.Ref using the ast.Terms in the Array
func ArrayPath(a *ast.Array) ast.Ref {
var ref ast.Ref
ref := make(ast.Ref, 0, a.Len())
a.Foreach(func(term *ast.Term) {
ref = append(ref, term)

View File

@@ -36,10 +36,10 @@ type EvalEngine interface {
Init() (EvalEngine, error)
Entrypoints(context.Context) (map[string]int32, error)
WithPolicyBytes([]byte) EvalEngine
WithDataJSON(interface{}) EvalEngine
WithDataJSON(any) EvalEngine
Eval(context.Context, EvalOpts) (*Result, error)
SetData(context.Context, interface{}) error
SetDataPath(context.Context, []string, interface{}) error
SetData(context.Context, any) error
SetDataPath(context.Context, []string, any) error
RemoveDataPath(context.Context, []string) error
Close()
}

View File

@@ -18,7 +18,7 @@ type Result struct {
// EvalOpts define options for performing an evaluation.
type EvalOpts struct {
Input *interface{}
Input *any
Metrics metrics.Metrics
Entrypoint int32
Time time.Time

View File

@@ -81,9 +81,9 @@ func New(id string, opts Options) (*Reporter, error) {
url = ExternalServiceURL
}
restConfig := []byte(fmt.Sprintf(`{
restConfig := fmt.Appendf(nil, `{
"url": %q,
}`, url))
}`, url)
client, err := rest.New(restConfig, map[string]*keys.Config{}, rest.Logger(opts.Logger))
if err != nil {

View File

@@ -46,8 +46,8 @@ func ToYAML(s string) (string, error) {
// Parse parses a set line.
//
// A set line is of the form name1=value1,name2=value2
func Parse(s string) (map[string]interface{}, error) {
vals := map[string]interface{}{}
func Parse(s string) (map[string]any, error) {
vals := map[string]any{}
scanner := bytes.NewBufferString(s)
t := newParser(scanner, vals, false)
err := t.parse()
@@ -57,8 +57,8 @@ func Parse(s string) (map[string]interface{}, error) {
// ParseString parses a set line and forces a string value.
//
// A set line is of the form name1=value1,name2=value2
func ParseString(s string) (map[string]interface{}, error) {
vals := map[string]interface{}{}
func ParseString(s string) (map[string]any, error) {
vals := map[string]any{}
scanner := bytes.NewBufferString(s)
t := newParser(scanner, vals, true)
err := t.parse()
@@ -69,7 +69,7 @@ func ParseString(s string) (map[string]interface{}, error) {
//
// If the strval string has a key that exists in dest, it overwrites the
// dest version.
func ParseInto(s string, dest map[string]interface{}) error {
func ParseInto(s string, dest map[string]any) error {
scanner := bytes.NewBufferString(s)
t := newParser(scanner, dest, false)
return t.parse()
@@ -78,7 +78,7 @@ func ParseInto(s string, dest map[string]interface{}) error {
// ParseIntoFile parses a filevals line and merges the result into dest.
//
// This method always returns a string as the value.
func ParseIntoFile(s string, dest map[string]interface{}, runesToVal runesToVal) error {
func ParseIntoFile(s string, dest map[string]any, runesToVal runesToVal) error {
scanner := bytes.NewBufferString(s)
t := newFileParser(scanner, dest, runesToVal)
return t.parse()
@@ -87,7 +87,7 @@ func ParseIntoFile(s string, dest map[string]interface{}, runesToVal runesToVal)
// ParseIntoString parses a strvals line and merges the result into dest.
//
// This method always returns a string as the value.
func ParseIntoString(s string, dest map[string]interface{}) error {
func ParseIntoString(s string, dest map[string]any) error {
scanner := bytes.NewBufferString(s)
t := newParser(scanner, dest, true)
return t.parse()
@@ -101,20 +101,20 @@ func ParseIntoString(s string, dest map[string]interface{}) error {
// where st is a boolean to figure out if we're forcing it to parse values as string
type parser struct {
sc *bytes.Buffer
data map[string]interface{}
data map[string]any
runesToVal runesToVal
}
type runesToVal func([]rune) (interface{}, error)
type runesToVal func([]rune) (any, error)
func newParser(sc *bytes.Buffer, data map[string]interface{}, stringBool bool) *parser {
rs2v := func(rs []rune) (interface{}, error) {
func newParser(sc *bytes.Buffer, data map[string]any, stringBool bool) *parser {
rs2v := func(rs []rune) (any, error) {
return typedVal(rs, stringBool), nil
}
return &parser{sc: sc, data: data, runesToVal: rs2v}
}
func newFileParser(sc *bytes.Buffer, data map[string]interface{}, runesToVal runesToVal) *parser {
func newFileParser(sc *bytes.Buffer, data map[string]any, runesToVal runesToVal) *parser {
return &parser{sc: sc, data: data, runesToVal: runesToVal}
}
@@ -139,7 +139,7 @@ func runeSet(r []rune) map[rune]bool {
return s
}
func (t *parser) key(data map[string]interface{}) error {
func (t *parser) key(data map[string]any) error {
stop := runeSet([]rune{'=', '[', ',', '.'})
for {
switch k, last, err := runesUntil(t.sc, stop); {
@@ -156,9 +156,9 @@ func (t *parser) key(data map[string]interface{}) error {
}
kk := string(k)
// Find or create target list
list := []interface{}{}
list := []any{}
if _, ok := data[kk]; ok {
list = data[kk].([]interface{})
list = data[kk].([]any)
}
// Now we need to get the value after the ].
@@ -194,9 +194,9 @@ func (t *parser) key(data map[string]interface{}) error {
return fmt.Errorf("key %q has no value (cannot end with ,)", string(k))
case last == '.':
// First, create or find the target map.
inner := map[string]interface{}{}
inner := map[string]any{}
if _, ok := data[string(k)]; ok {
inner = data[string(k)].(map[string]interface{})
inner = data[string(k)].(map[string]any)
}
// Recurse
@@ -210,7 +210,7 @@ func (t *parser) key(data map[string]interface{}) error {
}
}
func set(data map[string]interface{}, key string, val interface{}) {
func set(data map[string]any, key string, val any) {
// If key is empty, don't set it.
if len(key) == 0 {
return
@@ -218,7 +218,7 @@ func set(data map[string]interface{}, key string, val interface{}) {
data[key] = val
}
func setIndex(list []interface{}, index int, val interface{}) (l2 []interface{}, err error) {
func setIndex(list []any, index int, val any) (l2 []any, err error) {
// There are possible index values that are out of range on a target system
// causing a panic. This will catch the panic and return an error instead.
// The value of the index that causes a panic varies from system to system.
@@ -235,7 +235,7 @@ func setIndex(list []interface{}, index int, val interface{}) (l2 []interface{},
return list, fmt.Errorf("index of %d is greater than maximum supported index of %d", index, MaxIndex)
}
if len(list) <= index {
newlist := make([]interface{}, index+1)
newlist := make([]any, index+1)
copy(newlist, list)
list = newlist
}
@@ -254,7 +254,7 @@ func (t *parser) keyIndex() (int, error) {
return strconv.Atoi(string(v))
}
func (t *parser) listItem(list []interface{}, i int) ([]interface{}, error) {
func (t *parser) listItem(list []any, i int) ([]any, error) {
if i < 0 {
return list, fmt.Errorf("negative %d index not allowed", i)
}
@@ -298,14 +298,14 @@ func (t *parser) listItem(list []interface{}, i int) ([]interface{}, error) {
return setIndex(list, i, list2)
case last == '.':
// We have a nested object. Send to t.key
inner := map[string]interface{}{}
inner := map[string]any{}
if len(list) > i {
var ok bool
inner, ok = list[i].(map[string]interface{})
inner, ok = list[i].(map[string]any)
if !ok {
// We have indices out of order. Initialize empty value.
list[i] = map[string]interface{}{}
inner = list[i].(map[string]interface{})
list[i] = map[string]any{}
inner = list[i].(map[string]any)
}
}
@@ -326,21 +326,21 @@ func (t *parser) val() ([]rune, error) {
return v, err
}
func (t *parser) valList() ([]interface{}, error) {
func (t *parser) valList() ([]any, error) {
r, _, e := t.sc.ReadRune()
if e != nil {
return []interface{}{}, e
return []any{}, e
}
if r != '{' {
e = t.sc.UnreadRune()
if e != nil {
return []interface{}{}, e
return []any{}, e
}
return []interface{}{}, ErrNotList
return []any{}, ErrNotList
}
list := []interface{}{}
list := []any{}
stop := runeSet([]rune{',', '}'})
for {
switch rs, last, err := runesUntil(t.sc, stop); {
@@ -354,7 +354,7 @@ func (t *parser) valList() ([]interface{}, error) {
if r, _, e := t.sc.ReadRune(); e == nil && r != ',' {
e = t.sc.UnreadRune()
if e != nil {
return []interface{}{}, e
return []any{}, e
}
}
v, e := t.runesToVal(rs)
@@ -395,7 +395,7 @@ func inMap(k rune, m map[rune]bool) bool {
return ok
}
func typedVal(v []rune, st bool) interface{} {
func typedVal(v []rune, st bool) any {
val := string(v)
if st {

View File

@@ -32,12 +32,12 @@ func New(r io.Reader) (string, error) {
// if parsing fails, it will return an empty map. It will fill the map
// with some decoded values with fillMap
// ref: https://datatracker.ietf.org/doc/html/rfc4122
func Parse(s string) (map[string]interface{}, error) {
func Parse(s string) (map[string]any, error) {
uuid, err := uuid.Parse(s)
if err != nil {
return nil, err
}
out := make(map[string]interface{}, getVersionLen(int(uuid.Version())))
out := make(map[string]any, getVersionLen(int(uuid.Version())))
fillMap(out, uuid)
return out, nil
}
@@ -46,7 +46,7 @@ func Parse(s string) (map[string]interface{}, error) {
// Version 1-2 has decodable values that could be of use, version 4 is random,
// and version 3,5 is not feasible to extract data. Generated with either MD5 or SHA1 hash
// ref: https://datatracker.ietf.org/doc/html/rfc4122 about creation of UUIDs
func fillMap(m map[string]interface{}, u uuid.UUID) {
func fillMap(m map[string]any, u uuid.UUID) {
m["version"] = int(u.Version())
m["variant"] = u.Variant().String()
switch version := m["version"]; version {

View File

@@ -24,7 +24,7 @@ func Write(ctx context.Context, store storage.Store, txn storage.Transaction) er
return err
}
return store.Write(ctx, txn, storage.AddOp, versionPath, map[string]interface{}{
return store.Write(ctx, txn, storage.AddOp, versionPath, map[string]any{
"version": version.Version,
"build_commit": version.Vcs,
"build_timestamp": version.Timestamp,

View File

@@ -112,8 +112,8 @@ func (Br) Op() opcode.Opcode {
}
// ImmediateArgs returns the block index to break to.
func (i Br) ImmediateArgs() []interface{} {
return []interface{}{i.Index}
func (i Br) ImmediateArgs() []any {
return []any{i.Index}
}
// BrIf represents a WASM br_if instruction.
@@ -127,8 +127,8 @@ func (BrIf) Op() opcode.Opcode {
}
// ImmediateArgs returns the block index to break to.
func (i BrIf) ImmediateArgs() []interface{} {
return []interface{}{i.Index}
func (i BrIf) ImmediateArgs() []any {
return []any{i.Index}
}
// Call represents a WASM call instruction.
@@ -142,8 +142,8 @@ func (Call) Op() opcode.Opcode {
}
// ImmediateArgs returns the function index.
func (i Call) ImmediateArgs() []interface{} {
return []interface{}{i.Index}
func (i Call) ImmediateArgs() []any {
return []any{i.Index}
}
// CallIndirect represents a WASM call_indirect instruction.
@@ -158,8 +158,8 @@ func (CallIndirect) Op() opcode.Opcode {
}
// ImmediateArgs returns the function index.
func (i CallIndirect) ImmediateArgs() []interface{} {
return []interface{}{i.Index, i.Reserved}
func (i CallIndirect) ImmediateArgs() []any {
return []any{i.Index, i.Reserved}
}
// Return represents a WASM return instruction.

View File

@@ -15,14 +15,14 @@ type NoImmediateArgs struct {
}
// ImmediateArgs returns the immedate arguments of an instruction.
func (NoImmediateArgs) ImmediateArgs() []interface{} {
func (NoImmediateArgs) ImmediateArgs() []any {
return nil
}
// Instruction represents a single WASM instruction.
type Instruction interface {
Op() opcode.Opcode
ImmediateArgs() []interface{}
ImmediateArgs() []any
}
// StructuredInstruction represents a structured control instruction like br_if.

View File

@@ -18,8 +18,8 @@ func (I32Load) Op() opcode.Opcode {
}
// ImmediateArgs returns the static offset and alignment operands.
func (i I32Load) ImmediateArgs() []interface{} {
return []interface{}{i.Align, i.Offset}
func (i I32Load) ImmediateArgs() []any {
return []any{i.Align, i.Offset}
}
// I32Store represents the WASM i32.store instruction.
@@ -34,6 +34,6 @@ func (I32Store) Op() opcode.Opcode {
}
// ImmediateArgs returns the static offset and alignment operands.
func (i I32Store) ImmediateArgs() []interface{} {
return []interface{}{i.Align, i.Offset}
func (i I32Store) ImmediateArgs() []any {
return []any{i.Align, i.Offset}
}

View File

@@ -19,8 +19,8 @@ func (I32Const) Op() opcode.Opcode {
}
// ImmediateArgs returns the i32 value to push onto the stack.
func (i I32Const) ImmediateArgs() []interface{} {
return []interface{}{i.Value}
func (i I32Const) ImmediateArgs() []any {
return []any{i.Value}
}
// I64Const represents the WASM i64.const instruction.
@@ -34,8 +34,8 @@ func (I64Const) Op() opcode.Opcode {
}
// ImmediateArgs returns the i64 value to push onto the stack.
func (i I64Const) ImmediateArgs() []interface{} {
return []interface{}{i.Value}
func (i I64Const) ImmediateArgs() []any {
return []any{i.Value}
}
// F32Const represents the WASM f32.const instruction.
@@ -49,8 +49,8 @@ func (F32Const) Op() opcode.Opcode {
}
// ImmediateArgs returns the f32 value to push onto the stack.
func (i F32Const) ImmediateArgs() []interface{} {
return []interface{}{i.Value}
func (i F32Const) ImmediateArgs() []any {
return []any{i.Value}
}
// F64Const represents the WASM f64.const instruction.
@@ -64,8 +64,8 @@ func (F64Const) Op() opcode.Opcode {
}
// ImmediateArgs returns the f64 value to push onto the stack.
func (i F64Const) ImmediateArgs() []interface{} {
return []interface{}{i.Value}
func (i F64Const) ImmediateArgs() []any {
return []any{i.Value}
}
// I32Eqz represents the WASM i32.eqz instruction.

Some files were not shown because too many files have changed in this diff Show More