build(deps): bump github.com/open-policy-agent/opa from 0.60.0 to 0.61.0

Bumps [github.com/open-policy-agent/opa](https://github.com/open-policy-agent/opa) from 0.60.0 to 0.61.0.
- [Release notes](https://github.com/open-policy-agent/opa/releases)
- [Changelog](https://github.com/open-policy-agent/opa/blob/main/CHANGELOG.md)
- [Commits](https://github.com/open-policy-agent/opa/compare/v0.60.0...v0.61.0)

---
updated-dependencies:
- dependency-name: github.com/open-policy-agent/opa
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
This commit is contained in:
dependabot[bot]
2024-01-30 06:09:53 +00:00
committed by Ralf Haferkamp
parent 7980be48a1
commit 690d44cfb0
23 changed files with 5206 additions and 151 deletions

6
go.mod
View File

@@ -46,7 +46,7 @@ require (
github.com/golang/protobuf v1.5.3
github.com/google/go-cmp v0.6.0
github.com/google/go-tika v0.3.0
github.com/google/uuid v1.5.0
github.com/google/uuid v1.6.0
github.com/gookit/config/v2 v2.2.5
github.com/gorilla/mux v1.8.1
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0
@@ -67,7 +67,7 @@ require (
github.com/onsi/ginkgo v1.16.5
github.com/onsi/ginkgo/v2 v2.15.0
github.com/onsi/gomega v1.31.0
github.com/open-policy-agent/opa v0.60.0
github.com/open-policy-agent/opa v0.61.0
github.com/orcaman/concurrent-map v1.0.0
github.com/owncloud/libre-graph-api-go v1.0.5-0.20240115110609-b018a896364e
github.com/pkg/errors v0.9.1
@@ -280,7 +280,7 @@ require (
github.com/nats-io/nkeys v0.4.7 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/opencontainers/runtime-spec v1.1.0-rc.1 // indirect
github.com/opencontainers/runtime-spec v1.1.0 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect

12
go.sum
View File

@@ -1396,8 +1396,8 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
@@ -1778,10 +1778,10 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE=
github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk=
github.com/open-policy-agent/opa v0.60.0 h1:ZPoPt4yeNs5UXCpd/P/btpSyR8CR0wfhVoh9BOwgJNs=
github.com/open-policy-agent/opa v0.60.0/go.mod h1:aD5IK6AiLNYBjNXn7E02++yC8l4Z+bRDvgM6Ss0bBzA=
github.com/opencontainers/runtime-spec v1.1.0-rc.1 h1:wHa9jroFfKGQqFHj0I1fMRKLl0pfj+ynAqBxo3v6u9w=
github.com/opencontainers/runtime-spec v1.1.0-rc.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/open-policy-agent/opa v0.61.0 h1:nhncQ2CAYtQTV/SMBhDDPsCpCQsUW+zO/1j+T5V7oZg=
github.com/open-policy-agent/opa v0.61.0/go.mod h1:7OUuzJnsS9yHf8lw0ApfcbrnaRG1EkN3J2fuuqi4G/E=
github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=

View File

@@ -1,5 +1,18 @@
# Changelog
## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)
### Features
* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
### Bug Fixes
* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)

View File

@@ -17,6 +17,12 @@ var (
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
Nil UUID // empty UUID, all zeros
// The Max UUID is special form of UUID that is specified to have all 128 bits set to 1.
Max = UUID{
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
}
)
// NewHash returns a new UUID derived from the hash of space concatenated with

View File

@@ -44,7 +44,7 @@ func NewV7FromReader(r io.Reader) (UUID, error) {
// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6])
// uuid[8] already has the right version number (Variant is 10)
// see function NewV7 and NewV7FromReader
// see function NewV7 and NewV7FromReader
func makeV7(uuid []byte) {
/*
0 1 2 3
@@ -52,7 +52,7 @@ func makeV7(uuid []byte) {
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| unix_ts_ms |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| unix_ts_ms | ver | rand_a |
| unix_ts_ms | ver | rand_a (12 bit seq) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|var| rand_b |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -61,7 +61,7 @@ func makeV7(uuid []byte) {
*/
_ = uuid[15] // bounds check
t := timeNow().UnixMilli()
t, s := getV7Time()
uuid[0] = byte(t >> 40)
uuid[1] = byte(t >> 32)
@@ -70,6 +70,35 @@ func makeV7(uuid []byte) {
uuid[4] = byte(t >> 8)
uuid[5] = byte(t)
uuid[6] = 0x70 | (uuid[6] & 0x0F)
// uuid[8] already has the right version
uuid[6] = 0x70 | (0x0F & byte(s>>8))
uuid[7] = byte(s)
}
// lastV7time is the last time value handed out by getV7Time, stored as:
//
// 52 bits of time in milliseconds since epoch
// 12 bits of (fractional nanoseconds) >> 8
var lastV7time int64
const nanoPerMilli = 1000000
// getV7Time returns the time in milliseconds and nanoseconds / 256.
// The returned (milli << 12 + seq) is guaranteed to be greater than
// (milli << 12 + seq) returned by any previous call to getV7Time,
// which is what makes successive V7 UUIDs strictly monotonic.
func getV7Time() (milli, seq int64) {
timeMu.Lock()
defer timeMu.Unlock()
nano := timeNow().UnixNano()
milli = nano / nanoPerMilli
// Sequence number is between 0 and 3906 (nanoPerMilli>>8)
seq = (nano - milli*nanoPerMilli) >> 8
now := milli<<12 + seq
// If the clock did not advance past the last returned value (same
// 1/3906 ms tick, or a backwards clock step), bump by one so the
// combined value stays strictly increasing; milli/seq are then
// re-derived from the bumped value.
if now <= lastV7time {
now = lastV7time + 1
milli = now >> 12
seq = now & 0xfff
}
lastV7time = now
return milli, seq
}

View File

@@ -122,19 +122,13 @@ type ParserOptions struct {
SkipRules bool
JSONOptions *astJSON.Options
// RegoVersion is the version of Rego to parse for.
// RegoV1Compatible additionally affects the Rego version. Use EffectiveRegoVersion to get the effective Rego version.
RegoVersion RegoVersion
// RegoV1Compatible is equivalent to setting RegoVersion to RegoV0CompatV1.
// RegoV1Compatible takes precedence, and if set to true, RegoVersion is ignored.
// Deprecated: use RegoVersion instead. Will be removed in a future version of OPA.
RegoV1Compatible bool
RegoVersion RegoVersion
unreleasedKeywords bool // TODO(sr): cleanup
}
// EffectiveRegoVersion returns the effective RegoVersion to use for parsing.
// Deprecated: Use RegoVersion instead.
func (po *ParserOptions) EffectiveRegoVersion() RegoVersion {
if po.RegoV1Compatible {
return RegoV0CompatV1
}
return po.RegoVersion
}
@@ -291,7 +285,7 @@ func (p *Parser) Parse() ([]Statement, []*Comment, Errors) {
allowedFutureKeywords := map[string]tokens.Token{}
if p.po.EffectiveRegoVersion() == RegoV1 {
if p.po.RegoVersion == RegoV1 {
// RegoV1 includes all future keywords in the default language definition
for k, v := range futureKeywords {
allowedFutureKeywords[k] = v
@@ -325,7 +319,7 @@ func (p *Parser) Parse() ([]Statement, []*Comment, Errors) {
}
selected := map[string]tokens.Token{}
if p.po.AllFutureKeywords || p.po.EffectiveRegoVersion() == RegoV1 {
if p.po.AllFutureKeywords || p.po.RegoVersion == RegoV1 {
for kw, tok := range allowedFutureKeywords {
selected[kw] = tok
}
@@ -346,7 +340,7 @@ func (p *Parser) Parse() ([]Statement, []*Comment, Errors) {
}
p.s.s = p.s.s.WithKeywords(selected)
if p.po.EffectiveRegoVersion() == RegoV1 {
if p.po.RegoVersion == RegoV1 {
for kw, tok := range allowedFutureKeywords {
p.s.s.AddKeyword(kw, tok)
}
@@ -2614,7 +2608,7 @@ func (p *Parser) regoV1Import(imp *Import) {
return
}
if p.po.EffectiveRegoVersion() == RegoV1 {
if p.po.RegoVersion == RegoV1 {
// We're parsing for Rego v1, where the 'rego.v1' import is a no-op.
return
}

View File

@@ -477,7 +477,7 @@ func ParseModuleWithOpts(filename, input string, popts ParserOptions) (*Module,
if err != nil {
return nil, err
}
return parseModule(filename, stmts, comments, popts.EffectiveRegoVersion())
return parseModule(filename, stmts, comments, popts.RegoVersion)
}
// ParseBody returns exactly one body.
@@ -626,7 +626,7 @@ func ParseStatementsWithOpts(filename, input string, popts ParserOptions) ([]Sta
WithCapabilities(popts.Capabilities).
WithSkipRules(popts.SkipRules).
WithJSONOptions(popts.JSONOptions).
WithRegoVersion(popts.EffectiveRegoVersion()).
WithRegoVersion(popts.RegoVersion).
withUnreleasedKeywords(popts.unreleasedKeywords)
stmts, comments, errs := parser.Parse()
@@ -698,22 +698,7 @@ func parseModule(filename string, stmts []Statement, comments []*Comment, regoCo
if mod.regoVersion == RegoV0CompatV1 || mod.regoVersion == RegoV1 {
for _, rule := range mod.Rules {
for r := rule; r != nil; r = r.Else {
var t string
if r.isFunction() {
t = "function"
} else {
t = "rule"
}
if r.generatedBody && r.Head.generatedValue {
errs = append(errs, NewError(ParseErr, r.Location, "%s must have value assignment and/or body declaration", t))
}
if r.Body != nil && !r.generatedBody && !ruleDeclarationHasKeyword(r, tokens.If) && !r.Default {
errs = append(errs, NewError(ParseErr, r.Location, "`if` keyword is required before %s body", t))
}
if r.Head.RuleKind() == MultiValue && !ruleDeclarationHasKeyword(r, tokens.Contains) {
errs = append(errs, NewError(ParseErr, r.Location, "`contains` keyword is required for partial set rules"))
}
errs = append(errs, CheckRegoV1(r)...)
}
}
}

View File

@@ -1,5 +1,11 @@
package ast
import (
"fmt"
"github.com/open-policy-agent/opa/ast/internal/tokens"
)
func checkDuplicateImports(modules []*Module) (errors Errors) {
for _, module := range modules {
processedImports := map[Var]*Import{}
@@ -116,11 +122,43 @@ func checkDeprecatedBuiltinsForCurrentVersion(node interface{}) Errors {
return checkDeprecatedBuiltins(deprecatedBuiltins, node)
}
// CheckRegoV1 checks the given module for errors that are specific to Rego v1
func CheckRegoV1(module *Module) Errors {
// CheckRegoV1 checks the given module or rule for errors that are specific to Rego v1.
// Passing something other than an *ast.Rule or *ast.Module is considered a programming error, and will cause a panic.
func CheckRegoV1(x interface{}) Errors {
	switch v := x.(type) {
	case *Module:
		return checkRegoV1Module(v)
	case *Rule:
		return checkRegoV1Rule(v)
	default:
		panic(fmt.Sprintf("cannot check rego-v1 compatibility on type %T", x))
	}
}
// checkRegoV1Module collects the module-level Rego v1 compatibility errors:
// duplicate imports, root document overrides, and deprecated built-ins.
func checkRegoV1Module(module *Module) Errors {
	var errs Errors
	errs = append(errs, checkDuplicateImports([]*Module{module})...)
	errs = append(errs, checkRootDocumentOverrides(module)...)
	errs = append(errs, checkDeprecatedBuiltinsForCurrentVersion(module)...)
	return errs
}
// checkRegoV1Rule reports Rego v1 violations for a single rule: a generated
// (missing) value and body, a body written without the `if` keyword, or a
// partial set rule written without the `contains` keyword.
func checkRegoV1Rule(rule *Rule) Errors {
	kind := "rule"
	if rule.isFunction() {
		kind = "function"
	}

	var result Errors
	if rule.generatedBody && rule.Head.generatedValue {
		result = append(result, NewError(ParseErr, rule.Location, "%s must have value assignment and/or body declaration", kind))
	}
	if rule.Body != nil && !rule.generatedBody && !ruleDeclarationHasKeyword(rule, tokens.If) && !rule.Default {
		result = append(result, NewError(ParseErr, rule.Location, "`if` keyword is required before %s body", kind))
	}
	if rule.Head.RuleKind() == MultiValue && !ruleDeclarationHasKeyword(rule, tokens.Contains) {
		result = append(result, NewError(ParseErr, rule.Location, "`contains` keyword is required for partial set rules"))
	}
	return result
}

View File

@@ -1465,14 +1465,25 @@ func preProcessBundle(loader DirectoryLoader, skipVerify bool, sizeLimitBytes in
}
func readFile(f *Descriptor, sizeLimitBytes int64) (bytes.Buffer, error) {
if bb, ok := f.reader.(*bytes.Buffer); ok {
_ = f.Close() // always close, even on error
if int64(bb.Len()) >= sizeLimitBytes {
return *bb, fmt.Errorf("bundle file '%v' size (%d bytes) exceeded max size (%v bytes)",
strings.TrimPrefix(f.Path(), "/"), bb.Len(), sizeLimitBytes-1)
}
return *bb, nil
}
var buf bytes.Buffer
n, err := f.Read(&buf, sizeLimitBytes)
f.Close() // always close, even on error
_ = f.Close() // always close, even on error
if err != nil && err != io.EOF {
return buf, err
} else if err == nil && n >= sizeLimitBytes {
return buf, fmt.Errorf("bundle file '%v' exceeded max size (%v bytes)", strings.TrimPrefix(f.Path(), "/"), sizeLimitBytes-1)
return buf, fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(f.Path(), "/"), n, sizeLimitBytes-1)
}
return buf, nil

View File

@@ -17,6 +17,8 @@ import (
"github.com/open-policy-agent/opa/storage"
)
const maxSizeLimitBytesErrMsg = "bundle file %s size (%d bytes) exceeds configured size_limit_bytes (%d bytes)"
// Descriptor contains information about a file and
// can be used to read the file contents.
type Descriptor struct {
@@ -63,7 +65,7 @@ func (f *lazyFile) Close() error {
return nil
}
func newDescriptor(url, path string, reader io.Reader) *Descriptor {
func NewDescriptor(url, path string, reader io.Reader) *Descriptor {
return &Descriptor{
url: url,
path: path,
@@ -71,7 +73,7 @@ func newDescriptor(url, path string, reader io.Reader) *Descriptor {
}
}
func (d *Descriptor) withCloser(closer io.Closer) *Descriptor {
func (d *Descriptor) WithCloser(closer io.Closer) *Descriptor {
d.closer = closer
d.closeOnce = new(sync.Once)
return d
@@ -123,14 +125,16 @@ type DirectoryLoader interface {
NextFile() (*Descriptor, error)
WithFilter(filter filter.LoaderFilter) DirectoryLoader
WithPathFormat(PathFormat) DirectoryLoader
WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader
}
type dirLoader struct {
root string
files []string
idx int
filter filter.LoaderFilter
pathFormat PathFormat
root string
files []string
idx int
filter filter.LoaderFilter
pathFormat PathFormat
maxSizeLimitBytes int64
}
// Normalize root directory, ex "./src/bundle" -> "src/bundle"
@@ -171,6 +175,12 @@ func (d *dirLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader {
return d
}
// WithSizeLimitBytes caps the size, in bytes, of any individual file the
// directory loader will read; a non-positive limit disables the check.
func (d *dirLoader) WithSizeLimitBytes(limit int64) DirectoryLoader {
	d.maxSizeLimitBytes = limit
	return d
}
func formatPath(fileName string, root string, pathFormat PathFormat) string {
switch pathFormat {
case SlashRooted:
@@ -206,6 +216,9 @@ func (d *dirLoader) NextFile() (*Descriptor, error) {
if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) {
return nil
}
if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes {
return fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(path, "/"), info.Size(), d.maxSizeLimitBytes)
}
d.files = append(d.files, path)
} else if info != nil && info.Mode().IsDir() {
if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) {
@@ -230,19 +243,20 @@ func (d *dirLoader) NextFile() (*Descriptor, error) {
fh := newLazyFile(fileName)
cleanedPath := formatPath(fileName, d.root, d.pathFormat)
f := newDescriptor(filepath.Join(d.root, cleanedPath), cleanedPath, fh).withCloser(fh)
f := NewDescriptor(filepath.Join(d.root, cleanedPath), cleanedPath, fh).WithCloser(fh)
return f, nil
}
type tarballLoader struct {
baseURL string
r io.Reader
tr *tar.Reader
files []file
idx int
filter filter.LoaderFilter
skipDir map[string]struct{}
pathFormat PathFormat
baseURL string
r io.Reader
tr *tar.Reader
files []file
idx int
filter filter.LoaderFilter
skipDir map[string]struct{}
pathFormat PathFormat
maxSizeLimitBytes int64
}
type file struct {
@@ -285,6 +299,12 @@ func (t *tarballLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader {
return t
}
// WithSizeLimitBytes caps the size, in bytes, of any individual entry the
// tarball loader will read; a non-positive limit disables the check.
func (t *tarballLoader) WithSizeLimitBytes(limit int64) DirectoryLoader {
	t.maxSizeLimitBytes = limit
	return t
}
// NextFile iterates to the next file in the directory tree
// and returns a file Descriptor for the file.
func (t *tarballLoader) NextFile() (*Descriptor, error) {
@@ -306,6 +326,7 @@ func (t *tarballLoader) NextFile() (*Descriptor, error) {
for {
header, err := t.tr.Next()
if err == io.EOF {
break
}
@@ -343,6 +364,10 @@ func (t *tarballLoader) NextFile() (*Descriptor, error) {
}
}
if t.maxSizeLimitBytes > 0 && header.Size > t.maxSizeLimitBytes {
return nil, fmt.Errorf(maxSizeLimitBytesErrMsg, header.Name, header.Size, t.maxSizeLimitBytes)
}
f := file{name: header.Name}
var buf bytes.Buffer
@@ -372,16 +397,14 @@ func (t *tarballLoader) NextFile() (*Descriptor, error) {
t.idx++
cleanedPath := formatPath(f.name, "", t.pathFormat)
d := newDescriptor(filepath.Join(t.baseURL, cleanedPath), cleanedPath, f.reader)
d := NewDescriptor(filepath.Join(t.baseURL, cleanedPath), cleanedPath, f.reader)
return d, nil
}
// Next implements the storage.Iterator interface.
// It iterates to the next policy or data file in the directory tree
// and returns a storage.Update for the file.
func (it *iterator) Next() (*storage.Update, error) {
if it.files == nil {
it.files = []file{}

View File

@@ -19,12 +19,13 @@ const (
type dirLoaderFS struct {
sync.Mutex
filesystem fs.FS
files []string
idx int
filter filter.LoaderFilter
root string
pathFormat PathFormat
filesystem fs.FS
files []string
idx int
filter filter.LoaderFilter
root string
pathFormat PathFormat
maxSizeLimitBytes int64
}
// NewFSLoader returns a basic DirectoryLoader implementation
@@ -61,6 +62,10 @@ func (d *dirLoaderFS) walkDir(path string, dirEntry fs.DirEntry, err error) erro
return nil
}
if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes {
return fmt.Errorf("file %s size %d exceeds limit of %d", path, info.Size(), d.maxSizeLimitBytes)
}
d.files = append(d.files, path)
} else if dirEntry.Type().IsDir() {
if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) {
@@ -83,6 +88,12 @@ func (d *dirLoaderFS) WithPathFormat(pathFormat PathFormat) DirectoryLoader {
return d
}
// WithSizeLimitBytes caps the size, in bytes, of any individual file the
// fs.FS loader will read; a non-positive limit disables the check.
func (d *dirLoaderFS) WithSizeLimitBytes(limit int64) DirectoryLoader {
	d.maxSizeLimitBytes = limit
	return d
}
// NextFile iterates to the next file in the directory tree
// and returns a file Descriptor for the file.
func (d *dirLoaderFS) NextFile() (*Descriptor, error) {
@@ -111,6 +122,6 @@ func (d *dirLoaderFS) NextFile() (*Descriptor, error) {
}
cleanedPath := formatPath(fileName, d.root, d.pathFormat)
f := newDescriptor(cleanedPath, cleanedPath, fh).withCloser(fh)
f := NewDescriptor(cleanedPath, cleanedPath, fh).WithCloser(fh)
return f, nil
}

File diff suppressed because it is too large Load Diff

View File

@@ -26,20 +26,10 @@ type Opts struct {
// carry along their original source locations.
IgnoreLocations bool
// RegoV1 is equivalent to setting RegoVersion to ast.RegoV0Compat1.
// RegoV1 takes precedence over RegoVersion.
// Deprecated: use RegoVersion instead.
RegoV1 bool
// RegoVersion is the version of Rego to format code for.
RegoVersion ast.RegoVersion
}
func (o *Opts) effectiveRegoVersion() ast.RegoVersion {
if o.RegoV1 {
return ast.RegoV0CompatV1
}
return o.RegoVersion
}
// defaultLocationFile is the file name used in `Ast()` for terms
// without a location, as could happen when pretty-printing the
// results of partial eval.
@@ -54,7 +44,7 @@ func Source(filename string, src []byte) ([]byte, error) {
func SourceWithOpts(filename string, src []byte, opts Opts) ([]byte, error) {
parserOpts := ast.ParserOptions{}
if opts.effectiveRegoVersion() == ast.RegoV1 {
if opts.RegoVersion == ast.RegoV1 {
// If the rego version is V1, we need to parse it as such, to allow for future keywords not being imported.
// Otherwise, we'll default to RegoV0
parserOpts.RegoVersion = ast.RegoV1
@@ -65,7 +55,7 @@ func SourceWithOpts(filename string, src []byte, opts Opts) ([]byte, error) {
return nil, err
}
if opts.effectiveRegoVersion() == ast.RegoV0CompatV1 || opts.effectiveRegoVersion() == ast.RegoV1 {
if opts.RegoVersion == ast.RegoV0CompatV1 || opts.RegoVersion == ast.RegoV1 {
errors := ast.CheckRegoV1(module)
if len(errors) > 0 {
return nil, errors
@@ -133,7 +123,7 @@ func AstWithOpts(x interface{}, opts Opts) ([]byte, error) {
o := fmtOpts{}
if opts.effectiveRegoVersion() == ast.RegoV0CompatV1 || opts.effectiveRegoVersion() == ast.RegoV1 {
if opts.RegoVersion == ast.RegoV0CompatV1 || opts.RegoVersion == ast.RegoV1 {
o.regoV1 = true
o.ifs = true
o.contains = true
@@ -194,13 +184,13 @@ func AstWithOpts(x interface{}, opts Opts) ([]byte, error) {
switch x := x.(type) {
case *ast.Module:
if opts.effectiveRegoVersion() == ast.RegoV1 {
if opts.RegoVersion == ast.RegoV1 {
x.Imports = filterRegoV1Import(x.Imports)
} else if opts.effectiveRegoVersion() == ast.RegoV0CompatV1 {
} else if opts.RegoVersion == ast.RegoV0CompatV1 {
x.Imports = ensureRegoV1Import(x.Imports)
}
if opts.effectiveRegoVersion() == ast.RegoV0CompatV1 || opts.effectiveRegoVersion() == ast.RegoV1 || moduleIsRegoV1Compatible(x) {
if opts.RegoVersion == ast.RegoV0CompatV1 || opts.RegoVersion == ast.RegoV1 || moduleIsRegoV1Compatible(x) {
x.Imports = future.FilterFutureImports(x.Imports)
} else {
for kw := range extraFutureKeywordImports {

View File

@@ -89,6 +89,10 @@ func LoadWasmResolversFromStore(ctx context.Context, store storage.Store, txn st
// LoadBundleFromDisk loads a previously persisted activated bundle from disk
func LoadBundleFromDisk(path, name string, bvc *bundle.VerificationConfig) (*bundle.Bundle, error) {
return LoadBundleFromDiskForRegoVersion(ast.RegoV0, path, name, bvc)
}
func LoadBundleFromDiskForRegoVersion(regoVersion ast.RegoVersion, path, name string, bvc *bundle.VerificationConfig) (*bundle.Bundle, error) {
bundlePath := filepath.Join(path, name, "bundle.tar.gz")
if _, err := os.Stat(bundlePath); err == nil {
@@ -98,7 +102,8 @@ func LoadBundleFromDisk(path, name string, bvc *bundle.VerificationConfig) (*bun
}
defer f.Close()
r := bundle.NewCustomReader(bundle.NewTarballLoaderWithBaseURL(f, ""))
r := bundle.NewCustomReader(bundle.NewTarballLoaderWithBaseURL(f, "")).
WithRegoVersion(regoVersion)
if bvc != nil {
r = r.WithBundleVerificationConfig(bvc)

View File

@@ -28,6 +28,7 @@ type InsertAndCompileOptions struct {
Bundles map[string]*bundle.Bundle
MaxErrors int
EnablePrintStatements bool
ParserOptions ast.ParserOptions
}
// InsertAndCompileResult contains the output of the operation.
@@ -58,13 +59,14 @@ func InsertAndCompile(ctx context.Context, opts InsertAndCompileOptions) (*Inser
m := metrics.New()
activation := &bundle.ActivateOpts{
Ctx: ctx,
Store: opts.Store,
Txn: opts.Txn,
Compiler: compiler,
Metrics: m,
Bundles: opts.Bundles,
ExtraModules: policies,
Ctx: ctx,
Store: opts.Store,
Txn: opts.Txn,
Compiler: compiler,
Metrics: m,
Bundles: opts.Bundles,
ExtraModules: policies,
ParserOptions: opts.ParserOptions,
}
err := bundle.Activate(activation)

View File

@@ -102,11 +102,6 @@ type FileLoader interface {
WithProcessAnnotation(bool) FileLoader
WithCapabilities(*ast.Capabilities) FileLoader
WithJSONOptions(*astJSON.Options) FileLoader
// WithRegoV1Compatible
// Deprecated: use WithRegoVersion instead
WithRegoV1Compatible(bool) FileLoader
WithRegoVersion(ast.RegoVersion) FileLoader
}
@@ -187,14 +182,6 @@ func (fl *fileLoader) WithJSONOptions(opts *astJSON.Options) FileLoader {
return fl
}
// WithRegoV1Compatible enforces Rego v0 with Rego v1 compatibility.
// See ParserOptions.RegoV1Compatible for more details.
// Deprecated: use WithRegoVersion instead
func (fl *fileLoader) WithRegoV1Compatible(compatible bool) FileLoader {
fl.opts.RegoV1Compatible = compatible
return fl
}
// WithRegoVersion sets the ast.RegoVersion to use when parsing and compiling modules.
func (fl *fileLoader) WithRegoVersion(version ast.RegoVersion) FileLoader {
fl.opts.RegoVersion = version
@@ -270,7 +257,7 @@ func (fl fileLoader) AsBundle(path string) (*bundle.Bundle, error) {
WithProcessAnnotations(fl.opts.ProcessAnnotation).
WithCapabilities(fl.opts.Capabilities).
WithJSONOptions(fl.opts.JSONOptions).
WithRegoVersion(fl.opts.EffectiveRegoVersion())
WithRegoVersion(fl.opts.RegoVersion)
// For bundle directories add the full path in front of module file names
// to simplify debugging.

View File

@@ -210,6 +210,7 @@ type Manager struct {
reporter *report.Reporter
opaReportNotifyCh chan struct{}
stop chan chan struct{}
parserOptions ast.ParserOptions
}
type managerContextKey string
@@ -395,6 +396,13 @@ func WithHooks(hs hooks.Hooks) func(*Manager) {
}
}
// WithParserOptions sets the parser options to be used by the plugin manager.
func WithParserOptions(opts ast.ParserOptions) func(*Manager) {
	return func(mgr *Manager) {
		mgr.parserOptions = opts
	}
}
// WithEnableTelemetry controls whether OPA will send telemetry reports to an external service.
func WithEnableTelemetry(enableTelemetry bool) func(*Manager) {
return func(m *Manager) {
@@ -876,7 +884,7 @@ func (m *Manager) onCommit(ctx context.Context, txn storage.Transaction, event s
// compiler on the context but the server does not (nor would users
// implementing their own policy loading.)
if compiler == nil && event.PolicyChanged() {
compiler, _ = loadCompilerFromStore(ctx, m.Store, txn, m.enablePrintStatements)
compiler, _ = loadCompilerFromStore(ctx, m.Store, txn, m.enablePrintStatements, m.ParserOptions())
}
if compiler != nil {
@@ -913,7 +921,7 @@ func (m *Manager) onCommit(ctx context.Context, txn storage.Transaction, event s
}
}
func loadCompilerFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, enablePrintStatements bool) (*ast.Compiler, error) {
func loadCompilerFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, enablePrintStatements bool, popts ast.ParserOptions) (*ast.Compiler, error) {
policies, err := store.ListPolicies(ctx, txn)
if err != nil {
return nil, err
@@ -925,7 +933,7 @@ func loadCompilerFromStore(ctx context.Context, store storage.Store, txn storage
if err != nil {
return nil, err
}
module, err := ast.ParseModule(policy, string(bs))
module, err := ast.ParseModuleWithOpts(policy, string(bs), popts)
if err != nil {
return nil, err
}
@@ -1085,3 +1093,7 @@ func (m *Manager) sendOPAUpdateLoop(ctx context.Context) {
}
}
}
// ParserOptions returns the ast.ParserOptions the plugin manager was
// configured with (see WithParserOptions).
func (m *Manager) ParserOptions() ast.ParserOptions {
return m.parserOptions
}

View File

@@ -7,15 +7,20 @@ package cache
import (
"container/list"
"context"
"fmt"
"math"
"sync"
"time"
"github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/util"
)
const (
defaultMaxSizeBytes = int64(0) // unlimited
defaultMaxSizeBytes = int64(0) // unlimited
defaultForcedEvictionThresholdPercentage = int64(100) // trigger at max_size_bytes
defaultStaleEntryEvictionPeriodSeconds = int64(0) // never
)
// Config represents the configuration of the inter-query cache.
@@ -24,8 +29,13 @@ type Config struct {
}
// InterQueryBuiltinCacheConfig represents the configuration of the inter-query cache that built-in functions can utilize.
// MaxSizeBytes - max capacity of cache in bytes
// ForcedEvictionThresholdPercentage - capacity usage in percentage after which forced FIFO eviction starts
// StaleEntryEvictionPeriodSeconds - time period between end of previous and start of new stale entry eviction routine
type InterQueryBuiltinCacheConfig struct {
MaxSizeBytes *int64 `json:"max_size_bytes,omitempty"`
MaxSizeBytes *int64 `json:"max_size_bytes,omitempty"`
ForcedEvictionThresholdPercentage *int64 `json:"forced_eviction_threshold_percentage,omitempty"`
StaleEntryEvictionPeriodSeconds *int64 `json:"stale_entry_eviction_period_seconds,omitempty"`
}
// ParseCachingConfig returns the config for the inter-query cache.
@@ -33,7 +43,11 @@ func ParseCachingConfig(raw []byte) (*Config, error) {
if raw == nil {
maxSize := new(int64)
*maxSize = defaultMaxSizeBytes
return &Config{InterQueryBuiltinCache: InterQueryBuiltinCacheConfig{MaxSizeBytes: maxSize}}, nil
threshold := new(int64)
*threshold = defaultForcedEvictionThresholdPercentage
period := new(int64)
*period = defaultStaleEntryEvictionPeriodSeconds
return &Config{InterQueryBuiltinCache: InterQueryBuiltinCacheConfig{MaxSizeBytes: maxSize, ForcedEvictionThresholdPercentage: threshold, StaleEntryEvictionPeriodSeconds: period}}, nil
}
var config Config
@@ -55,6 +69,26 @@ func (c *Config) validateAndInjectDefaults() error {
*maxSize = defaultMaxSizeBytes
c.InterQueryBuiltinCache.MaxSizeBytes = maxSize
}
if c.InterQueryBuiltinCache.ForcedEvictionThresholdPercentage == nil {
threshold := new(int64)
*threshold = defaultForcedEvictionThresholdPercentage
c.InterQueryBuiltinCache.ForcedEvictionThresholdPercentage = threshold
} else {
threshold := *c.InterQueryBuiltinCache.ForcedEvictionThresholdPercentage
if threshold < 0 || threshold > 100 {
return fmt.Errorf("invalid forced_eviction_threshold_percentage %v", threshold)
}
}
if c.InterQueryBuiltinCache.StaleEntryEvictionPeriodSeconds == nil {
period := new(int64)
*period = defaultStaleEntryEvictionPeriodSeconds
c.InterQueryBuiltinCache.StaleEntryEvictionPeriodSeconds = period
} else {
period := *c.InterQueryBuiltinCache.StaleEntryEvictionPeriodSeconds
if period < 0 {
return fmt.Errorf("invalid stale_entry_eviction_period_seconds %v", period)
}
}
return nil
}
@@ -68,23 +102,55 @@ type InterQueryCacheValue interface {
type InterQueryCache interface {
Get(key ast.Value) (value InterQueryCacheValue, found bool)
Insert(key ast.Value, value InterQueryCacheValue) int
InsertWithExpiry(key ast.Value, value InterQueryCacheValue, expiresAt time.Time) int
Delete(key ast.Value)
UpdateConfig(config *Config)
Clone(value InterQueryCacheValue) (InterQueryCacheValue, error)
}
// NewInterQueryCache returns a new inter-query cache.
// The cache uses a FIFO eviction policy when it reaches the forced eviction threshold.
// Parameters:
//
// config - to configure the InterQueryCache
func NewInterQueryCache(config *Config) InterQueryCache {
return &cache{
items: map[string]cacheItem{},
usage: 0,
config: config,
l: list.New(),
return newCache(config)
}
// NewInterQueryCacheWithContext returns a new inter-query cache with context.
// The cache uses a combination of FIFO eviction policy when it reaches the forced eviction threshold
// and a periodic cleanup routine to remove stale entries that exceed their expiration time, if specified.
// If configured with a zero stale_entry_eviction_period_seconds value, the stale entry cleanup routine is disabled.
//
// Parameters:
//
// ctx - used to control lifecycle of the stale entry cleanup routine
// config - to configure the InterQueryCache
func NewInterQueryCacheWithContext(ctx context.Context, config *Config) InterQueryCache {
iqCache := newCache(config)
if iqCache.staleEntryEvictionTimePeriodSeconds() > 0 {
cleanupTicker := time.NewTicker(time.Duration(iqCache.staleEntryEvictionTimePeriodSeconds()) * time.Second)
go func() {
for {
select {
case <-cleanupTicker.C:
// Stop the ticker while scanning and start a fresh one afterwards, so
// the next cleanup is scheduled a full period after this one finishes
// rather than firing again mid-scan.
cleanupTicker.Stop()
iqCache.cleanStaleValues()
cleanupTicker = time.NewTicker(time.Duration(iqCache.staleEntryEvictionTimePeriodSeconds()) * time.Second)
case <-ctx.Done():
// Context cancellation is the only exit path for this goroutine.
cleanupTicker.Stop()
return
}
}
}()
}
return iqCache
}
type cacheItem struct {
value InterQueryCacheValue
expiresAt time.Time
keyElement *list.Element
}
@@ -96,11 +162,26 @@ type cache struct {
mtx sync.Mutex
}
// Insert inserts a key k into the cache with value v.
func (c *cache) Insert(k ast.Value, v InterQueryCacheValue) (dropped int) {
func newCache(config *Config) *cache {
return &cache{
items: map[string]cacheItem{},
usage: 0,
config: config,
l: list.New(),
}
}
// InsertWithExpiry inserts a key k into the cache with value v with an expiration time expiresAt.
// A zero time value for expiresAt indicates no expiry
func (c *cache) InsertWithExpiry(k ast.Value, v InterQueryCacheValue, expiresAt time.Time) (dropped int) {
c.mtx.Lock()
defer c.mtx.Unlock()
return c.unsafeInsert(k, v)
return c.unsafeInsert(k, v, expiresAt)
}
// Insert inserts a key k into the cache with value v with no expiration time.
func (c *cache) Insert(k ast.Value, v InterQueryCacheValue) (dropped int) {
return c.InsertWithExpiry(k, v, time.Time{})
}
// Get returns the value in the cache for k.
@@ -137,10 +218,9 @@ func (c *cache) Clone(value InterQueryCacheValue) (InterQueryCacheValue, error)
return c.unsafeClone(value)
}
func (c *cache) unsafeInsert(k ast.Value, v InterQueryCacheValue) (dropped int) {
func (c *cache) unsafeInsert(k ast.Value, v InterQueryCacheValue, expiresAt time.Time) (dropped int) {
size := v.SizeInBytes()
limit := c.maxSizeBytes()
limit := int64(math.Ceil(float64(c.forcedEvictionThresholdPercentage())/100.0) * (float64(c.maxSizeBytes())))
if limit > 0 {
if size > limit {
dropped++
@@ -159,6 +239,7 @@ func (c *cache) unsafeInsert(k ast.Value, v InterQueryCacheValue) (dropped int)
c.items[k.String()] = cacheItem{
value: v,
expiresAt: expiresAt,
keyElement: c.l.PushBack(k),
}
c.usage += size
@@ -191,3 +272,32 @@ func (c *cache) maxSizeBytes() int64 {
}
return *c.config.InterQueryBuiltinCache.MaxSizeBytes
}
func (c *cache) forcedEvictionThresholdPercentage() int64 {
if c.config == nil {
return defaultForcedEvictionThresholdPercentage
}
return *c.config.InterQueryBuiltinCache.ForcedEvictionThresholdPercentage
}
func (c *cache) staleEntryEvictionTimePeriodSeconds() int64 {
if c.config == nil {
return defaultStaleEntryEvictionPeriodSeconds
}
return *c.config.InterQueryBuiltinCache.StaleEntryEvictionPeriodSeconds
}
func (c *cache) cleanStaleValues() (dropped int) {
c.mtx.Lock()
defer c.mtx.Unlock()
for key := c.l.Front(); key != nil; {
nextKey := key.Next()
// if expiresAt is zero, the item doesn't have an expiry
if ea := c.items[(key.Value.(ast.Value)).String()].expiresAt; !ea.IsZero() && ea.Before(time.Now()) {
c.unsafeDelete(key.Value.(ast.Value))
dropped++
}
key = nextKey
}
return dropped
}

View File

@@ -888,7 +888,7 @@ func (c *interQueryCache) checkHTTPSendInterQueryCache() (ast.Value, error) {
pcv = cachedRespData
}
c.bctx.InterQueryBuiltinCache.Insert(c.key, pcv)
c.bctx.InterQueryBuiltinCache.InsertWithExpiry(c.key, pcv, cachedRespData.ExpiresAt)
return cachedRespData.formatToAST(c.forceJSONDecode, c.forceYAMLDecode)
}
@@ -924,18 +924,19 @@ func insertIntoHTTPSendInterQueryCache(bctx BuiltinContext, key ast.Value, resp
}
var pcv cache.InterQueryCacheValue
var pcvData *interQueryCacheData
if cachingMode == defaultCachingMode {
pcv, err = newInterQueryCacheValue(bctx, resp, respBody, cacheParams)
pcv, pcvData, err = newInterQueryCacheValue(bctx, resp, respBody, cacheParams)
} else {
pcv, err = newInterQueryCacheData(bctx, resp, respBody, cacheParams)
pcvData, err = newInterQueryCacheData(bctx, resp, respBody, cacheParams)
pcv = pcvData
}
if err != nil {
return err
}
requestCache.Insert(key, pcv)
requestCache.InsertWithExpiry(key, pcv, pcvData.ExpiresAt)
return nil
}
@@ -1030,17 +1031,17 @@ type interQueryCacheValue struct {
Data []byte
}
func newInterQueryCacheValue(bctx BuiltinContext, resp *http.Response, respBody []byte, cacheParams *forceCacheParams) (*interQueryCacheValue, error) {
func newInterQueryCacheValue(bctx BuiltinContext, resp *http.Response, respBody []byte, cacheParams *forceCacheParams) (*interQueryCacheValue, *interQueryCacheData, error) {
data, err := newInterQueryCacheData(bctx, resp, respBody, cacheParams)
if err != nil {
return nil, err
return nil, nil, err
}
b, err := json.Marshal(data)
if err != nil {
return nil, err
return nil, nil, err
}
return &interQueryCacheValue{Data: b}, nil
return &interQueryCacheValue{Data: b}, data, nil
}
func (cb interQueryCacheValue) Clone() (cache.InterQueryCacheValue, error) {

View File

@@ -11,7 +11,7 @@ import (
)
// Version is the canonical version of OPA.
var Version = "0.60.0"
var Version = "0.61.0"
// GoVersion is the version of Go this was built with
var GoVersion = runtime.Version()

View File

@@ -33,6 +33,34 @@ type Spec struct {
ZOS *ZOS `json:"zos,omitempty" platform:"zos"`
}
// Scheduler represents the scheduling attributes for a process. It is based on
// the Linux sched_setattr(2) syscall.
type Scheduler struct {
// Policy represents the scheduling policy (e.g., SCHED_FIFO, SCHED_RR, SCHED_OTHER).
Policy LinuxSchedulerPolicy `json:"policy"`
// Nice is the nice value for the process, which affects its priority.
Nice int32 `json:"nice,omitempty"`
// Priority represents the static priority of the process.
Priority int32 `json:"priority,omitempty"`
// Flags is an array of scheduling flags.
Flags []LinuxSchedulerFlag `json:"flags,omitempty"`
// The following ones are used by the DEADLINE scheduler.
// Runtime is the amount of time in nanoseconds during which the process
// is allowed to run in a given period.
Runtime uint64 `json:"runtime,omitempty"`
// Deadline is the absolute deadline for the process to complete its execution.
Deadline uint64 `json:"deadline,omitempty"`
// Period is the length of the period in nanoseconds used for determining the process runtime.
Period uint64 `json:"period,omitempty"`
}
// Process contains information to start a specific application inside the container.
type Process struct {
// Terminal creates an interactive terminal for the container.
@@ -60,8 +88,12 @@ type Process struct {
ApparmorProfile string `json:"apparmorProfile,omitempty" platform:"linux"`
// Specify an oom_score_adj for the container.
OOMScoreAdj *int `json:"oomScoreAdj,omitempty" platform:"linux"`
// Scheduler specifies the scheduling attributes for a process
Scheduler *Scheduler `json:"scheduler,omitempty" platform:"linux"`
// SelinuxLabel specifies the selinux context that the container process is run as.
SelinuxLabel string `json:"selinuxLabel,omitempty" platform:"linux"`
// IOPriority contains the I/O priority settings for the cgroup.
IOPriority *LinuxIOPriority `json:"ioPriority,omitempty" platform:"linux"`
}
// LinuxCapabilities specifies the list of allowed capabilities that are kept for a process.
@@ -79,6 +111,22 @@ type LinuxCapabilities struct {
Ambient []string `json:"ambient,omitempty" platform:"linux"`
}
// IOPriority represents I/O priority settings for the container's processes within the process group.
type LinuxIOPriority struct {
Class IOPriorityClass `json:"class"`
Priority int `json:"priority"`
}
// IOPriorityClass represents an I/O scheduling class.
type IOPriorityClass string
// Possible values for IOPriorityClass.
const (
IOPRIO_CLASS_RT IOPriorityClass = "IOPRIO_CLASS_RT"
IOPRIO_CLASS_BE IOPriorityClass = "IOPRIO_CLASS_BE"
IOPRIO_CLASS_IDLE IOPriorityClass = "IOPRIO_CLASS_IDLE"
)
// Box specifies dimensions of a rectangle. Used for specifying the size of a console.
type Box struct {
// Height is the vertical dimension of a box.
@@ -191,6 +239,8 @@ type Linux struct {
IntelRdt *LinuxIntelRdt `json:"intelRdt,omitempty"`
// Personality contains configuration for the Linux personality syscall
Personality *LinuxPersonality `json:"personality,omitempty"`
// TimeOffsets specifies the offset for supporting time namespaces.
TimeOffsets map[string]LinuxTimeOffset `json:"timeOffsets,omitempty"`
}
// LinuxNamespace is the configuration for a Linux namespace
@@ -220,6 +270,8 @@ const (
UserNamespace LinuxNamespaceType = "user"
// CgroupNamespace for isolating cgroup hierarchies
CgroupNamespace LinuxNamespaceType = "cgroup"
// TimeNamespace for isolating the clocks
TimeNamespace LinuxNamespaceType = "time"
)
// LinuxIDMapping specifies UID/GID mappings
@@ -232,6 +284,14 @@ type LinuxIDMapping struct {
Size uint32 `json:"size"`
}
// LinuxTimeOffset specifies the offset for Time Namespace
type LinuxTimeOffset struct {
// Secs is the offset of clock (in secs) in the container
Secs int64 `json:"secs,omitempty"`
// Nanosecs is the additional offset for Secs (in nanosecs)
Nanosecs uint32 `json:"nanosecs,omitempty"`
}
// POSIXRlimit type and restrictions
type POSIXRlimit struct {
// Type of the rlimit to set
@@ -242,12 +302,13 @@ type POSIXRlimit struct {
Soft uint64 `json:"soft"`
}
// LinuxHugepageLimit structure corresponds to limiting kernel hugepages
// LinuxHugepageLimit structure corresponds to limiting kernel hugepages.
// Default to reservation limits if supported. Otherwise fallback to page fault limits.
type LinuxHugepageLimit struct {
// Pagesize is the hugepage size
// Format: "<size><unit-prefix>B' (e.g. 64KB, 2MB, 1GB, etc.)
// Pagesize is the hugepage size.
// Format: "<size><unit-prefix>B' (e.g. 64KB, 2MB, 1GB, etc.).
Pagesize string `json:"pageSize"`
// Limit is the limit of "hugepagesize" hugetlb usage
// Limit is the limit of "hugepagesize" hugetlb reservations (if supported) or usage.
Limit uint64 `json:"limit"`
}
@@ -382,7 +443,7 @@ type LinuxResources struct {
Pids *LinuxPids `json:"pids,omitempty"`
// BlockIO restriction configuration
BlockIO *LinuxBlockIO `json:"blockIO,omitempty"`
// Hugetlb limit (in bytes)
// Hugetlb limits (in bytes). Default to reservation limits if supported.
HugepageLimits []LinuxHugepageLimit `json:"hugepageLimits,omitempty"`
// Network restriction configuration
Network *LinuxNetwork `json:"network,omitempty"`
@@ -776,3 +837,43 @@ type ZOSDevice struct {
// Gid of the device.
GID *uint32 `json:"gid,omitempty"`
}
// LinuxSchedulerPolicy represents different scheduling policies used with the Linux Scheduler
type LinuxSchedulerPolicy string
const (
// SchedOther is the default scheduling policy
SchedOther LinuxSchedulerPolicy = "SCHED_OTHER"
// SchedFIFO is the First-In-First-Out scheduling policy
SchedFIFO LinuxSchedulerPolicy = "SCHED_FIFO"
// SchedRR is the Round-Robin scheduling policy
SchedRR LinuxSchedulerPolicy = "SCHED_RR"
// SchedBatch is the Batch scheduling policy
SchedBatch LinuxSchedulerPolicy = "SCHED_BATCH"
// SchedISO is the Isolation scheduling policy
SchedISO LinuxSchedulerPolicy = "SCHED_ISO"
// SchedIdle is the Idle scheduling policy
SchedIdle LinuxSchedulerPolicy = "SCHED_IDLE"
// SchedDeadline is the Deadline scheduling policy
SchedDeadline LinuxSchedulerPolicy = "SCHED_DEADLINE"
)
// LinuxSchedulerFlag represents the flags used by the Linux Scheduler.
type LinuxSchedulerFlag string
const (
// SchedFlagResetOnFork represents the reset on fork scheduling flag
SchedFlagResetOnFork LinuxSchedulerFlag = "SCHED_FLAG_RESET_ON_FORK"
// SchedFlagReclaim represents the reclaim scheduling flag
SchedFlagReclaim LinuxSchedulerFlag = "SCHED_FLAG_RECLAIM"
// SchedFlagDLOverrun represents the deadline overrun scheduling flag
SchedFlagDLOverrun LinuxSchedulerFlag = "SCHED_FLAG_DL_OVERRUN"
// SchedFlagKeepPolicy represents the keep policy scheduling flag
SchedFlagKeepPolicy LinuxSchedulerFlag = "SCHED_FLAG_KEEP_POLICY"
// SchedFlagKeepParams represents the keep parameters scheduling flag
SchedFlagKeepParams LinuxSchedulerFlag = "SCHED_FLAG_KEEP_PARAMS"
// SchedFlagUtilClampMin represents the utilization clamp minimum scheduling flag
SchedFlagUtilClampMin LinuxSchedulerFlag = "SCHED_FLAG_UTIL_CLAMP_MIN"
// SchedFlagUtilClampMin represents the utilization clamp maximum scheduling flag
SchedFlagUtilClampMax LinuxSchedulerFlag = "SCHED_FLAG_UTIL_CLAMP_MAX"
)

View File

@@ -11,7 +11,7 @@ const (
VersionPatch = 0
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = "-rc.1"
VersionDev = ""
)
// Version is the specification version that the package types support.

6
vendor/modules.txt vendored
View File

@@ -1101,7 +1101,7 @@ github.com/google/pprof/profile
# github.com/google/renameio/v2 v2.0.0
## explicit; go 1.13
github.com/google/renameio/v2
# github.com/google/uuid v1.5.0
# github.com/google/uuid v1.6.0
## explicit
github.com/google/uuid
# github.com/gookit/color v1.5.4
@@ -1490,7 +1490,7 @@ github.com/onsi/gomega/matchers/support/goraph/edge
github.com/onsi/gomega/matchers/support/goraph/node
github.com/onsi/gomega/matchers/support/goraph/util
github.com/onsi/gomega/types
# github.com/open-policy-agent/opa v0.60.0
# github.com/open-policy-agent/opa v0.61.0
## explicit; go 1.19
github.com/open-policy-agent/opa/ast
github.com/open-policy-agent/opa/ast/internal/scanner
@@ -1580,7 +1580,7 @@ github.com/open-policy-agent/opa/tracing
github.com/open-policy-agent/opa/types
github.com/open-policy-agent/opa/util
github.com/open-policy-agent/opa/version
# github.com/opencontainers/runtime-spec v1.1.0-rc.1
# github.com/opencontainers/runtime-spec v1.1.0
## explicit
github.com/opencontainers/runtime-spec/specs-go
# github.com/opentracing/opentracing-go v1.2.0