Mirror of https://github.com/opencloud-eu/opencloud.git (synced 2025-12-31 01:10:20 -06:00)
Bump github.com/open-policy-agent/opa from 0.69.0 to 0.70.0
Bumps [github.com/open-policy-agent/opa](https://github.com/open-policy-agent/opa) from 0.69.0 to 0.70.0.
- [Release notes](https://github.com/open-policy-agent/opa/releases)
- [Changelog](https://github.com/open-policy-agent/opa/blob/main/CHANGELOG.md)
- [Commits](https://github.com/open-policy-agent/opa/compare/v0.69.0...v0.70.0)

---
updated-dependencies:
- dependency-name: github.com/open-policy-agent/opa
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
committed by Ralf Haferkamp
parent b63841cd62
commit 8a4523b406
2 go.mod

@@ -65,7 +65,7 @@ require (
github.com/onsi/ginkgo v1.16.5
github.com/onsi/ginkgo/v2 v2.21.0
github.com/onsi/gomega v1.35.0
github.com/open-policy-agent/opa v0.69.0
github.com/open-policy-agent/opa v0.70.0
github.com/orcaman/concurrent-map v1.0.0
github.com/owncloud/libre-graph-api-go v1.0.5-0.20240829135935-80dc00d6f5ea
github.com/pkg/errors v0.9.1
4 go.sum

@@ -877,8 +877,8 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.35.0 h1:xuM1M/UvMp9BCdS4hojhS9/4jEuVqS9Er3bqupeaoPM=
github.com/onsi/gomega v1.35.0/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/open-policy-agent/opa v0.69.0 h1:s2igLw2Z6IvGWGuXSfugWkVultDMsM9pXiDuMp7ckWw=
github.com/open-policy-agent/opa v0.69.0/go.mod h1:+qyXJGkpEJ6kpB1kGo8JSwHtVXbTdsGdQYPWWNYNj+4=
github.com/open-policy-agent/opa v0.70.0 h1:B3cqCN2iQAyKxK6+GI+N40uqkin+wzIrM7YA60t9x1U=
github.com/open-policy-agent/opa v0.70.0/go.mod h1:Y/nm5NY0BX0BqjBriKUiV81sCl8XOjjvqQG7dXrggtI=
github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
3 vendor/github.com/open-policy-agent/opa/ast/check.go generated vendored

@@ -231,7 +231,8 @@ func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) {
}

ref := schemaAnnot.Path
if ref == nil && refType == nil {
// if we do not have a ref or a reftype, we should not evaluate this rule.
if ref == nil || refType == nil {
continue
}
26 vendor/github.com/open-policy-agent/opa/ast/parser.go generated vendored

@@ -17,7 +17,7 @@ import (
"strings"
"unicode/utf8"

"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"

"github.com/open-policy-agent/opa/ast/internal/scanner"
"github.com/open-policy-agent/opa/ast/internal/tokens"
@@ -780,6 +780,8 @@ func (p *Parser) parseRules() []*Rule {
case usesContains:
rule.Body = NewBody(NewExpr(BooleanTerm(true).SetLocation(rule.Location)).SetLocation(rule.Location))
rule.generatedBody = true
rule.Location = rule.Head.Location

return []*Rule{&rule}

default:
@@ -2309,12 +2311,10 @@ type rawAnnotation struct {
Organizations []string `yaml:"organizations"`
RelatedResources []interface{} `yaml:"related_resources"`
Authors []interface{} `yaml:"authors"`
Schemas []rawSchemaAnnotation `yaml:"schemas"`
Schemas []map[string]any `yaml:"schemas"`
Custom map[string]interface{} `yaml:"custom"`
}

type rawSchemaAnnotation map[string]interface{}

type metadataParser struct {
buf *bytes.Buffer
comments []*Comment
@@ -2345,9 +2345,8 @@ func (b *metadataParser) Parse() (*Annotations, error) {
var comment *Comment
match := yamlLineErrRegex.FindStringSubmatch(err.Error())
if len(match) == 2 {
n, err2 := strconv.Atoi(match[1])
index, err2 := strconv.Atoi(match[1])
if err2 == nil {
index := n - 1 // line numbering is 1-based so subtract one from row
if index >= len(b.comments) {
comment = b.comments[len(b.comments)-1]
} else {
@@ -2397,7 +2396,7 @@ func (b *metadataParser) Parse() (*Annotations, error) {
if err != nil {
return nil, err
}
case map[interface{}]interface{}:
case map[string]any:
w, err := convertYAMLMapKeyTypes(v, nil)
if err != nil {
return nil, fmt.Errorf("invalid schema definition: %w", err)
@@ -2446,8 +2445,9 @@ func (b *metadataParser) Parse() (*Annotations, error) {
return &result, nil
}

// augmentYamlError augments a YAML error with hints intended to help the user figure out the cause of an otherwise cryptic error.
// These are hints, instead of proper errors, because they are educated guesses, and aren't guaranteed to be correct.
// augmentYamlError augments a YAML error with hints intended to help the user figure out the cause of an otherwise
// cryptic error. These are hints, instead of proper errors, because they are educated guesses, and aren't guaranteed
// to be correct.
func augmentYamlError(err error, comments []*Comment) error {
// Adding hints for when key/value ':' separator isn't suffixed with a legal YAML space symbol
for _, comment := range comments {
@@ -2601,11 +2601,11 @@ func parseAuthorString(s string) (*AuthorAnnotation, error) {
return &AuthorAnnotation{Name: name, Email: email}, nil
}

func convertYAMLMapKeyTypes(x interface{}, path []string) (interface{}, error) {
func convertYAMLMapKeyTypes(x any, path []string) (any, error) {
var err error
switch x := x.(type) {
case map[interface{}]interface{}:
result := make(map[string]interface{}, len(x))
case map[any]any:
result := make(map[string]any, len(x))
for k, v := range x {
str, ok := k.(string)
if !ok {
@@ -2617,7 +2617,7 @@ func convertYAMLMapKeyTypes(x interface{}, path []string) (interface{}, error) {
}
}
return result, nil
case []interface{}:
case []any:
for i := range x {
x[i], err = convertYAMLMapKeyTypes(x[i], append(path, fmt.Sprintf("%d", i)))
if err != nil {
21 vendor/github.com/open-policy-agent/opa/ast/term.go generated vendored

@@ -1293,6 +1293,11 @@ func (arr *Array) Elem(i int) *Term {
return arr.elems[i]
}

// Set sets the element i of arr.
func (arr *Array) Set(i int, v *Term) {
arr.set(i, v)
}

// rehash updates the cached hash of arr.
func (arr *Array) rehash() {
arr.hash = 0
@@ -1306,6 +1311,7 @@ func (arr *Array) set(i int, v *Term) {
arr.ground = arr.ground && v.IsGround()
arr.elems[i] = v
arr.hashs[i] = v.Value.Hash()
arr.rehash()
}

// Slice returns a slice of arr starting from i index to j. -1
@@ -2560,6 +2566,8 @@ func (obj *object) insert(k, v *Term) {
}

curr.value = v

obj.rehash()
return
}
}
@@ -2584,6 +2592,19 @@ func (obj *object) insert(k, v *Term) {
}
}

func (obj *object) rehash() {
// obj.keys is considered truth, from which obj.hash and obj.elems are recalculated.

obj.hash = 0
obj.elems = make(map[int]*objectElem, len(obj.keys))

for _, elem := range obj.keys {
hash := elem.key.Hash()
obj.hash += hash + elem.value.Hash()
obj.elems[hash] = elem
}
}

func filterObject(o Value, filter Value) (Value, error) {
if filter.Compare(Null{}) == 0 {
return o, nil
63 vendor/github.com/open-policy-agent/opa/bundle/store.go generated vendored
@@ -59,9 +59,25 @@ func metadataPath(name string) storage.Path {
|
||||
return append(BundlesBasePath, name, "manifest", "metadata")
|
||||
}
|
||||
|
||||
func read(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (interface{}, error) {
|
||||
value, err := store.Read(ctx, txn, path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if astValue, ok := value.(ast.Value); ok {
|
||||
value, err = ast.JSON(astValue)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return value, nil
|
||||
}
|
||||
|
||||
// ReadBundleNamesFromStore will return a list of bundle names which have had their metadata stored.
|
||||
func ReadBundleNamesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) ([]string, error) {
|
||||
value, err := store.Read(ctx, txn, BundlesBasePath)
|
||||
value, err := read(ctx, store, txn, BundlesBasePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -153,7 +169,7 @@ func eraseWasmModulesFromStore(ctx context.Context, store storage.Store, txn sto
|
||||
// ReadWasmMetadataFromStore will read Wasm module resolver metadata from the store.
|
||||
func ReadWasmMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) ([]WasmResolver, error) {
|
||||
path := wasmEntrypointsPath(name)
|
||||
value, err := store.Read(ctx, txn, path)
|
||||
value, err := read(ctx, store, txn, path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -176,7 +192,7 @@ func ReadWasmMetadataFromStore(ctx context.Context, store storage.Store, txn sto
|
||||
// ReadWasmModulesFromStore will write Wasm module resolver metadata from the store.
|
||||
func ReadWasmModulesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string][]byte, error) {
|
||||
path := wasmModulePath(name)
|
||||
value, err := store.Read(ctx, txn, path)
|
||||
value, err := read(ctx, store, txn, path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -205,7 +221,7 @@ func ReadWasmModulesFromStore(ctx context.Context, store storage.Store, txn stor
|
||||
// If the bundle is not activated, this function will return
|
||||
// storage NotFound error.
|
||||
func ReadBundleRootsFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) ([]string, error) {
|
||||
value, err := store.Read(ctx, txn, rootsPath(name))
|
||||
value, err := read(ctx, store, txn, rootsPath(name))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -235,7 +251,7 @@ func ReadBundleRevisionFromStore(ctx context.Context, store storage.Store, txn s
|
||||
}
|
||||
|
||||
func readRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (string, error) {
|
||||
value, err := store.Read(ctx, txn, path)
|
||||
value, err := read(ctx, store, txn, path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -256,7 +272,7 @@ func ReadBundleMetadataFromStore(ctx context.Context, store storage.Store, txn s
|
||||
}
|
||||
|
||||
func readMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (map[string]interface{}, error) {
|
||||
value, err := store.Read(ctx, txn, path)
|
||||
value, err := read(ctx, store, txn, path)
|
||||
if err != nil {
|
||||
return nil, suppressNotFound(err)
|
||||
}
|
||||
@@ -277,7 +293,7 @@ func ReadBundleEtagFromStore(ctx context.Context, store storage.Store, txn stora
|
||||
}
|
||||
|
||||
func readEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (string, error) {
|
||||
value, err := store.Read(ctx, txn, path)
|
||||
value, err := read(ctx, store, txn, path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -544,14 +560,7 @@ func activateDeltaBundles(opts *ActivateOpts, bundles map[string]*Bundle) error
|
||||
return err
|
||||
}
|
||||
|
||||
bs, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("corrupt manifest data: %w", err)
|
||||
}
|
||||
|
||||
var manifest Manifest
|
||||
|
||||
err = util.UnmarshalJSON(bs, &manifest)
|
||||
manifest, err := valueToManifest(value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("corrupt manifest data: %w", err)
|
||||
}
|
||||
@@ -585,6 +594,30 @@ func activateDeltaBundles(opts *ActivateOpts, bundles map[string]*Bundle) error
|
||||
return nil
|
||||
}
|
||||
|
||||
func valueToManifest(v interface{}) (Manifest, error) {
|
||||
if astV, ok := v.(ast.Value); ok {
|
||||
var err error
|
||||
v, err = ast.JSON(astV)
|
||||
if err != nil {
|
||||
return Manifest{}, err
|
||||
}
|
||||
}
|
||||
|
||||
var manifest Manifest
|
||||
|
||||
bs, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return Manifest{}, err
|
||||
}
|
||||
|
||||
err = util.UnmarshalJSON(bs, &manifest)
|
||||
if err != nil {
|
||||
return Manifest{}, err
|
||||
}
|
||||
|
||||
return manifest, nil
|
||||
}
|
||||
|
||||
// erase bundles by name and roots. This will clear all policies and data at its roots and remove its
|
||||
// manifest from storage.
|
||||
func eraseBundles(ctx context.Context, store storage.Store, txn storage.Transaction, parserOpts ast.ParserOptions, names map[string]struct{}, roots map[string]struct{}) (map[string]*ast.Module, error) {
|
||||
|
||||
4843 vendor/github.com/open-policy-agent/opa/capabilities/v0.70.0.json generated vendored Normal file

File diff suppressed because it is too large
49 vendor/github.com/open-policy-agent/opa/plugins/rest/azure.go generated vendored
@@ -7,14 +7,16 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
azureIMDSEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
|
||||
defaultAPIVersion = "2018-02-01"
|
||||
defaultResource = "https://storage.azure.com/"
|
||||
timeout = 5 * time.Second
|
||||
azureIMDSEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
|
||||
defaultAPIVersion = "2018-02-01"
|
||||
defaultResource = "https://storage.azure.com/"
|
||||
timeout = 5 * time.Second
|
||||
defaultAPIVersionForAppServiceMsi = "2019-08-01"
|
||||
)
|
||||
|
||||
// azureManagedIdentitiesToken holds a token for managed identities for Azure resources
|
||||
@@ -41,12 +43,13 @@ func (e *azureManagedIdentitiesError) Error() string {
|
||||
|
||||
// azureManagedIdentitiesAuthPlugin uses an azureManagedIdentitiesToken.AccessToken for bearer authorization
|
||||
type azureManagedIdentitiesAuthPlugin struct {
|
||||
Endpoint string `json:"endpoint"`
|
||||
APIVersion string `json:"api_version"`
|
||||
Resource string `json:"resource"`
|
||||
ObjectID string `json:"object_id"`
|
||||
ClientID string `json:"client_id"`
|
||||
MiResID string `json:"mi_res_id"`
|
||||
Endpoint string `json:"endpoint"`
|
||||
APIVersion string `json:"api_version"`
|
||||
Resource string `json:"resource"`
|
||||
ObjectID string `json:"object_id"`
|
||||
ClientID string `json:"client_id"`
|
||||
MiResID string `json:"mi_res_id"`
|
||||
UseAppServiceMsi bool `json:"use_app_service_msi,omitempty"`
|
||||
}
|
||||
|
||||
func (ap *azureManagedIdentitiesAuthPlugin) NewClient(c Config) (*http.Client, error) {
|
||||
@@ -55,7 +58,13 @@ func (ap *azureManagedIdentitiesAuthPlugin) NewClient(c Config) (*http.Client, e
|
||||
}
|
||||
|
||||
if ap.Endpoint == "" {
|
||||
ap.Endpoint = azureIMDSEndpoint
|
||||
identityEndpoint := os.Getenv("IDENTITY_ENDPOINT")
|
||||
if identityEndpoint != "" {
|
||||
ap.UseAppServiceMsi = true
|
||||
ap.Endpoint = identityEndpoint
|
||||
} else {
|
||||
ap.Endpoint = azureIMDSEndpoint
|
||||
}
|
||||
}
|
||||
|
||||
if ap.Resource == "" {
|
||||
@@ -63,7 +72,11 @@ func (ap *azureManagedIdentitiesAuthPlugin) NewClient(c Config) (*http.Client, e
|
||||
}
|
||||
|
||||
if ap.APIVersion == "" {
|
||||
ap.APIVersion = defaultAPIVersion
|
||||
if ap.UseAppServiceMsi {
|
||||
ap.APIVersion = defaultAPIVersionForAppServiceMsi
|
||||
} else {
|
||||
ap.APIVersion = defaultAPIVersion
|
||||
}
|
||||
}
|
||||
|
||||
t, err := DefaultTLSConfig(c)
|
||||
@@ -78,6 +91,7 @@ func (ap *azureManagedIdentitiesAuthPlugin) Prepare(req *http.Request) error {
|
||||
token, err := azureManagedIdentitiesTokenRequest(
|
||||
ap.Endpoint, ap.APIVersion, ap.Resource,
|
||||
ap.ObjectID, ap.ClientID, ap.MiResID,
|
||||
ap.UseAppServiceMsi,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -90,6 +104,7 @@ func (ap *azureManagedIdentitiesAuthPlugin) Prepare(req *http.Request) error {
|
||||
// azureManagedIdentitiesTokenRequest fetches an azureManagedIdentitiesToken
|
||||
func azureManagedIdentitiesTokenRequest(
|
||||
endpoint, apiVersion, resource, objectID, clientID, miResID string,
|
||||
useAppServiceMsi bool,
|
||||
) (azureManagedIdentitiesToken, error) {
|
||||
var token azureManagedIdentitiesToken
|
||||
e := buildAzureManagedIdentitiesRequestPath(endpoint, apiVersion, resource, objectID, clientID, miResID)
|
||||
@@ -98,7 +113,15 @@ func azureManagedIdentitiesTokenRequest(
|
||||
if err != nil {
|
||||
return token, err
|
||||
}
|
||||
request.Header.Add("Metadata", "true")
|
||||
if useAppServiceMsi {
|
||||
identityHeader := os.Getenv("IDENTITY_HEADER")
|
||||
if identityHeader == "" {
|
||||
return token, errors.New("azure managed identities auth: IDENTITY_HEADER env var not found")
|
||||
}
|
||||
request.Header.Add("x-identity-header", identityHeader)
|
||||
} else {
|
||||
request.Header.Add("Metadata", "true")
|
||||
}
|
||||
|
||||
httpClient := http.Client{Timeout: timeout}
|
||||
response, err := httpClient.Do(request)
|
||||
|
||||
12 vendor/github.com/open-policy-agent/opa/rego/rego.go generated vendored

@@ -579,6 +579,7 @@ type Rego struct {
compiler *ast.Compiler
store storage.Store
ownStore bool
ownStoreReadAst bool
txn storage.Transaction
metrics metrics.Metrics
queryTracers []topdown.QueryTracer
@@ -1007,6 +1008,15 @@ func Store(s storage.Store) func(r *Rego) {
}
}

// StoreReadAST returns an argument that sets whether the store should eagerly convert data to AST values.
//
// Only applicable when no store has been set on the Rego object through the Store option.
func StoreReadAST(enabled bool) func(r *Rego) {
return func(r *Rego) {
r.ownStoreReadAst = enabled
}
}

// Transaction returns an argument that sets the transaction to use for storage
// layer operations.
//
@@ -1266,7 +1276,7 @@ func New(options ...func(r *Rego)) *Rego {
}

if r.store == nil {
r.store = inmem.New()
r.store = inmem.NewWithOpts(inmem.OptReturnASTValuesOnRead(r.ownStoreReadAst))
r.ownStore = true
} else {
r.ownStore = false
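The StoreReadAST option added above is passed like any other rego option when no explicit store is supplied. A minimal sketch, assuming the standard rego package API; the policy, query and input below are illustrative and not taken from this repository:

package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/rego"
)

func main() {
	ctx := context.Background()

	// With StoreReadAST(true), the internally created inmem store keeps data
	// as ast.Value and returns AST values on read; per the doc comment above,
	// it only applies when no explicit Store option is given.
	r := rego.New(
		rego.Query("data.example.allow"),
		rego.Module("example.rego", `package example

allow := input.user == "admin"`),
		rego.Input(map[string]interface{}{"user": "admin"}),
		rego.StoreReadAST(true),
	)

	rs, err := r.Eval(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(rs[0].Expressions[0].Value) // true
}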
314 vendor/github.com/open-policy-agent/opa/storage/inmem/ast.go generated vendored Normal file
@@ -0,0 +1,314 @@
|
||||
// Copyright 2024 The OPA Authors. All rights reserved.
|
||||
// Use of this source code is governed by an Apache2
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package inmem
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/open-policy-agent/opa/ast"
|
||||
"github.com/open-policy-agent/opa/storage"
|
||||
"github.com/open-policy-agent/opa/storage/internal/errors"
|
||||
"github.com/open-policy-agent/opa/storage/internal/ptr"
|
||||
)
|
||||
|
||||
type updateAST struct {
|
||||
path storage.Path // data path modified by update
|
||||
remove bool // indicates whether update removes the value at path
|
||||
value ast.Value // value to add/replace at path (ignored if remove is true)
|
||||
}
|
||||
|
||||
func (u *updateAST) Path() storage.Path {
|
||||
return u.path
|
||||
}
|
||||
|
||||
func (u *updateAST) Remove() bool {
|
||||
return u.remove
|
||||
}
|
||||
|
||||
func (u *updateAST) Set(v interface{}) {
|
||||
if v, ok := v.(ast.Value); ok {
|
||||
u.value = v
|
||||
} else {
|
||||
panic("illegal value type") // FIXME: do conversion?
|
||||
}
|
||||
}
|
||||
|
||||
func (u *updateAST) Value() interface{} {
|
||||
return u.value
|
||||
}
|
||||
|
||||
func (u *updateAST) Relative(path storage.Path) dataUpdate {
|
||||
cpy := *u
|
||||
cpy.path = cpy.path[len(path):]
|
||||
return &cpy
|
||||
}
|
||||
|
||||
func (u *updateAST) Apply(v interface{}) interface{} {
|
||||
if len(u.path) == 0 {
|
||||
return u.value
|
||||
}
|
||||
|
||||
data, ok := v.(ast.Value)
|
||||
if !ok {
|
||||
panic(fmt.Errorf("illegal value type %T, expected ast.Value", v))
|
||||
}
|
||||
|
||||
if u.remove {
|
||||
newV, err := removeInAst(data, u.path)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return newV
|
||||
}
|
||||
|
||||
// If we're not removing, we're replacing (adds are turned into replaces during updateAST creation).
|
||||
newV, err := setInAst(data, u.path, u.value)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return newV
|
||||
}
|
||||
|
||||
func newUpdateAST(data interface{}, op storage.PatchOp, path storage.Path, idx int, value ast.Value) (*updateAST, error) {
|
||||
|
||||
switch data.(type) {
|
||||
case ast.Null, ast.Boolean, ast.Number, ast.String:
|
||||
return nil, errors.NewNotFoundError(path)
|
||||
}
|
||||
|
||||
switch data := data.(type) {
|
||||
case ast.Object:
|
||||
return newUpdateObjectAST(data, op, path, idx, value)
|
||||
|
||||
case *ast.Array:
|
||||
return newUpdateArrayAST(data, op, path, idx, value)
|
||||
}
|
||||
|
||||
return nil, &storage.Error{
|
||||
Code: storage.InternalErr,
|
||||
Message: "invalid data value encountered",
|
||||
}
|
||||
}
|
||||
|
||||
func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, idx int, value ast.Value) (*updateAST, error) {
|
||||
|
||||
if idx == len(path)-1 {
|
||||
if path[idx] == "-" || path[idx] == strconv.Itoa(data.Len()) {
|
||||
if op != storage.AddOp {
|
||||
return nil, invalidPatchError("%v: invalid patch path", path)
|
||||
}
|
||||
|
||||
cpy := data.Copy()
|
||||
cpy = cpy.Append(ast.NewTerm(value))
|
||||
return &updateAST{path[:len(path)-1], false, cpy}, nil
|
||||
}
|
||||
|
||||
pos, err := ptr.ValidateASTArrayIndex(data, path[idx], path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch op {
|
||||
case storage.AddOp:
|
||||
var results []*ast.Term
|
||||
for i := 0; i < data.Len(); i++ {
|
||||
if i == pos {
|
||||
results = append(results, ast.NewTerm(value))
|
||||
}
|
||||
results = append(results, data.Elem(i))
|
||||
}
|
||||
|
||||
return &updateAST{path[:len(path)-1], false, ast.NewArray(results...)}, nil
|
||||
|
||||
case storage.RemoveOp:
|
||||
var results []*ast.Term
|
||||
for i := 0; i < data.Len(); i++ {
|
||||
if i != pos {
|
||||
results = append(results, data.Elem(i))
|
||||
}
|
||||
}
|
||||
return &updateAST{path[:len(path)-1], false, ast.NewArray(results...)}, nil
|
||||
|
||||
default:
|
||||
var results []*ast.Term
|
||||
for i := 0; i < data.Len(); i++ {
|
||||
if i == pos {
|
||||
results = append(results, ast.NewTerm(value))
|
||||
} else {
|
||||
results = append(results, data.Elem(i))
|
||||
}
|
||||
}
|
||||
|
||||
return &updateAST{path[:len(path)-1], false, ast.NewArray(results...)}, nil
|
||||
}
|
||||
}
|
||||
|
||||
pos, err := ptr.ValidateASTArrayIndex(data, path[idx], path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newUpdateAST(data.Elem(pos).Value, op, path, idx+1, value)
|
||||
}
|
||||
|
||||
func newUpdateObjectAST(data ast.Object, op storage.PatchOp, path storage.Path, idx int, value ast.Value) (*updateAST, error) {
|
||||
key := ast.StringTerm(path[idx])
|
||||
val := data.Get(key)
|
||||
|
||||
if idx == len(path)-1 {
|
||||
switch op {
|
||||
case storage.ReplaceOp, storage.RemoveOp:
|
||||
if val == nil {
|
||||
return nil, errors.NewNotFoundError(path)
|
||||
}
|
||||
}
|
||||
return &updateAST{path, op == storage.RemoveOp, value}, nil
|
||||
}
|
||||
|
||||
if val != nil {
|
||||
return newUpdateAST(val.Value, op, path, idx+1, value)
|
||||
}
|
||||
|
||||
return nil, errors.NewNotFoundError(path)
|
||||
}
|
||||
|
||||
func interfaceToValue(v interface{}) (ast.Value, error) {
|
||||
if v, ok := v.(ast.Value); ok {
|
||||
return v, nil
|
||||
}
|
||||
return ast.InterfaceToValue(v)
|
||||
}
|
||||
|
||||
// setInAst updates the value in the AST at the given path with the given value.
|
||||
// Values can only be replaced in arrays, not added.
|
||||
// Values for new keys can be added to objects
|
||||
func setInAst(data ast.Value, path storage.Path, value ast.Value) (ast.Value, error) {
|
||||
if len(path) == 0 {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
switch data := data.(type) {
|
||||
case ast.Object:
|
||||
return setInAstObject(data, path, value)
|
||||
case *ast.Array:
|
||||
return setInAstArray(data, path, value)
|
||||
default:
|
||||
return nil, fmt.Errorf("illegal value type %T, expected ast.Object or ast.Array", data)
|
||||
}
|
||||
}
|
||||
|
||||
func setInAstObject(obj ast.Object, path storage.Path, value ast.Value) (ast.Value, error) {
|
||||
key := ast.StringTerm(path[0])
|
||||
|
||||
if len(path) == 1 {
|
||||
obj.Insert(key, ast.NewTerm(value))
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
child := obj.Get(key)
|
||||
newChild, err := setInAst(child.Value, path[1:], value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
obj.Insert(key, ast.NewTerm(newChild))
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func setInAstArray(arr *ast.Array, path storage.Path, value ast.Value) (ast.Value, error) {
|
||||
idx, err := strconv.Atoi(path[0])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("illegal array index %v: %v", path[0], err)
|
||||
}
|
||||
|
||||
if idx < 0 || idx >= arr.Len() {
|
||||
return arr, nil
|
||||
}
|
||||
|
||||
if len(path) == 1 {
|
||||
arr.Set(idx, ast.NewTerm(value))
|
||||
return arr, nil
|
||||
}
|
||||
|
||||
child := arr.Elem(idx)
|
||||
newChild, err := setInAst(child.Value, path[1:], value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
arr.Set(idx, ast.NewTerm(newChild))
|
||||
return arr, nil
|
||||
}
|
||||
|
||||
func removeInAst(value ast.Value, path storage.Path) (ast.Value, error) {
|
||||
if len(path) == 0 {
|
||||
return value, nil
|
||||
}
|
||||
|
||||
switch value := value.(type) {
|
||||
case ast.Object:
|
||||
return removeInAstObject(value, path)
|
||||
case *ast.Array:
|
||||
return removeInAstArray(value, path)
|
||||
default:
|
||||
return nil, fmt.Errorf("illegal value type %T, expected ast.Object or ast.Array", value)
|
||||
}
|
||||
}
|
||||
|
||||
func removeInAstObject(obj ast.Object, path storage.Path) (ast.Value, error) {
|
||||
key := ast.StringTerm(path[0])
|
||||
|
||||
if len(path) == 1 {
|
||||
var items [][2]*ast.Term
|
||||
// Note: possibly expensive operation for large data.
|
||||
obj.Foreach(func(k *ast.Term, v *ast.Term) {
|
||||
if k.Equal(key) {
|
||||
return
|
||||
}
|
||||
items = append(items, [2]*ast.Term{k, v})
|
||||
})
|
||||
return ast.NewObject(items...), nil
|
||||
}
|
||||
|
||||
if child := obj.Get(key); child != nil {
|
||||
updatedChild, err := removeInAst(child.Value, path[1:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
obj.Insert(key, ast.NewTerm(updatedChild))
|
||||
}
|
||||
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func removeInAstArray(arr *ast.Array, path storage.Path) (ast.Value, error) {
|
||||
idx, err := strconv.Atoi(path[0])
|
||||
if err != nil {
|
||||
// We expect the path to be valid at this point.
|
||||
return arr, nil
|
||||
}
|
||||
|
||||
if idx < 0 || idx >= arr.Len() {
|
||||
return arr, err
|
||||
}
|
||||
|
||||
if len(path) == 1 {
|
||||
var elems []*ast.Term
|
||||
// Note: possibly expensive operation for large data.
|
||||
for i := 0; i < arr.Len(); i++ {
|
||||
if i == idx {
|
||||
continue
|
||||
}
|
||||
elems = append(elems, arr.Elem(i))
|
||||
}
|
||||
return ast.NewArray(elems...), nil
|
||||
}
|
||||
|
||||
updatedChild, err := removeInAst(arr.Elem(idx).Value, path[1:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
arr.Set(idx, ast.NewTerm(updatedChild))
|
||||
return arr, nil
|
||||
}
|
||||
66 vendor/github.com/open-policy-agent/opa/storage/inmem/inmem.go generated vendored
@@ -24,6 +24,7 @@ import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/open-policy-agent/opa/ast"
|
||||
"github.com/open-policy-agent/opa/internal/merge"
|
||||
"github.com/open-policy-agent/opa/storage"
|
||||
"github.com/open-policy-agent/opa/util"
|
||||
@@ -37,16 +38,22 @@ func New() storage.Store {
|
||||
// NewWithOpts returns an empty in-memory store, with extra options passed.
|
||||
func NewWithOpts(opts ...Opt) storage.Store {
|
||||
s := &store{
|
||||
data: map[string]interface{}{},
|
||||
triggers: map[*handle]storage.TriggerConfig{},
|
||||
policies: map[string][]byte{},
|
||||
roundTripOnWrite: true,
|
||||
triggers: map[*handle]storage.TriggerConfig{},
|
||||
policies: map[string][]byte{},
|
||||
roundTripOnWrite: true,
|
||||
returnASTValuesOnRead: false,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(s)
|
||||
}
|
||||
|
||||
if s.returnASTValuesOnRead {
|
||||
s.data = ast.NewObject()
|
||||
} else {
|
||||
s.data = map[string]interface{}{}
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
@@ -55,7 +62,7 @@ func NewFromObject(data map[string]interface{}) storage.Store {
|
||||
return NewFromObjectWithOpts(data)
|
||||
}
|
||||
|
||||
// NewFromObject returns a new in-memory store from the supplied data object, with the
|
||||
// NewFromObjectWithOpts returns a new in-memory store from the supplied data object, with the
|
||||
// options passed.
|
||||
func NewFromObjectWithOpts(data map[string]interface{}, opts ...Opt) storage.Store {
|
||||
db := NewWithOpts(opts...)
|
||||
@@ -94,13 +101,18 @@ type store struct {
|
||||
rmu sync.RWMutex // reader-writer lock
|
||||
wmu sync.Mutex // writer lock
|
||||
xid uint64 // last generated transaction id
|
||||
data map[string]interface{} // raw data
|
||||
data interface{} // raw or AST data
|
||||
policies map[string][]byte // raw policies
|
||||
triggers map[*handle]storage.TriggerConfig // registered triggers
|
||||
|
||||
// roundTripOnWrite, if true, means that every call to Write round trips the
|
||||
// data through JSON before adding the data to the store. Defaults to true.
|
||||
roundTripOnWrite bool
|
||||
|
||||
// returnASTValuesOnRead, if true, means that the store will eagerly convert data to AST values,
|
||||
// and return them on Read.
|
||||
// FIXME: naming(?)
|
||||
returnASTValuesOnRead bool
|
||||
}
|
||||
|
||||
type handle struct {
|
||||
@@ -295,7 +307,13 @@ func (db *store) Read(_ context.Context, txn storage.Transaction, path storage.P
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return underlying.Read(path)
|
||||
|
||||
v, err := underlying.Read(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (db *store) Write(_ context.Context, txn storage.Transaction, op storage.PatchOp, path storage.Path, value interface{}) error {
|
||||
@@ -327,11 +345,45 @@ func (h *handle) Unregister(_ context.Context, txn storage.Transaction) {
|
||||
}
|
||||
|
||||
func (db *store) runOnCommitTriggers(ctx context.Context, txn storage.Transaction, event storage.TriggerEvent) {
|
||||
if db.returnASTValuesOnRead && len(db.triggers) > 0 {
|
||||
// FIXME: Not very performant for large data.
|
||||
|
||||
dataEvents := make([]storage.DataEvent, 0, len(event.Data))
|
||||
|
||||
for _, dataEvent := range event.Data {
|
||||
if astData, ok := dataEvent.Data.(ast.Value); ok {
|
||||
jsn, err := ast.ValueToInterface(astData, illegalResolver{})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
dataEvents = append(dataEvents, storage.DataEvent{
|
||||
Path: dataEvent.Path,
|
||||
Data: jsn,
|
||||
Removed: dataEvent.Removed,
|
||||
})
|
||||
} else {
|
||||
dataEvents = append(dataEvents, dataEvent)
|
||||
}
|
||||
}
|
||||
|
||||
event = storage.TriggerEvent{
|
||||
Policy: event.Policy,
|
||||
Data: dataEvents,
|
||||
Context: event.Context,
|
||||
}
|
||||
}
|
||||
|
||||
for _, t := range db.triggers {
|
||||
t.OnCommit(ctx, txn, event)
|
||||
}
|
||||
}
|
||||
|
||||
type illegalResolver struct{}
|
||||
|
||||
func (illegalResolver) Resolve(ref ast.Ref) (interface{}, error) {
|
||||
return nil, fmt.Errorf("illegal value: %v", ref)
|
||||
}
|
||||
|
||||
func (db *store) underlying(txn storage.Transaction) (*transaction, error) {
|
||||
underlying, ok := txn.(*transaction)
|
||||
if !ok {
|
||||
|
||||
12 vendor/github.com/open-policy-agent/opa/storage/inmem/opts.go generated vendored

@@ -23,3 +23,15 @@ func OptRoundTripOnWrite(enabled bool) Opt {
s.roundTripOnWrite = enabled
}
}

// OptReturnASTValuesOnRead sets whether data values added to the store should be
// eagerly converted to AST values, which are then returned on read.
//
// When enabled, this feature does not sanity check data before converting it to AST values,
// which may result in panics if the data is not valid. Callers should ensure that passed data
// can be serialized to AST values; otherwise, it's recommended to also enable OptRoundTripOnWrite.
func OptReturnASTValuesOnRead(enabled bool) Opt {
return func(s *store) {
s.returnASTValuesOnRead = enabled
}
}
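A minimal sketch of constructing the in-memory store with the new option and reading a value back as AST; the data written here is illustrative, and pairing the option with OptRoundTripOnWrite follows the doc comment above rather than code from this repository:

package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/storage"
	"github.com/open-policy-agent/opa/storage/inmem"
)

func main() {
	ctx := context.Background()

	// Eagerly convert written data to ast.Value and return AST values on Read;
	// round-tripping on write guards against data that is not JSON-serializable.
	store := inmem.NewWithOpts(
		inmem.OptReturnASTValuesOnRead(true),
		inmem.OptRoundTripOnWrite(true),
	)

	err := storage.Txn(ctx, store, storage.WriteParams, func(txn storage.Transaction) error {
		if err := store.Write(ctx, txn, storage.AddOp, storage.Path{"example"}, map[string]interface{}{"x": 1}); err != nil {
			return err
		}
		v, err := store.Read(ctx, txn, storage.Path{"example", "x"})
		if err != nil {
			return err
		}
		fmt.Printf("%T %v\n", v, v) // an ast.Value (ast.Number) when the option is enabled
		return nil
	})
	if err != nil {
		panic(err)
	}
}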
195 vendor/github.com/open-policy-agent/opa/storage/inmem/txn.go generated vendored
@@ -9,6 +9,7 @@ import (
|
||||
"encoding/json"
|
||||
"strconv"
|
||||
|
||||
"github.com/open-policy-agent/opa/ast"
|
||||
"github.com/open-policy-agent/opa/internal/deepcopy"
|
||||
"github.com/open-policy-agent/opa/storage"
|
||||
"github.com/open-policy-agent/opa/storage/internal/errors"
|
||||
@@ -76,13 +77,13 @@ func (txn *transaction) Write(op storage.PatchOp, path storage.Path, value inter
|
||||
}
|
||||
|
||||
for curr := txn.updates.Front(); curr != nil; {
|
||||
update := curr.Value.(*update)
|
||||
update := curr.Value.(dataUpdate)
|
||||
|
||||
// Check if new update masks existing update exactly. In this case, the
|
||||
// existing update can be removed and no other updates have to be
|
||||
// visited (because no two updates overlap.)
|
||||
if update.path.Equal(path) {
|
||||
if update.remove {
|
||||
if update.Path().Equal(path) {
|
||||
if update.Remove() {
|
||||
if op != storage.AddOp {
|
||||
return errors.NewNotFoundError(path)
|
||||
}
|
||||
@@ -94,7 +95,7 @@ func (txn *transaction) Write(op storage.PatchOp, path storage.Path, value inter
|
||||
// Check if new update masks existing update. In this case, the
|
||||
// existing update has to be removed but other updates may overlap, so
|
||||
// we must continue.
|
||||
if update.path.HasPrefix(path) {
|
||||
if update.Path().HasPrefix(path) {
|
||||
remove := curr
|
||||
curr = curr.Next()
|
||||
txn.updates.Remove(remove)
|
||||
@@ -103,23 +104,23 @@ func (txn *transaction) Write(op storage.PatchOp, path storage.Path, value inter
|
||||
|
||||
// Check if new update modifies existing update. In this case, the
|
||||
// existing update is mutated.
|
||||
if path.HasPrefix(update.path) {
|
||||
if update.remove {
|
||||
if path.HasPrefix(update.Path()) {
|
||||
if update.Remove() {
|
||||
return errors.NewNotFoundError(path)
|
||||
}
|
||||
suffix := path[len(update.path):]
|
||||
newUpdate, err := newUpdate(update.value, op, suffix, 0, value)
|
||||
suffix := path[len(update.Path()):]
|
||||
newUpdate, err := txn.db.newUpdate(update.Value(), op, suffix, 0, value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
update.value = newUpdate.Apply(update.value)
|
||||
update.Set(newUpdate.Apply(update.Value()))
|
||||
return nil
|
||||
}
|
||||
|
||||
curr = curr.Next()
|
||||
}
|
||||
|
||||
update, err := newUpdate(txn.db.data, op, path, 0, value)
|
||||
update, err := txn.db.newUpdate(txn.db.data, op, path, 0, value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -132,72 +133,115 @@ func (txn *transaction) updateRoot(op storage.PatchOp, value interface{}) error
|
||||
if op == storage.RemoveOp {
|
||||
return invalidPatchError(rootCannotBeRemovedMsg)
|
||||
}
|
||||
if _, ok := value.(map[string]interface{}); !ok {
|
||||
return invalidPatchError(rootMustBeObjectMsg)
|
||||
|
||||
var update any
|
||||
if txn.db.returnASTValuesOnRead {
|
||||
valueAST, err := interfaceToValue(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, ok := valueAST.(ast.Object); !ok {
|
||||
return invalidPatchError(rootMustBeObjectMsg)
|
||||
}
|
||||
|
||||
update = &updateAST{
|
||||
path: storage.Path{},
|
||||
remove: false,
|
||||
value: valueAST,
|
||||
}
|
||||
} else {
|
||||
if _, ok := value.(map[string]interface{}); !ok {
|
||||
return invalidPatchError(rootMustBeObjectMsg)
|
||||
}
|
||||
|
||||
update = &updateRaw{
|
||||
path: storage.Path{},
|
||||
remove: false,
|
||||
value: value,
|
||||
}
|
||||
}
|
||||
|
||||
txn.updates.Init()
|
||||
txn.updates.PushFront(&update{
|
||||
path: storage.Path{},
|
||||
remove: false,
|
||||
value: value,
|
||||
})
|
||||
txn.updates.PushFront(update)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (txn *transaction) Commit() (result storage.TriggerEvent) {
|
||||
result.Context = txn.context
|
||||
for curr := txn.updates.Front(); curr != nil; curr = curr.Next() {
|
||||
action := curr.Value.(*update)
|
||||
updated := action.Apply(txn.db.data)
|
||||
txn.db.data = updated.(map[string]interface{})
|
||||
action := curr.Value.(dataUpdate)
|
||||
txn.db.data = action.Apply(txn.db.data)
|
||||
|
||||
result.Data = append(result.Data, storage.DataEvent{
|
||||
Path: action.path,
|
||||
Data: action.value,
|
||||
Removed: action.remove,
|
||||
Path: action.Path(),
|
||||
Data: action.Value(),
|
||||
Removed: action.Remove(),
|
||||
})
|
||||
}
|
||||
for id, update := range txn.policies {
|
||||
if update.remove {
|
||||
for id, upd := range txn.policies {
|
||||
if upd.remove {
|
||||
delete(txn.db.policies, id)
|
||||
} else {
|
||||
txn.db.policies[id] = update.value
|
||||
txn.db.policies[id] = upd.value
|
||||
}
|
||||
|
||||
result.Policy = append(result.Policy, storage.PolicyEvent{
|
||||
ID: id,
|
||||
Data: update.value,
|
||||
Removed: update.remove,
|
||||
Data: upd.value,
|
||||
Removed: upd.remove,
|
||||
})
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func pointer(v interface{}, path storage.Path) (interface{}, error) {
|
||||
if v, ok := v.(ast.Value); ok {
|
||||
return ptr.ValuePtr(v, path)
|
||||
}
|
||||
return ptr.Ptr(v, path)
|
||||
}
|
||||
|
||||
func deepcpy(v interface{}) interface{} {
|
||||
if v, ok := v.(ast.Value); ok {
|
||||
var cpy ast.Value
|
||||
|
||||
switch data := v.(type) {
|
||||
case ast.Object:
|
||||
cpy = data.Copy()
|
||||
case *ast.Array:
|
||||
cpy = data.Copy()
|
||||
}
|
||||
|
||||
return cpy
|
||||
}
|
||||
return deepcopy.DeepCopy(v)
|
||||
}
|
||||
|
||||
func (txn *transaction) Read(path storage.Path) (interface{}, error) {
|
||||
|
||||
if !txn.write {
|
||||
return ptr.Ptr(txn.db.data, path)
|
||||
return pointer(txn.db.data, path)
|
||||
}
|
||||
|
||||
merge := []*update{}
|
||||
var merge []dataUpdate
|
||||
|
||||
for curr := txn.updates.Front(); curr != nil; curr = curr.Next() {
|
||||
|
||||
update := curr.Value.(*update)
|
||||
upd := curr.Value.(dataUpdate)
|
||||
|
||||
if path.HasPrefix(update.path) {
|
||||
if update.remove {
|
||||
if path.HasPrefix(upd.Path()) {
|
||||
if upd.Remove() {
|
||||
return nil, errors.NewNotFoundError(path)
|
||||
}
|
||||
return ptr.Ptr(update.value, path[len(update.path):])
|
||||
return pointer(upd.Value(), path[len(upd.Path()):])
|
||||
}
|
||||
|
||||
if update.path.HasPrefix(path) {
|
||||
merge = append(merge, update)
|
||||
if upd.Path().HasPrefix(path) {
|
||||
merge = append(merge, upd)
|
||||
}
|
||||
}
|
||||
|
||||
data, err := ptr.Ptr(txn.db.data, path)
|
||||
data, err := pointer(txn.db.data, path)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -207,7 +251,7 @@ func (txn *transaction) Read(path storage.Path) (interface{}, error) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
cpy := deepcopy.DeepCopy(data)
|
||||
cpy := deepcpy(data)
|
||||
|
||||
for _, update := range merge {
|
||||
cpy = update.Relative(path).Apply(cpy)
|
||||
@@ -266,15 +310,44 @@ func (txn *transaction) DeletePolicy(id string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type dataUpdate interface {
|
||||
Path() storage.Path
|
||||
Remove() bool
|
||||
Apply(interface{}) interface{}
|
||||
Relative(path storage.Path) dataUpdate
|
||||
Set(interface{})
|
||||
Value() interface{}
|
||||
}
|
||||
|
||||
// update contains state associated with an update to be applied to the
|
||||
// in-memory data store.
|
||||
type update struct {
|
||||
type updateRaw struct {
|
||||
path storage.Path // data path modified by update
|
||||
remove bool // indicates whether update removes the value at path
|
||||
value interface{} // value to add/replace at path (ignored if remove is true)
|
||||
}
|
||||
|
||||
func newUpdate(data interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (*update, error) {
|
||||
func (db *store) newUpdate(data interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (dataUpdate, error) {
|
||||
if db.returnASTValuesOnRead {
|
||||
astData, err := interfaceToValue(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
astValue, err := interfaceToValue(value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newUpdateAST(astData, op, path, idx, astValue)
|
||||
}
|
||||
return newUpdateRaw(data, op, path, idx, value)
|
||||
}
|
||||
|
||||
func newUpdateRaw(data interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (dataUpdate, error) {
|
||||
|
||||
switch data.(type) {
|
||||
case nil, bool, json.Number, string:
|
||||
return nil, errors.NewNotFoundError(path)
|
||||
}
|
||||
|
||||
switch data := data.(type) {
|
||||
case map[string]interface{}:
|
||||
@@ -282,9 +355,6 @@ func newUpdate(data interface{}, op storage.PatchOp, path storage.Path, idx int,
|
||||
|
||||
case []interface{}:
|
||||
return newUpdateArray(data, op, path, idx, value)
|
||||
|
||||
case nil, bool, json.Number, string:
|
||||
return nil, errors.NewNotFoundError(path)
|
||||
}
|
||||
|
||||
return nil, &storage.Error{
|
||||
@@ -293,7 +363,7 @@ func newUpdate(data interface{}, op storage.PatchOp, path storage.Path, idx int,
|
||||
}
|
||||
}
|
||||
|
||||
func newUpdateArray(data []interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (*update, error) {
|
||||
func newUpdateArray(data []interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (dataUpdate, error) {
|
||||
|
||||
if idx == len(path)-1 {
|
||||
if path[idx] == "-" || path[idx] == strconv.Itoa(len(data)) {
|
||||
@@ -303,7 +373,7 @@ func newUpdateArray(data []interface{}, op storage.PatchOp, path storage.Path, i
|
||||
cpy := make([]interface{}, len(data)+1)
|
||||
copy(cpy, data)
|
||||
cpy[len(data)] = value
|
||||
return &update{path[:len(path)-1], false, cpy}, nil
|
||||
return &updateRaw{path[:len(path)-1], false, cpy}, nil
|
||||
}
|
||||
|
||||
pos, err := ptr.ValidateArrayIndex(data, path[idx], path)
|
||||
@@ -317,19 +387,19 @@ func newUpdateArray(data []interface{}, op storage.PatchOp, path storage.Path, i
|
||||
copy(cpy[:pos], data[:pos])
|
||||
copy(cpy[pos+1:], data[pos:])
|
||||
cpy[pos] = value
|
||||
return &update{path[:len(path)-1], false, cpy}, nil
|
||||
return &updateRaw{path[:len(path)-1], false, cpy}, nil
|
||||
|
||||
case storage.RemoveOp:
|
||||
cpy := make([]interface{}, len(data)-1)
|
||||
copy(cpy[:pos], data[:pos])
|
||||
copy(cpy[pos:], data[pos+1:])
|
||||
return &update{path[:len(path)-1], false, cpy}, nil
|
||||
return &updateRaw{path[:len(path)-1], false, cpy}, nil
|
||||
|
||||
default:
|
||||
cpy := make([]interface{}, len(data))
|
||||
copy(cpy, data)
|
||||
cpy[pos] = value
|
||||
return &update{path[:len(path)-1], false, cpy}, nil
|
||||
return &updateRaw{path[:len(path)-1], false, cpy}, nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -338,10 +408,10 @@ func newUpdateArray(data []interface{}, op storage.PatchOp, path storage.Path, i
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newUpdate(data[pos], op, path, idx+1, value)
|
||||
return newUpdateRaw(data[pos], op, path, idx+1, value)
|
||||
}
|
||||
|
||||
func newUpdateObject(data map[string]interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (*update, error) {
|
||||
func newUpdateObject(data map[string]interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (dataUpdate, error) {
|
||||
|
||||
if idx == len(path)-1 {
|
||||
switch op {
|
||||
@@ -350,16 +420,25 @@ func newUpdateObject(data map[string]interface{}, op storage.PatchOp, path stora
|
||||
return nil, errors.NewNotFoundError(path)
|
||||
}
|
||||
}
|
||||
return &update{path, op == storage.RemoveOp, value}, nil
|
||||
return &updateRaw{path, op == storage.RemoveOp, value}, nil
|
||||
}
|
||||
|
||||
if data, ok := data[path[idx]]; ok {
|
||||
return newUpdate(data, op, path, idx+1, value)
|
||||
return newUpdateRaw(data, op, path, idx+1, value)
|
||||
}
|
||||
|
||||
return nil, errors.NewNotFoundError(path)
|
||||
}
|
||||
func (u *update) Apply(data interface{}) interface{} {
|
||||
|
||||
func (u *updateRaw) Remove() bool {
|
||||
return u.remove
|
||||
}
|
||||
|
||||
func (u *updateRaw) Path() storage.Path {
|
||||
return u.path
|
||||
}
|
||||
|
||||
func (u *updateRaw) Apply(data interface{}) interface{} {
|
||||
if len(u.path) == 0 {
|
||||
return u.value
|
||||
}
|
||||
@@ -389,7 +468,15 @@ func (u *update) Apply(data interface{}) interface{} {
|
||||
return data
|
||||
}
|
||||
|
||||
func (u *update) Relative(path storage.Path) *update {
|
||||
func (u *updateRaw) Set(v interface{}) {
|
||||
u.value = v
|
||||
}
|
||||
|
||||
func (u *updateRaw) Value() interface{} {
|
||||
return u.value
|
||||
}
|
||||
|
||||
func (u *updateRaw) Relative(path storage.Path) dataUpdate {
|
||||
cpy := *u
|
||||
cpy.path = cpy.path[len(path):]
|
||||
return &cpy
|
||||
|
||||
49 vendor/github.com/open-policy-agent/opa/storage/internal/ptr/ptr.go generated vendored

@@ -8,6 +8,7 @@ package ptr
import (
"strconv"

"github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/storage"
"github.com/open-policy-agent/opa/storage/internal/errors"
)
@@ -36,6 +37,32 @@ func Ptr(data interface{}, path storage.Path) (interface{}, error) {
return node, nil
}

func ValuePtr(data ast.Value, path storage.Path) (ast.Value, error) {
node := data
for i := range path {
key := path[i]
switch curr := node.(type) {
case ast.Object:
keyTerm := ast.StringTerm(key)
val := curr.Get(keyTerm)
if val == nil {
return nil, errors.NewNotFoundError(path)
}
node = val.Value
case *ast.Array:
pos, err := ValidateASTArrayIndex(curr, key, path)
if err != nil {
return nil, err
}
node = curr.Elem(pos).Value
default:
return nil, errors.NewNotFoundError(path)
}
}

return node, nil
}

func ValidateArrayIndex(arr []interface{}, s string, path storage.Path) (int, error) {
idx, ok := isInt(s)
if !ok {
@@ -44,6 +71,14 @@ func ValidateArrayIndex(arr []interface{}, s string, path storage.Path) (int, er
return inRange(idx, arr, path)
}

func ValidateASTArrayIndex(arr *ast.Array, s string, path storage.Path) (int, error) {
idx, ok := isInt(s)
if !ok {
return 0, errors.NewNotFoundErrorWithHint(path, errors.ArrayIndexTypeMsg)
}
return inRange(idx, arr, path)
}

// ValidateArrayIndexForWrite also checks that `s` is a valid way to address an
// array element like `ValidateArrayIndex`, but returns a `resource_conflict` error
// if it is not.
@@ -60,8 +95,18 @@ func isInt(s string) (int, bool) {
return idx, err == nil
}

func inRange(i int, arr []interface{}, path storage.Path) (int, error) {
if i < 0 || i >= len(arr) {
func inRange(i int, arr interface{}, path storage.Path) (int, error) {

var arrLen int

switch v := arr.(type) {
case []interface{}:
arrLen = len(v)
case *ast.Array:
arrLen = v.Len()
}

if i < 0 || i >= arrLen {
return 0, errors.NewNotFoundErrorWithHint(path, errors.OutOfRangeMsg)
}
return i, nil
10 vendor/github.com/open-policy-agent/opa/storage/storage.go generated vendored

@@ -6,6 +6,8 @@ package storage

import (
"context"

"github.com/open-policy-agent/opa/ast"
)

// NewTransactionOrDie is a helper function to create a new transaction. If the
@@ -78,6 +80,11 @@ func MakeDir(ctx context.Context, store Store, txn Transaction, path Path) error
if _, ok := node.(map[string]interface{}); ok {
return nil
}

if _, ok := node.(ast.Object); ok {
return nil
}

return writeConflictError(path)
}

@@ -118,6 +125,9 @@ func NonEmpty(ctx context.Context, store Store, txn Transaction) func([]string)
if _, ok := val.(map[string]interface{}); ok {
return false, nil
}
if _, ok := val.(ast.Object); ok {
return false, nil
}
return true, nil
}
}
7 vendor/github.com/open-policy-agent/opa/topdown/http.go generated vendored

@@ -412,7 +412,7 @@ func createHTTPRequest(bctx BuiltinContext, obj ast.Object) (*http.Request, *htt
var tlsConfig tls.Config
var customHeaders map[string]interface{}
var tlsInsecureSkipVerify bool
var timeout = defaultHTTPRequestTimeout
timeout := defaultHTTPRequestTimeout

for _, val := range obj.Keys() {
key, err := ast.JSON(val.Value)
@@ -736,9 +736,12 @@ func executeHTTPRequest(req *http.Request, client *http.Client, inputReqObj ast.
return nil, err
}

delay := util.DefaultBackoff(float64(minRetryDelay), float64(maxRetryDelay), i)
timer, timerCancel := util.TimerWithCancel(delay)
select {
case <-time.After(util.DefaultBackoff(float64(minRetryDelay), float64(maxRetryDelay), i)):
case <-timer.C:
case <-req.Context().Done():
timerCancel() // explicitly cancel the timer.
return nil, context.Canceled
}
}
35 vendor/github.com/open-policy-agent/opa/topdown/jsonschema.go generated vendored

@@ -67,7 +67,17 @@ func builtinJSONSchemaVerify(_ BuiltinContext, operands []*ast.Term, iter func(*
// builtinJSONMatchSchema accepts 2 arguments both can be string or object and verifies if the document matches the JSON schema.
// Returns an array where first element is a boolean indicating a successful match, and the second is an array of errors that is empty on success and populated on failure.
// In case of internal error returns empty array.
func builtinJSONMatchSchema(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
func builtinJSONMatchSchema(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
var schema *gojsonschema.Schema

if bctx.InterQueryBuiltinValueCache != nil {
if val, ok := bctx.InterQueryBuiltinValueCache.Get(operands[1].Value); ok {
if s, isSchema := val.(*gojsonschema.Schema); isSchema {
schema = s
}
}
}

// Take first argument and make JSON Loader from it.
// This is a JSON document made from Rego JSON string or object.
documentLoader, err := astValueToJSONSchemaLoader(operands[0].Value)
@@ -75,15 +85,26 @@ func builtinJSONMatchSchema(_ BuiltinContext, operands []*ast.Term, iter func(*a
return err
}

// Take second argument and make JSON Loader from it.
// This is a JSON schema made from Rego JSON string or object.
schemaLoader, err := astValueToJSONSchemaLoader(operands[1].Value)
if err != nil {
return err
if schema == nil {
// Take second argument and make JSON Loader from it.
// This is a JSON schema made from Rego JSON string or object.
schemaLoader, err := astValueToJSONSchemaLoader(operands[1].Value)
if err != nil {
return err
}

schema, err = gojsonschema.NewSchema(schemaLoader)
if err != nil {
return err
}

if bctx.InterQueryBuiltinValueCache != nil {
bctx.InterQueryBuiltinValueCache.Insert(operands[1].Value, schema)
}
}

// Use schema to validate document.
result, err := gojsonschema.Validate(schemaLoader, documentLoader)
result, err := schema.Validate(documentLoader)
if err != nil {
return err
}
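Since the hunk above now compiles and caches the schema passed to json.match_schema, a short sketch of exercising that builtin through the Go API may be useful; this is an illustration assuming the standard rego package, not code from this repository:

package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/rego"
)

func main() {
	ctx := context.Background()

	// json.match_schema returns [match, errors]; after this change the compiled
	// schema for the second operand can be served from the inter-query value
	// cache when one is configured on the runtime.
	r := rego.New(rego.Query(`json.match_schema({"name": "opa"}, {"type": "object"})`))

	rs, err := r.Eval(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(rs[0].Expressions[0].Value)
}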
48 vendor/github.com/open-policy-agent/opa/util/time.go generated vendored Normal file

@@ -0,0 +1,48 @@
package util

import "time"

// TimerWithCancel exists because of memory leaks when using
// time.After in select statements. Instead, we now manually create timers,
// wait on them, and manually free them.
//
// See this for more details:
// https://www.arangodb.com/2020/09/a-story-of-a-memory-leak-in-go-how-to-properly-use-time-after/
//
// Note: This issue is fixed in Go 1.23, but this fix helps us until then.
//
// Warning: the cancel cannot be done concurrent to reading, everything should
// work in the same goroutine.
//
// Example:
//
// for retries := 0; true; retries++ {
//
// ...main logic...
//
// timer, cancel := utils.TimerWithCancel(utils.Backoff(retries))
// select {
// case <-ctx.Done():
// cancel()
// return ctx.Err()
// case <-timer.C:
// continue
// }
// }
func TimerWithCancel(delay time.Duration) (*time.Timer, func()) {
timer := time.NewTimer(delay)

return timer, func() {
// Note: The Stop function returns:
// - true: if the timer is active. (no draining required)
// - false: if the timer was already stopped or fired/expired.
// In this case the channel should be drained to prevent memory
// leaks only if it is not empty.
// This operation is safe only if the cancel function is
// used in same goroutine. Concurrent reading or canceling may
// cause deadlock.
if !timer.Stop() && len(timer.C) > 0 {
<-timer.C
}
}
}
2 vendor/github.com/open-policy-agent/opa/version/version.go generated vendored

@@ -11,7 +11,7 @@ import (
)

// Version is the canonical version of OPA.
var Version = "0.69.0"
var Version = "0.70.0"

// GoVersion is the version of Go this was built with
var GoVersion = runtime.Version()
2 vendor/modules.txt vendored

@@ -1495,7 +1495,7 @@ github.com/onsi/gomega/matchers/support/goraph/edge
github.com/onsi/gomega/matchers/support/goraph/node
github.com/onsi/gomega/matchers/support/goraph/util
github.com/onsi/gomega/types
# github.com/open-policy-agent/opa v0.69.0
# github.com/open-policy-agent/opa v0.70.0
## explicit; go 1.21
github.com/open-policy-agent/opa/ast
github.com/open-policy-agent/opa/ast/internal/scanner