Repository: https://github.com/opencloud-eu/opencloud.git (mirror)

Commit 1e970499af — migrate oc init command from urfave/cli to spf13/cobra

Signed-off-by: Christian Richter <c.richter@opencloud.eu>
Committed-by: Florian Schade
Parent: 35900f8875
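To make the shape of the change easier to follow before reading the diff, here is a condensed, illustrative sketch of the two command styles (hypothetical package and function names, not the project's actual file): urfave/cli declares flags as a `Flags` slice on the command literal and reads them from the `*cli.Context` passed to `Action`, while cobra declares the command with `RunE` and registers flags on the command's flag set after construction.

```go
// Condensed, illustrative sketch only — not the project's actual file.
package command

import (
	"github.com/spf13/cobra"
	cli "github.com/urfave/cli/v2"
)

// initCommandOld shows the urfave/cli style: flags live in a Flags slice and
// are read from the *cli.Context handed to Action.
func initCommandOld() *cli.Command {
	return &cli.Command{
		Name:  "init",
		Usage: "initialise an OpenCloud config",
		Flags: []cli.Flag{
			&cli.StringFlag{Name: "insecure", EnvVars: []string{"OC_INSECURE"}, Value: "ask"},
		},
		Action: func(c *cli.Context) error {
			_ = c.String("insecure") // flag values come from the context
			return nil
		},
	}
}

// initCommandNew shows the cobra style: the literal carries Use/Short/RunE,
// and flags are registered on the command's flag set afterwards.
func initCommandNew() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "init",
		Short: "initialise an OpenCloud config",
		RunE: func(cmd *cobra.Command, args []string) error {
			_, err := cmd.Flags().GetString("insecure") // flag values come from the FlagSet
			return err
		},
	}
	cmd.Flags().String("insecure", "ask", "Allow insecure OpenCloud config")
	return cmd
}
```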
go.mod (+6 lines)

@@ -78,6 +78,7 @@ require (
github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af
github.com/spf13/afero v1.15.0
github.com/spf13/cobra v1.10.1
github.com/spf13/viper v1.21.0
github.com/stretchr/testify v1.11.1
github.com/test-go/testify v1.1.4
github.com/testcontainers/testcontainers-go v0.40.0
@@ -319,6 +320,7 @@ require (
github.com/pablodz/inotifywaitgo v0.0.9 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/philhofer/fwd v1.2.0 // indirect
github.com/pierrec/lz4/v4 v4.1.15 // indirect
github.com/pjbgf/sha1cd v0.3.2 // indirect
@@ -335,6 +337,7 @@ require (
github.com/russellhaering/goxmldsig v1.5.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd // indirect
github.com/sagikazarmark/locafero v0.11.0 // indirect
github.com/samber/lo v1.51.0 // indirect
github.com/samber/slog-common v0.19.0 // indirect
github.com/samber/slog-zerolog/v2 v2.9.0 // indirect
@@ -350,10 +353,13 @@ require (
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c // indirect
github.com/shurcooL/vfsgen v0.0.0-20230704071429-0000e147ea92 // indirect
github.com/skeema/knownhosts v1.3.0 // indirect
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
github.com/spacewander/go-suffix-tree v0.0.0-20191010040751-0865e368c784 // indirect
github.com/spf13/cast v1.10.0 // indirect
github.com/spf13/pflag v1.0.10 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/studio-b12/gowebdav v0.9.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/tchap/go-patricia/v2 v2.3.3 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
go.sum (+14 lines)

@@ -352,6 +352,8 @@ github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI=
github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
@@ -988,6 +990,8 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2D
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI=
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM=
@@ -1093,6 +1097,8 @@ github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd h1:CmH9+J6ZSsIjUK
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sacloud/libsacloud v1.36.2/go.mod h1:P7YAOVmnIn3DKHqCZcUKYUXmSwGBm3yS7IBEjKVSrjg=
github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc=
github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik=
github.com/samber/lo v1.51.0 h1:kysRYLbHy/MB7kQZf5DSN50JHmMsNEdeY24VzJFu7wI=
github.com/samber/lo v1.51.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=
github.com/samber/slog-common v0.19.0 h1:fNcZb8B2uOLooeYwFpAlKjkQTUafdjfqKcwcC89G9YI=
@@ -1142,6 +1148,8 @@ github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:s
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/gunit v1.0.4/go.mod h1:EH5qMBab2UclzXUcpR8b93eHsIlp9u+pDQIRp5DZNzQ=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw=
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U=
github.com/spacewander/go-suffix-tree v0.0.0-20191010040751-0865e368c784 h1:0jjO3HdJfOn6gYHD/ZNZh0LLMxEAqkYX7xoDPQReEgs=
github.com/spacewander/go-suffix-tree v0.0.0-20191010040751-0865e368c784/go.mod h1:ff/5myEGgtsAwf26goQCO905GrEm5ugEZSd6OWTsrhM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
@@ -1151,6 +1159,8 @@ github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
@@ -1163,6 +1173,8 @@ github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
@@ -1184,6 +1196,8 @@ github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/tchap/go-patricia/v2 v2.3.3 h1:xfNEsODumaEcCcY3gI0hYPZ/PcpVv5ju6RMAhgwZDDc=
github.com/tchap/go-patricia/v2 v2.3.3/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
github.com/test-go/testify v1.1.4 h1:Tf9lntrKUMHiXQ07qBScBTSA0dhYQlu83hswqelv1iE=
@@ -11,49 +11,18 @@ import (
     "github.com/opencloud-eu/opencloud/opencloud/pkg/register"
     "github.com/opencloud-eu/opencloud/pkg/config"
     "github.com/opencloud-eu/opencloud/pkg/config/defaults"
-    cli "github.com/urfave/cli/v2"
+
+    "github.com/spf13/cobra"
+    "github.com/spf13/viper"
 )

 // InitCommand is the entrypoint for the init command
-func InitCommand(cfg *config.Config) *cli.Command {
-    return &cli.Command{
-        Name: "init",
-        Usage: "initialise an OpenCloud config",
-        Flags: []cli.Flag{
-            &cli.StringFlag{
-                Name: "insecure",
-                EnvVars: []string{"OC_INSECURE"},
-                Value: "ask",
-                Usage: "Allow insecure OpenCloud config",
-            },
-            &cli.BoolFlag{
-                Name: "diff",
-                Aliases: []string{"d"},
-                Usage: "Show the difference between the current config and the new one",
-                Value: false,
-            },
-            &cli.BoolFlag{
-                Name: "force-overwrite",
-                Aliases: []string{"f"},
-                EnvVars: []string{"OC_FORCE_CONFIG_OVERWRITE"},
-                Value: false,
-                Usage: "Force overwrite existing config file",
-            },
-            &cli.StringFlag{
-                Name: "config-path",
-                Value: defaults.BaseConfigPath(),
-                Usage: "Config path for the OpenCloud runtime",
-                EnvVars: []string{"OC_CONFIG_DIR", "OC_BASE_DATA_PATH"},
-            },
-            &cli.StringFlag{
-                Name: "admin-password",
-                Aliases: []string{"ap"},
-                EnvVars: []string{"ADMIN_PASSWORD", "IDM_ADMIN_PASSWORD"},
-                Usage: "Set admin password instead of using a random generated one",
-            },
-        },
-        Action: func(c *cli.Context) error {
-            insecureFlag := c.String("insecure")
+func InitCommand(cfg *config.Config) *cobra.Command {
+    initCmd := &cobra.Command{
+        Use: "init",
+        Short: "initialise an OpenCloud config",
+        RunE: func(cmd *cobra.Command, args []string) error {
+            insecureFlag := cmd.Flag("insecure").Value.String()
             insecure := false
             if insecureFlag == "ask" {
                 answer := strings.ToLower(stringPrompt("Do you want to configure OpenCloud with certificate checking disabled?\n This is not recommended for public instances! [yes | no = default]"))
@@ -63,13 +32,59 @@ func InitCommand(cfg *config.Config) *cli.Command {
             } else if insecureFlag == strings.ToLower("true") || insecureFlag == strings.ToLower("yes") || insecureFlag == strings.ToLower("y") {
                 insecure = true
             }
-            err := ocinit.CreateConfig(insecure, c.Bool("force-overwrite"), c.Bool("diff"), c.String("config-path"), c.String("admin-password"))
+            err := ocinit.CreateConfig(insecure, cmd.Flag("force-overwrite").Changed,
+                cmd.Flag("diff").Changed, cmd.Flag("config-path").Value.String(),
+                cmd.Flag("admin-password").Value.String())
             if err != nil {
                 log.Fatalf("Could not create config: %s", err)
             }
             return nil
         },
     }
+    initCmd.Flags().String("insecure", "ask", "Allow insecure OpenCloud config")
+    err := viper.BindEnv("insecure", "OC_INSECURE")
+    if err != nil {
+        log.Fatalf("Could not bind environment variable OC_INSECURE: %s", err)
+    }
+    err = viper.BindPFlag("insecure", initCmd.Flags().Lookup("insecure"))
+    if err != nil {
+        log.Fatalf("Could not bind flag OC_INSECURE: %s", err)
+    }
+
+    initCmd.Flags().BoolP("diff", "d", false, "Show the difference between the current config and the new one")
+
+    initCmd.Flags().BoolP("force-overwrite", "f", false, "Force overwrite existing config file")
+    err = viper.BindEnv("force-overwrite", "OC_FORCE_CONFIG_OVERWRITE")
+    if err != nil {
+        log.Fatalf("Could not bind environment variable OC_FORCE_CONFIG_OVERWRITE: %s", err)
+    }
+    err = viper.BindPFlag("force-overwrite", initCmd.Flags().Lookup("force-overwrite"))
+    if err != nil {
+        log.Fatalf("Could not bind flag OC_FORCE_CONFIG_OVERWRITE: %s", err)
+    }
+
+    initCmd.Flags().String("config-path", defaults.BaseConfigPath(), "Config path for the OpenCloud runtime")
+    err = viper.BindEnv("config-path", "OC_CONFIG_DIR")
+    if err != nil {
+        log.Fatalf("Could not bind environment variable OC_CONFIG_DIR: %s", err)
+    }
+    err = viper.BindEnv("config-path", "OC_BASE_DATA_PATH")
+    if err != nil {
+        log.Fatalf("Could not bind environment variable OC_BASE_DATA_PATH: %s", err)
+    }
+    err = viper.BindPFlag("config-path", initCmd.Flags().Lookup("config-path"))
+
+    initCmd.Flags().String("admin-password", "", "Set admin password instead of using a random generated one")
+    err = viper.BindEnv("admin-password", "ADMIN_PASSWORD")
+    if err != nil {
+        log.Fatalf("Could not bind environment variable ADMIN_PASSWORD: %s", err)
+    }
+    err = viper.BindEnv("admin-password", "IDM_ADMIN_PASSWORD")
+    if err != nil {
+        log.Fatalf("Could not bind environment variable IDM_ADMIN_PASSWORD: %s", err)
+    }
+    err = viper.BindPFlag("admin-password", initCmd.Flags().Lookup("admin-password"))
+    return initCmd
 }

 func init() {
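Two details in the hunk above are easy to miss. First, the boolean options are now read as `cmd.Flag("force-overwrite").Changed` and `cmd.Flag("diff").Changed`; in pflag, `Changed` reports whether the flag was set on the command line, not its value. Second, environment variables are no longer attached to the flags themselves but bound through viper. The following stand-alone sketch (hypothetical `greet` command and `GREET_NAME` variable, not code from this repository) shows the same cobra + viper wiring and how a value bound this way resolves: explicit flag, then bound environment variable, then the flag's default.

```go
// Minimal sketch of the cobra + viper flag/env wiring pattern, with assumed
// names (greet, GREET_NAME) — not the repository's code.
package main

import (
	"fmt"
	"log"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

func main() {
	greetCmd := &cobra.Command{
		Use:   "greet",
		Short: "print a greeting",
		RunE: func(cmd *cobra.Command, args []string) error {
			// viper resolves the bound key with the precedence:
			// explicit flag > bound environment variable > flag default.
			fmt.Println("hello,", viper.GetString("name"))
			return nil
		},
	}

	greetCmd.Flags().String("name", "world", "who to greet")
	if err := viper.BindEnv("name", "GREET_NAME"); err != nil {
		log.Fatalf("could not bind environment variable GREET_NAME: %s", err)
	}
	if err := viper.BindPFlag("name", greetCmd.Flags().Lookup("name")); err != nil {
		log.Fatalf("could not bind flag name: %s", err)
	}

	if err := greetCmd.Execute(); err != nil {
		log.Fatal(err)
	}
}
```

Running this with `GREET_NAME=Alice` prints "hello, Alice" unless `--name` is passed, which takes precedence. Note that the command in the diff reads values straight from the flag set (`cmd.Flag(...).Value.String()`), so its viper bindings only take effect where the configuration is later read back through viper.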
vendor/github.com/pelletier/go-toml/v2/.dockerignore (generated, vendored, new file: 2 lines)
@@ -0,0 +1,2 @@
|
||||
cmd/tomll/tomll
|
||||
cmd/tomljson/tomljson
|
||||
vendor/github.com/pelletier/go-toml/v2/.gitattributes (generated, vendored, new file: 4 lines)
@@ -0,0 +1,4 @@
|
||||
* text=auto
|
||||
|
||||
benchmark/benchmark.toml text eol=lf
|
||||
testdata/** text eol=lf
|
||||
vendor/github.com/pelletier/go-toml/v2/.gitignore (generated, vendored, new file: 7 lines)
@@ -0,0 +1,7 @@
|
||||
test_program/test_program_bin
|
||||
fuzz/
|
||||
cmd/tomll/tomll
|
||||
cmd/tomljson/tomljson
|
||||
cmd/tomltestgen/tomltestgen
|
||||
dist
|
||||
tests/
|
||||
vendor/github.com/pelletier/go-toml/v2/.golangci.toml (generated, vendored, new file: 84 lines)
@@ -0,0 +1,84 @@
|
||||
[service]
|
||||
golangci-lint-version = "1.39.0"
|
||||
|
||||
[linters-settings.wsl]
|
||||
allow-assign-and-anything = true
|
||||
|
||||
[linters-settings.exhaustive]
|
||||
default-signifies-exhaustive = true
|
||||
|
||||
[linters]
|
||||
disable-all = true
|
||||
enable = [
|
||||
"asciicheck",
|
||||
"bodyclose",
|
||||
"cyclop",
|
||||
"deadcode",
|
||||
"depguard",
|
||||
"dogsled",
|
||||
"dupl",
|
||||
"durationcheck",
|
||||
"errcheck",
|
||||
"errorlint",
|
||||
"exhaustive",
|
||||
# "exhaustivestruct",
|
||||
"exportloopref",
|
||||
"forbidigo",
|
||||
# "forcetypeassert",
|
||||
"funlen",
|
||||
"gci",
|
||||
# "gochecknoglobals",
|
||||
"gochecknoinits",
|
||||
"gocognit",
|
||||
"goconst",
|
||||
"gocritic",
|
||||
"gocyclo",
|
||||
"godot",
|
||||
"godox",
|
||||
# "goerr113",
|
||||
"gofmt",
|
||||
"gofumpt",
|
||||
"goheader",
|
||||
"goimports",
|
||||
"golint",
|
||||
"gomnd",
|
||||
# "gomoddirectives",
|
||||
"gomodguard",
|
||||
"goprintffuncname",
|
||||
"gosec",
|
||||
"gosimple",
|
||||
"govet",
|
||||
# "ifshort",
|
||||
"importas",
|
||||
"ineffassign",
|
||||
"lll",
|
||||
"makezero",
|
||||
"misspell",
|
||||
"nakedret",
|
||||
"nestif",
|
||||
"nilerr",
|
||||
# "nlreturn",
|
||||
"noctx",
|
||||
"nolintlint",
|
||||
#"paralleltest",
|
||||
"prealloc",
|
||||
"predeclared",
|
||||
"revive",
|
||||
"rowserrcheck",
|
||||
"sqlclosecheck",
|
||||
"staticcheck",
|
||||
"structcheck",
|
||||
"stylecheck",
|
||||
# "testpackage",
|
||||
"thelper",
|
||||
"tparallel",
|
||||
"typecheck",
|
||||
"unconvert",
|
||||
"unparam",
|
||||
"unused",
|
||||
"varcheck",
|
||||
"wastedassign",
|
||||
"whitespace",
|
||||
# "wrapcheck",
|
||||
# "wsl"
|
||||
]
|
||||
vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml (generated, vendored, new file: 127 lines)
@@ -0,0 +1,127 @@
|
||||
version: 2
|
||||
before:
|
||||
hooks:
|
||||
- go mod tidy
|
||||
- go fmt ./...
|
||||
- go test ./...
|
||||
builds:
|
||||
- id: tomll
|
||||
main: ./cmd/tomll
|
||||
binary: tomll
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
flags:
|
||||
- -trimpath
|
||||
ldflags:
|
||||
- -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}}
|
||||
mod_timestamp: '{{ .CommitTimestamp }}'
|
||||
targets:
|
||||
- linux_amd64
|
||||
- linux_arm64
|
||||
- linux_arm
|
||||
- linux_riscv64
|
||||
- windows_amd64
|
||||
- windows_arm64
|
||||
- windows_arm
|
||||
- darwin_amd64
|
||||
- darwin_arm64
|
||||
- id: tomljson
|
||||
main: ./cmd/tomljson
|
||||
binary: tomljson
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
flags:
|
||||
- -trimpath
|
||||
ldflags:
|
||||
- -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}}
|
||||
mod_timestamp: '{{ .CommitTimestamp }}'
|
||||
targets:
|
||||
- linux_amd64
|
||||
- linux_arm64
|
||||
- linux_arm
|
||||
- linux_riscv64
|
||||
- windows_amd64
|
||||
- windows_arm64
|
||||
- windows_arm
|
||||
- darwin_amd64
|
||||
- darwin_arm64
|
||||
- id: jsontoml
|
||||
main: ./cmd/jsontoml
|
||||
binary: jsontoml
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
flags:
|
||||
- -trimpath
|
||||
ldflags:
|
||||
- -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}}
|
||||
mod_timestamp: '{{ .CommitTimestamp }}'
|
||||
targets:
|
||||
- linux_amd64
|
||||
- linux_arm64
|
||||
- linux_riscv64
|
||||
- linux_arm
|
||||
- windows_amd64
|
||||
- windows_arm64
|
||||
- windows_arm
|
||||
- darwin_amd64
|
||||
- darwin_arm64
|
||||
universal_binaries:
|
||||
- id: tomll
|
||||
replace: true
|
||||
name_template: tomll
|
||||
- id: tomljson
|
||||
replace: true
|
||||
name_template: tomljson
|
||||
- id: jsontoml
|
||||
replace: true
|
||||
name_template: jsontoml
|
||||
archives:
|
||||
- id: jsontoml
|
||||
format: tar.xz
|
||||
builds:
|
||||
- jsontoml
|
||||
files:
|
||||
- none*
|
||||
name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}"
|
||||
- id: tomljson
|
||||
format: tar.xz
|
||||
builds:
|
||||
- tomljson
|
||||
files:
|
||||
- none*
|
||||
name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}"
|
||||
- id: tomll
|
||||
format: tar.xz
|
||||
builds:
|
||||
- tomll
|
||||
files:
|
||||
- none*
|
||||
name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}"
|
||||
dockers:
|
||||
- id: tools
|
||||
goos: linux
|
||||
goarch: amd64
|
||||
ids:
|
||||
- jsontoml
|
||||
- tomljson
|
||||
- tomll
|
||||
image_templates:
|
||||
- "ghcr.io/pelletier/go-toml:latest"
|
||||
- "ghcr.io/pelletier/go-toml:{{ .Tag }}"
|
||||
- "ghcr.io/pelletier/go-toml:v{{ .Major }}"
|
||||
skip_push: false
|
||||
checksum:
|
||||
name_template: 'sha256sums.txt'
|
||||
snapshot:
|
||||
version_template: "{{ incpatch .Version }}-next"
|
||||
release:
|
||||
github:
|
||||
owner: pelletier
|
||||
name: go-toml
|
||||
draft: true
|
||||
prerelease: auto
|
||||
mode: replace
|
||||
changelog:
|
||||
use: github-native
|
||||
announce:
|
||||
skip: true
|
||||
vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md (generated, vendored, new file: 193 lines)
@@ -0,0 +1,193 @@
|
||||
# Contributing
|
||||
|
||||
Thank you for your interest in go-toml! We appreciate you considering
|
||||
contributing to go-toml!
|
||||
|
||||
The main goal is the project is to provide an easy-to-use and efficient TOML
|
||||
implementation for Go that gets the job done and gets out of your way – dealing
|
||||
with TOML is probably not the central piece of your project.
|
||||
|
||||
As the single maintainer of go-toml, time is scarce. All help, big or small, is
|
||||
more than welcomed!
|
||||
|
||||
## Ask questions
|
||||
|
||||
Any question you may have, somebody else might have it too. Always feel free to
|
||||
ask them on the [discussion board][discussions]. We will try to answer them as
|
||||
clearly and quickly as possible, time permitting.
|
||||
|
||||
Asking questions also helps us identify areas where the documentation needs
|
||||
improvement, or new features that weren't envisioned before. Sometimes, a
|
||||
seemingly innocent question leads to the fix of a bug. Don't hesitate and ask
|
||||
away!
|
||||
|
||||
[discussions]: https://github.com/pelletier/go-toml/discussions
|
||||
|
||||
## Improve the documentation
|
||||
|
||||
The best way to share your knowledge and experience with go-toml is to improve
|
||||
the documentation. Fix a typo, clarify an interface, add an example, anything
|
||||
goes!
|
||||
|
||||
The documentation is present in the [README][readme] and thorough the source
|
||||
code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a change
|
||||
to the documentation, create a pull request with your proposed changes. For
|
||||
simple changes like that, the easiest way to go is probably the "Fork this
|
||||
project and edit the file" button on Github, displayed at the top right of the
|
||||
file. Unless it's a trivial change (for example a typo), provide a little bit of
|
||||
context in your pull request description or commit message.
|
||||
|
||||
## Report a bug
|
||||
|
||||
Found a bug! Sorry to hear that :(. Help us and other track them down and fix by
|
||||
reporting it. [File a new bug report][bug-report] on the [issues
|
||||
tracker][issues-tracker]. The template should provide enough guidance on what to
|
||||
include. When in doubt: add more details! By reducing ambiguity and providing
|
||||
more information, it decreases back and forth and saves everyone time.
|
||||
|
||||
## Code changes
|
||||
|
||||
Want to contribute a patch? Very happy to hear that!
|
||||
|
||||
First, some high-level rules:
|
||||
|
||||
- A short proposal with some POC code is better than a lengthy piece of text
|
||||
with no code. Code speaks louder than words. That being said, bigger changes
|
||||
should probably start with a [discussion][discussions].
|
||||
- No backward-incompatible patch will be accepted unless discussed. Sometimes
|
||||
it's hard, but we try not to break people's programs unless we absolutely have
|
||||
to.
|
||||
- If you are writing a new feature or extending an existing one, make sure to
|
||||
write some documentation.
|
||||
- Bug fixes need to be accompanied with regression tests.
|
||||
- New code needs to be tested.
|
||||
- Your commit messages need to explain why the change is needed, even if already
|
||||
included in the PR description.
|
||||
|
||||
It does sound like a lot, but those best practices are here to save time overall
|
||||
and continuously improve the quality of the project, which is something everyone
|
||||
benefits from.
|
||||
|
||||
### Get started
|
||||
|
||||
The fairly standard code contribution process looks like that:
|
||||
|
||||
1. [Fork the project][fork].
|
||||
2. Make your changes, commit on any branch you like.
|
||||
3. [Open up a pull request][pull-request]
|
||||
4. Review, potential ask for changes.
|
||||
5. Merge.
|
||||
|
||||
Feel free to ask for help! You can create draft pull requests to gather
|
||||
some early feedback!
|
||||
|
||||
### Run the tests
|
||||
|
||||
You can run tests for go-toml using Go's test tool: `go test -race ./...`.
|
||||
|
||||
During the pull request process, all tests will be ran on Linux, Windows, and
|
||||
MacOS on the last two versions of Go.
|
||||
|
||||
However, given GitHub's new policy to _not_ run Actions on pull requests until a
|
||||
maintainer clicks on button, it is highly recommended that you run them locally
|
||||
as you make changes.
|
||||
|
||||
### Check coverage
|
||||
|
||||
We use `go tool cover` to compute test coverage. Most code editors have a way to
|
||||
run and display code coverage, but at the end of the day, we do this:
|
||||
|
||||
```
|
||||
go test -covermode=atomic -coverprofile=coverage.out
|
||||
go tool cover -func=coverage.out
|
||||
```
|
||||
|
||||
and verify that the overall percentage of tested code does not go down. This is
|
||||
a requirement. As a rule of thumb, all lines of code touched by your changes
|
||||
should be covered. On Unix you can use `./ci.sh coverage -d v2` to check if your
|
||||
code lowers the coverage.
|
||||
|
||||
### Verify performance
|
||||
|
||||
Go-toml aims to stay efficient. We rely on a set of scenarios executed with Go's
|
||||
builtin benchmark systems. Because of their noisy nature, containers provided by
|
||||
Github Actions cannot be reliably used for benchmarking. As a result, you are
|
||||
responsible for checking that your changes do not incur a performance penalty.
|
||||
You can run their following to execute benchmarks:
|
||||
|
||||
```
|
||||
go test ./... -bench=. -count=10
|
||||
```
|
||||
|
||||
Benchmark results should be compared against each other with
|
||||
[benchstat][benchstat]. Typical flow looks like this:
|
||||
|
||||
1. On the `v2` branch, run `go test ./... -bench=. -count 10` and save output to
|
||||
a file (for example `old.txt`).
|
||||
2. Make some code changes.
|
||||
3. Run `go test ....` again, and save the output to an other file (for example
|
||||
`new.txt`).
|
||||
4. Run `benchstat old.txt new.txt` to check that time/op does not go up in any
|
||||
test.
|
||||
|
||||
On Unix you can use `./ci.sh benchmark -d v2` to verify how your code impacts
|
||||
performance.
|
||||
|
||||
It is highly encouraged to add the benchstat results to your pull request
|
||||
description. Pull requests that lower performance will receive more scrutiny.
|
||||
|
||||
[benchstat]: https://pkg.go.dev/golang.org/x/perf/cmd/benchstat
|
||||
|
||||
### Style
|
||||
|
||||
Try to look around and follow the same format and structure as the rest of the
|
||||
code. We enforce using `go fmt` on the whole code base.
|
||||
|
||||
---
|
||||
|
||||
## Maintainers-only
|
||||
|
||||
### Merge pull request
|
||||
|
||||
Checklist:
|
||||
|
||||
- Passing CI.
|
||||
- Does not introduce backward-incompatible changes (unless discussed).
|
||||
- Has relevant doc changes.
|
||||
- Benchstat does not show performance regression.
|
||||
- Pull request is [labeled appropriately][pr-labels].
|
||||
- Title will be understandable in the changelog.
|
||||
|
||||
1. Merge using "squash and merge".
|
||||
2. Make sure to edit the commit message to keep all the useful information
|
||||
nice and clean.
|
||||
3. Make sure the commit title is clear and contains the PR number (#123).
|
||||
|
||||
### New release
|
||||
|
||||
1. Decide on the next version number. Use semver. Review commits since last
|
||||
version to assess.
|
||||
2. Tag release. For example:
|
||||
```
|
||||
git checkout v2
|
||||
git pull
|
||||
git tag v2.2.0
|
||||
git push --tags
|
||||
```
|
||||
3. CI automatically builds a draft Github release. Review it and edit as
|
||||
necessary. Look for "Other changes". That would indicate a pull request not
|
||||
labeled properly. Tweak labels and pull request titles until changelog looks
|
||||
good for users.
|
||||
4. Check "create discussion" box, in the "Releases" category.
|
||||
5. If new version is an alpha or beta only, check pre-release box.
|
||||
|
||||
|
||||
[issues-tracker]: https://github.com/pelletier/go-toml/issues
|
||||
[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md
|
||||
[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml
|
||||
[readme]: ./README.md
|
||||
[fork]: https://help.github.com/articles/fork-a-repo
|
||||
[pull-request]: https://help.github.com/en/articles/creating-a-pull-request
|
||||
[new-release]: https://github.com/pelletier/go-toml/releases/new
|
||||
[gh]: https://github.com/cli/cli
|
||||
[pr-labels]: https://github.com/pelletier/go-toml/blob/v2/.github/release.yml
|
||||
vendor/github.com/pelletier/go-toml/v2/Dockerfile (generated, vendored, new file: 5 lines)
@@ -0,0 +1,5 @@
|
||||
FROM scratch
|
||||
ENV PATH "$PATH:/bin"
|
||||
COPY tomll /bin/tomll
|
||||
COPY tomljson /bin/tomljson
|
||||
COPY jsontoml /bin/jsontoml
|
||||
vendor/github.com/pelletier/go-toml/v2/LICENSE (generated, vendored, new file: 22 lines)
@@ -0,0 +1,22 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
go-toml v2
|
||||
Copyright (c) 2021 - 2023 Thomas Pelletier
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
vendor/github.com/pelletier/go-toml/v2/README.md (generated, vendored, new file: 576 lines)
@@ -0,0 +1,576 @@
|
||||
# go-toml v2
|
||||
|
||||
Go library for the [TOML](https://toml.io/en/) format.
|
||||
|
||||
This library supports [TOML v1.0.0](https://toml.io/en/v1.0.0).
|
||||
|
||||
[🐞 Bug Reports](https://github.com/pelletier/go-toml/issues)
|
||||
|
||||
[💬 Anything else](https://github.com/pelletier/go-toml/discussions)
|
||||
|
||||
## Documentation
|
||||
|
||||
Full API, examples, and implementation notes are available in the Go
|
||||
documentation.
|
||||
|
||||
[](https://pkg.go.dev/github.com/pelletier/go-toml/v2)
|
||||
|
||||
## Import
|
||||
|
||||
```go
|
||||
import "github.com/pelletier/go-toml/v2"
|
||||
```
|
||||
|
||||
See [Modules](#Modules).
|
||||
|
||||
## Features
|
||||
|
||||
### Stdlib behavior
|
||||
|
||||
As much as possible, this library is designed to behave similarly as the
|
||||
standard library's `encoding/json`.
|
||||
|
||||
### Performance
|
||||
|
||||
While go-toml favors usability, it is written with performance in mind. Most
|
||||
operations should not be shockingly slow. See [benchmarks](#benchmarks).
|
||||
|
||||
### Strict mode
|
||||
|
||||
`Decoder` can be set to "strict mode", which makes it error when some parts of
|
||||
the TOML document was not present in the target structure. This is a great way
|
||||
to check for typos. [See example in the documentation][strict].
|
||||
|
||||
[strict]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Decoder.DisallowUnknownFields
|
||||
|
||||
### Contextualized errors
|
||||
|
||||
When most decoding errors occur, go-toml returns [`DecodeError`][decode-err],
|
||||
which contains a human readable contextualized version of the error. For
|
||||
example:
|
||||
|
||||
```
|
||||
1| [server]
|
||||
2| path = 100
|
||||
| ~~~ cannot decode TOML integer into struct field toml_test.Server.Path of type string
|
||||
3| port = 50
|
||||
```
|
||||
|
||||
[decode-err]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#DecodeError
|
||||
|
||||
### Local date and time support
|
||||
|
||||
TOML supports native [local date/times][ldt]. It allows to represent a given
|
||||
date, time, or date-time without relation to a timezone or offset. To support
|
||||
this use-case, go-toml provides [`LocalDate`][tld], [`LocalTime`][tlt], and
|
||||
[`LocalDateTime`][tldt]. Those types can be transformed to and from `time.Time`,
|
||||
making them convenient yet unambiguous structures for their respective TOML
|
||||
representation.
|
||||
|
||||
[ldt]: https://toml.io/en/v1.0.0#local-date-time
|
||||
[tld]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDate
|
||||
[tlt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalTime
|
||||
[tldt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDateTime
|
||||
|
||||
### Commented config
|
||||
|
||||
Since TOML is often used for configuration files, go-toml can emit documents
|
||||
annotated with [comments and commented-out values][comments-example]. For
|
||||
example, it can generate the following file:
|
||||
|
||||
```toml
|
||||
# Host IP to connect to.
|
||||
host = '127.0.0.1'
|
||||
# Port of the remote server.
|
||||
port = 4242
|
||||
|
||||
# Encryption parameters (optional)
|
||||
# [TLS]
|
||||
# cipher = 'AEAD-AES128-GCM-SHA256'
|
||||
# version = 'TLS 1.3'
|
||||
```
|
||||
|
||||
[comments-example]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Marshal-Commented
|
||||
|
||||
## Getting started
|
||||
|
||||
Given the following struct, let's see how to read it and write it as TOML:
|
||||
|
||||
```go
|
||||
type MyConfig struct {
|
||||
Version int
|
||||
Name string
|
||||
Tags []string
|
||||
}
|
||||
```
|
||||
|
||||
### Unmarshaling
|
||||
|
||||
[`Unmarshal`][unmarshal] reads a TOML document and fills a Go structure with its
|
||||
content. For example:
|
||||
|
||||
```go
|
||||
doc := `
|
||||
version = 2
|
||||
name = "go-toml"
|
||||
tags = ["go", "toml"]
|
||||
`
|
||||
|
||||
var cfg MyConfig
|
||||
err := toml.Unmarshal([]byte(doc), &cfg)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Println("version:", cfg.Version)
|
||||
fmt.Println("name:", cfg.Name)
|
||||
fmt.Println("tags:", cfg.Tags)
|
||||
|
||||
// Output:
|
||||
// version: 2
|
||||
// name: go-toml
|
||||
// tags: [go toml]
|
||||
```
|
||||
|
||||
[unmarshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Unmarshal
|
||||
|
||||
### Marshaling
|
||||
|
||||
[`Marshal`][marshal] is the opposite of Unmarshal: it represents a Go structure
|
||||
as a TOML document:
|
||||
|
||||
```go
|
||||
cfg := MyConfig{
|
||||
Version: 2,
|
||||
Name: "go-toml",
|
||||
Tags: []string{"go", "toml"},
|
||||
}
|
||||
|
||||
b, err := toml.Marshal(cfg)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Println(string(b))
|
||||
|
||||
// Output:
|
||||
// Version = 2
|
||||
// Name = 'go-toml'
|
||||
// Tags = ['go', 'toml']
|
||||
```
|
||||
|
||||
[marshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Marshal
|
||||
|
||||
## Unstable API
|
||||
|
||||
This API does not yet follow the backward compatibility guarantees of this
|
||||
library. They provide early access to features that may have rough edges or an
|
||||
API subject to change.
|
||||
|
||||
### Parser
|
||||
|
||||
Parser is the unstable API that allows iterative parsing of a TOML document at
|
||||
the AST level. See https://pkg.go.dev/github.com/pelletier/go-toml/v2/unstable.
|
||||
|
||||
## Benchmarks
|
||||
|
||||
Execution time speedup compared to other Go TOML libraries:
|
||||
|
||||
<table>
|
||||
<thead>
|
||||
<tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr><td>Marshal/HugoFrontMatter-2</td><td>1.9x</td><td>2.2x</td></tr>
|
||||
<tr><td>Marshal/ReferenceFile/map-2</td><td>1.7x</td><td>2.1x</td></tr>
|
||||
<tr><td>Marshal/ReferenceFile/struct-2</td><td>2.2x</td><td>3.0x</td></tr>
|
||||
<tr><td>Unmarshal/HugoFrontMatter-2</td><td>2.9x</td><td>2.7x</td></tr>
|
||||
<tr><td>Unmarshal/ReferenceFile/map-2</td><td>2.6x</td><td>2.7x</td></tr>
|
||||
<tr><td>Unmarshal/ReferenceFile/struct-2</td><td>4.6x</td><td>5.1x</td></tr>
|
||||
</tbody>
|
||||
</table>
|
||||
<details><summary>See more</summary>
|
||||
<p>The table above has the results of the most common use-cases. The table below
|
||||
contains the results of all benchmarks, including unrealistic ones. It is
|
||||
provided for completeness.</p>
|
||||
|
||||
<table>
|
||||
<thead>
|
||||
<tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr><td>Marshal/SimpleDocument/map-2</td><td>1.8x</td><td>2.7x</td></tr>
|
||||
<tr><td>Marshal/SimpleDocument/struct-2</td><td>2.7x</td><td>3.8x</td></tr>
|
||||
<tr><td>Unmarshal/SimpleDocument/map-2</td><td>3.8x</td><td>3.0x</td></tr>
|
||||
<tr><td>Unmarshal/SimpleDocument/struct-2</td><td>5.6x</td><td>4.1x</td></tr>
|
||||
<tr><td>UnmarshalDataset/example-2</td><td>3.0x</td><td>3.2x</td></tr>
|
||||
<tr><td>UnmarshalDataset/code-2</td><td>2.3x</td><td>2.9x</td></tr>
|
||||
<tr><td>UnmarshalDataset/twitter-2</td><td>2.6x</td><td>2.7x</td></tr>
|
||||
<tr><td>UnmarshalDataset/citm_catalog-2</td><td>2.2x</td><td>2.3x</td></tr>
|
||||
<tr><td>UnmarshalDataset/canada-2</td><td>1.8x</td><td>1.5x</td></tr>
|
||||
<tr><td>UnmarshalDataset/config-2</td><td>4.1x</td><td>2.9x</td></tr>
|
||||
<tr><td>geomean</td><td>2.7x</td><td>2.8x</td></tr>
|
||||
</tbody>
|
||||
</table>
|
||||
<p>This table can be generated with <code>./ci.sh benchmark -a -html</code>.</p>
|
||||
</details>
|
||||
|
||||
## Modules
|
||||
|
||||
go-toml uses Go's standard modules system.
|
||||
|
||||
Installation instructions:
|
||||
|
||||
- Go ≥ 1.16: Nothing to do. Use the import in your code. The `go` command deals
|
||||
with it automatically.
|
||||
- Go ≥ 1.13: `GO111MODULE=on go get github.com/pelletier/go-toml/v2`.
|
||||
|
||||
In case of trouble: [Go Modules FAQ][mod-faq].
|
||||
|
||||
[mod-faq]: https://github.com/golang/go/wiki/Modules#why-does-installing-a-tool-via-go-get-fail-with-error-cannot-find-main-module
|
||||
|
||||
## Tools
|
||||
|
||||
Go-toml provides three handy command line tools:
|
||||
|
||||
* `tomljson`: Reads a TOML file and outputs its JSON representation.
|
||||
|
||||
```
|
||||
$ go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest
|
||||
$ tomljson --help
|
||||
```
|
||||
|
||||
* `jsontoml`: Reads a JSON file and outputs a TOML representation.
|
||||
|
||||
```
|
||||
$ go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest
|
||||
$ jsontoml --help
|
||||
```
|
||||
|
||||
* `tomll`: Lints and reformats a TOML file.
|
||||
|
||||
```
|
||||
$ go install github.com/pelletier/go-toml/v2/cmd/tomll@latest
|
||||
$ tomll --help
|
||||
```
|
||||
|
||||
### Docker image
|
||||
|
||||
Those tools are also available as a [Docker image][docker]. For example, to use
|
||||
`tomljson`:
|
||||
|
||||
```
|
||||
docker run -i ghcr.io/pelletier/go-toml:v2 tomljson < example.toml
|
||||
```
|
||||
|
||||
Multiple versions are available on [ghcr.io][docker].
|
||||
|
||||
[docker]: https://github.com/pelletier/go-toml/pkgs/container/go-toml
|
||||
|
||||
## Migrating from v1
|
||||
|
||||
This section describes the differences between v1 and v2, with some pointers on
|
||||
how to get the original behavior when possible.
|
||||
|
||||
### Decoding / Unmarshal
|
||||
|
||||
#### Automatic field name guessing
|
||||
|
||||
When unmarshaling to a struct, if a key in the TOML document does not exactly
|
||||
match the name of a struct field or any of the `toml`-tagged field, v1 tries
|
||||
multiple variations of the key ([code][v1-keys]).
|
||||
|
||||
V2 instead does a case-insensitive matching, like `encoding/json`.
|
||||
|
||||
This could impact you if you are relying on casing to differentiate two fields,
|
||||
and one of them is a not using the `toml` struct tag. The recommended solution
|
||||
is to be specific about tag names for those fields using the `toml` struct tag.
|
||||
|
||||
[v1-keys]: https://github.com/pelletier/go-toml/blob/a2e52561804c6cd9392ebf0048ca64fe4af67a43/marshal.go#L775-L781
|
||||
|
||||
#### Ignore preexisting value in interface
|
||||
|
||||
When decoding into a non-nil `interface{}`, go-toml v1 uses the type of the
|
||||
element in the interface to decode the object. For example:
|
||||
|
||||
```go
|
||||
type inner struct {
|
||||
B interface{}
|
||||
}
|
||||
type doc struct {
|
||||
A interface{}
|
||||
}
|
||||
|
||||
d := doc{
|
||||
A: inner{
|
||||
B: "Before",
|
||||
},
|
||||
}
|
||||
|
||||
data := `
|
||||
[A]
|
||||
B = "After"
|
||||
`
|
||||
|
||||
toml.Unmarshal([]byte(data), &d)
|
||||
fmt.Printf("toml v1: %#v\n", d)
|
||||
|
||||
// toml v1: main.doc{A:main.inner{B:"After"}}
|
||||
```
|
||||
|
||||
In this case, field `A` is of type `interface{}`, containing a `inner` struct.
|
||||
V1 sees that type and uses it when decoding the object.
|
||||
|
||||
When decoding an object into an `interface{}`, V2 instead disregards whatever
|
||||
value the `interface{}` may contain and replaces it with a
|
||||
`map[string]interface{}`. With the same data structure as above, here is what
|
||||
the result looks like:
|
||||
|
||||
```go
|
||||
toml.Unmarshal([]byte(data), &d)
|
||||
fmt.Printf("toml v2: %#v\n", d)
|
||||
|
||||
// toml v2: main.doc{A:map[string]interface {}{"B":"After"}}
|
||||
```
|
||||
|
||||
This is to match `encoding/json`'s behavior. There is no way to make the v2
|
||||
decoder behave like v1.
|
||||
|
||||
#### Values out of array bounds ignored
|
||||
|
||||
When decoding into an array, v1 returns an error when the number of elements
|
||||
contained in the doc is superior to the capacity of the array. For example:
|
||||
|
||||
```go
|
||||
type doc struct {
|
||||
A [2]string
|
||||
}
|
||||
d := doc{}
|
||||
err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d)
|
||||
fmt.Println(err)
|
||||
|
||||
// (1, 1): unmarshal: TOML array length (3) exceeds destination array length (2)
|
||||
```
|
||||
|
||||
In the same situation, v2 ignores the last value:
|
||||
|
||||
```go
|
||||
err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d)
|
||||
fmt.Println("err:", err, "d:", d)
|
||||
// err: <nil> d: {[one two]}
|
||||
```
|
||||
|
||||
This is to match `encoding/json`'s behavior. There is no way to make the v2
|
||||
decoder behave like v1.
|
||||
|
||||
#### Support for `toml.Unmarshaler` has been dropped
|
||||
|
||||
This method was not widely used, poorly defined, and added a lot of complexity.
|
||||
A similar effect can be achieved by implementing the `encoding.TextUnmarshaler`
|
||||
interface and use strings.
|
||||
|
||||
#### Support for `default` struct tag has been dropped
|
||||
|
||||
This feature adds complexity and a poorly defined API for an effect that can be
|
||||
accomplished outside of the library.
|
||||
|
||||
It does not seem like other format parsers in Go support that feature (the
|
||||
project referenced in the original ticket #202 has not been updated since 2017).
|
||||
Given that go-toml v2 should not touch values not in the document, the same
|
||||
effect can be achieved by pre-filling the struct with defaults (libraries like
|
||||
[go-defaults][go-defaults] can help). Also, string representation is not well
|
||||
defined for all types: it creates issues like #278.
|
||||
|
||||
The recommended replacement is pre-filling the struct before unmarshaling.
|
||||
|
||||
[go-defaults]: https://github.com/mcuadros/go-defaults
|
||||
|
||||
#### `toml.Tree` replacement
|
||||
|
||||
This structure was the initial attempt at providing a document model for
|
||||
go-toml. It allows manipulating the structure of any document, encoding and
|
||||
decoding from their TOML representation. While a more robust feature was
|
||||
initially planned in go-toml v2, this has been ultimately [removed from
|
||||
scope][nodoc] of this library, with no plan to add it back at the moment. The
|
||||
closest equivalent at the moment would be to unmarshal into an `interface{}` and
|
||||
use type assertions and/or reflection to manipulate the arbitrary
|
||||
structure. However this would fall short of providing all of the TOML features
|
||||
such as adding comments and be specific about whitespace.
|
||||
|
||||
|
||||
#### `toml.Position` are not retrievable anymore
|
||||
|
||||
The API for retrieving the position (line, column) of a specific TOML element do
|
||||
not exist anymore. This was done to minimize the amount of concepts introduced
|
||||
by the library (query path), and avoid the performance hit related to storing
|
||||
positions in the absence of a document model, for a feature that seemed to have
|
||||
little use. Errors however have gained more detailed position
|
||||
information. Position retrieval seems better fitted for a document model, which
|
||||
has been [removed from the scope][nodoc] of go-toml v2 at the moment.
|
||||
|
||||
### Encoding / Marshal
|
||||
|
||||
#### Default struct fields order
|
||||
|
||||
V1 emits struct fields order alphabetically by default. V2 struct fields are
|
||||
emitted in order they are defined. For example:
|
||||
|
||||
```go
|
||||
type S struct {
|
||||
B string
|
||||
A string
|
||||
}
|
||||
|
||||
data := S{
|
||||
B: "B",
|
||||
A: "A",
|
||||
}
|
||||
|
||||
b, _ := tomlv1.Marshal(data)
|
||||
fmt.Println("v1:\n" + string(b))
|
||||
|
||||
b, _ = tomlv2.Marshal(data)
|
||||
fmt.Println("v2:\n" + string(b))
|
||||
|
||||
// Output:
|
||||
// v1:
|
||||
// A = "A"
|
||||
// B = "B"
|
||||
|
||||
// v2:
|
||||
// B = 'B'
|
||||
// A = 'A'
|
||||
```
|
||||
|
||||
There is no way to make v2 encoder behave like v1. A workaround could be to
|
||||
manually sort the fields alphabetically in the struct definition, or generate
|
||||
struct types using `reflect.StructOf`.
|
||||
|
||||
#### No indentation by default
|
||||
|
||||
V1 automatically indents content of tables by default. V2 does not. However the
|
||||
same behavior can be obtained using [`Encoder.SetIndentTables`][sit]. For example:
|
||||
|
||||
```go
|
||||
data := map[string]interface{}{
|
||||
"table": map[string]string{
|
||||
"key": "value",
|
||||
},
|
||||
}
|
||||
|
||||
b, _ := tomlv1.Marshal(data)
|
||||
fmt.Println("v1:\n" + string(b))
|
||||
|
||||
b, _ = tomlv2.Marshal(data)
|
||||
fmt.Println("v2:\n" + string(b))
|
||||
|
||||
buf := bytes.Buffer{}
|
||||
enc := tomlv2.NewEncoder(&buf)
|
||||
enc.SetIndentTables(true)
|
||||
enc.Encode(data)
|
||||
fmt.Println("v2 Encoder:\n" + string(buf.Bytes()))
|
||||
|
||||
// Output:
|
||||
// v1:
|
||||
//
|
||||
// [table]
|
||||
// key = "value"
|
||||
//
|
||||
// v2:
|
||||
// [table]
|
||||
// key = 'value'
|
||||
//
|
||||
//
|
||||
// v2 Encoder:
|
||||
// [table]
|
||||
// key = 'value'
|
||||
```
|
||||
|
||||
[sit]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Encoder.SetIndentTables
|
||||
|
||||
#### Keys and strings are single quoted
|
||||
|
||||
V1 always uses double quotes (`"`) around strings and keys that cannot be
|
||||
represented bare (unquoted). V2 uses single quotes instead by default (`'`),
|
||||
unless a character cannot be represented, then falls back to double quotes. As a
|
||||
result of this change, `Encoder.QuoteMapKeys` has been removed, as it is not
|
||||
useful anymore.
|
||||
|
||||
There is no way to make v2 encoder behave like v1.
|
||||
|
||||
#### `TextMarshaler` emits as a string, not TOML
|
||||
|
||||
Types that implement [`encoding.TextMarshaler`][tm] can emit arbitrary TOML in
|
||||
v1. The encoder would append the result to the output directly. In v2 the result
|
||||
is wrapped in a string. As a result, this interface cannot be implemented by the
|
||||
root object.
|
||||
|
||||
There is no way to make v2 encoder behave like v1.
|
||||
|
||||
[tm]: https://golang.org/pkg/encoding/#TextMarshaler
|
||||
|
||||
#### `Encoder.CompactComments` has been removed
|
||||
|
||||
Emitting compact comments is now the default behavior of go-toml. This option
|
||||
is not necessary anymore.
|
||||
|
||||
#### Struct tags have been merged
|
||||
|
||||
V1 used to provide multiple struct tags: `comment`, `commented`, `multiline`,
|
||||
`toml`, and `omitempty`. To behave more like the standard library, v2 has merged
|
||||
`toml`, `multiline`, `commented`, and `omitempty`. For example:
|
||||
|
||||
```go
|
||||
type doc struct {
|
||||
// v1
|
||||
F string `toml:"field" multiline:"true" omitempty:"true" commented:"true"`
|
||||
// v2
|
||||
F string `toml:"field,multiline,omitempty,commented"`
|
||||
}
|
||||
```
|
||||
|
||||
Has a result, the `Encoder.SetTag*` methods have been removed, as there is just
|
||||
one tag now.
|
||||
|
||||
#### `Encoder.ArraysWithOneElementPerLine` has been renamed
|
||||
|
||||
The new name is `Encoder.SetArraysMultiline`. The behavior should be the same.
|
||||
|
||||
#### `Encoder.Indentation` has been renamed
|
||||
|
||||
The new name is `Encoder.SetIndentSymbol`. The behavior should be the same.
|
||||
|
||||
|
||||
#### Embedded structs behave like stdlib
|
||||
|
||||
V1 defaults to merging embedded struct fields into the embedding struct. This
|
||||
behavior was unexpected because it does not follow the standard library. To
|
||||
avoid breaking backward compatibility, the `Encoder.PromoteAnonymous` method was
|
||||
added to make the encoder behave correctly. Given backward compatibility is not
|
||||
a problem anymore, v2 does the right thing by default: it follows the behavior
|
||||
of `encoding/json`. `Encoder.PromoteAnonymous` has been removed.
|
||||
|
||||
[nodoc]: https://github.com/pelletier/go-toml/discussions/506#discussioncomment-1526038
|
||||
|
||||
### `query`
|
||||
|
||||
go-toml v1 provided the [`go-toml/query`][query] package. It allowed to run
|
||||
JSONPath-style queries on TOML files. This feature is not available in v2. For a
|
||||
replacement, check out [dasel][dasel].
|
||||
|
||||
This package has been removed because it was essentially not supported anymore
|
||||
(last commit May 2020), increased the complexity of the code base, and more
|
||||
complete solutions exist out there.
|
||||
|
||||
[query]: https://github.com/pelletier/go-toml/tree/f99d6bbca119636aeafcf351ee52b3d202782627/query
|
||||
[dasel]: https://github.com/TomWright/dasel
|
||||
|
||||
## Versioning
|
||||
|
||||
Expect for parts explicitly marked otherwise, go-toml follows [Semantic
|
||||
Versioning](https://semver.org). The supported version of
|
||||
[TOML](https://github.com/toml-lang/toml) is indicated at the beginning of this
|
||||
document. The last two major versions of Go are supported (see [Go Release
|
||||
Policy](https://golang.org/doc/devel/release.html#policy)).
|
||||
|
||||
## License
|
||||
|
||||
The MIT License (MIT). Read [LICENSE](LICENSE).
|
||||
vendor/github.com/pelletier/go-toml/v2/SECURITY.md (generated, vendored, new file: 16 lines)
@@ -0,0 +1,16 @@
|
||||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
| Version | Supported |
|
||||
| ---------- | ------------------ |
|
||||
| Latest 2.x | :white_check_mark: |
|
||||
| All 1.x | :x: |
|
||||
| All 0.x | :x: |
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
Email a vulnerability report to `security@pelletier.codes`. Make sure to include
|
||||
as many details as possible to reproduce the vulnerability. This is a
|
||||
side-project: I will try to get back to you as quickly as possible, time
|
||||
permitting in my personal life. Providing a working patch helps very much!
|
||||
vendor/github.com/pelletier/go-toml/v2/ci.sh (generated, vendored, new file: 284 lines)
@@ -0,0 +1,284 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
|
||||
stderr() {
|
||||
echo "$@" 1>&2
|
||||
}
|
||||
|
||||
usage() {
|
||||
b=$(basename "$0")
|
||||
echo $b: ERROR: "$@" 1>&2
|
||||
|
||||
cat 1>&2 <<EOF
|
||||
|
||||
DESCRIPTION
|
||||
|
||||
$(basename "$0") is the script to run continuous integration commands for
|
||||
go-toml on unix.
|
||||
|
||||
Requires Go and Git to be available in the PATH. Expects to be ran from the
|
||||
root of go-toml's Git repository.
|
||||
|
||||
USAGE
|
||||
|
||||
$b COMMAND [OPTIONS...]
|
||||
|
||||
COMMANDS
|
||||
|
||||
benchmark [OPTIONS...] [BRANCH]
|
||||
|
||||
Run benchmarks.
|
||||
|
||||
ARGUMENTS
|
||||
|
||||
BRANCH Optional. Defines which Git branch to use when running
|
||||
benchmarks.
|
||||
|
||||
OPTIONS
|
||||
|
||||
-d Compare benchmarks of HEAD with BRANCH using benchstats. In
|
||||
this form the BRANCH argument is required.
|
||||
|
||||
-a Compare benchmarks of HEAD against go-toml v1 and
|
||||
BurntSushi/toml.
|
||||
|
||||
-html When used with -a, emits the output as HTML, ready to be
|
||||
embedded in the README.
|
||||
|
||||
coverage [OPTIONS...] [BRANCH]
|
||||
|
||||
Generates code coverage.
|
||||
|
||||
ARGUMENTS
|
||||
|
||||
BRANCH Optional. Defines which Git branch to use when reporting
|
||||
coverage. Defaults to HEAD.
|
||||
|
||||
OPTIONS
|
||||
|
||||
-d Compare coverage of HEAD with the one of BRANCH. In this form,
|
||||
the BRANCH argument is required. Exit code is non-zero when
|
||||
coverage percentage decreased.
|
||||
EOF
|
||||
exit 1
|
||||
}
|
||||
|
||||
cover() {
|
||||
branch="${1}"
|
||||
dir="$(mktemp -d)"
|
||||
|
||||
stderr "Executing coverage for ${branch} at ${dir}"
|
||||
|
||||
if [ "${branch}" = "HEAD" ]; then
|
||||
cp -r . "${dir}/"
|
||||
else
|
||||
git worktree add "$dir" "$branch"
|
||||
fi
|
||||
|
||||
pushd "$dir"
|
||||
go test -covermode=atomic -coverpkg=./... -coverprofile=coverage.out.tmp ./...
|
||||
grep -Ev '(fuzz|testsuite|tomltestgen|gotoml-test-decoder|gotoml-test-encoder)' coverage.out.tmp > coverage.out
|
||||
go tool cover -func=coverage.out
|
||||
echo "Coverage profile for ${branch}: ${dir}/coverage.out" >&2
|
||||
popd
|
||||
|
||||
if [ "${branch}" != "HEAD" ]; then
|
||||
git worktree remove --force "$dir"
|
||||
fi
|
||||
}
|
||||
|
||||
coverage() {
|
||||
case "$1" in
|
||||
-d)
|
||||
shift
|
||||
target="${1?Need to provide a target branch argument}"
|
||||
|
||||
output_dir="$(mktemp -d)"
|
||||
target_out="${output_dir}/target.txt"
|
||||
head_out="${output_dir}/head.txt"
|
||||
|
||||
cover "${target}" > "${target_out}"
|
||||
cover "HEAD" > "${head_out}"
|
||||
|
||||
cat "${target_out}"
|
||||
cat "${head_out}"
|
||||
|
||||
echo ""
|
||||
|
||||
target_pct="$(tail -n2 ${target_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%.*/\1/')"
|
||||
head_pct="$(tail -n2 ${head_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%/\1/')"
|
||||
echo "Results: ${target} ${target_pct}% HEAD ${head_pct}%"
|
||||
|
||||
delta_pct=$(echo "$head_pct - $target_pct" | bc -l)
|
||||
echo "Delta: ${delta_pct}"
|
||||
|
||||
if [[ $delta_pct = \-* ]]; then
|
||||
echo "Regression!";
|
||||
|
||||
target_diff="${output_dir}/target.diff.txt"
|
||||
head_diff="${output_dir}/head.diff.txt"
|
||||
cat "${target_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${target_diff}"
|
||||
cat "${head_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${head_diff}"
|
||||
|
||||
diff --side-by-side --suppress-common-lines "${target_diff}" "${head_diff}"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
|
||||
cover "${1-HEAD}"
|
||||
}
|
||||
|
||||
bench() {
    branch="${1}"
    out="${2}"
    replace="${3}"
    dir="$(mktemp -d)"

    stderr "Executing benchmark for ${branch} at ${dir}"

    if [ "${branch}" = "HEAD" ]; then
        cp -r . "${dir}/"
    else
        git worktree add "$dir" "$branch"
    fi

    pushd "$dir"

    if [ "${replace}" != "" ]; then
        find ./benchmark/ -iname '*.go' -exec sed -i -E "s|github.com/pelletier/go-toml/v2|${replace}|g" {} \;
        go get "${replace}"
    fi

    export GOMAXPROCS=2
    go test '-bench=^Benchmark(Un)?[mM]arshal' -count=10 -run=Nothing ./... | tee "${out}"
    popd

    if [ "${branch}" != "HEAD" ]; then
        git worktree remove --force "$dir"
    fi
}

fmktemp() {
    if mktemp --version &> /dev/null; then
        # GNU
        mktemp --suffix=-$1
    else
        # BSD
        mktemp -t $1
    fi
}

benchstathtml() {
    python3 - $1 <<'EOF'
import sys

lines = []
stop = False

with open(sys.argv[1]) as f:
    for line in f.readlines():
        line = line.strip()
        if line == "":
            stop = True
        if not stop:
            lines.append(line.split(','))

results = []
for line in reversed(lines[2:]):
    if len(line) < 8 or line[0] == "":
        continue
    v2 = float(line[1])
    results.append([
        line[0].replace("-32", ""),
        "%.1fx" % (float(line[3])/v2), # v1
        "%.1fx" % (float(line[7])/v2), # bs
    ])
# move geomean to the end
results.append(results[0])
del results[0]


def printtable(data):
    print("""
<table>
  <thead>
    <tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
  </thead>
  <tbody>""")

    for r in data:
        print("    <tr><td>{}</td><td>{}</td><td>{}</td></tr>".format(*r))

    print("""  </tbody>
</table>""")


def match(x):
    return "ReferenceFile" in x[0] or "HugoFrontMatter" in x[0]

above = [x for x in results if match(x)]
below = [x for x in results if not match(x)]

printtable(above)
print("<details><summary>See more</summary>")
print("""<p>The table above has the results of the most common use-cases. The table below
contains the results of all benchmarks, including unrealistic ones. It is
provided for completeness.</p>""")
printtable(below)
print('<p>This table can be generated with <code>./ci.sh benchmark -a -html</code>.</p>')
print("</details>")

EOF
}

benchmark() {
    case "$1" in
    -d)
        shift
        target="${1?Need to provide a target branch argument}"

        old=`fmktemp ${target}`
        bench "${target}" "${old}"

        new=`fmktemp HEAD`
        bench HEAD "${new}"

        benchstat "${old}" "${new}"
        return 0
        ;;
    -a)
        shift

        v2stats=`fmktemp go-toml-v2`
        bench HEAD "${v2stats}" "github.com/pelletier/go-toml/v2"
        v1stats=`fmktemp go-toml-v1`
        bench HEAD "${v1stats}" "github.com/pelletier/go-toml"
        bsstats=`fmktemp bs-toml`
        bench HEAD "${bsstats}" "github.com/BurntSushi/toml"

        cp "${v2stats}" go-toml-v2.txt
        cp "${v1stats}" go-toml-v1.txt
        cp "${bsstats}" bs-toml.txt

        if [ "$1" = "-html" ]; then
            tmpcsv=`fmktemp csv`
            benchstat -format csv go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv
            benchstathtml $tmpcsv
        else
            benchstat go-toml-v2.txt go-toml-v1.txt bs-toml.txt
        fi

        rm -f go-toml-v2.txt go-toml-v1.txt bs-toml.txt
        return $?
    esac

    bench "${1-HEAD}" `mktemp`
}

case "$1" in
|
||||
coverage) shift; coverage $@;;
|
||||
benchmark) shift; benchmark $@;;
|
||||
*) usage "bad argument $1";;
|
||||
esac
|
||||
550
vendor/github.com/pelletier/go-toml/v2/decode.go
generated
vendored
Normal file
@@ -0,0 +1,550 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/pelletier/go-toml/v2/unstable"
|
||||
)
|
||||
|
||||
func parseInteger(b []byte) (int64, error) {
|
||||
if len(b) > 2 && b[0] == '0' {
|
||||
switch b[1] {
|
||||
case 'x':
|
||||
return parseIntHex(b)
|
||||
case 'b':
|
||||
return parseIntBin(b)
|
||||
case 'o':
|
||||
return parseIntOct(b)
|
||||
default:
|
||||
panic(fmt.Errorf("invalid base '%c', should have been checked by scanIntOrFloat", b[1]))
|
||||
}
|
||||
}
|
||||
|
||||
return parseIntDec(b)
|
||||
}
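Aside (editorial illustration, not part of the vendored file): parseInteger dispatches on the 0x/0o/0b prefix and hands the remaining digits to a base-specific parser. A minimal sketch of the same dispatch, assuming only the standard library; the input strings are made up.

    package main

    import (
        "fmt"
        "strconv"
    )

    func main() {
        // Mirror of the dispatch above: a 0x/0o/0b prefix selects the base and is
        // stripped before handing the digits to strconv.ParseInt.
        for _, s := range []string{"0xDEADBEEF", "0o755", "0b1010", "42"} {
            base, digits := 10, s
            if len(s) > 2 && s[0] == '0' {
                switch s[1] {
                case 'x':
                    base, digits = 16, s[2:]
                case 'o':
                    base, digits = 8, s[2:]
                case 'b':
                    base, digits = 2, s[2:]
                }
            }
            v, err := strconv.ParseInt(digits, base, 64)
            // Prints 3735928559, 493, 10 and 42 respectively.
            fmt.Println(s, v, err)
        }
    }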
|
||||
|
||||
func parseLocalDate(b []byte) (LocalDate, error) {
|
||||
// full-date = date-fullyear "-" date-month "-" date-mday
|
||||
// date-fullyear = 4DIGIT
|
||||
// date-month = 2DIGIT ; 01-12
|
||||
// date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year
|
||||
var date LocalDate
|
||||
|
||||
if len(b) != 10 || b[4] != '-' || b[7] != '-' {
|
||||
return date, unstable.NewParserError(b, "dates are expected to have the format YYYY-MM-DD")
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
date.Year, err = parseDecimalDigits(b[0:4])
|
||||
if err != nil {
|
||||
return LocalDate{}, err
|
||||
}
|
||||
|
||||
date.Month, err = parseDecimalDigits(b[5:7])
|
||||
if err != nil {
|
||||
return LocalDate{}, err
|
||||
}
|
||||
|
||||
date.Day, err = parseDecimalDigits(b[8:10])
|
||||
if err != nil {
|
||||
return LocalDate{}, err
|
||||
}
|
||||
|
||||
if !isValidDate(date.Year, date.Month, date.Day) {
|
||||
return LocalDate{}, unstable.NewParserError(b, "impossible date")
|
||||
}
|
||||
|
||||
return date, nil
|
||||
}
|
||||
|
||||
func parseDecimalDigits(b []byte) (int, error) {
|
||||
v := 0
|
||||
|
||||
for i, c := range b {
|
||||
if c < '0' || c > '9' {
|
||||
return 0, unstable.NewParserError(b[i:i+1], "expected digit (0-9)")
|
||||
}
|
||||
v *= 10
|
||||
v += int(c - '0')
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func parseDateTime(b []byte) (time.Time, error) {
|
||||
// offset-date-time = full-date time-delim full-time
|
||||
// full-time = partial-time time-offset
|
||||
// time-offset = "Z" / time-numoffset
|
||||
// time-numoffset = ( "+" / "-" ) time-hour ":" time-minute
|
||||
|
||||
dt, b, err := parseLocalDateTime(b)
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
|
||||
var zone *time.Location
|
||||
|
||||
if len(b) == 0 {
|
||||
// parser should have checked that when assigning the date time node
|
||||
panic("date time should have a timezone")
|
||||
}
|
||||
|
||||
if b[0] == 'Z' || b[0] == 'z' {
|
||||
b = b[1:]
|
||||
zone = time.UTC
|
||||
} else {
|
||||
const dateTimeByteLen = 6
|
||||
if len(b) != dateTimeByteLen {
|
||||
return time.Time{}, unstable.NewParserError(b, "invalid date-time timezone")
|
||||
}
|
||||
var direction int
|
||||
switch b[0] {
|
||||
case '-':
|
||||
direction = -1
|
||||
case '+':
|
||||
direction = +1
|
||||
default:
|
||||
return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset character")
|
||||
}
|
||||
|
||||
if b[3] != ':' {
|
||||
return time.Time{}, unstable.NewParserError(b[3:4], "expected a : separator")
|
||||
}
|
||||
|
||||
hours, err := parseDecimalDigits(b[1:3])
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
if hours > 23 {
|
||||
return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset hours")
|
||||
}
|
||||
|
||||
minutes, err := parseDecimalDigits(b[4:6])
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
if minutes > 59 {
|
||||
return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset minutes")
|
||||
}
|
||||
|
||||
seconds := direction * (hours*3600 + minutes*60)
|
||||
if seconds == 0 {
|
||||
zone = time.UTC
|
||||
} else {
|
||||
zone = time.FixedZone("", seconds)
|
||||
}
|
||||
b = b[dateTimeByteLen:]
|
||||
}
|
||||
|
||||
if len(b) > 0 {
|
||||
return time.Time{}, unstable.NewParserError(b, "extra bytes at the end of the timezone")
|
||||
}
|
||||
|
||||
t := time.Date(
|
||||
dt.Year,
|
||||
time.Month(dt.Month),
|
||||
dt.Day,
|
||||
dt.Hour,
|
||||
dt.Minute,
|
||||
dt.Second,
|
||||
dt.Nanosecond,
|
||||
zone)
|
||||
|
||||
return t, nil
|
||||
}
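Aside (editorial illustration, not part of the vendored file): the timezone handling above reduces an offset such as "+02:30" to a signed number of seconds and a time.FixedZone. A small self-contained sketch of that arithmetic, using only the standard library; the date is made up.

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // "+02:30" reduced the same way as above: sign times (hours*3600 + minutes*60).
        direction, hours, minutes := +1, 2, 30
        seconds := direction * (hours*3600 + minutes*60) // 9000
        zone := time.FixedZone("", seconds)

        t := time.Date(2024, time.March, 1, 12, 0, 0, 0, zone)
        fmt.Println(t) // the instant carries a fixed +02:30 offset
    }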
|
||||
|
||||
func parseLocalDateTime(b []byte) (LocalDateTime, []byte, error) {
|
||||
var dt LocalDateTime
|
||||
|
||||
const localDateTimeByteMinLen = 11
|
||||
if len(b) < localDateTimeByteMinLen {
|
||||
return dt, nil, unstable.NewParserError(b, "local datetimes are expected to have the format YYYY-MM-DDTHH:MM:SS[.NNNNNNNNN]")
|
||||
}
|
||||
|
||||
date, err := parseLocalDate(b[:10])
|
||||
if err != nil {
|
||||
return dt, nil, err
|
||||
}
|
||||
dt.LocalDate = date
|
||||
|
||||
sep := b[10]
|
||||
if sep != 'T' && sep != ' ' && sep != 't' {
|
||||
return dt, nil, unstable.NewParserError(b[10:11], "datetime separator is expected to be T or a space")
|
||||
}
|
||||
|
||||
t, rest, err := parseLocalTime(b[11:])
|
||||
if err != nil {
|
||||
return dt, nil, err
|
||||
}
|
||||
dt.LocalTime = t
|
||||
|
||||
return dt, rest, nil
|
||||
}
|
||||
|
||||
// parseLocalTime is a bit different because it also returns the remaining
|
||||
// []byte that it didn't need. This is to allow parseDateTime to parse those
|
||||
// remaining bytes as a timezone.
|
||||
func parseLocalTime(b []byte) (LocalTime, []byte, error) {
|
||||
var (
|
||||
nspow = [10]int{0, 1e8, 1e7, 1e6, 1e5, 1e4, 1e3, 1e2, 1e1, 1e0}
|
||||
t LocalTime
|
||||
)
|
||||
|
||||
// check if b matches to have expected format HH:MM:SS[.NNNNNN]
|
||||
const localTimeByteLen = 8
|
||||
if len(b) < localTimeByteLen {
|
||||
return t, nil, unstable.NewParserError(b, "times are expected to have the format HH:MM:SS[.NNNNNN]")
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
t.Hour, err = parseDecimalDigits(b[0:2])
|
||||
if err != nil {
|
||||
return t, nil, err
|
||||
}
|
||||
|
||||
if t.Hour > 23 {
|
||||
return t, nil, unstable.NewParserError(b[0:2], "hour cannot be greater 23")
|
||||
}
|
||||
if b[2] != ':' {
|
||||
return t, nil, unstable.NewParserError(b[2:3], "expecting colon between hours and minutes")
|
||||
}
|
||||
|
||||
t.Minute, err = parseDecimalDigits(b[3:5])
|
||||
if err != nil {
|
||||
return t, nil, err
|
||||
}
|
||||
if t.Minute > 59 {
|
||||
return t, nil, unstable.NewParserError(b[3:5], "minutes cannot be greater 59")
|
||||
}
|
||||
if b[5] != ':' {
|
||||
return t, nil, unstable.NewParserError(b[5:6], "expecting colon between minutes and seconds")
|
||||
}
|
||||
|
||||
t.Second, err = parseDecimalDigits(b[6:8])
|
||||
if err != nil {
|
||||
return t, nil, err
|
||||
}
|
||||
|
||||
if t.Second > 60 {
|
||||
return t, nil, unstable.NewParserError(b[6:8], "seconds cannot be greater 60")
|
||||
}
|
||||
|
||||
b = b[8:]
|
||||
|
||||
if len(b) >= 1 && b[0] == '.' {
|
||||
frac := 0
|
||||
precision := 0
|
||||
digits := 0
|
||||
|
||||
for i, c := range b[1:] {
|
||||
if !isDigit(c) {
|
||||
if i == 0 {
|
||||
return t, nil, unstable.NewParserError(b[0:1], "need at least one digit after fraction point")
|
||||
}
|
||||
break
|
||||
}
|
||||
digits++
|
||||
|
||||
const maxFracPrecision = 9
|
||||
if i >= maxFracPrecision {
|
||||
// go-toml allows decoding fractional seconds
|
||||
// beyond the supported precision of 9
|
||||
// digits. It truncates the fractional component
|
||||
// to the supported precision and ignores the
|
||||
// remaining digits.
|
||||
//
|
||||
// https://github.com/pelletier/go-toml/discussions/707
|
||||
continue
|
||||
}
|
||||
|
||||
frac *= 10
|
||||
frac += int(c - '0')
|
||||
precision++
|
||||
}
|
||||
|
||||
if precision == 0 {
|
||||
return t, nil, unstable.NewParserError(b[:1], "nanoseconds need at least one digit")
|
||||
}
|
||||
|
||||
t.Nanosecond = frac * nspow[precision]
|
||||
t.Precision = precision
|
||||
|
||||
return t, b[1+digits:], nil
|
||||
}
|
||||
return t, b, nil
|
||||
}
|
||||
|
||||
//nolint:cyclop
|
||||
func parseFloat(b []byte) (float64, error) {
|
||||
if len(b) == 4 && (b[0] == '+' || b[0] == '-') && b[1] == 'n' && b[2] == 'a' && b[3] == 'n' {
|
||||
return math.NaN(), nil
|
||||
}
|
||||
|
||||
cleaned, err := checkAndRemoveUnderscoresFloats(b)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if cleaned[0] == '.' {
|
||||
return 0, unstable.NewParserError(b, "float cannot start with a dot")
|
||||
}
|
||||
|
||||
if cleaned[len(cleaned)-1] == '.' {
|
||||
return 0, unstable.NewParserError(b, "float cannot end with a dot")
|
||||
}
|
||||
|
||||
dotAlreadySeen := false
|
||||
for i, c := range cleaned {
|
||||
if c == '.' {
|
||||
if dotAlreadySeen {
|
||||
return 0, unstable.NewParserError(b[i:i+1], "float can have at most one decimal point")
|
||||
}
|
||||
if !isDigit(cleaned[i-1]) {
|
||||
return 0, unstable.NewParserError(b[i-1:i+1], "float decimal point must be preceded by a digit")
|
||||
}
|
||||
if !isDigit(cleaned[i+1]) {
|
||||
return 0, unstable.NewParserError(b[i:i+2], "float decimal point must be followed by a digit")
|
||||
}
|
||||
dotAlreadySeen = true
|
||||
}
|
||||
}
|
||||
|
||||
start := 0
|
||||
if cleaned[0] == '+' || cleaned[0] == '-' {
|
||||
start = 1
|
||||
}
|
||||
if cleaned[start] == '0' && len(cleaned) > start+1 && isDigit(cleaned[start+1]) {
|
||||
return 0, unstable.NewParserError(b, "float integer part cannot have leading zeroes")
|
||||
}
|
||||
|
||||
f, err := strconv.ParseFloat(string(cleaned), 64)
|
||||
if err != nil {
|
||||
return 0, unstable.NewParserError(b, "unable to parse float: %w", err)
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func parseIntHex(b []byte) (int64, error) {
|
||||
cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
i, err := strconv.ParseInt(string(cleaned), 16, 64)
|
||||
if err != nil {
|
||||
return 0, unstable.NewParserError(b, "couldn't parse hexadecimal number: %w", err)
|
||||
}
|
||||
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func parseIntOct(b []byte) (int64, error) {
|
||||
cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
i, err := strconv.ParseInt(string(cleaned), 8, 64)
|
||||
if err != nil {
|
||||
return 0, unstable.NewParserError(b, "couldn't parse octal number: %w", err)
|
||||
}
|
||||
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func parseIntBin(b []byte) (int64, error) {
|
||||
cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
i, err := strconv.ParseInt(string(cleaned), 2, 64)
|
||||
if err != nil {
|
||||
return 0, unstable.NewParserError(b, "couldn't parse binary number: %w", err)
|
||||
}
|
||||
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func isSign(b byte) bool {
|
||||
return b == '+' || b == '-'
|
||||
}
|
||||
|
||||
func parseIntDec(b []byte) (int64, error) {
|
||||
cleaned, err := checkAndRemoveUnderscoresIntegers(b)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
startIdx := 0
|
||||
|
||||
if isSign(cleaned[0]) {
|
||||
startIdx++
|
||||
}
|
||||
|
||||
if len(cleaned) > startIdx+1 && cleaned[startIdx] == '0' {
|
||||
return 0, unstable.NewParserError(b, "leading zero not allowed on decimal number")
|
||||
}
|
||||
|
||||
i, err := strconv.ParseInt(string(cleaned), 10, 64)
|
||||
if err != nil {
|
||||
return 0, unstable.NewParserError(b, "couldn't parse decimal number: %w", err)
|
||||
}
|
||||
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) {
|
||||
start := 0
|
||||
if b[start] == '+' || b[start] == '-' {
|
||||
start++
|
||||
}
|
||||
|
||||
if len(b) == start {
|
||||
return b, nil
|
||||
}
|
||||
|
||||
if b[start] == '_' {
|
||||
return nil, unstable.NewParserError(b[start:start+1], "number cannot start with underscore")
|
||||
}
|
||||
|
||||
if b[len(b)-1] == '_' {
|
||||
return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore")
|
||||
}
|
||||
|
||||
// fast path
|
||||
i := 0
|
||||
for ; i < len(b); i++ {
|
||||
if b[i] == '_' {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i == len(b) {
|
||||
return b, nil
|
||||
}
|
||||
|
||||
before := false
|
||||
cleaned := make([]byte, i, len(b))
|
||||
copy(cleaned, b)
|
||||
|
||||
for i++; i < len(b); i++ {
|
||||
c := b[i]
|
||||
if c == '_' {
|
||||
if !before {
|
||||
return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores")
|
||||
}
|
||||
before = false
|
||||
} else {
|
||||
before = true
|
||||
cleaned = append(cleaned, c)
|
||||
}
|
||||
}
|
||||
|
||||
return cleaned, nil
|
||||
}
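Aside (editorial illustration, not part of the vendored file): the underscore rules enforced above are observable through the public API shown elsewhere in this diff. A hedged sketch, assuming the go-toml v2 module is importable as in this repository; the documents are made up.

    package main

    import (
        "fmt"

        "github.com/pelletier/go-toml/v2"
    )

    func main() {
        var doc map[string]interface{}

        // Underscores between digits are accepted and stripped.
        fmt.Println(toml.Unmarshal([]byte("a = 1_000_000"), &doc), doc["a"]) // <nil> 1000000

        // Consecutive underscores violate the "at least one digit between
        // underscores" rule checked above.
        fmt.Println(toml.Unmarshal([]byte("b = 1__000"), &doc) != nil) // true
    }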
|
||||
|
||||
func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) {
|
||||
if b[0] == '_' {
|
||||
return nil, unstable.NewParserError(b[0:1], "number cannot start with underscore")
|
||||
}
|
||||
|
||||
if b[len(b)-1] == '_' {
|
||||
return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore")
|
||||
}
|
||||
|
||||
// fast path
|
||||
i := 0
|
||||
for ; i < len(b); i++ {
|
||||
if b[i] == '_' {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i == len(b) {
|
||||
return b, nil
|
||||
}
|
||||
|
||||
before := false
|
||||
cleaned := make([]byte, 0, len(b))
|
||||
|
||||
for i := 0; i < len(b); i++ {
|
||||
c := b[i]
|
||||
|
||||
switch c {
|
||||
case '_':
|
||||
if !before {
|
||||
return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores")
|
||||
}
|
||||
if i < len(b)-1 && (b[i+1] == 'e' || b[i+1] == 'E') {
|
||||
return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore before exponent")
|
||||
}
|
||||
before = false
|
||||
case '+', '-':
|
||||
// signed exponents
|
||||
cleaned = append(cleaned, c)
|
||||
before = false
|
||||
case 'e', 'E':
|
||||
if i < len(b)-1 && b[i+1] == '_' {
|
||||
return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after exponent")
|
||||
}
|
||||
cleaned = append(cleaned, c)
|
||||
case '.':
|
||||
if i < len(b)-1 && b[i+1] == '_' {
|
||||
return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after decimal point")
|
||||
}
|
||||
if i > 0 && b[i-1] == '_' {
|
||||
return nil, unstable.NewParserError(b[i-1:i], "cannot have underscore before decimal point")
|
||||
}
|
||||
cleaned = append(cleaned, c)
|
||||
default:
|
||||
before = true
|
||||
cleaned = append(cleaned, c)
|
||||
}
|
||||
}
|
||||
|
||||
return cleaned, nil
|
||||
}
|
||||
|
||||
// isValidDate checks if a provided date is a date that exists.
|
||||
func isValidDate(year int, month int, day int) bool {
|
||||
return month > 0 && month < 13 && day > 0 && day <= daysIn(month, year)
|
||||
}
|
||||
|
||||
// daysBefore[m] counts the number of days in a non-leap year
|
||||
// before month m begins. There is an entry for m=12, counting
|
||||
// the number of days before January of next year (365).
|
||||
var daysBefore = [...]int32{
|
||||
0,
|
||||
31,
|
||||
31 + 28,
|
||||
31 + 28 + 31,
|
||||
31 + 28 + 31 + 30,
|
||||
31 + 28 + 31 + 30 + 31,
|
||||
31 + 28 + 31 + 30 + 31 + 30,
|
||||
31 + 28 + 31 + 30 + 31 + 30 + 31,
|
||||
31 + 28 + 31 + 30 + 31 + 30 + 31 + 31,
|
||||
31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30,
|
||||
31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31,
|
||||
31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30,
|
||||
31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31,
|
||||
}
|
||||
|
||||
func daysIn(m int, year int) int {
|
||||
if m == 2 && isLeap(year) {
|
||||
return 29
|
||||
}
|
||||
return int(daysBefore[m] - daysBefore[m-1])
|
||||
}
|
||||
|
||||
func isLeap(year int) bool {
|
||||
return year%4 == 0 && (year%100 != 0 || year%400 == 0)
|
||||
}
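Aside (editorial illustration, not part of the vendored file): the leap-year rule above is the standard Gregorian one; a tiny standalone check of the century cases.

    package main

    import "fmt"

    func isLeap(year int) bool {
        return year%4 == 0 && (year%100 != 0 || year%400 == 0)
    }

    func main() {
        // Century years are leap years only when divisible by 400.
        fmt.Println(isLeap(2024), isLeap(1900), isLeap(2000)) // true false true
    }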
|
||||
|
||||
func isDigit(r byte) bool {
|
||||
return r >= '0' && r <= '9'
|
||||
}
|
||||
2
vendor/github.com/pelletier/go-toml/v2/doc.go
generated
vendored
Normal file
@@ -0,0 +1,2 @@
// Package toml is a library to read and write TOML documents.
package toml
252
vendor/github.com/pelletier/go-toml/v2/errors.go
generated
vendored
Normal file
@@ -0,0 +1,252 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/pelletier/go-toml/v2/internal/danger"
|
||||
"github.com/pelletier/go-toml/v2/unstable"
|
||||
)
|
||||
|
||||
// DecodeError represents an error encountered during the parsing or decoding
|
||||
// of a TOML document.
|
||||
//
|
||||
// In addition to the error message, it contains the position in the document
|
||||
// where it happened, as well as a human-readable representation that shows
|
||||
// where the error occurred in the document.
|
||||
type DecodeError struct {
|
||||
message string
|
||||
line int
|
||||
column int
|
||||
key Key
|
||||
|
||||
human string
|
||||
}
|
||||
|
||||
// StrictMissingError occurs in a TOML document that does not have a
|
||||
// corresponding field in the target value. It contains all the missing fields
|
||||
// in Errors.
|
||||
//
|
||||
// Emitted by Decoder when DisallowUnknownFields() was called.
|
||||
type StrictMissingError struct {
|
||||
// One error per field that could not be found.
|
||||
Errors []DecodeError
|
||||
}
|
||||
|
||||
// Error returns the canonical string for this error.
|
||||
func (s *StrictMissingError) Error() string {
|
||||
return "strict mode: fields in the document are missing in the target struct"
|
||||
}
|
||||
|
||||
// String returns a human readable description of all errors.
|
||||
func (s *StrictMissingError) String() string {
|
||||
var buf strings.Builder
|
||||
|
||||
for i, e := range s.Errors {
|
||||
if i > 0 {
|
||||
buf.WriteString("\n---\n")
|
||||
}
|
||||
|
||||
buf.WriteString(e.String())
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
type Key []string
|
||||
|
||||
// Error returns the error message contained in the DecodeError.
|
||||
func (e *DecodeError) Error() string {
|
||||
return "toml: " + e.message
|
||||
}
|
||||
|
||||
// String returns the human-readable contextualized error. This string is multi-line.
|
||||
func (e *DecodeError) String() string {
|
||||
return e.human
|
||||
}
|
||||
|
||||
// Position returns the (line, column) pair indicating where the error
|
||||
// occurred in the document. Positions are 1-indexed.
|
||||
func (e *DecodeError) Position() (row int, column int) {
|
||||
return e.line, e.column
|
||||
}
|
||||
|
||||
// Key that was being processed when the error occurred. The key is present only
|
||||
// if this DecodeError is part of a StrictMissingError.
|
||||
func (e *DecodeError) Key() Key {
|
||||
return e.key
|
||||
}
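Aside (editorial illustration, not part of the vendored file): a minimal sketch of how a caller might use DecodeError through the public API, assuming go-toml v2 is imported as in this repository; the sample document is made up.

    package main

    import (
        "errors"
        "fmt"

        "github.com/pelletier/go-toml/v2"
    )

    func main() {
        var v map[string]interface{}
        err := toml.Unmarshal([]byte(`a = "unterminated`), &v)

        var derr *toml.DecodeError
        if errors.As(err, &derr) {
            row, col := derr.Position() // 1-indexed position in the document
            fmt.Println(row, col)
            fmt.Println(derr.String()) // multi-line, human-readable context
        }
    }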
|
||||
|
||||
// decodeErrorFromHighlight creates a DecodeError referencing a highlighted
|
||||
// range of bytes from document.
|
||||
//
|
||||
// highlight needs to be a sub-slice of document, or this function panics.
|
||||
//
|
||||
// The function copies all bytes used in DecodeError, so that document and
|
||||
// highlight can be freely deallocated.
|
||||
//
|
||||
//nolint:funlen
|
||||
func wrapDecodeError(document []byte, de *unstable.ParserError) *DecodeError {
|
||||
offset := danger.SubsliceOffset(document, de.Highlight)
|
||||
|
||||
errMessage := de.Error()
|
||||
errLine, errColumn := positionAtEnd(document[:offset])
|
||||
before, after := linesOfContext(document, de.Highlight, offset, 3)
|
||||
|
||||
var buf strings.Builder
|
||||
|
||||
maxLine := errLine + len(after) - 1
|
||||
lineColumnWidth := len(strconv.Itoa(maxLine))
|
||||
|
||||
// Write the lines of context strictly before the error.
|
||||
for i := len(before) - 1; i > 0; i-- {
|
||||
line := errLine - i
|
||||
buf.WriteString(formatLineNumber(line, lineColumnWidth))
|
||||
buf.WriteString("|")
|
||||
|
||||
if len(before[i]) > 0 {
|
||||
buf.WriteString(" ")
|
||||
buf.Write(before[i])
|
||||
}
|
||||
|
||||
buf.WriteRune('\n')
|
||||
}
|
||||
|
||||
// Write the document line that contains the error.
|
||||
|
||||
buf.WriteString(formatLineNumber(errLine, lineColumnWidth))
|
||||
buf.WriteString("| ")
|
||||
|
||||
if len(before) > 0 {
|
||||
buf.Write(before[0])
|
||||
}
|
||||
|
||||
buf.Write(de.Highlight)
|
||||
|
||||
if len(after) > 0 {
|
||||
buf.Write(after[0])
|
||||
}
|
||||
|
||||
buf.WriteRune('\n')
|
||||
|
||||
// Write the line with the error message itself (so it does not have a line
|
||||
// number).
|
||||
|
||||
buf.WriteString(strings.Repeat(" ", lineColumnWidth))
|
||||
buf.WriteString("| ")
|
||||
|
||||
if len(before) > 0 {
|
||||
buf.WriteString(strings.Repeat(" ", len(before[0])))
|
||||
}
|
||||
|
||||
buf.WriteString(strings.Repeat("~", len(de.Highlight)))
|
||||
|
||||
if len(errMessage) > 0 {
|
||||
buf.WriteString(" ")
|
||||
buf.WriteString(errMessage)
|
||||
}
|
||||
|
||||
// Write the lines of context strictly after the error.
|
||||
|
||||
for i := 1; i < len(after); i++ {
|
||||
buf.WriteRune('\n')
|
||||
line := errLine + i
|
||||
buf.WriteString(formatLineNumber(line, lineColumnWidth))
|
||||
buf.WriteString("|")
|
||||
|
||||
if len(after[i]) > 0 {
|
||||
buf.WriteString(" ")
|
||||
buf.Write(after[i])
|
||||
}
|
||||
}
|
||||
|
||||
return &DecodeError{
|
||||
message: errMessage,
|
||||
line: errLine,
|
||||
column: errColumn,
|
||||
key: de.Key,
|
||||
human: buf.String(),
|
||||
}
|
||||
}
|
||||
|
||||
func formatLineNumber(line int, width int) string {
|
||||
format := "%" + strconv.Itoa(width) + "d"
|
||||
|
||||
return fmt.Sprintf(format, line)
|
||||
}
|
||||
|
||||
func linesOfContext(document []byte, highlight []byte, offset int, linesAround int) ([][]byte, [][]byte) {
|
||||
return beforeLines(document, offset, linesAround), afterLines(document, highlight, offset, linesAround)
|
||||
}
|
||||
|
||||
func beforeLines(document []byte, offset int, linesAround int) [][]byte {
|
||||
var beforeLines [][]byte
|
||||
|
||||
// Walk the document backward from the highlight to find previous lines
|
||||
// of context.
|
||||
rest := document[:offset]
|
||||
backward:
|
||||
for o := len(rest) - 1; o >= 0 && len(beforeLines) <= linesAround && len(rest) > 0; {
|
||||
switch {
|
||||
case rest[o] == '\n':
|
||||
// handle individual lines
|
||||
beforeLines = append(beforeLines, rest[o+1:])
|
||||
rest = rest[:o]
|
||||
o = len(rest) - 1
|
||||
case o == 0:
|
||||
// add the first line only if it's non-empty
|
||||
beforeLines = append(beforeLines, rest)
|
||||
|
||||
break backward
|
||||
default:
|
||||
o--
|
||||
}
|
||||
}
|
||||
|
||||
return beforeLines
|
||||
}
|
||||
|
||||
func afterLines(document []byte, highlight []byte, offset int, linesAround int) [][]byte {
|
||||
var afterLines [][]byte
|
||||
|
||||
// Walk the document forward from the highlight to find the following
|
||||
// lines of context.
|
||||
rest := document[offset+len(highlight):]
|
||||
forward:
|
||||
for o := 0; o < len(rest) && len(afterLines) <= linesAround; {
|
||||
switch {
|
||||
case rest[o] == '\n':
|
||||
// handle individual lines
|
||||
afterLines = append(afterLines, rest[:o])
|
||||
rest = rest[o+1:]
|
||||
o = 0
|
||||
|
||||
case o == len(rest)-1:
|
||||
// add last line only if it's non-empty
|
||||
afterLines = append(afterLines, rest)
|
||||
|
||||
break forward
|
||||
default:
|
||||
o++
|
||||
}
|
||||
}
|
||||
|
||||
return afterLines
|
||||
}
|
||||
|
||||
func positionAtEnd(b []byte) (row int, column int) {
|
||||
row = 1
|
||||
column = 1
|
||||
|
||||
for _, c := range b {
|
||||
if c == '\n' {
|
||||
row++
|
||||
column = 1
|
||||
} else {
|
||||
column++
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
42
vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
package characters
|
||||
|
||||
var invalidAsciiTable = [256]bool{
|
||||
0x00: true,
|
||||
0x01: true,
|
||||
0x02: true,
|
||||
0x03: true,
|
||||
0x04: true,
|
||||
0x05: true,
|
||||
0x06: true,
|
||||
0x07: true,
|
||||
0x08: true,
|
||||
// 0x09 TAB
|
||||
// 0x0A LF
|
||||
0x0B: true,
|
||||
0x0C: true,
|
||||
// 0x0D CR
|
||||
0x0E: true,
|
||||
0x0F: true,
|
||||
0x10: true,
|
||||
0x11: true,
|
||||
0x12: true,
|
||||
0x13: true,
|
||||
0x14: true,
|
||||
0x15: true,
|
||||
0x16: true,
|
||||
0x17: true,
|
||||
0x18: true,
|
||||
0x19: true,
|
||||
0x1A: true,
|
||||
0x1B: true,
|
||||
0x1C: true,
|
||||
0x1D: true,
|
||||
0x1E: true,
|
||||
0x1F: true,
|
||||
// 0x20 - 0x7E Printable ASCII characters
|
||||
0x7F: true,
|
||||
}
|
||||
|
||||
func InvalidAscii(b byte) bool {
|
||||
return invalidAsciiTable[b]
|
||||
}
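Aside (editorial illustration, not part of the vendored file): the table above is what ultimately rejects raw control characters in strings while allowing tab. A hedged sketch through the public API, since the internal characters package is not importable from outside the module; the documents are made up.

    package main

    import (
        "fmt"

        "github.com/pelletier/go-toml/v2"
    )

    func main() {
        var v map[string]interface{}

        // 0x01 is marked invalid in the table above, so it may not appear raw in a string.
        fmt.Println(toml.Unmarshal([]byte("a = \"\x01\""), &v) != nil) // true

        // Tab (0x09) is explicitly allowed.
        fmt.Println(toml.Unmarshal([]byte("b = \"\t\""), &v)) // <nil>
    }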
|
||||
199
vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go
generated
vendored
Normal file
@@ -0,0 +1,199 @@
|
||||
package characters
|
||||
|
||||
import (
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type utf8Err struct {
|
||||
Index int
|
||||
Size int
|
||||
}
|
||||
|
||||
func (u utf8Err) Zero() bool {
|
||||
return u.Size == 0
|
||||
}
|
||||
|
||||
// Verifies that a given string is only made of valid UTF-8 characters allowed
|
||||
// by the TOML spec:
|
||||
//
|
||||
// Any Unicode character may be used except those that must be escaped:
|
||||
// quotation mark, backslash, and the control characters other than tab (U+0000
|
||||
// to U+0008, U+000A to U+001F, U+007F).
|
||||
//
|
||||
// It is a copy of the Go 1.17 utf8.Valid implementation, tweaked to exit early
|
||||
// when a character is not allowed.
|
||||
//
|
||||
// The returned utf8Err is Zero() if the string is valid, or contains the byte
|
||||
// index and size of the invalid character.
|
||||
//
|
||||
// quotation mark => already checked
|
||||
// backslash => already checked
|
||||
// 0-0x8 => invalid
|
||||
// 0x9 => tab, ok
|
||||
// 0xA - 0x1F => invalid
|
||||
// 0x7F => invalid
|
||||
func Utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) {
|
||||
// Fast path. Check for and skip 8 bytes of ASCII characters per iteration.
|
||||
offset := 0
|
||||
for len(p) >= 8 {
|
||||
// Combining two 32 bit loads allows the same code to be used
|
||||
// for 32 and 64 bit platforms.
|
||||
// The compiler can generate a 32bit load for first32 and second32
|
||||
// on many platforms. See test/codegen/memcombine.go.
|
||||
first32 := uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
|
||||
second32 := uint32(p[4]) | uint32(p[5])<<8 | uint32(p[6])<<16 | uint32(p[7])<<24
|
||||
if (first32|second32)&0x80808080 != 0 {
|
||||
// Found a non ASCII byte (>= RuneSelf).
|
||||
break
|
||||
}
|
||||
|
||||
for i, b := range p[:8] {
|
||||
if InvalidAscii(b) {
|
||||
err.Index = offset + i
|
||||
err.Size = 1
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
p = p[8:]
|
||||
offset += 8
|
||||
}
|
||||
n := len(p)
|
||||
for i := 0; i < n; {
|
||||
pi := p[i]
|
||||
if pi < utf8.RuneSelf {
|
||||
if InvalidAscii(pi) {
|
||||
err.Index = offset + i
|
||||
err.Size = 1
|
||||
return
|
||||
}
|
||||
i++
|
||||
continue
|
||||
}
|
||||
x := first[pi]
|
||||
if x == xx {
|
||||
// Illegal starter byte.
|
||||
err.Index = offset + i
|
||||
err.Size = 1
|
||||
return
|
||||
}
|
||||
size := int(x & 7)
|
||||
if i+size > n {
|
||||
// Short or invalid.
|
||||
err.Index = offset + i
|
||||
err.Size = n - i
|
||||
return
|
||||
}
|
||||
accept := acceptRanges[x>>4]
|
||||
if c := p[i+1]; c < accept.lo || accept.hi < c {
|
||||
err.Index = offset + i
|
||||
err.Size = 2
|
||||
return
|
||||
} else if size == 2 {
|
||||
} else if c := p[i+2]; c < locb || hicb < c {
|
||||
err.Index = offset + i
|
||||
err.Size = 3
|
||||
return
|
||||
} else if size == 3 {
|
||||
} else if c := p[i+3]; c < locb || hicb < c {
|
||||
err.Index = offset + i
|
||||
err.Size = 4
|
||||
return
|
||||
}
|
||||
i += size
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Return the size of the next rune if valid, 0 otherwise.
|
||||
func Utf8ValidNext(p []byte) int {
|
||||
c := p[0]
|
||||
|
||||
if c < utf8.RuneSelf {
|
||||
if InvalidAscii(c) {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
x := first[c]
|
||||
if x == xx {
|
||||
// Illegal starter byte.
|
||||
return 0
|
||||
}
|
||||
size := int(x & 7)
|
||||
if size > len(p) {
|
||||
// Short or invalid.
|
||||
return 0
|
||||
}
|
||||
accept := acceptRanges[x>>4]
|
||||
if c := p[1]; c < accept.lo || accept.hi < c {
|
||||
return 0
|
||||
} else if size == 2 {
|
||||
} else if c := p[2]; c < locb || hicb < c {
|
||||
return 0
|
||||
} else if size == 3 {
|
||||
} else if c := p[3]; c < locb || hicb < c {
|
||||
return 0
|
||||
}
|
||||
|
||||
return size
|
||||
}
|
||||
|
||||
// acceptRange gives the range of valid values for the second byte in a UTF-8
|
||||
// sequence.
|
||||
type acceptRange struct {
|
||||
lo uint8 // lowest value for second byte.
|
||||
hi uint8 // highest value for second byte.
|
||||
}
|
||||
|
||||
// acceptRanges has size 16 to avoid bounds checks in the code that uses it.
|
||||
var acceptRanges = [16]acceptRange{
|
||||
0: {locb, hicb},
|
||||
1: {0xA0, hicb},
|
||||
2: {locb, 0x9F},
|
||||
3: {0x90, hicb},
|
||||
4: {locb, 0x8F},
|
||||
}
|
||||
|
||||
// first is information about the first byte in a UTF-8 sequence.
|
||||
var first = [256]uint8{
|
||||
// 1 2 3 4 5 6 7 8 9 A B C D E F
|
||||
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F
|
||||
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F
|
||||
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F
|
||||
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F
|
||||
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F
|
||||
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F
|
||||
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F
|
||||
as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F
|
||||
// 1 2 3 4 5 6 7 8 9 A B C D E F
|
||||
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F
|
||||
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F
|
||||
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF
|
||||
xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF
|
||||
xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF
|
||||
s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF
|
||||
s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF
|
||||
s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF
|
||||
}
|
||||
|
||||
const (
|
||||
// The default lowest and highest continuation byte.
|
||||
locb = 0b10000000
|
||||
hicb = 0b10111111
|
||||
|
||||
// The names of these constants are chosen to give nice alignment in the
|
||||
// table below. The first nibble is an index into acceptRanges or F for
|
||||
// special one-byte cases. The second nibble is the Rune length or the
|
||||
// Status for the special one-byte case.
|
||||
xx = 0xF1 // invalid: size 1
|
||||
as = 0xF0 // ASCII: size 1
|
||||
s1 = 0x02 // accept 0, size 2
|
||||
s2 = 0x13 // accept 1, size 3
|
||||
s3 = 0x03 // accept 0, size 3
|
||||
s4 = 0x23 // accept 2, size 3
|
||||
s5 = 0x34 // accept 3, size 4
|
||||
s6 = 0x04 // accept 0, size 4
|
||||
s7 = 0x44 // accept 4, size 4
|
||||
)
|
||||
65
vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go
generated
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
package danger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const maxInt = uintptr(int(^uint(0) >> 1))
|
||||
|
||||
func SubsliceOffset(data []byte, subslice []byte) int {
|
||||
datap := (*reflect.SliceHeader)(unsafe.Pointer(&data))
|
||||
hlp := (*reflect.SliceHeader)(unsafe.Pointer(&subslice))
|
||||
|
||||
if hlp.Data < datap.Data {
|
||||
panic(fmt.Errorf("subslice address (%d) is before data address (%d)", hlp.Data, datap.Data))
|
||||
}
|
||||
offset := hlp.Data - datap.Data
|
||||
|
||||
if offset > maxInt {
|
||||
panic(fmt.Errorf("slice offset larger than int (%d)", offset))
|
||||
}
|
||||
|
||||
intoffset := int(offset)
|
||||
|
||||
if intoffset > datap.Len {
|
||||
panic(fmt.Errorf("slice offset (%d) is farther than data length (%d)", intoffset, datap.Len))
|
||||
}
|
||||
|
||||
if intoffset+hlp.Len > datap.Len {
|
||||
panic(fmt.Errorf("slice ends (%d+%d) is farther than data length (%d)", intoffset, hlp.Len, datap.Len))
|
||||
}
|
||||
|
||||
return intoffset
|
||||
}
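Aside (editorial illustration, not part of the vendored file): SubsliceOffset recovers where a sub-slice starts inside its parent from the slice headers alone. A small sketch of the same pointer arithmetic written with plain unsafe pointers; the data is made up.

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        data := []byte("key = value")
        sub := data[6:8] // "va"

        // The distance between the two backing-array pointers is the offset of sub in data.
        offset := uintptr(unsafe.Pointer(&sub[0])) - uintptr(unsafe.Pointer(&data[0]))
        fmt.Println(offset) // 6
    }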
|
||||
|
||||
func BytesRange(start []byte, end []byte) []byte {
|
||||
if start == nil || end == nil {
|
||||
panic("cannot call BytesRange with nil")
|
||||
}
|
||||
startp := (*reflect.SliceHeader)(unsafe.Pointer(&start))
|
||||
endp := (*reflect.SliceHeader)(unsafe.Pointer(&end))
|
||||
|
||||
if startp.Data > endp.Data {
|
||||
panic(fmt.Errorf("start pointer address (%d) is after end pointer address (%d)", startp.Data, endp.Data))
|
||||
}
|
||||
|
||||
l := startp.Len
|
||||
endLen := int(endp.Data-startp.Data) + endp.Len
|
||||
if endLen > l {
|
||||
l = endLen
|
||||
}
|
||||
|
||||
if l > startp.Cap {
|
||||
panic(fmt.Errorf("range length is larger than capacity"))
|
||||
}
|
||||
|
||||
return start[:l]
|
||||
}
|
||||
|
||||
func Stride(ptr unsafe.Pointer, size uintptr, offset int) unsafe.Pointer {
|
||||
// TODO: replace with unsafe.Add when Go 1.17 is released
|
||||
// https://github.com/golang/go/issues/40481
|
||||
return unsafe.Pointer(uintptr(ptr) + uintptr(int(size)*offset))
|
||||
}
|
||||
23
vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
package danger
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// typeID is used as key in encoder and decoder caches to enable using
|
||||
// the optimized runtime.mapaccess2_fast64 function instead of the more
|
||||
// expensive lookup if we were to use reflect.Type as map key.
|
||||
//
|
||||
// typeID holds the pointer to the reflect.Type value, which is unique
|
||||
// in the program.
|
||||
//
|
||||
// https://github.com/segmentio/encoding/blob/master/json/codec.go#L59-L61
|
||||
type TypeID unsafe.Pointer
|
||||
|
||||
func MakeTypeID(t reflect.Type) TypeID {
|
||||
// reflect.Type has the fields:
|
||||
// typ unsafe.Pointer
|
||||
// ptr unsafe.Pointer
|
||||
return TypeID((*[2]unsafe.Pointer)(unsafe.Pointer(&t))[1])
|
||||
}
|
||||
48
vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
package tracker
|
||||
|
||||
import "github.com/pelletier/go-toml/v2/unstable"
|
||||
|
||||
// KeyTracker is a tracker that keeps track of the current Key as the AST is
|
||||
// walked.
|
||||
type KeyTracker struct {
|
||||
k []string
|
||||
}
|
||||
|
||||
// UpdateTable sets the state of the tracker with the AST table node.
|
||||
func (t *KeyTracker) UpdateTable(node *unstable.Node) {
|
||||
t.reset()
|
||||
t.Push(node)
|
||||
}
|
||||
|
||||
// UpdateArrayTable sets the state of the tracker with the AST array table node.
|
||||
func (t *KeyTracker) UpdateArrayTable(node *unstable.Node) {
|
||||
t.reset()
|
||||
t.Push(node)
|
||||
}
|
||||
|
||||
// Push the given key on the stack.
|
||||
func (t *KeyTracker) Push(node *unstable.Node) {
|
||||
it := node.Key()
|
||||
for it.Next() {
|
||||
t.k = append(t.k, string(it.Node().Data))
|
||||
}
|
||||
}
|
||||
|
||||
// Pop key from stack.
|
||||
func (t *KeyTracker) Pop(node *unstable.Node) {
|
||||
it := node.Key()
|
||||
for it.Next() {
|
||||
t.k = t.k[:len(t.k)-1]
|
||||
}
|
||||
}
|
||||
|
||||
// Key returns the current key
|
||||
func (t *KeyTracker) Key() []string {
|
||||
k := make([]string, len(t.k))
|
||||
copy(k, t.k)
|
||||
return k
|
||||
}
|
||||
|
||||
func (t *KeyTracker) reset() {
|
||||
t.k = t.k[:0]
|
||||
}
|
||||
358
vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go
generated
vendored
Normal file
@@ -0,0 +1,358 @@
|
||||
package tracker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/pelletier/go-toml/v2/unstable"
|
||||
)
|
||||
|
||||
type keyKind uint8
|
||||
|
||||
const (
|
||||
invalidKind keyKind = iota
|
||||
valueKind
|
||||
tableKind
|
||||
arrayTableKind
|
||||
)
|
||||
|
||||
func (k keyKind) String() string {
|
||||
switch k {
|
||||
case invalidKind:
|
||||
return "invalid"
|
||||
case valueKind:
|
||||
return "value"
|
||||
case tableKind:
|
||||
return "table"
|
||||
case arrayTableKind:
|
||||
return "array table"
|
||||
}
|
||||
panic("missing keyKind string mapping")
|
||||
}
|
||||
|
||||
// SeenTracker tracks which keys have been seen with which TOML type to flag
|
||||
// duplicates and mismatches according to the spec.
|
||||
//
|
||||
// Each node in the visited tree is represented by an entry. Each entry has an
|
||||
// identifier, which is provided by a counter. Entries are stored in the array
|
||||
// entries. As new nodes are discovered (referenced for the first time in the
|
||||
// TOML document), entries are created and appended to the array. An entry
|
||||
// points to its parent using its id.
|
||||
//
|
||||
// To find whether a given key (sequence of []byte) has already been visited,
|
||||
// the entries are linearly searched, looking for one with the right name and
|
||||
// parent id.
|
||||
//
|
||||
// Given that all keys appear in the document after their parent, it is
|
||||
// guaranteed that all descendants of a node are stored after the node, this
|
||||
// speeds up the search process.
|
||||
//
|
||||
// When encountering [[array tables]], the descendants of that node are removed
|
||||
// to allow that branch of the tree to be "rediscovered". To maintain the
|
||||
// invariant above, the deletion process needs to keep the order of entries.
|
||||
// This results in more copies in that case.
|
||||
type SeenTracker struct {
|
||||
entries []entry
|
||||
currentIdx int
|
||||
}
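Aside (editorial illustration, not part of the vendored file): the kind of duplicate this tracker exists to flag, seen through the public API. A hedged sketch with a made-up document, assuming go-toml v2 is imported as in this repository.

    package main

    import (
        "fmt"

        "github.com/pelletier/go-toml/v2"
    )

    func main() {
        var v map[string]interface{}

        // Defining the same table twice is rejected by the tracker.
        doc := []byte("[server]\nhost = \"a\"\n\n[server]\nhost = \"b\"\n")
        err := toml.Unmarshal(doc, &v)
        fmt.Println(err != nil) // true: table server already exists
    }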
|
||||
|
||||
var pool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return &SeenTracker{}
|
||||
},
|
||||
}
|
||||
|
||||
func (s *SeenTracker) reset() {
|
||||
// Always contains a root element at index 0.
|
||||
s.currentIdx = 0
|
||||
if len(s.entries) == 0 {
|
||||
s.entries = make([]entry, 1, 2)
|
||||
} else {
|
||||
s.entries = s.entries[:1]
|
||||
}
|
||||
s.entries[0].child = -1
|
||||
s.entries[0].next = -1
|
||||
}
|
||||
|
||||
type entry struct {
|
||||
// Use -1 to indicate no child or no sibling.
|
||||
child int
|
||||
next int
|
||||
|
||||
name []byte
|
||||
kind keyKind
|
||||
explicit bool
|
||||
kv bool
|
||||
}
|
||||
|
||||
// Find the index of the child of parentIdx with key k. Returns -1 if
|
||||
// it does not exist.
|
||||
func (s *SeenTracker) find(parentIdx int, k []byte) int {
|
||||
for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next {
|
||||
if bytes.Equal(s.entries[i].name, k) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// Remove all descendants of node at position idx.
|
||||
func (s *SeenTracker) clear(idx int) {
|
||||
if idx >= len(s.entries) {
|
||||
return
|
||||
}
|
||||
|
||||
for i := s.entries[idx].child; i >= 0; {
|
||||
next := s.entries[i].next
|
||||
n := s.entries[0].next
|
||||
s.entries[0].next = i
|
||||
s.entries[i].next = n
|
||||
s.entries[i].name = nil
|
||||
s.clear(i)
|
||||
i = next
|
||||
}
|
||||
|
||||
s.entries[idx].child = -1
|
||||
}
|
||||
|
||||
func (s *SeenTracker) create(parentIdx int, name []byte, kind keyKind, explicit bool, kv bool) int {
|
||||
e := entry{
|
||||
child: -1,
|
||||
next: s.entries[parentIdx].child,
|
||||
|
||||
name: name,
|
||||
kind: kind,
|
||||
explicit: explicit,
|
||||
kv: kv,
|
||||
}
|
||||
var idx int
|
||||
if s.entries[0].next >= 0 {
|
||||
idx = s.entries[0].next
|
||||
s.entries[0].next = s.entries[idx].next
|
||||
s.entries[idx] = e
|
||||
} else {
|
||||
idx = len(s.entries)
|
||||
s.entries = append(s.entries, e)
|
||||
}
|
||||
|
||||
s.entries[parentIdx].child = idx
|
||||
|
||||
return idx
|
||||
}
|
||||
|
||||
func (s *SeenTracker) setExplicitFlag(parentIdx int) {
|
||||
for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next {
|
||||
if s.entries[i].kv {
|
||||
s.entries[i].explicit = true
|
||||
s.entries[i].kv = false
|
||||
}
|
||||
s.setExplicitFlag(i)
|
||||
}
|
||||
}
|
||||
|
||||
// CheckExpression takes a top-level node and checks that it does not contain
|
||||
// keys that have been seen in previous calls, and validates that types are
|
||||
// consistent. It returns true if it is the first time this node's key is seen.
|
||||
// Useful to clear array tables on first use.
|
||||
func (s *SeenTracker) CheckExpression(node *unstable.Node) (bool, error) {
|
||||
if s.entries == nil {
|
||||
s.reset()
|
||||
}
|
||||
switch node.Kind {
|
||||
case unstable.KeyValue:
|
||||
return s.checkKeyValue(node)
|
||||
case unstable.Table:
|
||||
return s.checkTable(node)
|
||||
case unstable.ArrayTable:
|
||||
return s.checkArrayTable(node)
|
||||
default:
|
||||
panic(fmt.Errorf("this should not be a top level node type: %s", node.Kind))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SeenTracker) checkTable(node *unstable.Node) (bool, error) {
|
||||
if s.currentIdx >= 0 {
|
||||
s.setExplicitFlag(s.currentIdx)
|
||||
}
|
||||
|
||||
it := node.Key()
|
||||
|
||||
parentIdx := 0
|
||||
|
||||
// This code is duplicated in checkArrayTable. This is because factoring
|
||||
// it in a function requires to copy the iterator, or allocate it to the
|
||||
// heap, which is not cheap.
|
||||
for it.Next() {
|
||||
if it.IsLast() {
|
||||
break
|
||||
}
|
||||
|
||||
k := it.Node().Data
|
||||
|
||||
idx := s.find(parentIdx, k)
|
||||
|
||||
if idx < 0 {
|
||||
idx = s.create(parentIdx, k, tableKind, false, false)
|
||||
} else {
|
||||
entry := s.entries[idx]
|
||||
if entry.kind == valueKind {
|
||||
return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind)
|
||||
}
|
||||
}
|
||||
parentIdx = idx
|
||||
}
|
||||
|
||||
k := it.Node().Data
|
||||
idx := s.find(parentIdx, k)
|
||||
|
||||
first := false
|
||||
if idx >= 0 {
|
||||
kind := s.entries[idx].kind
|
||||
if kind != tableKind {
|
||||
return false, fmt.Errorf("toml: key %s should be a table, not a %s", string(k), kind)
|
||||
}
|
||||
if s.entries[idx].explicit {
|
||||
return false, fmt.Errorf("toml: table %s already exists", string(k))
|
||||
}
|
||||
s.entries[idx].explicit = true
|
||||
} else {
|
||||
idx = s.create(parentIdx, k, tableKind, true, false)
|
||||
first = true
|
||||
}
|
||||
|
||||
s.currentIdx = idx
|
||||
|
||||
return first, nil
|
||||
}
|
||||
|
||||
func (s *SeenTracker) checkArrayTable(node *unstable.Node) (bool, error) {
|
||||
if s.currentIdx >= 0 {
|
||||
s.setExplicitFlag(s.currentIdx)
|
||||
}
|
||||
|
||||
it := node.Key()
|
||||
|
||||
parentIdx := 0
|
||||
|
||||
for it.Next() {
|
||||
if it.IsLast() {
|
||||
break
|
||||
}
|
||||
|
||||
k := it.Node().Data
|
||||
|
||||
idx := s.find(parentIdx, k)
|
||||
|
||||
if idx < 0 {
|
||||
idx = s.create(parentIdx, k, tableKind, false, false)
|
||||
} else {
|
||||
entry := s.entries[idx]
|
||||
if entry.kind == valueKind {
|
||||
return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind)
|
||||
}
|
||||
}
|
||||
|
||||
parentIdx = idx
|
||||
}
|
||||
|
||||
k := it.Node().Data
|
||||
idx := s.find(parentIdx, k)
|
||||
|
||||
firstTime := idx < 0
|
||||
if firstTime {
|
||||
idx = s.create(parentIdx, k, arrayTableKind, true, false)
|
||||
} else {
|
||||
kind := s.entries[idx].kind
|
||||
if kind != arrayTableKind {
|
||||
return false, fmt.Errorf("toml: key %s already exists as a %s, but should be an array table", kind, string(k))
|
||||
}
|
||||
s.clear(idx)
|
||||
}
|
||||
|
||||
s.currentIdx = idx
|
||||
|
||||
return firstTime, nil
|
||||
}
|
||||
|
||||
func (s *SeenTracker) checkKeyValue(node *unstable.Node) (bool, error) {
|
||||
parentIdx := s.currentIdx
|
||||
it := node.Key()
|
||||
|
||||
for it.Next() {
|
||||
k := it.Node().Data
|
||||
|
||||
idx := s.find(parentIdx, k)
|
||||
|
||||
if idx < 0 {
|
||||
idx = s.create(parentIdx, k, tableKind, false, true)
|
||||
} else {
|
||||
entry := s.entries[idx]
|
||||
if it.IsLast() {
|
||||
return false, fmt.Errorf("toml: key %s is already defined", string(k))
|
||||
} else if entry.kind != tableKind {
|
||||
return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind)
|
||||
} else if entry.explicit {
|
||||
return false, fmt.Errorf("toml: cannot redefine table %s that has already been explicitly defined", string(k))
|
||||
}
|
||||
}
|
||||
|
||||
parentIdx = idx
|
||||
}
|
||||
|
||||
s.entries[parentIdx].kind = valueKind
|
||||
|
||||
value := node.Value()
|
||||
|
||||
switch value.Kind {
|
||||
case unstable.InlineTable:
|
||||
return s.checkInlineTable(value)
|
||||
case unstable.Array:
|
||||
return s.checkArray(value)
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (s *SeenTracker) checkArray(node *unstable.Node) (first bool, err error) {
|
||||
it := node.Children()
|
||||
for it.Next() {
|
||||
n := it.Node()
|
||||
switch n.Kind {
|
||||
case unstable.InlineTable:
|
||||
first, err = s.checkInlineTable(n)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
case unstable.Array:
|
||||
first, err = s.checkArray(n)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return first, nil
|
||||
}
|
||||
|
||||
func (s *SeenTracker) checkInlineTable(node *unstable.Node) (first bool, err error) {
|
||||
s = pool.Get().(*SeenTracker)
|
||||
s.reset()
|
||||
|
||||
it := node.Children()
|
||||
for it.Next() {
|
||||
n := it.Node()
|
||||
first, err = s.checkKeyValue(n)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
// As inline tables are self-contained, the tracker does not
|
||||
// need to retain the details of what they contain. The
|
||||
// keyValue element that creates the inline table is kept to
|
||||
// mark the presence of the inline table and prevent
|
||||
// redefinition of its keys: check* functions cannot walk into
|
||||
// a value.
|
||||
pool.Put(s)
|
||||
return first, nil
|
||||
}
|
||||
1
vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go
generated
vendored
Normal file
@@ -0,0 +1 @@
package tracker
122
vendor/github.com/pelletier/go-toml/v2/localtime.go
generated
vendored
Normal file
@@ -0,0 +1,122 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pelletier/go-toml/v2/unstable"
|
||||
)
|
||||
|
||||
// LocalDate represents a calendar day in no specific timezone.
|
||||
type LocalDate struct {
|
||||
Year int
|
||||
Month int
|
||||
Day int
|
||||
}
|
||||
|
||||
// AsTime converts d into a specific time instance at midnight in zone.
|
||||
func (d LocalDate) AsTime(zone *time.Location) time.Time {
|
||||
return time.Date(d.Year, time.Month(d.Month), d.Day, 0, 0, 0, 0, zone)
|
||||
}
|
||||
|
||||
// String returns RFC 3339 representation of d.
|
||||
func (d LocalDate) String() string {
|
||||
return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day)
|
||||
}
|
||||
|
||||
// MarshalText returns RFC 3339 representation of d.
|
||||
func (d LocalDate) MarshalText() ([]byte, error) {
|
||||
return []byte(d.String()), nil
|
||||
}
|
||||
|
||||
// UnmarshalText parses b using RFC 3339 to fill d.
|
||||
func (d *LocalDate) UnmarshalText(b []byte) error {
|
||||
res, err := parseLocalDate(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*d = res
|
||||
return nil
|
||||
}
|
||||
|
||||
// LocalTime represents a time of day of no specific day in no specific
|
||||
// timezone.
|
||||
type LocalTime struct {
|
||||
Hour int // Hour of the day: [0; 24[
|
||||
Minute int // Minute of the hour: [0; 60[
|
||||
Second int // Second of the minute: [0; 60[
|
||||
Nanosecond int // Nanoseconds within the second: [0, 1000000000[
|
||||
Precision int // Number of digits to display for Nanosecond.
|
||||
}
|
||||
|
||||
// String returns RFC 3339 representation of d.
|
||||
// If d.Nanosecond and d.Precision are zero, the time won't have a nanosecond
|
||||
// component. If d.Nanosecond > 0 but d.Precision = 0, then the minimum number
|
||||
// of digits for nanoseconds is provided.
|
||||
func (d LocalTime) String() string {
|
||||
s := fmt.Sprintf("%02d:%02d:%02d", d.Hour, d.Minute, d.Second)
|
||||
|
||||
if d.Precision > 0 {
|
||||
s += fmt.Sprintf(".%09d", d.Nanosecond)[:d.Precision+1]
|
||||
} else if d.Nanosecond > 0 {
|
||||
// Nanoseconds are specified, but precision is not provided. Use the
|
||||
// minimum.
|
||||
s += strings.Trim(fmt.Sprintf(".%09d", d.Nanosecond), "0")
|
||||
}
|
||||
|
||||
return s
|
||||
}
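Aside (editorial illustration, not part of the vendored file): how Precision affects the output of String, following the implementation above; the values are made up.

    package main

    import (
        "fmt"

        "github.com/pelletier/go-toml/v2"
    )

    func main() {
        // An explicit precision keeps exactly that many fractional digits.
        t := toml.LocalTime{Hour: 12, Minute: 34, Second: 56, Nanosecond: 789000000, Precision: 3}
        fmt.Println(t.String()) // 12:34:56.789

        // Without a recorded precision, trailing zeros are trimmed instead.
        u := toml.LocalTime{Hour: 12, Minute: 34, Second: 56, Nanosecond: 500000000}
        fmt.Println(u.String()) // 12:34:56.5
    }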
|
||||
|
||||
// MarshalText returns RFC 3339 representation of d.
|
||||
func (d LocalTime) MarshalText() ([]byte, error) {
|
||||
return []byte(d.String()), nil
|
||||
}
|
||||
|
||||
// UnmarshalText parses b using RFC 3339 to fill d.
|
||||
func (d *LocalTime) UnmarshalText(b []byte) error {
|
||||
res, left, err := parseLocalTime(b)
|
||||
if err == nil && len(left) != 0 {
|
||||
err = unstable.NewParserError(left, "extra characters")
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*d = res
|
||||
return nil
|
||||
}
|
||||
|
||||
// LocalDateTime represents a time of a specific day in no specific timezone.
|
||||
type LocalDateTime struct {
|
||||
LocalDate
|
||||
LocalTime
|
||||
}
|
||||
|
||||
// AsTime converts d into a specific time instance in zone.
|
||||
func (d LocalDateTime) AsTime(zone *time.Location) time.Time {
|
||||
return time.Date(d.Year, time.Month(d.Month), d.Day, d.Hour, d.Minute, d.Second, d.Nanosecond, zone)
|
||||
}
|
||||
|
||||
// String returns RFC 3339 representation of d.
|
||||
func (d LocalDateTime) String() string {
|
||||
return d.LocalDate.String() + "T" + d.LocalTime.String()
|
||||
}
|
||||
|
||||
// MarshalText returns RFC 3339 representation of d.
|
||||
func (d LocalDateTime) MarshalText() ([]byte, error) {
|
||||
return []byte(d.String()), nil
|
||||
}
|
||||
|
||||
// UnmarshalText parses b using RFC 3339 to fill d.
|
||||
func (d *LocalDateTime) UnmarshalText(data []byte) error {
|
||||
res, left, err := parseLocalDateTime(data)
|
||||
if err == nil && len(left) != 0 {
|
||||
err = unstable.NewParserError(left, "extra characters")
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*d = res
|
||||
return nil
|
||||
}
|
||||
1133
vendor/github.com/pelletier/go-toml/v2/marshaler.go
generated
vendored
Normal file
File diff suppressed because it is too large
107
vendor/github.com/pelletier/go-toml/v2/strict.go
generated
vendored
Normal file
@@ -0,0 +1,107 @@
|
||||
package toml
|
||||
|
||||
import (
|
||||
"github.com/pelletier/go-toml/v2/internal/danger"
|
||||
"github.com/pelletier/go-toml/v2/internal/tracker"
|
||||
"github.com/pelletier/go-toml/v2/unstable"
|
||||
)
|
||||
|
||||
type strict struct {
|
||||
Enabled bool
|
||||
|
||||
// Tracks the current key being processed.
|
||||
key tracker.KeyTracker
|
||||
|
||||
missing []unstable.ParserError
|
||||
}
|
||||
|
||||
func (s *strict) EnterTable(node *unstable.Node) {
|
||||
if !s.Enabled {
|
||||
return
|
||||
}
|
||||
|
||||
s.key.UpdateTable(node)
|
||||
}
|
||||
|
||||
func (s *strict) EnterArrayTable(node *unstable.Node) {
|
||||
if !s.Enabled {
|
||||
return
|
||||
}
|
||||
|
||||
s.key.UpdateArrayTable(node)
|
||||
}
|
||||
|
||||
func (s *strict) EnterKeyValue(node *unstable.Node) {
|
||||
if !s.Enabled {
|
||||
return
|
||||
}
|
||||
|
||||
s.key.Push(node)
|
||||
}
|
||||
|
||||
func (s *strict) ExitKeyValue(node *unstable.Node) {
|
||||
if !s.Enabled {
|
||||
return
|
||||
}
|
||||
|
||||
s.key.Pop(node)
|
||||
}
|
||||
|
||||
func (s *strict) MissingTable(node *unstable.Node) {
|
||||
if !s.Enabled {
|
||||
return
|
||||
}
|
||||
|
||||
s.missing = append(s.missing, unstable.ParserError{
|
||||
Highlight: keyLocation(node),
|
||||
Message: "missing table",
|
||||
Key: s.key.Key(),
|
||||
})
|
||||
}
|
||||
|
||||
func (s *strict) MissingField(node *unstable.Node) {
|
||||
if !s.Enabled {
|
||||
return
|
||||
}
|
||||
|
||||
s.missing = append(s.missing, unstable.ParserError{
|
||||
Highlight: keyLocation(node),
|
||||
Message: "missing field",
|
||||
Key: s.key.Key(),
|
||||
})
|
||||
}
|
||||
|
||||
func (s *strict) Error(doc []byte) error {
|
||||
if !s.Enabled || len(s.missing) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
err := &StrictMissingError{
|
||||
Errors: make([]DecodeError, 0, len(s.missing)),
|
||||
}
|
||||
|
||||
for _, derr := range s.missing {
|
||||
derr := derr
|
||||
err.Errors = append(err.Errors, *wrapDecodeError(doc, &derr))
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func keyLocation(node *unstable.Node) []byte {
|
||||
k := node.Key()
|
||||
|
||||
hasOne := k.Next()
|
||||
if !hasOne {
|
||||
panic("should not be called with empty key")
|
||||
}
|
||||
|
||||
start := k.Node().Data
|
||||
end := k.Node().Data
|
||||
|
||||
for k.Next() {
|
||||
end = k.Node().Data
|
||||
}
|
||||
|
||||
return danger.BytesRange(start, end)
|
||||
}
|
||||
243
vendor/github.com/pelletier/go-toml/v2/toml.abnf
generated
vendored
Normal file
@@ -0,0 +1,243 @@
|
||||
;; This document describes TOML's syntax, using the ABNF format (defined in
|
||||
;; RFC 5234 -- https://www.ietf.org/rfc/rfc5234.txt).
|
||||
;;
|
||||
;; All valid TOML documents will match this description, however certain
|
||||
;; invalid documents would need to be rejected as per the semantics described
|
||||
;; in the supporting text description.
|
||||
|
||||
;; It is possible to try this grammar interactively, using instaparse.
|
||||
;; http://instaparse.mojombo.com/
|
||||
;;
|
||||
;; To do so, in the lower right, click on Options and change `:input-format` to
|
||||
;; ':abnf'. Then paste this entire ABNF document into the grammar entry box
|
||||
;; (above the options). Then you can type or paste a sample TOML document into
|
||||
;; the beige box on the left. Tada!
|
||||
|
||||
;; Overall Structure
|
||||
|
||||
toml = expression *( newline expression )
|
||||
|
||||
expression = ws [ comment ]
|
||||
expression =/ ws keyval ws [ comment ]
|
||||
expression =/ ws table ws [ comment ]
|
||||
|
||||
;; Whitespace
|
||||
|
||||
ws = *wschar
|
||||
wschar = %x20 ; Space
|
||||
wschar =/ %x09 ; Horizontal tab
|
||||
|
||||
;; Newline
|
||||
|
||||
newline = %x0A ; LF
|
||||
newline =/ %x0D.0A ; CRLF
|
||||
|
||||
;; Comment
|
||||
|
||||
comment-start-symbol = %x23 ; #
|
||||
non-ascii = %x80-D7FF / %xE000-10FFFF
|
||||
non-eol = %x09 / %x20-7F / non-ascii
|
||||
|
||||
comment = comment-start-symbol *non-eol
|
||||
|
||||
;; Key-Value pairs
|
||||
|
||||
keyval = key keyval-sep val
|
||||
|
||||
key = simple-key / dotted-key
|
||||
simple-key = quoted-key / unquoted-key
|
||||
|
||||
unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _
|
||||
quoted-key = basic-string / literal-string
|
||||
dotted-key = simple-key 1*( dot-sep simple-key )
|
||||
|
||||
dot-sep = ws %x2E ws ; . Period
|
||||
keyval-sep = ws %x3D ws ; =
|
||||
|
||||
val = string / boolean / array / inline-table / date-time / float / integer
|
||||
|
||||
;; String
|
||||
|
||||
string = ml-basic-string / basic-string / ml-literal-string / literal-string
|
||||
|
||||
;; Basic String
|
||||
|
||||
basic-string = quotation-mark *basic-char quotation-mark
|
||||
|
||||
quotation-mark = %x22 ; "
|
||||
|
||||
basic-char = basic-unescaped / escaped
|
||||
basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii
|
||||
escaped = escape escape-seq-char
|
||||
|
||||
escape = %x5C ; \
|
||||
escape-seq-char = %x22 ; " quotation mark U+0022
|
||||
escape-seq-char =/ %x5C ; \ reverse solidus U+005C
|
||||
escape-seq-char =/ %x62 ; b backspace U+0008
|
||||
escape-seq-char =/ %x66 ; f form feed U+000C
|
||||
escape-seq-char =/ %x6E ; n line feed U+000A
|
||||
escape-seq-char =/ %x72 ; r carriage return U+000D
|
||||
escape-seq-char =/ %x74 ; t tab U+0009
|
||||
escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX
|
||||
escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX
|
||||
|
||||
;; Multiline Basic String
|
||||
|
||||
ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body
|
||||
ml-basic-string-delim
|
||||
ml-basic-string-delim = 3quotation-mark
|
||||
ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ]
|
||||
|
||||
mlb-content = mlb-char / newline / mlb-escaped-nl
|
||||
mlb-char = mlb-unescaped / escaped
|
||||
mlb-quotes = 1*2quotation-mark
|
||||
mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii
|
||||
mlb-escaped-nl = escape ws newline *( wschar / newline )
|
||||
|
||||
;; Literal String
|
||||
|
||||
literal-string = apostrophe *literal-char apostrophe
|
||||
|
||||
apostrophe = %x27 ; ' apostrophe
|
||||
|
||||
literal-char = %x09 / %x20-26 / %x28-7E / non-ascii
|
||||
|
||||
;; Multiline Literal String
|
||||
|
||||
ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body
|
||||
ml-literal-string-delim
|
||||
ml-literal-string-delim = 3apostrophe
|
||||
ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ]
|
||||
|
||||
mll-content = mll-char / newline
|
||||
mll-char = %x09 / %x20-26 / %x28-7E / non-ascii
|
||||
mll-quotes = 1*2apostrophe
|
||||
|
||||
;; Integer
|
||||
|
||||
integer = dec-int / hex-int / oct-int / bin-int
|
||||
|
||||
minus = %x2D ; -
|
||||
plus = %x2B ; +
|
||||
underscore = %x5F ; _
|
||||
digit1-9 = %x31-39 ; 1-9
|
||||
digit0-7 = %x30-37 ; 0-7
|
||||
digit0-1 = %x30-31 ; 0-1
|
||||
|
||||
hex-prefix = %x30.78 ; 0x
|
||||
oct-prefix = %x30.6F ; 0o
|
||||
bin-prefix = %x30.62 ; 0b
|
||||
|
||||
dec-int = [ minus / plus ] unsigned-dec-int
|
||||
unsigned-dec-int = DIGIT / digit1-9 1*( DIGIT / underscore DIGIT )
|
||||
|
||||
hex-int = hex-prefix HEXDIG *( HEXDIG / underscore HEXDIG )
|
||||
oct-int = oct-prefix digit0-7 *( digit0-7 / underscore digit0-7 )
|
||||
bin-int = bin-prefix digit0-1 *( digit0-1 / underscore digit0-1 )
|
||||
|
||||
;; Float
|
||||
|
||||
float = float-int-part ( exp / frac [ exp ] )
|
||||
float =/ special-float
|
||||
|
||||
float-int-part = dec-int
|
||||
frac = decimal-point zero-prefixable-int
|
||||
decimal-point = %x2E ; .
|
||||
zero-prefixable-int = DIGIT *( DIGIT / underscore DIGIT )
|
||||
|
||||
exp = "e" float-exp-part
|
||||
float-exp-part = [ minus / plus ] zero-prefixable-int
|
||||
|
||||
special-float = [ minus / plus ] ( inf / nan )
|
||||
inf = %x69.6e.66 ; inf
|
||||
nan = %x6e.61.6e ; nan
|
||||
|
||||
;; Boolean
|
||||
|
||||
boolean = true / false
|
||||
|
||||
true = %x74.72.75.65 ; true
|
||||
false = %x66.61.6C.73.65 ; false
|
||||
|
||||
;; Date and Time (as defined in RFC 3339)
|
||||
|
||||
date-time = offset-date-time / local-date-time / local-date / local-time
|
||||
|
||||
date-fullyear = 4DIGIT
|
||||
date-month = 2DIGIT ; 01-12
|
||||
date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year
|
||||
time-delim = "T" / %x20 ; T, t, or space
|
||||
time-hour = 2DIGIT ; 00-23
|
||||
time-minute = 2DIGIT ; 00-59
|
||||
time-second = 2DIGIT ; 00-58, 00-59, 00-60 based on leap second rules
|
||||
time-secfrac = "." 1*DIGIT
|
||||
time-numoffset = ( "+" / "-" ) time-hour ":" time-minute
|
||||
time-offset = "Z" / time-numoffset
|
||||
|
||||
partial-time = time-hour ":" time-minute ":" time-second [ time-secfrac ]
|
||||
full-date = date-fullyear "-" date-month "-" date-mday
|
||||
full-time = partial-time time-offset
|
||||
|
||||
;; Offset Date-Time
|
||||
|
||||
offset-date-time = full-date time-delim full-time
|
||||
|
||||
;; Local Date-Time
|
||||
|
||||
local-date-time = full-date time-delim partial-time
|
||||
|
||||
;; Local Date
|
||||
|
||||
local-date = full-date
|
||||
|
||||
;; Local Time
|
||||
|
||||
local-time = partial-time
|
||||
|
||||
;; Array
|
||||
|
||||
array = array-open [ array-values ] ws-comment-newline array-close
|
||||
|
||||
array-open = %x5B ; [
|
||||
array-close = %x5D ; ]
|
||||
|
||||
array-values = ws-comment-newline val ws-comment-newline array-sep array-values
|
||||
array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ]
|
||||
|
||||
array-sep = %x2C ; , Comma
|
||||
|
||||
ws-comment-newline = *( wschar / [ comment ] newline )
|
||||
|
||||
;; Table
|
||||
|
||||
table = std-table / array-table
|
||||
|
||||
;; Standard Table
|
||||
|
||||
std-table = std-table-open key std-table-close
|
||||
|
||||
std-table-open = %x5B ws ; [ Left square bracket
|
||||
std-table-close = ws %x5D ; ] Right square bracket
|
||||
|
||||
;; Inline Table
|
||||
|
||||
inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close
|
||||
|
||||
inline-table-open = %x7B ws ; {
|
||||
inline-table-close = ws %x7D ; }
|
||||
inline-table-sep = ws %x2C ws ; , Comma
|
||||
|
||||
inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ]
|
||||
|
||||
;; Array Table
|
||||
|
||||
array-table = array-table-open key array-table-close
|
||||
|
||||
array-table-open = %x5B.5B ws ; [[ Double left square bracket
|
||||
array-table-close = ws %x5D.5D ; ]] Double right square bracket
|
||||
|
||||
;; Built-in ABNF terms, reproduced here for clarity
|
||||
|
||||
ALPHA = %x41-5A / %x61-7A ; A-Z / a-z
|
||||
DIGIT = %x30-39 ; 0-9
|
||||
HEXDIG = DIGIT / "A" / "B" / "C" / "D" / "E" / "F"
|
||||
14
vendor/github.com/pelletier/go-toml/v2/types.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
package toml

import (
	"encoding"
	"reflect"
	"time"
)

var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}(nil))
var sliceInterfaceType = reflect.TypeOf([]interface{}(nil))
var stringType = reflect.TypeOf("")
1334
vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
generated
vendored
Normal file
File diff suppressed because it is too large
136
vendor/github.com/pelletier/go-toml/v2/unstable/ast.go
generated
vendored
Normal file
@@ -0,0 +1,136 @@
|
||||
package unstable
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"unsafe"
|
||||
|
||||
"github.com/pelletier/go-toml/v2/internal/danger"
|
||||
)
|
||||
|
||||
// Iterator over a sequence of nodes.
|
||||
//
|
||||
// Starts uninitialized, you need to call Next() first.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// it := n.Children()
|
||||
// for it.Next() {
|
||||
// n := it.Node()
|
||||
// // do something with n
|
||||
// }
|
||||
type Iterator struct {
|
||||
started bool
|
||||
node *Node
|
||||
}
|
||||
|
||||
// Next moves the iterator forward and returns true if points to a
|
||||
// node, false otherwise.
|
||||
func (c *Iterator) Next() bool {
|
||||
if !c.started {
|
||||
c.started = true
|
||||
} else if c.node.Valid() {
|
||||
c.node = c.node.Next()
|
||||
}
|
||||
return c.node.Valid()
|
||||
}
|
||||
|
||||
// IsLast returns true if the current node of the iterator is the last
|
||||
// one. Subsequent calls to Next() will return false.
|
||||
func (c *Iterator) IsLast() bool {
|
||||
return c.node.next == 0
|
||||
}
|
||||
|
||||
// Node returns a pointer to the node pointed at by the iterator.
|
||||
func (c *Iterator) Node() *Node {
|
||||
return c.node
|
||||
}
|
||||
|
||||
// Node in a TOML expression AST.
|
||||
//
|
||||
// Depending on Kind, its sequence of children should be interpreted
|
||||
// differently.
|
||||
//
|
||||
// - Array have one child per element in the array.
|
||||
// - InlineTable have one child per key-value in the table (each of kind
|
||||
// InlineTable).
|
||||
// - KeyValue have at least two children. The first one is the value. The rest
|
||||
// make a potentially dotted key.
|
||||
// - Table and ArrayTable's children represent a dotted key (same as
|
||||
// KeyValue, but without the first node being the value).
|
||||
//
|
||||
// When relevant, Raw describes the range of bytes this node is referring to in
|
||||
// the input document. Use Parser.Raw() to retrieve the actual bytes.
|
||||
type Node struct {
|
||||
Kind Kind
|
||||
Raw Range // Raw bytes from the input.
|
||||
Data []byte // Node value (either allocated or referencing the input).
|
||||
|
||||
// References to other nodes, as offsets in the backing array
|
||||
// from this node. References can go backward, so those can be
|
||||
// negative.
|
||||
next int // 0 if last element
|
||||
child int // 0 if no child
|
||||
}
|
||||
|
||||
// Range of bytes in the document.
|
||||
type Range struct {
|
||||
Offset uint32
|
||||
Length uint32
|
||||
}
|
||||
|
||||
// Next returns a pointer to the next node, or nil if there is no next node.
|
||||
func (n *Node) Next() *Node {
|
||||
if n.next == 0 {
|
||||
return nil
|
||||
}
|
||||
ptr := unsafe.Pointer(n)
|
||||
size := unsafe.Sizeof(Node{})
|
||||
return (*Node)(danger.Stride(ptr, size, n.next))
|
||||
}
|
||||
|
||||
// Child returns a pointer to the first child node of this node. Other children
|
||||
// can be accessed calling Next on the first child. Returns an nil if this Node
|
||||
// has no child.
|
||||
func (n *Node) Child() *Node {
|
||||
if n.child == 0 {
|
||||
return nil
|
||||
}
|
||||
ptr := unsafe.Pointer(n)
|
||||
size := unsafe.Sizeof(Node{})
|
||||
return (*Node)(danger.Stride(ptr, size, n.child))
|
||||
}
|
||||
|
||||
// Valid returns true if the node's kind is set (not to Invalid).
|
||||
func (n *Node) Valid() bool {
|
||||
return n != nil
|
||||
}
|
||||
|
||||
// Key returns the children nodes making the Key on a supported node. Panics
|
||||
// otherwise. They are guaranteed to be all be of the Kind Key. A simple key
|
||||
// would return just one element.
|
||||
func (n *Node) Key() Iterator {
|
||||
switch n.Kind {
|
||||
case KeyValue:
|
||||
value := n.Child()
|
||||
if !value.Valid() {
|
||||
panic(fmt.Errorf("KeyValue should have at least two children"))
|
||||
}
|
||||
return Iterator{node: value.Next()}
|
||||
case Table, ArrayTable:
|
||||
return Iterator{node: n.Child()}
|
||||
default:
|
||||
panic(fmt.Errorf("Key() is not supported on a %s", n.Kind))
|
||||
}
|
||||
}
|
||||
|
||||
// Value returns a pointer to the value node of a KeyValue.
|
||||
// Guaranteed to be non-nil. Panics if not called on a KeyValue node,
|
||||
// or if the Children are malformed.
|
||||
func (n *Node) Value() *Node {
|
||||
return n.Child()
|
||||
}
|
||||
|
||||
// Children returns an iterator over a node's children.
|
||||
func (n *Node) Children() Iterator {
|
||||
return Iterator{node: n.Child()}
|
||||
}
|
||||
71
vendor/github.com/pelletier/go-toml/v2/unstable/builder.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
package unstable
|
||||
|
||||
// root contains a full AST.
|
||||
//
|
||||
// It is immutable once constructed with Builder.
|
||||
type root struct {
|
||||
nodes []Node
|
||||
}
|
||||
|
||||
// Iterator over the top level nodes.
|
||||
func (r *root) Iterator() Iterator {
|
||||
it := Iterator{}
|
||||
if len(r.nodes) > 0 {
|
||||
it.node = &r.nodes[0]
|
||||
}
|
||||
return it
|
||||
}
|
||||
|
||||
func (r *root) at(idx reference) *Node {
|
||||
return &r.nodes[idx]
|
||||
}
|
||||
|
||||
type reference int
|
||||
|
||||
const invalidReference reference = -1
|
||||
|
||||
func (r reference) Valid() bool {
|
||||
return r != invalidReference
|
||||
}
|
||||
|
||||
type builder struct {
|
||||
tree root
|
||||
lastIdx int
|
||||
}
|
||||
|
||||
func (b *builder) Tree() *root {
|
||||
return &b.tree
|
||||
}
|
||||
|
||||
func (b *builder) NodeAt(ref reference) *Node {
|
||||
return b.tree.at(ref)
|
||||
}
|
||||
|
||||
func (b *builder) Reset() {
|
||||
b.tree.nodes = b.tree.nodes[:0]
|
||||
b.lastIdx = 0
|
||||
}
|
||||
|
||||
func (b *builder) Push(n Node) reference {
|
||||
b.lastIdx = len(b.tree.nodes)
|
||||
b.tree.nodes = append(b.tree.nodes, n)
|
||||
return reference(b.lastIdx)
|
||||
}
|
||||
|
||||
func (b *builder) PushAndChain(n Node) reference {
|
||||
newIdx := len(b.tree.nodes)
|
||||
b.tree.nodes = append(b.tree.nodes, n)
|
||||
if b.lastIdx >= 0 {
|
||||
b.tree.nodes[b.lastIdx].next = newIdx - b.lastIdx
|
||||
}
|
||||
b.lastIdx = newIdx
|
||||
return reference(b.lastIdx)
|
||||
}
|
||||
|
||||
func (b *builder) AttachChild(parent reference, child reference) {
|
||||
b.tree.nodes[parent].child = int(child) - int(parent)
|
||||
}
|
||||
|
||||
func (b *builder) Chain(from reference, to reference) {
|
||||
b.tree.nodes[from].next = int(to) - int(from)
|
||||
}
|
||||
3
vendor/github.com/pelletier/go-toml/v2/unstable/doc.go
generated
vendored
Normal file
@@ -0,0 +1,3 @@
// Package unstable provides APIs that do not meet the backward compatibility
// guarantees yet.
package unstable
71
vendor/github.com/pelletier/go-toml/v2/unstable/kind.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
package unstable
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Kind represents the type of TOML structure contained in a given Node.
|
||||
type Kind int
|
||||
|
||||
const (
|
||||
// Meta
|
||||
Invalid Kind = iota
|
||||
Comment
|
||||
Key
|
||||
|
||||
// Top level structures
|
||||
Table
|
||||
ArrayTable
|
||||
KeyValue
|
||||
|
||||
// Containers values
|
||||
Array
|
||||
InlineTable
|
||||
|
||||
// Values
|
||||
String
|
||||
Bool
|
||||
Float
|
||||
Integer
|
||||
LocalDate
|
||||
LocalTime
|
||||
LocalDateTime
|
||||
DateTime
|
||||
)
|
||||
|
||||
// String implementation of fmt.Stringer.
|
||||
func (k Kind) String() string {
|
||||
switch k {
|
||||
case Invalid:
|
||||
return "Invalid"
|
||||
case Comment:
|
||||
return "Comment"
|
||||
case Key:
|
||||
return "Key"
|
||||
case Table:
|
||||
return "Table"
|
||||
case ArrayTable:
|
||||
return "ArrayTable"
|
||||
case KeyValue:
|
||||
return "KeyValue"
|
||||
case Array:
|
||||
return "Array"
|
||||
case InlineTable:
|
||||
return "InlineTable"
|
||||
case String:
|
||||
return "String"
|
||||
case Bool:
|
||||
return "Bool"
|
||||
case Float:
|
||||
return "Float"
|
||||
case Integer:
|
||||
return "Integer"
|
||||
case LocalDate:
|
||||
return "LocalDate"
|
||||
case LocalTime:
|
||||
return "LocalTime"
|
||||
case LocalDateTime:
|
||||
return "LocalDateTime"
|
||||
case DateTime:
|
||||
return "DateTime"
|
||||
}
|
||||
panic(fmt.Errorf("Kind.String() not implemented for '%d'", k))
|
||||
}
|
||||
1245
vendor/github.com/pelletier/go-toml/v2/unstable/parser.go
generated
vendored
Normal file
File diff suppressed because it is too large
270
vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go
generated
vendored
Normal file
@@ -0,0 +1,270 @@
|
||||
package unstable
|
||||
|
||||
import "github.com/pelletier/go-toml/v2/internal/characters"
|
||||
|
||||
func scanFollows(b []byte, pattern string) bool {
|
||||
n := len(pattern)
|
||||
|
||||
return len(b) >= n && string(b[:n]) == pattern
|
||||
}
|
||||
|
||||
func scanFollowsMultilineBasicStringDelimiter(b []byte) bool {
|
||||
return scanFollows(b, `"""`)
|
||||
}
|
||||
|
||||
func scanFollowsMultilineLiteralStringDelimiter(b []byte) bool {
|
||||
return scanFollows(b, `'''`)
|
||||
}
|
||||
|
||||
func scanFollowsTrue(b []byte) bool {
|
||||
return scanFollows(b, `true`)
|
||||
}
|
||||
|
||||
func scanFollowsFalse(b []byte) bool {
|
||||
return scanFollows(b, `false`)
|
||||
}
|
||||
|
||||
func scanFollowsInf(b []byte) bool {
|
||||
return scanFollows(b, `inf`)
|
||||
}
|
||||
|
||||
func scanFollowsNan(b []byte) bool {
|
||||
return scanFollows(b, `nan`)
|
||||
}
|
||||
|
||||
func scanUnquotedKey(b []byte) ([]byte, []byte) {
|
||||
// unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _
|
||||
for i := 0; i < len(b); i++ {
|
||||
if !isUnquotedKeyChar(b[i]) {
|
||||
return b[:i], b[i:]
|
||||
}
|
||||
}
|
||||
|
||||
return b, b[len(b):]
|
||||
}
|
||||
|
||||
func isUnquotedKeyChar(r byte) bool {
|
||||
return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '-' || r == '_'
|
||||
}
|
||||
|
||||
func scanLiteralString(b []byte) ([]byte, []byte, error) {
|
||||
// literal-string = apostrophe *literal-char apostrophe
|
||||
// apostrophe = %x27 ; ' apostrophe
|
||||
// literal-char = %x09 / %x20-26 / %x28-7E / non-ascii
|
||||
for i := 1; i < len(b); {
|
||||
switch b[i] {
|
||||
case '\'':
|
||||
return b[:i+1], b[i+1:], nil
|
||||
case '\n', '\r':
|
||||
return nil, nil, NewParserError(b[i:i+1], "literal strings cannot have new lines")
|
||||
}
|
||||
size := characters.Utf8ValidNext(b[i:])
|
||||
if size == 0 {
|
||||
return nil, nil, NewParserError(b[i:i+1], "invalid character")
|
||||
}
|
||||
i += size
|
||||
}
|
||||
|
||||
return nil, nil, NewParserError(b[len(b):], "unterminated literal string")
|
||||
}
|
||||
|
||||
func scanMultilineLiteralString(b []byte) ([]byte, []byte, error) {
|
||||
// ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body
|
||||
// ml-literal-string-delim
|
||||
// ml-literal-string-delim = 3apostrophe
|
||||
// ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ]
|
||||
//
|
||||
// mll-content = mll-char / newline
|
||||
// mll-char = %x09 / %x20-26 / %x28-7E / non-ascii
|
||||
// mll-quotes = 1*2apostrophe
|
||||
for i := 3; i < len(b); {
|
||||
switch b[i] {
|
||||
case '\'':
|
||||
if scanFollowsMultilineLiteralStringDelimiter(b[i:]) {
|
||||
i += 3
|
||||
|
||||
// At that point we found 3 apostrophe, and i is the
|
||||
// index of the byte after the third one. The scanner
|
||||
// needs to be eager, because there can be an extra 2
|
||||
// apostrophe that can be accepted at the end of the
|
||||
// string.
|
||||
|
||||
if i >= len(b) || b[i] != '\'' {
|
||||
return b[:i], b[i:], nil
|
||||
}
|
||||
i++
|
||||
|
||||
if i >= len(b) || b[i] != '\'' {
|
||||
return b[:i], b[i:], nil
|
||||
}
|
||||
i++
|
||||
|
||||
if i < len(b) && b[i] == '\'' {
|
||||
return nil, nil, NewParserError(b[i-3:i+1], "''' not allowed in multiline literal string")
|
||||
}
|
||||
|
||||
return b[:i], b[i:], nil
|
||||
}
|
||||
case '\r':
|
||||
if len(b) < i+2 {
|
||||
return nil, nil, NewParserError(b[len(b):], `need a \n after \r`)
|
||||
}
|
||||
if b[i+1] != '\n' {
|
||||
return nil, nil, NewParserError(b[i:i+2], `need a \n after \r`)
|
||||
}
|
||||
i += 2 // skip the \n
|
||||
continue
|
||||
}
|
||||
size := characters.Utf8ValidNext(b[i:])
|
||||
if size == 0 {
|
||||
return nil, nil, NewParserError(b[i:i+1], "invalid character")
|
||||
}
|
||||
i += size
|
||||
}
|
||||
|
||||
return nil, nil, NewParserError(b[len(b):], `multiline literal string not terminated by '''`)
|
||||
}
|
||||
|
||||
func scanWindowsNewline(b []byte) ([]byte, []byte, error) {
|
||||
const lenCRLF = 2
|
||||
if len(b) < lenCRLF {
|
||||
return nil, nil, NewParserError(b, "windows new line expected")
|
||||
}
|
||||
|
||||
if b[1] != '\n' {
|
||||
return nil, nil, NewParserError(b, `windows new line should be \r\n`)
|
||||
}
|
||||
|
||||
return b[:lenCRLF], b[lenCRLF:], nil
|
||||
}
|
||||
|
||||
func scanWhitespace(b []byte) ([]byte, []byte) {
|
||||
for i := 0; i < len(b); i++ {
|
||||
switch b[i] {
|
||||
case ' ', '\t':
|
||||
continue
|
||||
default:
|
||||
return b[:i], b[i:]
|
||||
}
|
||||
}
|
||||
|
||||
return b, b[len(b):]
|
||||
}
|
||||
|
||||
func scanComment(b []byte) ([]byte, []byte, error) {
|
||||
// comment-start-symbol = %x23 ; #
|
||||
// non-ascii = %x80-D7FF / %xE000-10FFFF
|
||||
// non-eol = %x09 / %x20-7F / non-ascii
|
||||
//
|
||||
// comment = comment-start-symbol *non-eol
|
||||
|
||||
for i := 1; i < len(b); {
|
||||
if b[i] == '\n' {
|
||||
return b[:i], b[i:], nil
|
||||
}
|
||||
if b[i] == '\r' {
|
||||
if i+1 < len(b) && b[i+1] == '\n' {
|
||||
return b[:i+1], b[i+1:], nil
|
||||
}
|
||||
return nil, nil, NewParserError(b[i:i+1], "invalid character in comment")
|
||||
}
|
||||
size := characters.Utf8ValidNext(b[i:])
|
||||
if size == 0 {
|
||||
return nil, nil, NewParserError(b[i:i+1], "invalid character in comment")
|
||||
}
|
||||
|
||||
i += size
|
||||
}
|
||||
|
||||
return b, b[len(b):], nil
|
||||
}
|
||||
|
||||
func scanBasicString(b []byte) ([]byte, bool, []byte, error) {
|
||||
// basic-string = quotation-mark *basic-char quotation-mark
|
||||
// quotation-mark = %x22 ; "
|
||||
// basic-char = basic-unescaped / escaped
|
||||
// basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii
|
||||
// escaped = escape escape-seq-char
|
||||
escaped := false
|
||||
i := 1
|
||||
|
||||
for ; i < len(b); i++ {
|
||||
switch b[i] {
|
||||
case '"':
|
||||
return b[:i+1], escaped, b[i+1:], nil
|
||||
case '\n', '\r':
|
||||
return nil, escaped, nil, NewParserError(b[i:i+1], "basic strings cannot have new lines")
|
||||
case '\\':
|
||||
if len(b) < i+2 {
|
||||
return nil, escaped, nil, NewParserError(b[i:i+1], "need a character after \\")
|
||||
}
|
||||
escaped = true
|
||||
i++ // skip the next character
|
||||
}
|
||||
}
|
||||
|
||||
return nil, escaped, nil, NewParserError(b[len(b):], `basic string not terminated by "`)
|
||||
}
|
||||
|
||||
func scanMultilineBasicString(b []byte) ([]byte, bool, []byte, error) {
|
||||
// ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body
|
||||
// ml-basic-string-delim
|
||||
// ml-basic-string-delim = 3quotation-mark
|
||||
// ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ]
|
||||
//
|
||||
// mlb-content = mlb-char / newline / mlb-escaped-nl
|
||||
// mlb-char = mlb-unescaped / escaped
|
||||
// mlb-quotes = 1*2quotation-mark
|
||||
// mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii
|
||||
// mlb-escaped-nl = escape ws newline *( wschar / newline )
|
||||
|
||||
escaped := false
|
||||
i := 3
|
||||
|
||||
for ; i < len(b); i++ {
|
||||
switch b[i] {
|
||||
case '"':
|
||||
if scanFollowsMultilineBasicStringDelimiter(b[i:]) {
|
||||
i += 3
|
||||
|
||||
// At that point we found 3 apostrophe, and i is the
|
||||
// index of the byte after the third one. The scanner
|
||||
// needs to be eager, because there can be an extra 2
|
||||
// apostrophe that can be accepted at the end of the
|
||||
// string.
|
||||
|
||||
if i >= len(b) || b[i] != '"' {
|
||||
return b[:i], escaped, b[i:], nil
|
||||
}
|
||||
i++
|
||||
|
||||
if i >= len(b) || b[i] != '"' {
|
||||
return b[:i], escaped, b[i:], nil
|
||||
}
|
||||
i++
|
||||
|
||||
if i < len(b) && b[i] == '"' {
|
||||
return nil, escaped, nil, NewParserError(b[i-3:i+1], `""" not allowed in multiline basic string`)
|
||||
}
|
||||
|
||||
return b[:i], escaped, b[i:], nil
|
||||
}
|
||||
case '\\':
|
||||
if len(b) < i+2 {
|
||||
return nil, escaped, nil, NewParserError(b[len(b):], "need a character after \\")
|
||||
}
|
||||
escaped = true
|
||||
i++ // skip the next character
|
||||
case '\r':
|
||||
if len(b) < i+2 {
|
||||
return nil, escaped, nil, NewParserError(b[len(b):], `need a \n after \r`)
|
||||
}
|
||||
if b[i+1] != '\n' {
|
||||
return nil, escaped, nil, NewParserError(b[i:i+2], `need a \n after \r`)
|
||||
}
|
||||
i++ // skip the \n
|
||||
}
|
||||
}
|
||||
|
||||
return nil, escaped, nil, NewParserError(b[len(b):], `multiline basic string not terminated by """`)
|
||||
}
|
||||
7
vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
package unstable

// The Unmarshaler interface may be implemented by types to customize their
// behavior when being unmarshaled from a TOML document.
type Unmarshaler interface {
	UnmarshalTOML(value *Node) error
}
21
vendor/github.com/sagikazarmark/locafero/.editorconfig
generated
vendored
Normal file
@@ -0,0 +1,21 @@
root = true

[*]
charset = utf-8
end_of_line = lf
indent_size = 4
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true

[{Makefile,*.mk}]
indent_style = tab

[*.nix]
indent_size = 2

[*.go]
indent_style = tab

[{*.yml,*.yaml}]
indent_size = 2
8
vendor/github.com/sagikazarmark/locafero/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,8 @@
/.devenv/
/.direnv/
/.task/
/bin/
/build/
/tmp/
/var/
/vendor/
37
vendor/github.com/sagikazarmark/locafero/.golangci.yaml
generated
vendored
Normal file
@@ -0,0 +1,37 @@
version: "2"

run:
  timeout: 10m

linters:
  enable:
    - errcheck
    - govet
    - ineffassign
    - misspell
    - nolintlint
    - revive
    - staticcheck
    - unused

  settings:
    misspell:
      locale: US
    nolintlint:
      allow-unused: false # report any unused nolint directives
      require-specific: false # don't require nolint directives to be specific about which linter is being skipped

formatters:
  enable:
    - gci
    - gofmt
    - gofumpt
    - goimports
    - golines

  settings:
    gci:
      sections:
        - standard
        - default
        - localmodule
19
vendor/github.com/sagikazarmark/locafero/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
Copyright (c) 2023 Márk Sági-Kazár <mark.sagikazar@gmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is furnished
|
||||
to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
37
vendor/github.com/sagikazarmark/locafero/README.md
generated
vendored
Normal file
@@ -0,0 +1,37 @@
# Finder library for [Afero](https://github.com/spf13/afero)

[CI](https://github.com/sagikazarmark/locafero/actions/workflows/ci.yaml) · [Go Reference](https://pkg.go.dev/mod/github.com/sagikazarmark/locafero) · [deps.dev](https://deps.dev/go/github.com%252Fsagikazarmark%252Flocafero)

**Finder library for [Afero](https://github.com/spf13/afero) ported from [go-finder](https://github.com/sagikazarmark/go-finder).**

> [!WARNING]
> This is an experimental library under development.
>
> **Backwards compatibility is not guaranteed, expect breaking changes.**

## Installation

```shell
go get github.com/sagikazarmark/locafero
```

## Usage

Check out the [package example](https://pkg.go.dev/github.com/sagikazarmark/locafero#example-package) on go.dev.
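The package example linked above is the canonical reference. As a rough sketch based only on the `Finder`, `FileType`, and `NameWithExtensions` definitions vendored later in this commit (the paths and file names below are illustrative assumptions):

```go
package main

import (
	"fmt"

	"github.com/sagikazarmark/locafero"
	"github.com/spf13/afero"
)

func main() {
	finder := locafero.Finder{
		// Root directories to search in (illustrative values).
		Paths: []string{"/etc/myapp", "."},
		// Entry names to look for; glob syntax is also supported.
		Names: locafero.NameWithExtensions("config", "yaml", "json"),
		// Only return regular files, not directories.
		Type: locafero.FileTypeFile,
	}

	results, err := finder.Find(afero.NewOsFs())
	if err != nil {
		panic(err)
	}
	fmt.Println(results) // e.g. [/etc/myapp/config.yaml]
}
```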
## Development

**For an optimal developer experience, it is recommended to install [Nix](https://nixos.org/download.html) and [direnv](https://direnv.net/docs/installation.html).**

Run the test suite:

```shell
just test
```

## License

The project is licensed under the [MIT License](LICENSE).
32
vendor/github.com/sagikazarmark/locafero/file_type.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
package locafero

import "io/fs"

// FileType represents the kind of entries [Finder] can return.
type FileType int

// FileType represents the kind of entries [Finder] can return.
const (
	FileTypeAny FileType = iota
	FileTypeFile
	FileTypeDir

	// Deprecated: Use [FileTypeAny] instead.
	FileTypeAll = FileTypeAny
)

func (ft FileType) match(info fs.FileInfo) bool {
	switch ft {
	case FileTypeAny:
		return true

	case FileTypeFile:
		return info.Mode().IsRegular()

	case FileTypeDir:
		return info.IsDir()

	default:
		return false
	}
}
171
vendor/github.com/sagikazarmark/locafero/finder.go
generated
vendored
Normal file
@@ -0,0 +1,171 @@
|
||||
// Package locafero looks for files and directories in an {fs.Fs} filesystem.
|
||||
package locafero
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/fs"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/sourcegraph/conc/pool"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
// Finder looks for files and directories in an [afero.Fs] filesystem.
|
||||
type Finder struct {
|
||||
// Paths represents a list of locations that the [Finder] will search in.
|
||||
//
|
||||
// They are essentially the root directories or starting points for the search.
|
||||
//
|
||||
// Examples:
|
||||
// - home/user
|
||||
// - etc
|
||||
Paths []string
|
||||
|
||||
// Names are specific entries that the [Finder] will look for within the given Paths.
|
||||
//
|
||||
// It provides the capability to search for entries with depth,
|
||||
// meaning it can target deeper locations within the directory structure.
|
||||
//
|
||||
// It also supports glob syntax (as defined by [filepath.Match]), offering greater flexibility in search patterns.
|
||||
//
|
||||
// Examples:
|
||||
// - config.yaml
|
||||
// - home/*/config.yaml
|
||||
// - home/*/config.*
|
||||
Names []string
|
||||
|
||||
// Type restricts the kind of entries returned by the [Finder].
|
||||
//
|
||||
// This parameter helps in differentiating and filtering out files from directories or vice versa.
|
||||
Type FileType
|
||||
}
|
||||
|
||||
// Find looks for files and directories in an [afero.Fs] filesystem.
|
||||
func (f Finder) Find(fsys afero.Fs) ([]string, error) {
|
||||
// Arbitrary go routine limit (TODO: make this a parameter)
|
||||
p := pool.NewWithResults[[]searchResult]().WithMaxGoroutines(5).WithErrors().WithFirstError()
|
||||
|
||||
for _, searchPath := range f.Paths {
|
||||
for _, searchName := range f.Names {
|
||||
p.Go(func() ([]searchResult, error) {
|
||||
// If the name contains any glob character, perform a glob match
|
||||
if strings.ContainsAny(searchName, globMatch) {
|
||||
return globWalkSearch(fsys, searchPath, searchName, f.Type)
|
||||
}
|
||||
|
||||
return statSearch(fsys, searchPath, searchName, f.Type)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
searchResults, err := flatten(p.Wait())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return early if no results were found
|
||||
if len(searchResults) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
results := make([]string, 0, len(searchResults))
|
||||
|
||||
for _, searchResult := range searchResults {
|
||||
results = append(results, searchResult.path)
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
type searchResult struct {
|
||||
path string
|
||||
info fs.FileInfo
|
||||
}
|
||||
|
||||
func flatten[T any](results [][]T, err error) ([]T, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var flattened []T
|
||||
|
||||
for _, r := range results {
|
||||
flattened = append(flattened, r...)
|
||||
}
|
||||
|
||||
return flattened, nil
|
||||
}
|
||||
|
||||
func globWalkSearch(
|
||||
fsys afero.Fs,
|
||||
searchPath string,
|
||||
searchName string,
|
||||
searchType FileType,
|
||||
) ([]searchResult, error) {
|
||||
var results []searchResult
|
||||
|
||||
err := afero.Walk(fsys, searchPath, func(p string, fileInfo fs.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Skip the root path
|
||||
if p == searchPath {
|
||||
return nil
|
||||
}
|
||||
|
||||
var result error
|
||||
|
||||
// Stop reading subdirectories
|
||||
// TODO: add depth detection here
|
||||
if fileInfo.IsDir() && filepath.Dir(p) == searchPath {
|
||||
result = fs.SkipDir
|
||||
}
|
||||
|
||||
// Skip unmatching type
|
||||
if !searchType.match(fileInfo) {
|
||||
return result
|
||||
}
|
||||
|
||||
match, err := filepath.Match(searchName, fileInfo.Name())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if match {
|
||||
results = append(results, searchResult{p, fileInfo})
|
||||
}
|
||||
|
||||
return result
|
||||
})
|
||||
if err != nil {
|
||||
return results, err
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func statSearch(
|
||||
fsys afero.Fs,
|
||||
searchPath string,
|
||||
searchName string,
|
||||
searchType FileType,
|
||||
) ([]searchResult, error) {
|
||||
filePath := filepath.Join(searchPath, searchName)
|
||||
|
||||
fileInfo, err := fsys.Stat(filePath)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Skip unmatching type
|
||||
if !searchType.match(fileInfo) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return []searchResult{{filePath, fileInfo}}, nil
|
||||
}
|
||||
255
vendor/github.com/sagikazarmark/locafero/flake.lock
generated
vendored
Normal file
@@ -0,0 +1,255 @@
|
||||
{
|
||||
"nodes": {
|
||||
"cachix": {
|
||||
"inputs": {
|
||||
"devenv": [
|
||||
"devenv"
|
||||
],
|
||||
"flake-compat": [
|
||||
"devenv"
|
||||
],
|
||||
"git-hooks": [
|
||||
"devenv",
|
||||
"git-hooks"
|
||||
],
|
||||
"nixpkgs": [
|
||||
"devenv",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1748883665,
|
||||
"narHash": "sha256-R0W7uAg+BLoHjMRMQ8+oiSbTq8nkGz5RDpQ+ZfxxP3A=",
|
||||
"owner": "cachix",
|
||||
"repo": "cachix",
|
||||
"rev": "f707778d902af4d62d8dd92c269f8e70de09acbe",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "cachix",
|
||||
"ref": "latest",
|
||||
"repo": "cachix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"devenv": {
|
||||
"inputs": {
|
||||
"cachix": "cachix",
|
||||
"flake-compat": "flake-compat",
|
||||
"git-hooks": "git-hooks",
|
||||
"nix": "nix",
|
||||
"nixpkgs": "nixpkgs"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1753981111,
|
||||
"narHash": "sha256-uBJOyMxOkGRmxhD2M5rbN2aV6oP1T2AKq5oBaHHC4mw=",
|
||||
"owner": "cachix",
|
||||
"repo": "devenv",
|
||||
"rev": "d4d70df706b153b601a87ab8e81c88a0b1a373b6",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "cachix",
|
||||
"repo": "devenv",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-compat": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1747046372,
|
||||
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-parts": {
|
||||
"inputs": {
|
||||
"nixpkgs-lib": [
|
||||
"devenv",
|
||||
"nix",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1733312601,
|
||||
"narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-parts_2": {
|
||||
"inputs": {
|
||||
"nixpkgs-lib": "nixpkgs-lib"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1753121425,
|
||||
"narHash": "sha256-TVcTNvOeWWk1DXljFxVRp+E0tzG1LhrVjOGGoMHuXio=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"rev": "644e0fc48951a860279da645ba77fe4a6e814c5e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"git-hooks": {
|
||||
"inputs": {
|
||||
"flake-compat": [
|
||||
"devenv",
|
||||
"flake-compat"
|
||||
],
|
||||
"gitignore": "gitignore",
|
||||
"nixpkgs": [
|
||||
"devenv",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1750779888,
|
||||
"narHash": "sha256-wibppH3g/E2lxU43ZQHC5yA/7kIKLGxVEnsnVK1BtRg=",
|
||||
"owner": "cachix",
|
||||
"repo": "git-hooks.nix",
|
||||
"rev": "16ec914f6fb6f599ce988427d9d94efddf25fe6d",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "cachix",
|
||||
"repo": "git-hooks.nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"gitignore": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"devenv",
|
||||
"git-hooks",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1709087332,
|
||||
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "gitignore.nix",
|
||||
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "hercules-ci",
|
||||
"repo": "gitignore.nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix": {
|
||||
"inputs": {
|
||||
"flake-compat": [
|
||||
"devenv",
|
||||
"flake-compat"
|
||||
],
|
||||
"flake-parts": "flake-parts",
|
||||
"git-hooks-nix": [
|
||||
"devenv",
|
||||
"git-hooks"
|
||||
],
|
||||
"nixpkgs": [
|
||||
"devenv",
|
||||
"nixpkgs"
|
||||
],
|
||||
"nixpkgs-23-11": [
|
||||
"devenv"
|
||||
],
|
||||
"nixpkgs-regression": [
|
||||
"devenv"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1752773918,
|
||||
"narHash": "sha256-dOi/M6yNeuJlj88exI+7k154z+hAhFcuB8tZktiW7rg=",
|
||||
"owner": "cachix",
|
||||
"repo": "nix",
|
||||
"rev": "031c3cf42d2e9391eee373507d8c12e0f9606779",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "cachix",
|
||||
"ref": "devenv-2.30",
|
||||
"repo": "nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1750441195,
|
||||
"narHash": "sha256-yke+pm+MdgRb6c0dPt8MgDhv7fcBbdjmv1ZceNTyzKg=",
|
||||
"owner": "cachix",
|
||||
"repo": "devenv-nixpkgs",
|
||||
"rev": "0ceffe312871b443929ff3006960d29b120dc627",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "cachix",
|
||||
"ref": "rolling",
|
||||
"repo": "devenv-nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs-lib": {
|
||||
"locked": {
|
||||
"lastModified": 1751159883,
|
||||
"narHash": "sha256-urW/Ylk9FIfvXfliA1ywh75yszAbiTEVgpPeinFyVZo=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"rev": "14a40a1d7fb9afa4739275ac642ed7301a9ba1ab",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs_2": {
|
||||
"locked": {
|
||||
"lastModified": 1753939845,
|
||||
"narHash": "sha256-K2ViRJfdVGE8tpJejs8Qpvvejks1+A4GQej/lBk5y7I=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "94def634a20494ee057c76998843c015909d6311",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"devenv": "devenv",
|
||||
"flake-parts": "flake-parts_2",
|
||||
"nixpkgs": "nixpkgs_2"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
||||
42
vendor/github.com/sagikazarmark/locafero/flake.nix
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
{
|
||||
inputs = {
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
|
||||
flake-parts.url = "github:hercules-ci/flake-parts";
|
||||
devenv.url = "github:cachix/devenv";
|
||||
};
|
||||
|
||||
outputs =
|
||||
inputs@{ flake-parts, ... }:
|
||||
flake-parts.lib.mkFlake { inherit inputs; } {
|
||||
imports = [
|
||||
inputs.devenv.flakeModule
|
||||
];
|
||||
|
||||
systems = [
|
||||
"x86_64-linux"
|
||||
"aarch64-darwin"
|
||||
];
|
||||
|
||||
perSystem =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
devenv.shells = {
|
||||
default = {
|
||||
languages = {
|
||||
go.enable = true;
|
||||
go.package = pkgs.lib.mkDefault pkgs.go_1_24;
|
||||
};
|
||||
|
||||
packages = with pkgs; [
|
||||
just
|
||||
|
||||
golangci-lint
|
||||
];
|
||||
|
||||
# https://github.com/cachix/devenv/issues/528#issuecomment-1556108767
|
||||
containers = pkgs.lib.mkForce { };
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
5
vendor/github.com/sagikazarmark/locafero/glob.go
generated
vendored
Normal file
@@ -0,0 +1,5 @@
//go:build !windows

package locafero

const globMatch = "*?[]\\^"
8
vendor/github.com/sagikazarmark/locafero/glob_windows.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@
//go:build windows

package locafero

// See [filepath.Match]:
//
// On Windows, escaping is disabled. Instead, '\\' is treated as path separator.
const globMatch = "*?[]^"
41
vendor/github.com/sagikazarmark/locafero/helpers.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
package locafero

import "fmt"

// NameWithExtensions creates a list of names from a base name and a list of extensions.
//
// TODO: find a better name for this function.
func NameWithExtensions(baseName string, extensions ...string) []string {
	var names []string

	if baseName == "" {
		return names
	}

	for _, ext := range extensions {
		if ext == "" {
			continue
		}

		names = append(names, fmt.Sprintf("%s.%s", baseName, ext))
	}

	return names
}

// NameWithOptionalExtensions creates a list of names from a base name and a list of extensions,
// plus it adds the base name (without any extensions) to the end of the list.
//
// TODO: find a better name for this function.
func NameWithOptionalExtensions(baseName string, extensions ...string) []string {
	var names []string

	if baseName == "" {
		return names
	}

	names = NameWithExtensions(baseName, extensions...)
	names = append(names, baseName)

	return names
}
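For orientation only (not part of the vendored file), the two helpers above produce lists like the following; the base name and extensions are arbitrary examples:

```go
package main

import (
	"fmt"

	"github.com/sagikazarmark/locafero"
)

func main() {
	// [config.yaml config.json]
	fmt.Println(locafero.NameWithExtensions("config", "yaml", "json"))

	// [config.yaml config.json config] -- the bare base name is appended last
	fmt.Println(locafero.NameWithOptionalExtensions("config", "yaml", "json"))
}
```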
14
vendor/github.com/sagikazarmark/locafero/justfile
generated
vendored
Normal file
@@ -0,0 +1,14 @@
default:
    just --list

test:
    go test -count 10 -shuffle on -race -v ./...

fuzz:
    go test -race -v -fuzz=Fuzz -fuzztime=60s ./...

lint:
    golangci-lint run

fmt:
    golangci-lint fmt
11
vendor/github.com/sourcegraph/conc/.golangci.yml
generated
vendored
Normal file
@@ -0,0 +1,11 @@
linters:
  disable-all: true
  enable:
    - errcheck
    - godot
    - gosimple
    - govet
    - ineffassign
    - staticcheck
    - typecheck
    - unused
21
vendor/github.com/sourcegraph/conc/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2023 Sourcegraph
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
24
vendor/github.com/sourcegraph/conc/Makefile
generated
vendored
Normal file
@@ -0,0 +1,24 @@
.DEFAULT_GOAL := help

GO_BIN ?= $(shell go env GOPATH)/bin

.PHONY: help
help:
	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'

$(GO_BIN)/golangci-lint:
	@echo "==> Installing golangci-lint within "${GO_BIN}""
	@go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@latest

.PHONY: lint
lint: $(GO_BIN)/golangci-lint ## Run linting on Go files
	@echo "==> Linting Go source files"
	@golangci-lint run -v --fix -c .golangci.yml ./...

.PHONY: test
test: ## Run tests
	go test -race -v ./... -coverprofile ./coverage.txt

.PHONY: bench
bench: ## Run benchmarks. See https://pkg.go.dev/cmd/go#hdr-Testing_flags
	go test ./... -bench . -benchtime 5s -timeout 0 -run=XXX -cpu 1 -benchmem
464
vendor/github.com/sourcegraph/conc/README.md
generated
vendored
Normal file
@@ -0,0 +1,464 @@
|
||||

|
||||
|
||||
# `conc`: better structured concurrency for go
|
||||
|
||||
[](https://pkg.go.dev/github.com/sourcegraph/conc)
|
||||
[](https://sourcegraph.com/github.com/sourcegraph/conc)
|
||||
[](https://goreportcard.com/report/github.com/sourcegraph/conc)
|
||||
[](https://codecov.io/gh/sourcegraph/conc)
|
||||
[](https://discord.gg/bvXQXmtRjN)
|
||||
|
||||
`conc` is your toolbelt for structured concurrency in go, making common tasks
|
||||
easier and safer.
|
||||
|
||||
```sh
|
||||
go get github.com/sourcegraph/conc
|
||||
```
|
||||
|
||||
# At a glance
|
||||
|
||||
- Use [`conc.WaitGroup`](https://pkg.go.dev/github.com/sourcegraph/conc#WaitGroup) if you just want a safer version of `sync.WaitGroup`
|
||||
- Use [`pool.Pool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool) if you want a concurrency-limited task runner
|
||||
- Use [`pool.ResultPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ResultPool) if you want a concurrent task runner that collects task results
|
||||
- Use [`pool.(Result)?ErrorPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ErrorPool) if your tasks are fallible
|
||||
- Use [`pool.(Result)?ContextPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ContextPool) if your tasks should be canceled on failure
|
||||
- Use [`stream.Stream`](https://pkg.go.dev/github.com/sourcegraph/conc/stream#Stream) if you want to process an ordered stream of tasks in parallel with serial callbacks
|
||||
- Use [`iter.Map`](https://pkg.go.dev/github.com/sourcegraph/conc/iter#Map) if you want to concurrently map a slice
|
||||
- Use [`iter.ForEach`](https://pkg.go.dev/github.com/sourcegraph/conc/iter#ForEach) if you want to concurrently iterate over a slice
|
||||
- Use [`panics.Catcher`](https://pkg.go.dev/github.com/sourcegraph/conc/panics#Catcher) if you want to catch panics in your own goroutines
|
||||
|
||||
All pools are created with
|
||||
[`pool.New()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#New)
|
||||
or
|
||||
[`pool.NewWithResults[T]()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#NewWithResults),
|
||||
then configured with methods (a short usage sketch follows the list below):
|
||||
|
||||
- [`p.WithMaxGoroutines()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.MaxGoroutines) configures the maximum number of goroutines in the pool
|
||||
- [`p.WithErrors()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.WithErrors) configures the pool to run tasks that return errors
|
||||
- [`p.WithContext(ctx)`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.WithContext) configures the pool to run tasks that should be canceled on first error
|
||||
- [`p.WithFirstError()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ErrorPool.WithFirstError) configures error pools to only keep the first returned error rather than an aggregated error
|
||||
- [`p.WithCollectErrored()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ResultContextPool.WithCollectErrored) configures result pools to collect results even when the task errored
|
||||
|
||||
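A minimal sketch of the pool calls listed above, hand-written for this document rather than taken from the library's own examples (the URLs are placeholders): it runs a bounded number of goroutines, collects results, and keeps only the first error.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/sourcegraph/conc/pool"
)

func main() {
	urls := []string{"https://example.com", "https://example.org"}

	// Result pool limited to 2 goroutines; tasks may fail, and only the
	// first returned error is kept.
	p := pool.NewWithResults[int]().
		WithMaxGoroutines(2).
		WithErrors().
		WithFirstError()

	for _, url := range urls {
		url := url // capture the loop variable for Go versions before 1.22
		p.Go(func() (int, error) {
			resp, err := http.Get(url)
			if err != nil {
				return 0, err
			}
			defer resp.Body.Close()
			return resp.StatusCode, nil
		})
	}

	// Wait blocks until all tasks finish and returns the collected
	// status codes plus the first error, if any.
	codes, err := p.Wait()
	fmt.Println(codes, err)
}
```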
# Goals

The main goals of the package are:
1) Make it harder to leak goroutines
2) Handle panics gracefully
3) Make concurrent code easier to read

## Goal #1: Make it harder to leak goroutines

A common pain point when working with goroutines is cleaning them up. It's
really easy to fire off a `go` statement and fail to properly wait for it to
complete.

`conc` takes the opinionated stance that all concurrency should be scoped.
That is, goroutines should have an owner and that owner should always
ensure that its owned goroutines exit properly.

In `conc`, the owner of a goroutine is always a `conc.WaitGroup`. Goroutines
are spawned in a `WaitGroup` with `(*WaitGroup).Go()`, and
`(*WaitGroup).Wait()` should always be called before the `WaitGroup` goes out
of scope.

In some cases, you might want a spawned goroutine to outlast the scope of the
caller. In that case, you could pass a `WaitGroup` into the spawning function.

```go
func main() {
    var wg conc.WaitGroup
    defer wg.Wait()

    startTheThing(&wg)
}

func startTheThing(wg *conc.WaitGroup) {
    wg.Go(func() { ... })
}
```

For some more discussion on why scoped concurrency is nice, check out [this
blog
post](https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/).
## Goal #2: Handle panics gracefully

A frequent problem with goroutines in long-running applications is handling
panics. A goroutine spawned without a panic handler will crash the whole process
on panic. This is usually undesirable.

However, if you do add a panic handler to a goroutine, what do you do with the
panic once you catch it? Some options:
1) Ignore it
2) Log it
3) Turn it into an error and return that to the goroutine spawner
4) Propagate the panic to the goroutine spawner

Ignoring panics is a bad idea since panics usually mean there is actually
something wrong and someone should fix it.

Just logging panics isn't great either because then there is no indication to the spawner
that something bad happened, and it might just continue on as normal even though your
program is in a really bad state.

Both (3) and (4) are reasonable options, but both require the goroutine to have
an owner that can actually receive the message that something went wrong. This
is generally not true with a goroutine spawned with `go`, but in the `conc`
package, all goroutines have an owner that must collect the spawned goroutine.
In the conc package, any call to `Wait()` will panic if any of the spawned goroutines
panicked. Additionally, it decorates the panic value with a stacktrace from the child
goroutine so that you don't lose information about what caused the panic.

Doing this all correctly every time you spawn something with `go` is not
trivial and it requires a lot of boilerplate that makes the important parts of
the code more difficult to read, so `conc` does this for you.

<table>
<tr>
<th><code>stdlib</code></th>
<th><code>conc</code></th>
</tr>
<tr>
<td>

```go
type caughtPanicError struct {
    val   any
    stack []byte
}

func (e *caughtPanicError) Error() string {
    return fmt.Sprintf(
        "panic: %q\n%s",
        e.val,
        string(e.stack),
    )
}

func main() {
    done := make(chan error)
    go func() {
        defer func() {
            if v := recover(); v != nil {
                done <- &caughtPanicError{
                    val:   v,
                    stack: debug.Stack(),
                }
            } else {
                done <- nil
            }
        }()
        doSomethingThatMightPanic()
    }()
    err := <-done
    if err != nil {
        panic(err)
    }
}
```
</td>
<td>

```go
func main() {
    var wg conc.WaitGroup
    wg.Go(doSomethingThatMightPanic)
    // panics with a nice stacktrace
    wg.Wait()
}
```
</td>
</tr>
</table>
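
If you would rather have option (3) from the list above, turning the panic into an ordinary error instead of re-panicking, the `panics` subpackage vendored in this change exposes that directly. A minimal sketch, reusing the `doSomethingThatMightPanic` placeholder:

```go
// Sketch: run a task and convert a caught panic into an error.
func runSafely() error {
    if recovered := panics.Try(doSomethingThatMightPanic); recovered != nil {
        // AsError wraps the panic value and its stacktrace in an error.
        return recovered.AsError()
    }
    return nil
}
```
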
## Goal #3: Make concurrent code easier to read

Doing concurrency correctly is difficult. Doing it in a way that doesn't
obfuscate what the code is actually doing is more difficult. The `conc` package
attempts to make common operations easier by abstracting as much boilerplate
complexity as possible.

Want to run a set of concurrent tasks with a bounded set of goroutines? Use
`pool.New()`. Want to process an ordered stream of results concurrently, but
still maintain order? Try `stream.New()`. What about a concurrent map over
a slice? Take a peek at `iter.Map()`.

Browse some examples below for some comparisons with doing these by hand.

# Examples

Each of these examples forgoes propagating panics for simplicity. To see
what kind of complexity that would add, check out the "Goal #2" header above.

Spawn a set of goroutines and wait for them to finish:

<table>
<tr>
<th><code>stdlib</code></th>
<th><code>conc</code></th>
</tr>
<tr>
<td>

```go
func main() {
    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            // crashes on panic!
            doSomething()
        }()
    }
    wg.Wait()
}
```
</td>
<td>

```go
func main() {
    var wg conc.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Go(doSomething)
    }
    wg.Wait()
}
```
</td>
</tr>
</table>
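
If crashing on the propagated panic is not what you want, the vendored `conc.WaitGroup` also offers `WaitAndRecover()`, which hands the caught panic back instead of re-panicking. A minimal sketch, using the same `doSomething` placeholder:

```go
func main() {
    var wg conc.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Go(doSomething)
    }
    // WaitAndRecover returns the first caught panic (or nil) instead of re-panicking.
    if recovered := wg.WaitAndRecover(); recovered != nil {
        fmt.Println(recovered.String()) // panic value plus the child goroutine's stacktrace
    }
}
```
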
Process each element of a stream in a static pool of goroutines:

<table>
<tr>
<th><code>stdlib</code></th>
<th><code>conc</code></th>
</tr>
<tr>
<td>

```go
func process(stream chan int) {
    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for elem := range stream {
                handle(elem)
            }
        }()
    }
    wg.Wait()
}
```
</td>
<td>

```go
func process(stream chan int) {
    p := pool.New().WithMaxGoroutines(10)
    for elem := range stream {
        elem := elem
        p.Go(func() {
            handle(elem)
        })
    }
    p.Wait()
}
```
</td>
</tr>
</table>
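
The same shape works when the tasks can fail: convert the pool with `WithErrors()` and return the error from each task. A minimal sketch, assuming a hypothetical fallible `handleErr` in place of `handle`:

```go
func processFallible(stream chan int) error {
    p := pool.New().WithErrors().WithMaxGoroutines(10)
    for elem := range stream {
        elem := elem
        p.Go(func() error {
            return handleErr(elem) // hypothetical fallible variant of handle
        })
    }
    // Wait returns the task errors joined together (or only the first one,
    // if the pool was configured with WithFirstError).
    return p.Wait()
}
```
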
Process each element of a slice in a static pool of goroutines:

<table>
<tr>
<th><code>stdlib</code></th>
<th><code>conc</code></th>
</tr>
<tr>
<td>

```go
func process(values []int) {
    feeder := make(chan int, 8)

    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for elem := range feeder {
                handle(elem)
            }
        }()
    }

    for _, value := range values {
        feeder <- value
    }
    close(feeder)
    wg.Wait()
}
```
</td>
<td>

```go
func process(values []int) {
    iter.ForEach(values, handle)
}
```
</td>
</tr>
</table>
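
If the callback needs to mutate the slice in place, it receives a pointer to each element (this assumes `iter.ForEach` follows the same pointer-based signature that `iter.Map` uses in the next example). A rough sketch under that assumption:

```go
// Sketch: double every element in place, concurrently.
func doubleAll(values []int) {
    iter.ForEach(values, func(v *int) {
        *v *= 2
    })
}
```
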
Concurrently map a slice:

<table>
<tr>
<th><code>stdlib</code></th>
<th><code>conc</code></th>
</tr>
<tr>
<td>

```go
func concMap(
    input []int,
    f func(int) int,
) []int {
    res := make([]int, len(input))
    var idx atomic.Int64

    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()

            for {
                i := int(idx.Add(1) - 1)
                if i >= len(input) {
                    return
                }

                res[i] = f(input[i])
            }
        }()
    }
    wg.Wait()
    return res
}
```
</td>
<td>

```go
func concMap(
    input []int,
    f func(*int) int,
) []int {
    return iter.Map(input, f)
}
```
</td>
</tr>
</table>
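
When the work is task-shaped rather than slice-shaped, a result-collecting pool gives similar behavior; per the vendored `ResultPool` in this change, results come back in submission order. A minimal sketch with a hypothetical `compute` task:

```go
func computeAll(inputs []int) []int {
    p := pool.NewWithResults[int]().WithMaxGoroutines(10)
    for _, in := range inputs {
        in := in
        p.Go(func() int {
            return compute(in) // hypothetical task
        })
    }
    return p.Wait()
}
```
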
Process an ordered stream concurrently:

<table>
<tr>
<th><code>stdlib</code></th>
<th><code>conc</code></th>
</tr>
<tr>
<td>

```go
func mapStream(
    in chan int,
    out chan int,
    f func(int) int,
) {
    tasks := make(chan func())
    taskResults := make(chan chan int)

    // Worker goroutines
    var workerWg sync.WaitGroup
    for i := 0; i < 10; i++ {
        workerWg.Add(1)
        go func() {
            defer workerWg.Done()
            for task := range tasks {
                task()
            }
        }()
    }

    // Ordered reader goroutines
    var readerWg sync.WaitGroup
    readerWg.Add(1)
    go func() {
        defer readerWg.Done()
        for result := range taskResults {
            item := <-result
            out <- item
        }
    }()

    // Feed the workers with tasks
    for elem := range in {
        elem := elem // capture the loop variable for the task closure
        resultCh := make(chan int, 1)
        taskResults <- resultCh
        tasks <- func() {
            resultCh <- f(elem)
        }
    }

    // We've exhausted input.
    // Wait for everything to finish
    close(tasks)
    workerWg.Wait()
    close(taskResults)
    readerWg.Wait()
}
```
</td>
<td>

```go
func mapStream(
    in chan int,
    out chan int,
    f func(int) int,
) {
    s := stream.New().WithMaxGoroutines(10)
    for elem := range in {
        elem := elem
        s.Go(func() stream.Callback {
            res := f(elem)
            return func() { out <- res }
        })
    }
    s.Wait()
}
```
</td>
</tr>
</table>

# Status

This package is currently pre-1.0. There are likely to be minor breaking
changes before a 1.0 release as we stabilize the APIs and tweak defaults.
Please open an issue if you have questions, concerns, or requests that you'd
like addressed before the 1.0 release. Currently, a 1.0 is targeted for
March 2023.
102
vendor/github.com/sourcegraph/conc/panics/panics.go
generated
vendored
Normal file
@@ -0,0 +1,102 @@
|
||||
package panics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Catcher is used to catch panics. You can execute a function with Try,
|
||||
// which will catch any spawned panic. Try can be called any number of times,
|
||||
// from any number of goroutines. Once all calls to Try have completed, you can
|
||||
// get the value of the first panic (if any) with Recovered(), or you can just
|
||||
// propagate the panic (re-panic) with Repanic().
|
||||
type Catcher struct {
|
||||
recovered atomic.Pointer[Recovered]
|
||||
}
|
||||
|
||||
// Try executes f, catching any panic it might spawn. It is safe
|
||||
// to call from multiple goroutines simultaneously.
|
||||
func (p *Catcher) Try(f func()) {
|
||||
defer p.tryRecover()
|
||||
f()
|
||||
}
|
||||
|
||||
func (p *Catcher) tryRecover() {
|
||||
if val := recover(); val != nil {
|
||||
rp := NewRecovered(1, val)
|
||||
p.recovered.CompareAndSwap(nil, &rp)
|
||||
}
|
||||
}
|
||||
|
||||
// Repanic panics if any calls to Try caught a panic. It will panic with the
|
||||
// value of the first panic caught, wrapped in a panics.Recovered with caller
|
||||
// information.
|
||||
func (p *Catcher) Repanic() {
|
||||
if val := p.Recovered(); val != nil {
|
||||
panic(val)
|
||||
}
|
||||
}
|
||||
|
||||
// Recovered returns the value of the first panic caught by Try, or nil if
|
||||
// no calls to Try panicked.
|
||||
func (p *Catcher) Recovered() *Recovered {
|
||||
return p.recovered.Load()
|
||||
}
|
||||
|
||||
// NewRecovered creates a panics.Recovered from a panic value and a collected
|
||||
// stacktrace. The skip parameter allows the caller to skip stack frames when
|
||||
// collecting the stacktrace. Calling with a skip of 0 means include the call to
|
||||
// NewRecovered in the stacktrace.
|
||||
func NewRecovered(skip int, value any) Recovered {
|
||||
// 64 frames should be plenty
|
||||
var callers [64]uintptr
|
||||
n := runtime.Callers(skip+1, callers[:])
|
||||
return Recovered{
|
||||
Value: value,
|
||||
Callers: callers[:n],
|
||||
Stack: debug.Stack(),
|
||||
}
|
||||
}
|
||||
|
||||
// Recovered is a panic that was caught with recover().
|
||||
type Recovered struct {
|
||||
// The original value of the panic.
|
||||
Value any
|
||||
// The caller list as returned by runtime.Callers when the panic was
|
||||
// recovered. Can be used to produce a more detailed stack information with
|
||||
// runtime.CallersFrames.
|
||||
Callers []uintptr
|
||||
// The formatted stacktrace from the goroutine where the panic was recovered.
|
||||
// Easier to use than Callers.
|
||||
Stack []byte
|
||||
}
|
||||
|
||||
// String renders a human-readable formatting of the panic.
|
||||
func (p *Recovered) String() string {
|
||||
return fmt.Sprintf("panic: %v\nstacktrace:\n%s\n", p.Value, p.Stack)
|
||||
}
|
||||
|
||||
// AsError casts the panic into an error implementation. The implementation
|
||||
// is unwrappable with the cause of the panic, if the panic was provided one.
|
||||
func (p *Recovered) AsError() error {
|
||||
if p == nil {
|
||||
return nil
|
||||
}
|
||||
return &ErrRecovered{*p}
|
||||
}
|
||||
|
||||
// ErrRecovered wraps a panics.Recovered in an error implementation.
|
||||
type ErrRecovered struct{ Recovered }
|
||||
|
||||
var _ error = (*ErrRecovered)(nil)
|
||||
|
||||
func (p *ErrRecovered) Error() string { return p.String() }
|
||||
|
||||
func (p *ErrRecovered) Unwrap() error {
|
||||
if err, ok := p.Value.(error); ok {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
11
vendor/github.com/sourcegraph/conc/panics/try.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
package panics
|
||||
|
||||
// Try executes f, catching and returning any panic it might spawn.
|
||||
//
|
||||
// The recovered panic can be propagated with panic(), or handled as a normal error with
|
||||
// (*panics.Recovered).AsError().
|
||||
func Try(f func()) *Recovered {
|
||||
var c Catcher
|
||||
c.Try(f)
|
||||
return c.Recovered()
|
||||
}
|
||||
104
vendor/github.com/sourcegraph/conc/pool/context_pool.go
generated
vendored
Normal file
@@ -0,0 +1,104 @@
|
||||
package pool
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// ContextPool is a pool that runs tasks that take a context.
|
||||
// A new ContextPool should be created with `New().WithContext(ctx)`.
|
||||
//
|
||||
// The configuration methods (With*) will panic if they are used after calling
|
||||
// Go() for the first time.
|
||||
type ContextPool struct {
|
||||
errorPool ErrorPool
|
||||
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
cancelOnError bool
|
||||
}
|
||||
|
||||
// Go submits a task. If it returns an error, the error will be
|
||||
// collected and returned by Wait(). If all goroutines in the pool
|
||||
// are busy, a call to Go() will block until the task can be started.
|
||||
func (p *ContextPool) Go(f func(ctx context.Context) error) {
|
||||
p.errorPool.Go(func() error {
|
||||
if p.cancelOnError {
|
||||
// If we are cancelling on error, then we also want to cancel if a
|
||||
// panic is raised. To do this, we need to recover, cancel, and then
|
||||
// re-throw the caught panic.
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
p.cancel()
|
||||
panic(r)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
err := f(p.ctx)
|
||||
if err != nil && p.cancelOnError {
|
||||
// Leaky abstraction warning: We add the error directly because
|
||||
// otherwise, canceling could cause another goroutine to exit and
|
||||
// return an error before this error was added, which breaks the
|
||||
// expectations of WithFirstError().
|
||||
p.errorPool.addErr(err)
|
||||
p.cancel()
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
// Wait cleans up all spawned goroutines, propagates any panics, and
|
||||
// returns an error if any of the tasks errored.
|
||||
func (p *ContextPool) Wait() error {
|
||||
// Make sure we call cancel after pool is done to avoid memory leakage.
|
||||
defer p.cancel()
|
||||
return p.errorPool.Wait()
|
||||
}
|
||||
|
||||
// WithFirstError configures the pool to only return the first error
|
||||
// returned by a task. By default, Wait() will return a combined error.
|
||||
// This is particularly useful for (*ContextPool).WithCancelOnError(),
|
||||
// where all errors after the first are likely to be context.Canceled.
|
||||
func (p *ContextPool) WithFirstError() *ContextPool {
|
||||
p.panicIfInitialized()
|
||||
p.errorPool.WithFirstError()
|
||||
return p
|
||||
}
|
||||
|
||||
// WithCancelOnError configures the pool to cancel its context as soon as
|
||||
// any task returns an error or panics. By default, the pool's context is not
|
||||
// canceled until the parent context is canceled.
|
||||
//
|
||||
// In this case, all errors returned from the pool after the first will
|
||||
// likely be context.Canceled - you may want to also use
|
||||
// (*ContextPool).WithFirstError() to configure the pool to only return
|
||||
// the first error.
|
||||
func (p *ContextPool) WithCancelOnError() *ContextPool {
|
||||
p.panicIfInitialized()
|
||||
p.cancelOnError = true
|
||||
return p
|
||||
}
|
||||
|
||||
// WithFailFast is an alias for the combination of WithFirstError and
|
||||
// WithCancelOnError. By default, the errors from all tasks are returned and
|
||||
// the pool's context is not canceled until the parent context is canceled.
|
||||
func (p *ContextPool) WithFailFast() *ContextPool {
|
||||
p.panicIfInitialized()
|
||||
p.WithFirstError()
|
||||
p.WithCancelOnError()
|
||||
return p
|
||||
}
|
||||
|
||||
// WithMaxGoroutines limits the number of goroutines in a pool.
|
||||
// Defaults to unlimited. Panics if n < 1.
|
||||
func (p *ContextPool) WithMaxGoroutines(n int) *ContextPool {
|
||||
p.panicIfInitialized()
|
||||
p.errorPool.WithMaxGoroutines(n)
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *ContextPool) panicIfInitialized() {
|
||||
p.errorPool.panicIfInitialized()
|
||||
}
|
||||
100
vendor/github.com/sourcegraph/conc/pool/error_pool.go
generated
vendored
Normal file
@@ -0,0 +1,100 @@
|
||||
package pool
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ErrorPool is a pool that runs tasks that may return an error.
|
||||
// Errors are collected and returned by Wait().
|
||||
//
|
||||
// The configuration methods (With*) will panic if they are used after calling
|
||||
// Go() for the first time.
|
||||
//
|
||||
// A new ErrorPool should be created using `New().WithErrors()`.
|
||||
type ErrorPool struct {
|
||||
pool Pool
|
||||
|
||||
onlyFirstError bool
|
||||
|
||||
mu sync.Mutex
|
||||
errs []error
|
||||
}
|
||||
|
||||
// Go submits a task to the pool. If all goroutines in the pool
|
||||
// are busy, a call to Go() will block until the task can be started.
|
||||
func (p *ErrorPool) Go(f func() error) {
|
||||
p.pool.Go(func() {
|
||||
p.addErr(f())
|
||||
})
|
||||
}
|
||||
|
||||
// Wait cleans up any spawned goroutines, propagating any panics and
|
||||
// returning any errors from tasks.
|
||||
func (p *ErrorPool) Wait() error {
|
||||
p.pool.Wait()
|
||||
|
||||
errs := p.errs
|
||||
p.errs = nil // reset errs
|
||||
|
||||
if len(errs) == 0 {
|
||||
return nil
|
||||
} else if p.onlyFirstError {
|
||||
return errs[0]
|
||||
} else {
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
}
|
||||
|
||||
// WithContext converts the pool to a ContextPool for tasks that should
|
||||
// run under the same context, such that they each respect shared cancellation.
|
||||
// For example, WithCancelOnError can be configured on the returned pool to
|
||||
// signal that all goroutines should be cancelled upon the first error.
|
||||
func (p *ErrorPool) WithContext(ctx context.Context) *ContextPool {
|
||||
p.panicIfInitialized()
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return &ContextPool{
|
||||
errorPool: p.deref(),
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
}
|
||||
}
|
||||
|
||||
// WithFirstError configures the pool to only return the first error
|
||||
// returned by a task. By default, Wait() will return a combined error.
|
||||
func (p *ErrorPool) WithFirstError() *ErrorPool {
|
||||
p.panicIfInitialized()
|
||||
p.onlyFirstError = true
|
||||
return p
|
||||
}
|
||||
|
||||
// WithMaxGoroutines limits the number of goroutines in a pool.
|
||||
// Defaults to unlimited. Panics if n < 1.
|
||||
func (p *ErrorPool) WithMaxGoroutines(n int) *ErrorPool {
|
||||
p.panicIfInitialized()
|
||||
p.pool.WithMaxGoroutines(n)
|
||||
return p
|
||||
}
|
||||
|
||||
// deref is a helper that creates a shallow copy of the pool with the same
|
||||
// settings. We don't want to just dereference the pointer because that makes
|
||||
// the copylock lint angry.
|
||||
func (p *ErrorPool) deref() ErrorPool {
|
||||
return ErrorPool{
|
||||
pool: p.pool.deref(),
|
||||
onlyFirstError: p.onlyFirstError,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *ErrorPool) panicIfInitialized() {
|
||||
p.pool.panicIfInitialized()
|
||||
}
|
||||
|
||||
func (p *ErrorPool) addErr(err error) {
|
||||
if err != nil {
|
||||
p.mu.Lock()
|
||||
p.errs = append(p.errs, err)
|
||||
p.mu.Unlock()
|
||||
}
|
||||
}
|
||||
174
vendor/github.com/sourcegraph/conc/pool/pool.go
generated
vendored
Normal file
@@ -0,0 +1,174 @@
|
||||
package pool
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/sourcegraph/conc"
|
||||
)
|
||||
|
||||
// New creates a new Pool.
|
||||
func New() *Pool {
|
||||
return &Pool{}
|
||||
}
|
||||
|
||||
// Pool is a pool of goroutines used to execute tasks concurrently.
|
||||
//
|
||||
// Tasks are submitted with Go(). Once all your tasks have been submitted, you
|
||||
// must call Wait() to clean up any spawned goroutines and propagate any
|
||||
// panics.
|
||||
//
|
||||
// Goroutines are started lazily, so creating a new pool is cheap. There will
|
||||
// never be more goroutines spawned than there are tasks submitted.
|
||||
//
|
||||
// The configuration methods (With*) will panic if they are used after calling
|
||||
// Go() for the first time.
|
||||
//
|
||||
// Pool is efficient, but not zero cost. It should not be used for very short
|
||||
// tasks. Startup and teardown come with an overhead of around 1µs, and each
|
||||
// task has an overhead of around 300ns.
|
||||
type Pool struct {
|
||||
handle conc.WaitGroup
|
||||
limiter limiter
|
||||
tasks chan func()
|
||||
initOnce sync.Once
|
||||
}
|
||||
|
||||
// Go submits a task to be run in the pool. If all goroutines in the pool
|
||||
// are busy, a call to Go() will block until the task can be started.
|
||||
func (p *Pool) Go(f func()) {
|
||||
p.init()
|
||||
|
||||
if p.limiter == nil {
|
||||
// No limit on the number of goroutines.
|
||||
select {
|
||||
case p.tasks <- f:
|
||||
// A goroutine was available to handle the task.
|
||||
default:
|
||||
// No goroutine was available to handle the task.
|
||||
// Spawn a new one and send it the task.
|
||||
p.handle.Go(func() {
|
||||
p.worker(f)
|
||||
})
|
||||
}
|
||||
} else {
|
||||
select {
|
||||
case p.limiter <- struct{}{}:
|
||||
// If we are below our limit, spawn a new worker rather
|
||||
// than waiting for one to become available.
|
||||
p.handle.Go(func() {
|
||||
p.worker(f)
|
||||
})
|
||||
case p.tasks <- f:
|
||||
// A worker is available and has accepted the task.
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Wait cleans up spawned goroutines, propagating any panics that were
|
||||
// raised by a task.
|
||||
func (p *Pool) Wait() {
|
||||
p.init()
|
||||
|
||||
close(p.tasks)
|
||||
|
||||
// After Wait() returns, reset the struct so tasks will be reinitialized on
|
||||
// next use. This better matches the behavior of sync.WaitGroup
|
||||
defer func() { p.initOnce = sync.Once{} }()
|
||||
|
||||
p.handle.Wait()
|
||||
}
|
||||
|
||||
// MaxGoroutines returns the maximum size of the pool.
|
||||
func (p *Pool) MaxGoroutines() int {
|
||||
return p.limiter.limit()
|
||||
}
|
||||
|
||||
// WithMaxGoroutines limits the number of goroutines in a pool.
|
||||
// Defaults to unlimited. Panics if n < 1.
|
||||
func (p *Pool) WithMaxGoroutines(n int) *Pool {
|
||||
p.panicIfInitialized()
|
||||
if n < 1 {
|
||||
panic("max goroutines in a pool must be greater than zero")
|
||||
}
|
||||
p.limiter = make(limiter, n)
|
||||
return p
|
||||
}
|
||||
|
||||
// init ensures that the pool is initialized before use. This makes the
|
||||
// zero value of the pool usable.
|
||||
func (p *Pool) init() {
|
||||
p.initOnce.Do(func() {
|
||||
p.tasks = make(chan func())
|
||||
})
|
||||
}
|
||||
|
||||
// panicIfInitialized will trigger a panic if a configuration method is called
|
||||
// after the pool has started any goroutines for the first time. In the case that
|
||||
// new settings are needed, a new pool should be created.
|
||||
func (p *Pool) panicIfInitialized() {
|
||||
if p.tasks != nil {
|
||||
panic("pool can not be reconfigured after calling Go() for the first time")
|
||||
}
|
||||
}
|
||||
|
||||
// WithErrors converts the pool to an ErrorPool so the submitted tasks can
|
||||
// return errors.
|
||||
func (p *Pool) WithErrors() *ErrorPool {
|
||||
p.panicIfInitialized()
|
||||
return &ErrorPool{
|
||||
pool: p.deref(),
|
||||
}
|
||||
}
|
||||
|
||||
// deref is a helper that creates a shallow copy of the pool with the same
|
||||
// settings. We don't want to just dereference the pointer because that makes
|
||||
// the copylock lint angry.
|
||||
func (p *Pool) deref() Pool {
|
||||
p.panicIfInitialized()
|
||||
return Pool{
|
||||
limiter: p.limiter,
|
||||
}
|
||||
}
|
||||
|
||||
// WithContext converts the pool to a ContextPool for tasks that should
|
||||
// run under the same context, such that they each respect shared cancellation.
|
||||
// For example, WithCancelOnError can be configured on the returned pool to
|
||||
// signal that all goroutines should be cancelled upon the first error.
|
||||
func (p *Pool) WithContext(ctx context.Context) *ContextPool {
|
||||
p.panicIfInitialized()
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return &ContextPool{
|
||||
errorPool: p.WithErrors().deref(),
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Pool) worker(initialFunc func()) {
|
||||
// The only time this matters is if the task panics.
|
||||
// This makes it possible to spin up new workers in that case.
|
||||
defer p.limiter.release()
|
||||
|
||||
if initialFunc != nil {
|
||||
initialFunc()
|
||||
}
|
||||
|
||||
for f := range p.tasks {
|
||||
f()
|
||||
}
|
||||
}
|
||||
|
||||
type limiter chan struct{}
|
||||
|
||||
func (l limiter) limit() int {
|
||||
return cap(l)
|
||||
}
|
||||
|
||||
func (l limiter) release() {
|
||||
if l != nil {
|
||||
<-l
|
||||
}
|
||||
}
|
||||
85
vendor/github.com/sourcegraph/conc/pool/result_context_pool.go
generated
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
package pool
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// ResultContextPool is a pool that runs tasks that take a context and return a
|
||||
// result. The context passed to the task will be canceled if any of the tasks
|
||||
// return an error, which makes its functionality different than just capturing
|
||||
// a context with the task closure.
|
||||
//
|
||||
// The configuration methods (With*) will panic if they are used after calling
|
||||
// Go() for the first time.
|
||||
type ResultContextPool[T any] struct {
|
||||
contextPool ContextPool
|
||||
agg resultAggregator[T]
|
||||
collectErrored bool
|
||||
}
|
||||
|
||||
// Go submits a task to the pool. If all goroutines in the pool
|
||||
// are busy, a call to Go() will block until the task can be started.
|
||||
func (p *ResultContextPool[T]) Go(f func(context.Context) (T, error)) {
|
||||
idx := p.agg.nextIndex()
|
||||
p.contextPool.Go(func(ctx context.Context) error {
|
||||
res, err := f(ctx)
|
||||
p.agg.save(idx, res, err != nil)
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
// Wait cleans up all spawned goroutines, propagates any panics, and
|
||||
// returns an error if any of the tasks errored.
|
||||
func (p *ResultContextPool[T]) Wait() ([]T, error) {
|
||||
err := p.contextPool.Wait()
|
||||
results := p.agg.collect(p.collectErrored)
|
||||
p.agg = resultAggregator[T]{}
|
||||
return results, err
|
||||
}
|
||||
|
||||
// WithCollectErrored configures the pool to still collect the result of a task
|
||||
// even if the task returned an error. By default, the result of tasks that errored
|
||||
// are ignored and only the error is collected.
|
||||
func (p *ResultContextPool[T]) WithCollectErrored() *ResultContextPool[T] {
|
||||
p.panicIfInitialized()
|
||||
p.collectErrored = true
|
||||
return p
|
||||
}
|
||||
|
||||
// WithFirstError configures the pool to only return the first error
|
||||
// returned by a task. By default, Wait() will return a combined error.
|
||||
func (p *ResultContextPool[T]) WithFirstError() *ResultContextPool[T] {
|
||||
p.panicIfInitialized()
|
||||
p.contextPool.WithFirstError()
|
||||
return p
|
||||
}
|
||||
|
||||
// WithCancelOnError configures the pool to cancel its context as soon as
|
||||
// any task returns an error. By default, the pool's context is not
|
||||
// canceled until the parent context is canceled.
|
||||
func (p *ResultContextPool[T]) WithCancelOnError() *ResultContextPool[T] {
|
||||
p.panicIfInitialized()
|
||||
p.contextPool.WithCancelOnError()
|
||||
return p
|
||||
}
|
||||
|
||||
// WithFailFast is an alias for the combination of WithFirstError and
|
||||
// WithCancelOnError. By default, the errors from all tasks are returned and
|
||||
// the pool's context is not canceled until the parent context is canceled.
|
||||
func (p *ResultContextPool[T]) WithFailFast() *ResultContextPool[T] {
|
||||
p.panicIfInitialized()
|
||||
p.contextPool.WithFailFast()
|
||||
return p
|
||||
}
|
||||
|
||||
// WithMaxGoroutines limits the number of goroutines in a pool.
|
||||
// Defaults to unlimited. Panics if n < 1.
|
||||
func (p *ResultContextPool[T]) WithMaxGoroutines(n int) *ResultContextPool[T] {
|
||||
p.panicIfInitialized()
|
||||
p.contextPool.WithMaxGoroutines(n)
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *ResultContextPool[T]) panicIfInitialized() {
|
||||
p.contextPool.panicIfInitialized()
|
||||
}
|
||||
80
vendor/github.com/sourcegraph/conc/pool/result_error_pool.go
generated
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
package pool
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// ResultErrorPool is a pool that executes tasks that return a generic result
|
||||
// type and an error. Tasks are executed in the pool with Go(), then the
|
||||
// results of the tasks are returned by Wait().
|
||||
//
|
||||
// The order of the results is guaranteed to be the same as the order the
|
||||
// tasks were submitted.
|
||||
//
|
||||
// The configuration methods (With*) will panic if they are used after calling
|
||||
// Go() for the first time.
|
||||
type ResultErrorPool[T any] struct {
|
||||
errorPool ErrorPool
|
||||
agg resultAggregator[T]
|
||||
collectErrored bool
|
||||
}
|
||||
|
||||
// Go submits a task to the pool. If all goroutines in the pool
|
||||
// are busy, a call to Go() will block until the task can be started.
|
||||
func (p *ResultErrorPool[T]) Go(f func() (T, error)) {
|
||||
idx := p.agg.nextIndex()
|
||||
p.errorPool.Go(func() error {
|
||||
res, err := f()
|
||||
p.agg.save(idx, res, err != nil)
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
// Wait cleans up any spawned goroutines, propagating any panics and
|
||||
// returning the results and any errors from tasks.
|
||||
func (p *ResultErrorPool[T]) Wait() ([]T, error) {
|
||||
err := p.errorPool.Wait()
|
||||
results := p.agg.collect(p.collectErrored)
|
||||
p.agg = resultAggregator[T]{} // reset for reuse
|
||||
return results, err
|
||||
}
|
||||
|
||||
// WithCollectErrored configures the pool to still collect the result of a task
|
||||
// even if the task returned an error. By default, the result of tasks that errored
|
||||
// are ignored and only the error is collected.
|
||||
func (p *ResultErrorPool[T]) WithCollectErrored() *ResultErrorPool[T] {
|
||||
p.panicIfInitialized()
|
||||
p.collectErrored = true
|
||||
return p
|
||||
}
|
||||
|
||||
// WithContext converts the pool to a ResultContextPool for tasks that should
|
||||
// run under the same context, such that they each respect shared cancellation.
|
||||
// For example, WithCancelOnError can be configured on the returned pool to
|
||||
// signal that all goroutines should be cancelled upon the first error.
|
||||
func (p *ResultErrorPool[T]) WithContext(ctx context.Context) *ResultContextPool[T] {
|
||||
p.panicIfInitialized()
|
||||
return &ResultContextPool[T]{
|
||||
contextPool: *p.errorPool.WithContext(ctx),
|
||||
}
|
||||
}
|
||||
|
||||
// WithFirstError configures the pool to only return the first error
|
||||
// returned by a task. By default, Wait() will return a combined error.
|
||||
func (p *ResultErrorPool[T]) WithFirstError() *ResultErrorPool[T] {
|
||||
p.panicIfInitialized()
|
||||
p.errorPool.WithFirstError()
|
||||
return p
|
||||
}
|
||||
|
||||
// WithMaxGoroutines limits the number of goroutines in a pool.
|
||||
// Defaults to unlimited. Panics if n < 1.
|
||||
func (p *ResultErrorPool[T]) WithMaxGoroutines(n int) *ResultErrorPool[T] {
|
||||
p.panicIfInitialized()
|
||||
p.errorPool.WithMaxGoroutines(n)
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *ResultErrorPool[T]) panicIfInitialized() {
|
||||
p.errorPool.panicIfInitialized()
|
||||
}
|
||||
142
vendor/github.com/sourcegraph/conc/pool/result_pool.go
generated
vendored
Normal file
@@ -0,0 +1,142 @@
|
||||
package pool
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// NewWithResults creates a new ResultPool for tasks with a result of type T.
|
||||
//
|
||||
// The configuration methods (With*) will panic if they are used after calling
|
||||
// Go() for the first time.
|
||||
func NewWithResults[T any]() *ResultPool[T] {
|
||||
return &ResultPool[T]{
|
||||
pool: *New(),
|
||||
}
|
||||
}
|
||||
|
||||
// ResultPool is a pool that executes tasks that return a generic result type.
|
||||
// Tasks are executed in the pool with Go(), then the results of the tasks are
|
||||
// returned by Wait().
|
||||
//
|
||||
// The order of the results is guaranteed to be the same as the order the
|
||||
// tasks were submitted.
|
||||
type ResultPool[T any] struct {
|
||||
pool Pool
|
||||
agg resultAggregator[T]
|
||||
}
|
||||
|
||||
// Go submits a task to the pool. If all goroutines in the pool
|
||||
// are busy, a call to Go() will block until the task can be started.
|
||||
func (p *ResultPool[T]) Go(f func() T) {
|
||||
idx := p.agg.nextIndex()
|
||||
p.pool.Go(func() {
|
||||
p.agg.save(idx, f(), false)
|
||||
})
|
||||
}
|
||||
|
||||
// Wait cleans up all spawned goroutines, propagating any panics, and returning
|
||||
// a slice of results from tasks that did not panic.
|
||||
func (p *ResultPool[T]) Wait() []T {
|
||||
p.pool.Wait()
|
||||
results := p.agg.collect(true)
|
||||
p.agg = resultAggregator[T]{} // reset for reuse
|
||||
return results
|
||||
}
|
||||
|
||||
// MaxGoroutines returns the maximum size of the pool.
|
||||
func (p *ResultPool[T]) MaxGoroutines() int {
|
||||
return p.pool.MaxGoroutines()
|
||||
}
|
||||
|
||||
// WithErrors converts the pool to a ResultErrorPool so the submitted tasks
|
||||
// can return errors.
|
||||
func (p *ResultPool[T]) WithErrors() *ResultErrorPool[T] {
|
||||
p.panicIfInitialized()
|
||||
return &ResultErrorPool[T]{
|
||||
errorPool: *p.pool.WithErrors(),
|
||||
}
|
||||
}
|
||||
|
||||
// WithContext converts the pool to a ResultContextPool for tasks that should
|
||||
// run under the same context, such that they each respect shared cancellation.
|
||||
// For example, WithCancelOnError can be configured on the returned pool to
|
||||
// signal that all goroutines should be cancelled upon the first error.
|
||||
func (p *ResultPool[T]) WithContext(ctx context.Context) *ResultContextPool[T] {
|
||||
p.panicIfInitialized()
|
||||
return &ResultContextPool[T]{
|
||||
contextPool: *p.pool.WithContext(ctx),
|
||||
}
|
||||
}
|
||||
|
||||
// WithMaxGoroutines limits the number of goroutines in a pool.
|
||||
// Defaults to unlimited. Panics if n < 1.
|
||||
func (p *ResultPool[T]) WithMaxGoroutines(n int) *ResultPool[T] {
|
||||
p.panicIfInitialized()
|
||||
p.pool.WithMaxGoroutines(n)
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *ResultPool[T]) panicIfInitialized() {
|
||||
p.pool.panicIfInitialized()
|
||||
}
|
||||
|
||||
// resultAggregator is a utility type that lets us safely append from multiple
|
||||
// goroutines. The zero value is valid and ready to use.
|
||||
type resultAggregator[T any] struct {
|
||||
mu sync.Mutex
|
||||
len int
|
||||
results []T
|
||||
errored []int
|
||||
}
|
||||
|
||||
// nextIndex reserves a slot for a result. The returned value should be passed
|
||||
// to save() when adding a result to the aggregator.
|
||||
func (r *resultAggregator[T]) nextIndex() int {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
nextIdx := r.len
|
||||
r.len += 1
|
||||
return nextIdx
|
||||
}
|
||||
|
||||
func (r *resultAggregator[T]) save(i int, res T, errored bool) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if i >= len(r.results) {
|
||||
old := r.results
|
||||
r.results = make([]T, r.len)
|
||||
copy(r.results, old)
|
||||
}
|
||||
|
||||
r.results[i] = res
|
||||
|
||||
if errored {
|
||||
r.errored = append(r.errored, i)
|
||||
}
|
||||
}
|
||||
|
||||
// collect returns the set of aggregated results.
|
||||
func (r *resultAggregator[T]) collect(collectErrored bool) []T {
|
||||
if !r.mu.TryLock() {
|
||||
panic("collect should not be called until all goroutines have exited")
|
||||
}
|
||||
|
||||
if collectErrored || len(r.errored) == 0 {
|
||||
return r.results
|
||||
}
|
||||
|
||||
filtered := r.results[:0]
|
||||
sort.Ints(r.errored)
|
||||
for i, e := range r.errored {
|
||||
if i == 0 {
|
||||
filtered = append(filtered, r.results[:e]...)
|
||||
} else {
|
||||
filtered = append(filtered, r.results[r.errored[i-1]+1:e]...)
|
||||
}
|
||||
}
|
||||
return filtered
|
||||
}
|
||||
52
vendor/github.com/sourcegraph/conc/waitgroup.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
package conc
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/sourcegraph/conc/panics"
|
||||
)
|
||||
|
||||
// NewWaitGroup creates a new WaitGroup.
|
||||
func NewWaitGroup() *WaitGroup {
|
||||
return &WaitGroup{}
|
||||
}
|
||||
|
||||
// WaitGroup is the primary building block for scoped concurrency.
|
||||
// Goroutines can be spawned in the WaitGroup with the Go method,
|
||||
// and calling Wait() will ensure that each of those goroutines exits
|
||||
// before continuing. Any panics in a child goroutine will be caught
|
||||
// and propagated to the caller of Wait().
|
||||
//
|
||||
// The zero value of WaitGroup is usable, just like sync.WaitGroup.
|
||||
// Also like sync.WaitGroup, it must not be copied after first use.
|
||||
type WaitGroup struct {
|
||||
wg sync.WaitGroup
|
||||
pc panics.Catcher
|
||||
}
|
||||
|
||||
// Go spawns a new goroutine in the WaitGroup.
|
||||
func (h *WaitGroup) Go(f func()) {
|
||||
h.wg.Add(1)
|
||||
go func() {
|
||||
defer h.wg.Done()
|
||||
h.pc.Try(f)
|
||||
}()
|
||||
}
|
||||
|
||||
// Wait will block until all goroutines spawned with Go exit and will
|
||||
// propagate any panics spawned in a child goroutine.
|
||||
func (h *WaitGroup) Wait() {
|
||||
h.wg.Wait()
|
||||
|
||||
// Propagate a panic if we caught one from a child goroutine.
|
||||
h.pc.Repanic()
|
||||
}
|
||||
|
||||
// WaitAndRecover will block until all goroutines spawned with Go exit and
|
||||
// will return a *panics.Recovered if one of the child goroutines panics.
|
||||
func (h *WaitGroup) WaitAndRecover() *panics.Recovered {
|
||||
h.wg.Wait()
|
||||
|
||||
// Return a recovered panic if we caught one from a child goroutine.
|
||||
return h.pc.Recovered()
|
||||
}
|
||||
15
vendor/github.com/spf13/cast/.editorconfig
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
root = true
|
||||
|
||||
[*]
|
||||
charset = utf-8
|
||||
end_of_line = lf
|
||||
indent_size = 4
|
||||
indent_style = space
|
||||
insert_final_newline = true
|
||||
trim_trailing_whitespace = true
|
||||
|
||||
[*.go]
|
||||
indent_style = tab
|
||||
|
||||
[{*.yml,*.yaml}]
|
||||
indent_size = 2
|
||||
25
vendor/github.com/spf13/cast/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
|
||||
*.bench
|
||||
39
vendor/github.com/spf13/cast/.golangci.yaml
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
version: "2"
|
||||
|
||||
run:
|
||||
timeout: 10m
|
||||
|
||||
linters:
|
||||
enable:
|
||||
- errcheck
|
||||
- govet
|
||||
- ineffassign
|
||||
- misspell
|
||||
- nolintlint
|
||||
# - revive
|
||||
- unused
|
||||
|
||||
disable:
|
||||
- staticcheck
|
||||
|
||||
settings:
|
||||
misspell:
|
||||
locale: US
|
||||
nolintlint:
|
||||
allow-unused: false # report any unused nolint directives
|
||||
require-specific: false # don't require nolint directives to be specific about which linter is being skipped
|
||||
|
||||
formatters:
|
||||
enable:
|
||||
- gci
|
||||
- gofmt
|
||||
# - gofumpt
|
||||
- goimports
|
||||
# - golines
|
||||
|
||||
settings:
|
||||
gci:
|
||||
sections:
|
||||
- standard
|
||||
- default
|
||||
- localmodule
|
||||
21
vendor/github.com/spf13/cast/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Steve Francia
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
40
vendor/github.com/spf13/cast/Makefile
generated
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
GOVERSION := $(shell go version | cut -d ' ' -f 3 | cut -d '.' -f 2)
|
||||
|
||||
.PHONY: check fmt lint test test-race vet test-cover-html help
|
||||
.DEFAULT_GOAL := help
|
||||
|
||||
check: test-race fmt vet lint ## Run tests and linters
|
||||
|
||||
test: ## Run tests
|
||||
go test ./...
|
||||
|
||||
test-race: ## Run tests with race detector
|
||||
go test -race ./...
|
||||
|
||||
fmt: ## Run gofmt linter
|
||||
ifeq "$(GOVERSION)" "12"
|
||||
@for d in `go list` ; do \
|
||||
if [ "`gofmt -l -s $$GOPATH/src/$$d | tee /dev/stderr`" ]; then \
|
||||
echo "^ improperly formatted go files" && echo && exit 1; \
|
||||
fi \
|
||||
done
|
||||
endif
|
||||
|
||||
lint: ## Run golint linter
|
||||
@for d in `go list` ; do \
|
||||
if [ "`golint $$d | tee /dev/stderr`" ]; then \
|
||||
echo "^ golint errors!" && echo && exit 1; \
|
||||
fi \
|
||||
done
|
||||
|
||||
vet: ## Run go vet linter
|
||||
@if [ "`go vet | tee /dev/stderr`" ]; then \
|
||||
echo "^ go vet errors!" && echo && exit 1; \
|
||||
fi
|
||||
|
||||
test-cover-html: ## Generate test coverage report
|
||||
go test -coverprofile=coverage.out -covermode=count
|
||||
go tool cover -func=coverage.out
|
||||
|
||||
help:
|
||||
@grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
|
||||
79
vendor/github.com/spf13/cast/README.md
generated
vendored
Normal file
@@ -0,0 +1,79 @@
# cast

[](https://github.com/spf13/cast/actions/workflows/ci.yaml)
[](https://pkg.go.dev/mod/github.com/spf13/cast)
[](https://deps.dev/go/github.com%252Fspf13%252Fcast)

Easy and safe casting from one type to another in Go

Don’t Panic! ... Cast

## What is Cast?

Cast is a library to convert between different go types in a consistent and easy way.

Cast provides simple functions to easily convert a number to a string, an
interface into a bool, etc. Cast does this intelligently when an obvious
conversion is possible. It doesn’t make any attempts to guess what you meant;
for example, you can only convert a string to an int when it is a string
representation of an int such as “8”. Cast was developed for use in
[Hugo](https://gohugo.io), a website engine which uses YAML, TOML or JSON
for meta data.

## Why use Cast?

When working with dynamic data in Go you often need to cast or convert the data
from one type into another. Cast goes beyond just using type assertion (though
it uses that when possible) to provide a very straightforward and convenient
library.

If you are working with interfaces to handle things like dynamic content
you’ll need an easy way to convert an interface into a given type. This
is the library for you.

If you are taking in data from YAML, TOML or JSON or other formats which lack
full types, then Cast is the library for you.

## Usage

Cast provides a handful of To_____ methods. These methods will always return
the desired type. **If input is provided that will not convert to that type, the
0 or nil value for that type will be returned**.

Cast also provides identical methods To_____E. These return the same result as
the To_____ methods, plus an additional error which tells you if it successfully
converted. Using these methods you can tell the difference between when the
input matched the zero value or when the conversion failed and the zero value
was returned.

The following examples are merely a sample of what is available. Please review
the code for a complete set.

### Example ‘ToString’:

    cast.ToString("mayonegg")         // "mayonegg"
    cast.ToString(8)                  // "8"
    cast.ToString(8.31)               // "8.31"
    cast.ToString([]byte("one time")) // "one time"
    cast.ToString(nil)                // ""

    var foo interface{} = "one more time"
    cast.ToString(foo)                // "one more time"

### Example ‘ToInt’:

    cast.ToInt(8)      // 8
    cast.ToInt(8.31)   // 8
    cast.ToInt("8")    // 8
    cast.ToInt(true)   // 1
    cast.ToInt(false)  // 0

    var eight interface{} = 8
    cast.ToInt(eight)  // 8
    cast.ToInt(nil)    // 0
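
When the difference between "the input really was zero" and "the conversion failed" matters, reach for the `E` variants described above, such as `cast.ToIntE`. A small sketch (the inputs here are made up for illustration):

    // Sketch: ToIntE reports conversion failures instead of silently returning 0.
    n, err := cast.ToIntE("8")            // 8, nil
    if err != nil {
        // handle the conversion failure
    }

    m, err := cast.ToIntE("not-a-number") // 0, non-nil error
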
## License

The project is licensed under the [MIT License](LICENSE).
69
vendor/github.com/spf13/cast/alias.go
generated
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
// Copyright © 2014 Steve Francia <spf@spf13.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
package cast
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"slices"
|
||||
)
|
||||
|
||||
var kindNames = []string{
|
||||
reflect.String: "string",
|
||||
reflect.Bool: "bool",
|
||||
reflect.Int: "int",
|
||||
reflect.Int8: "int8",
|
||||
reflect.Int16: "int16",
|
||||
reflect.Int32: "int32",
|
||||
reflect.Int64: "int64",
|
||||
reflect.Uint: "uint",
|
||||
reflect.Uint8: "uint8",
|
||||
reflect.Uint16: "uint16",
|
||||
reflect.Uint32: "uint32",
|
||||
reflect.Uint64: "uint64",
|
||||
reflect.Float32: "float32",
|
||||
reflect.Float64: "float64",
|
||||
}
|
||||
|
||||
var kinds = map[reflect.Kind]func(reflect.Value) any{
|
||||
reflect.String: func(v reflect.Value) any { return v.String() },
|
||||
reflect.Bool: func(v reflect.Value) any { return v.Bool() },
|
||||
reflect.Int: func(v reflect.Value) any { return int(v.Int()) },
|
||||
reflect.Int8: func(v reflect.Value) any { return int8(v.Int()) },
|
||||
reflect.Int16: func(v reflect.Value) any { return int16(v.Int()) },
|
||||
reflect.Int32: func(v reflect.Value) any { return int32(v.Int()) },
|
||||
reflect.Int64: func(v reflect.Value) any { return v.Int() },
|
||||
reflect.Uint: func(v reflect.Value) any { return uint(v.Uint()) },
|
||||
reflect.Uint8: func(v reflect.Value) any { return uint8(v.Uint()) },
|
||||
reflect.Uint16: func(v reflect.Value) any { return uint16(v.Uint()) },
|
||||
reflect.Uint32: func(v reflect.Value) any { return uint32(v.Uint()) },
|
||||
reflect.Uint64: func(v reflect.Value) any { return v.Uint() },
|
||||
reflect.Float32: func(v reflect.Value) any { return float32(v.Float()) },
|
||||
reflect.Float64: func(v reflect.Value) any { return v.Float() },
|
||||
}
|
||||
|
||||
// resolveAlias attempts to resolve a named type to its underlying basic type (if possible).
|
||||
//
|
||||
// Pointers are expected to be indirected by this point.
|
||||
func resolveAlias(i any) (any, bool) {
|
||||
if i == nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
t := reflect.TypeOf(i)
|
||||
|
||||
// Not a named type
|
||||
if t.Name() == "" || slices.Contains(kindNames, t.Name()) {
|
||||
return i, false
|
||||
}
|
||||
|
||||
resolve, ok := kinds[t.Kind()]
|
||||
if !ok { // Not a supported kind
|
||||
return i, false
|
||||
}
|
||||
|
||||
v := reflect.ValueOf(i)
|
||||
|
||||
return resolve(v), true
|
||||
}
|
||||
131
vendor/github.com/spf13/cast/basic.go
generated
vendored
Normal file
@@ -0,0 +1,131 @@
|
||||
// Copyright © 2014 Steve Francia <spf@spf13.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cast
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ToBoolE casts any value to a bool type.
|
||||
func ToBoolE(i any) (bool, error) {
|
||||
i, _ = indirect(i)
|
||||
|
||||
switch b := i.(type) {
|
||||
case bool:
|
||||
return b, nil
|
||||
case nil:
|
||||
return false, nil
|
||||
case int:
|
||||
return b != 0, nil
|
||||
case int8:
|
||||
return b != 0, nil
|
||||
case int16:
|
||||
return b != 0, nil
|
||||
case int32:
|
||||
return b != 0, nil
|
||||
case int64:
|
||||
return b != 0, nil
|
||||
case uint:
|
||||
return b != 0, nil
|
||||
case uint8:
|
||||
return b != 0, nil
|
||||
case uint16:
|
||||
return b != 0, nil
|
||||
case uint32:
|
||||
return b != 0, nil
|
||||
case uint64:
|
||||
return b != 0, nil
|
||||
case float32:
|
||||
return b != 0, nil
|
||||
case float64:
|
||||
return b != 0, nil
|
||||
case time.Duration:
|
||||
return b != 0, nil
|
||||
case string:
|
||||
return strconv.ParseBool(b)
|
||||
case json.Number:
|
||||
v, err := ToInt64E(b)
|
||||
if err == nil {
|
||||
return v != 0, nil
|
||||
}
|
||||
|
||||
return false, fmt.Errorf(errorMsg, i, i, false)
|
||||
default:
|
||||
if i, ok := resolveAlias(i); ok {
|
||||
return ToBoolE(i)
|
||||
}
|
||||
|
||||
return false, fmt.Errorf(errorMsg, i, i, false)
|
||||
}
|
||||
}
|
||||
|
||||
// ToStringE casts any value to a string type.
|
||||
func ToStringE(i any) (string, error) {
|
||||
switch s := i.(type) {
|
||||
case string:
|
||||
return s, nil
|
||||
case bool:
|
||||
return strconv.FormatBool(s), nil
|
||||
case float64:
|
||||
return strconv.FormatFloat(s, 'f', -1, 64), nil
|
||||
case float32:
|
||||
return strconv.FormatFloat(float64(s), 'f', -1, 32), nil
|
||||
case int:
|
||||
return strconv.Itoa(s), nil
|
||||
case int8:
|
||||
return strconv.FormatInt(int64(s), 10), nil
|
||||
case int16:
|
||||
return strconv.FormatInt(int64(s), 10), nil
|
||||
case int32:
|
||||
return strconv.FormatInt(int64(s), 10), nil
|
||||
case int64:
|
||||
return strconv.FormatInt(s, 10), nil
|
||||
case uint:
|
||||
return strconv.FormatUint(uint64(s), 10), nil
|
||||
case uint8:
|
||||
return strconv.FormatUint(uint64(s), 10), nil
|
||||
case uint16:
|
||||
return strconv.FormatUint(uint64(s), 10), nil
|
||||
case uint32:
|
||||
return strconv.FormatUint(uint64(s), 10), nil
|
||||
case uint64:
|
||||
return strconv.FormatUint(s, 10), nil
|
||||
case json.Number:
|
||||
return s.String(), nil
|
||||
case []byte:
|
||||
return string(s), nil
|
||||
case template.HTML:
|
||||
return string(s), nil
|
||||
case template.URL:
|
||||
return string(s), nil
|
||||
case template.JS:
|
||||
return string(s), nil
|
||||
case template.CSS:
|
||||
return string(s), nil
|
||||
case template.HTMLAttr:
|
||||
return string(s), nil
|
||||
case nil:
|
||||
return "", nil
|
||||
case fmt.Stringer:
|
||||
return s.String(), nil
|
||||
case error:
|
||||
return s.Error(), nil
|
||||
default:
|
||||
if i, ok := indirect(i); ok {
|
||||
return ToStringE(i)
|
||||
}
|
||||
|
||||
if i, ok := resolveAlias(i); ok {
|
||||
return ToStringE(i)
|
||||
}
|
||||
|
||||
return "", fmt.Errorf(errorMsg, i, i, "")
|
||||
}
|
||||
}
|
||||
84
vendor/github.com/spf13/cast/cast.go
generated
vendored
Normal file
@@ -0,0 +1,84 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.

// Package cast provides easy and safe casting in Go.
package cast

import "time"

const errorMsg = "unable to cast %#v of type %T to %T"
const errorMsgWith = "unable to cast %#v of type %T to %T: %w"

// Basic is a type parameter constraint for functions accepting basic types.
//
// It represents the supported basic types this package can cast to.
type Basic interface {
	string | bool | Number | time.Time | time.Duration
}

// ToE casts any value to a [Basic] type.
func ToE[T Basic](i any) (T, error) {
	var t T

	var v any
	var err error

	switch any(t).(type) {
	case string:
		v, err = ToStringE(i)
	case bool:
		v, err = ToBoolE(i)
	case int:
		v, err = toNumberE[int](i, parseInt[int])
	case int8:
		v, err = toNumberE[int8](i, parseInt[int8])
	case int16:
		v, err = toNumberE[int16](i, parseInt[int16])
	case int32:
		v, err = toNumberE[int32](i, parseInt[int32])
	case int64:
		v, err = toNumberE[int64](i, parseInt[int64])
	case uint:
		v, err = toUnsignedNumberE[uint](i, parseUint[uint])
	case uint8:
		v, err = toUnsignedNumberE[uint8](i, parseUint[uint8])
	case uint16:
		v, err = toUnsignedNumberE[uint16](i, parseUint[uint16])
	case uint32:
		v, err = toUnsignedNumberE[uint32](i, parseUint[uint32])
	case uint64:
		v, err = toUnsignedNumberE[uint64](i, parseUint[uint64])
	case float32:
		v, err = toNumberE[float32](i, parseFloat[float32])
	case float64:
		v, err = toNumberE[float64](i, parseFloat[float64])
	case time.Time:
		v, err = ToTimeE(i)
	case time.Duration:
		v, err = ToDurationE(i)
	}

	if err != nil {
		return t, err
	}

	return v.(T), nil
}

// Must is a helper that wraps a call to a cast function and panics if the error is non-nil.
func Must[T any](i any, err error) T {
	if err != nil {
		panic(err)
	}

	return i.(T)
}

// To casts any value to a [Basic] type.
func To[T Basic](i any) T {
	v, _ := ToE[T](i)

	return v
}
|
||||
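The generic helpers added above (`ToE`, `To`, `Must`) are the main new surface of this cast version. As a rough illustration only (not part of the vendored diff; the literal values are invented), a caller importing `github.com/spf13/cast` could use them like this:

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/cast"
)

func main() {
	// ToE returns the converted value plus an error for invalid input.
	n, err := cast.ToE[int]("42")
	fmt.Println(n, err) // 42 <nil>

	// To swallows the error and returns the zero value on failure.
	fmt.Println(cast.To[time.Duration]("1h30m")) // 1h30m0s

	// Must panics when the wrapped cast fails.
	b := cast.Must[bool](cast.ToBoolE("true"))
	fmt.Println(b) // true
}
```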
37 vendor/github.com/spf13/cast/indirect.go (generated, vendored, Normal file)
@@ -0,0 +1,37 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.

package cast

import (
	"reflect"
)

// From html/template/content.go
// Copyright 2011 The Go Authors. All rights reserved.
// indirect returns the value, after dereferencing as many times
// as necessary to reach the base type (or nil).
func indirect(i any) (any, bool) {
	if i == nil {
		return nil, false
	}

	if t := reflect.TypeOf(i); t.Kind() != reflect.Ptr {
		// Avoid creating a reflect.Value if it's not a pointer.
		return i, false
	}

	v := reflect.ValueOf(i)

	for v.Kind() == reflect.Ptr || (v.Kind() == reflect.Interface && v.Elem().Kind() == reflect.Ptr) {
		if v.IsNil() {
			return nil, true
		}

		v = v.Elem()
	}

	return v.Interface(), true
}
|
||||
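Since the public cast helpers pass their input through `indirect` before converting, pointer arguments are dereferenced transparently. A small illustrative sketch (not from the diff; values invented):

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	x := 8
	p := &x
	pp := &p

	// indirect unwraps both levels of pointers before the numeric cast.
	fmt.Println(cast.ToInt(p))  // 8
	fmt.Println(cast.ToInt(pp)) // 8

	// A nil pointer dereferences to nil, which casts to the zero value.
	var q *int
	fmt.Println(cast.ToInt(q)) // 0
}
```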
79 vendor/github.com/spf13/cast/internal/time.go (generated, vendored, Normal file)
@@ -0,0 +1,79 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
//go:generate stringer -type=TimeFormatType
|
||||
|
||||
type TimeFormatType int
|
||||
|
||||
const (
|
||||
TimeFormatNoTimezone TimeFormatType = iota
|
||||
TimeFormatNamedTimezone
|
||||
TimeFormatNumericTimezone
|
||||
TimeFormatNumericAndNamedTimezone
|
||||
TimeFormatTimeOnly
|
||||
)
|
||||
|
||||
type TimeFormat struct {
|
||||
Format string
|
||||
Typ TimeFormatType
|
||||
}
|
||||
|
||||
func (f TimeFormat) HasTimezone() bool {
|
||||
// We don't include the formats with only named timezones, see
|
||||
// https://github.com/golang/go/issues/19694#issuecomment-289103522
|
||||
return f.Typ >= TimeFormatNumericTimezone && f.Typ <= TimeFormatNumericAndNamedTimezone
|
||||
}
|
||||
|
||||
var TimeFormats = []TimeFormat{
|
||||
// Keep common formats at the top.
|
||||
{"2006-01-02", TimeFormatNoTimezone},
|
||||
{time.RFC3339, TimeFormatNumericTimezone},
|
||||
{"2006-01-02T15:04:05", TimeFormatNoTimezone}, // iso8601 without timezone
|
||||
{time.RFC1123Z, TimeFormatNumericTimezone},
|
||||
{time.RFC1123, TimeFormatNamedTimezone},
|
||||
{time.RFC822Z, TimeFormatNumericTimezone},
|
||||
{time.RFC822, TimeFormatNamedTimezone},
|
||||
{time.RFC850, TimeFormatNamedTimezone},
|
||||
{"2006-01-02 15:04:05.999999999 -0700 MST", TimeFormatNumericAndNamedTimezone}, // Time.String()
|
||||
{"2006-01-02T15:04:05-0700", TimeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon
|
||||
{"2006-01-02 15:04:05Z0700", TimeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon
|
||||
{"2006-01-02 15:04:05", TimeFormatNoTimezone},
|
||||
{time.ANSIC, TimeFormatNoTimezone},
|
||||
{time.UnixDate, TimeFormatNamedTimezone},
|
||||
{time.RubyDate, TimeFormatNumericTimezone},
|
||||
{"2006-01-02 15:04:05Z07:00", TimeFormatNumericTimezone},
|
||||
{"02 Jan 2006", TimeFormatNoTimezone},
|
||||
{"2006-01-02 15:04:05 -07:00", TimeFormatNumericTimezone},
|
||||
{"2006-01-02 15:04:05 -0700", TimeFormatNumericTimezone},
|
||||
{time.Kitchen, TimeFormatTimeOnly},
|
||||
{time.Stamp, TimeFormatTimeOnly},
|
||||
{time.StampMilli, TimeFormatTimeOnly},
|
||||
{time.StampMicro, TimeFormatTimeOnly},
|
||||
{time.StampNano, TimeFormatTimeOnly},
|
||||
}
|
||||
|
||||
func ParseDateWith(s string, location *time.Location, formats []TimeFormat) (d time.Time, e error) {
|
||||
for _, format := range formats {
|
||||
if d, e = time.Parse(format.Format, s); e == nil {
|
||||
|
||||
// Some time formats have a zone name, but no offset, so it gets
|
||||
// put in that zone name (not the default one passed in to us), but
|
||||
// without that zone's offset. So set the location manually.
|
||||
if format.Typ <= TimeFormatNamedTimezone {
|
||||
if location == nil {
|
||||
location = time.Local
|
||||
}
|
||||
year, month, day := d.Date()
|
||||
hour, min, sec := d.Clock()
|
||||
d = time.Date(year, month, day, hour, min, sec, d.Nanosecond(), location)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
return d, fmt.Errorf("unable to parse date: %s", s)
|
||||
}
|
||||
27 vendor/github.com/spf13/cast/internal/timeformattype_string.go (generated, vendored, Normal file)
@@ -0,0 +1,27 @@
|
||||
// Code generated by "stringer -type=TimeFormatType"; DO NOT EDIT.
|
||||
|
||||
package internal
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[TimeFormatNoTimezone-0]
|
||||
_ = x[TimeFormatNamedTimezone-1]
|
||||
_ = x[TimeFormatNumericTimezone-2]
|
||||
_ = x[TimeFormatNumericAndNamedTimezone-3]
|
||||
_ = x[TimeFormatTimeOnly-4]
|
||||
}
|
||||
|
||||
const _TimeFormatType_name = "TimeFormatNoTimezoneTimeFormatNamedTimezoneTimeFormatNumericTimezoneTimeFormatNumericAndNamedTimezoneTimeFormatTimeOnly"
|
||||
|
||||
var _TimeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119}
|
||||
|
||||
func (i TimeFormatType) String() string {
|
||||
if i < 0 || i >= TimeFormatType(len(_TimeFormatType_index)-1) {
|
||||
return "TimeFormatType(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
return _TimeFormatType_name[_TimeFormatType_index[i]:_TimeFormatType_index[i+1]]
|
||||
}
|
||||
212 vendor/github.com/spf13/cast/map.go (generated, vendored, Normal file)
@@ -0,0 +1,212 @@
|
||||
// Copyright © 2014 Steve Francia <spf@spf13.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cast
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
func toMapE[K comparable, V any](i any, keyFn func(any) K, valFn func(any) V) (map[K]V, error) {
|
||||
m := map[K]V{}
|
||||
|
||||
if i == nil {
|
||||
return m, fmt.Errorf(errorMsg, i, i, m)
|
||||
}
|
||||
|
||||
switch v := i.(type) {
|
||||
case map[K]V:
|
||||
return v, nil
|
||||
|
||||
case map[K]any:
|
||||
for k, val := range v {
|
||||
m[k] = valFn(val)
|
||||
}
|
||||
|
||||
return m, nil
|
||||
|
||||
case map[any]V:
|
||||
for k, val := range v {
|
||||
m[keyFn(k)] = val
|
||||
}
|
||||
|
||||
return m, nil
|
||||
|
||||
case map[any]any:
|
||||
for k, val := range v {
|
||||
m[keyFn(k)] = valFn(val)
|
||||
}
|
||||
|
||||
return m, nil
|
||||
|
||||
case string:
|
||||
err := jsonStringToObject(v, &m)
|
||||
return m, err
|
||||
|
||||
default:
|
||||
return m, fmt.Errorf(errorMsg, i, i, m)
|
||||
}
|
||||
}
|
||||
|
||||
func toStringMapE[T any](i any, fn func(any) T) (map[string]T, error) {
|
||||
return toMapE(i, ToString, fn)
|
||||
}
|
||||
|
||||
// ToStringMapStringE casts any value to a map[string]string type.
|
||||
func ToStringMapStringE(i any) (map[string]string, error) {
|
||||
return toStringMapE(i, ToString)
|
||||
}
|
||||
|
||||
// ToStringMapStringSliceE casts any value to a map[string][]string type.
|
||||
func ToStringMapStringSliceE(i any) (map[string][]string, error) {
|
||||
m := map[string][]string{}
|
||||
|
||||
switch v := i.(type) {
|
||||
case map[string][]string:
|
||||
return v, nil
|
||||
case map[string][]any:
|
||||
for k, val := range v {
|
||||
m[ToString(k)] = ToStringSlice(val)
|
||||
}
|
||||
return m, nil
|
||||
case map[string]string:
|
||||
for k, val := range v {
|
||||
m[ToString(k)] = []string{val}
|
||||
}
|
||||
case map[string]any:
|
||||
for k, val := range v {
|
||||
switch vt := val.(type) {
|
||||
case []any:
|
||||
m[ToString(k)] = ToStringSlice(vt)
|
||||
case []string:
|
||||
m[ToString(k)] = vt
|
||||
default:
|
||||
m[ToString(k)] = []string{ToString(val)}
|
||||
}
|
||||
}
|
||||
return m, nil
|
||||
case map[any][]string:
|
||||
for k, val := range v {
|
||||
m[ToString(k)] = ToStringSlice(val)
|
||||
}
|
||||
return m, nil
|
||||
case map[any]string:
|
||||
for k, val := range v {
|
||||
m[ToString(k)] = ToStringSlice(val)
|
||||
}
|
||||
return m, nil
|
||||
case map[any][]any:
|
||||
for k, val := range v {
|
||||
m[ToString(k)] = ToStringSlice(val)
|
||||
}
|
||||
return m, nil
|
||||
case map[any]any:
|
||||
for k, val := range v {
|
||||
key, err := ToStringE(k)
|
||||
if err != nil {
|
||||
return m, fmt.Errorf(errorMsg, i, i, m)
|
||||
}
|
||||
value, err := ToStringSliceE(val)
|
||||
if err != nil {
|
||||
return m, fmt.Errorf(errorMsg, i, i, m)
|
||||
}
|
||||
m[key] = value
|
||||
}
|
||||
case string:
|
||||
err := jsonStringToObject(v, &m)
|
||||
return m, err
|
||||
default:
|
||||
return m, fmt.Errorf(errorMsg, i, i, m)
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// ToStringMapBoolE casts any value to a map[string]bool type.
|
||||
func ToStringMapBoolE(i any) (map[string]bool, error) {
|
||||
return toStringMapE(i, ToBool)
|
||||
}
|
||||
|
||||
// ToStringMapE casts any value to a map[string]any type.
|
||||
func ToStringMapE(i any) (map[string]any, error) {
|
||||
fn := func(i any) any { return i }
|
||||
|
||||
return toStringMapE(i, fn)
|
||||
}
|
||||
|
||||
func toStringMapIntE[T int | int64](i any, fn func(any) T, fnE func(any) (T, error)) (map[string]T, error) {
|
||||
m := map[string]T{}
|
||||
|
||||
if i == nil {
|
||||
return nil, fmt.Errorf(errorMsg, i, i, m)
|
||||
}
|
||||
|
||||
switch v := i.(type) {
|
||||
case map[string]T:
|
||||
return v, nil
|
||||
|
||||
case map[string]any:
|
||||
for k, val := range v {
|
||||
m[k] = fn(val)
|
||||
}
|
||||
|
||||
return m, nil
|
||||
|
||||
case map[any]T:
|
||||
for k, val := range v {
|
||||
m[ToString(k)] = val
|
||||
}
|
||||
|
||||
return m, nil
|
||||
|
||||
case map[any]any:
|
||||
for k, val := range v {
|
||||
m[ToString(k)] = fn(val)
|
||||
}
|
||||
|
||||
return m, nil
|
||||
|
||||
case string:
|
||||
err := jsonStringToObject(v, &m)
|
||||
return m, err
|
||||
}
|
||||
|
||||
if reflect.TypeOf(i).Kind() != reflect.Map {
|
||||
return m, fmt.Errorf(errorMsg, i, i, m)
|
||||
}
|
||||
|
||||
mVal := reflect.ValueOf(m)
|
||||
v := reflect.ValueOf(i)
|
||||
|
||||
for _, keyVal := range v.MapKeys() {
|
||||
val, err := fnE(v.MapIndex(keyVal).Interface())
|
||||
if err != nil {
|
||||
return m, fmt.Errorf(errorMsg, i, i, m)
|
||||
}
|
||||
|
||||
mVal.SetMapIndex(keyVal, reflect.ValueOf(val))
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// ToStringMapIntE casts any value to a map[string]int type.
|
||||
func ToStringMapIntE(i any) (map[string]int, error) {
|
||||
return toStringMapIntE(i, ToInt, ToIntE)
|
||||
}
|
||||
|
||||
// ToStringMapInt64E casts any value to a map[string]int64 type.
|
||||
func ToStringMapInt64E(i any) (map[string]int64, error) {
|
||||
return toStringMapIntE(i, ToInt64, ToInt64E)
|
||||
}
|
||||
|
||||
// jsonStringToObject attempts to unmarshal a string as JSON into
|
||||
// the object passed as pointer.
|
||||
func jsonStringToObject(s string, v any) error {
|
||||
data := []byte(s)
|
||||
return json.Unmarshal(data, v)
|
||||
}
|
||||
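The map helpers above mainly normalize the `map[any]any` and `map[string]any` shapes that YAML and JSON decoders commonly produce. A hedged usage sketch with invented inputs:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// Keys and values of a map[any]any are cast to string individually.
	raw := map[any]any{"name": "steve", "age": 35}
	fmt.Println(cast.ToStringMapString(raw)) // map[age:35 name:steve]

	// JSON strings are decoded into the target map type.
	fmt.Println(cast.ToStringMapInt(`{"a": 1, "b": 2}`)) // map[a:1 b:2]

	// The error-returning variant reports inputs it cannot convert.
	_, err := cast.ToStringMapStringE(42)
	fmt.Println(err != nil) // true
}
```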
549 vendor/github.com/spf13/cast/number.go (generated, vendored, Normal file)
@@ -0,0 +1,549 @@
|
||||
// Copyright © 2014 Steve Francia <spf@spf13.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cast
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var errNegativeNotAllowed = errors.New("unable to cast negative value")
|
||||
|
||||
type float64EProvider interface {
|
||||
Float64() (float64, error)
|
||||
}
|
||||
|
||||
type float64Provider interface {
|
||||
Float64() float64
|
||||
}
|
||||
|
||||
// Number is a type parameter constraint for functions accepting number types.
|
||||
//
|
||||
// It represents the supported number types this package can cast to.
|
||||
type Number interface {
|
||||
int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64 | float32 | float64
|
||||
}
|
||||
|
||||
type integer interface {
|
||||
int | int8 | int16 | int32 | int64
|
||||
}
|
||||
|
||||
type unsigned interface {
|
||||
uint | uint8 | uint16 | uint32 | uint64
|
||||
}
|
||||
|
||||
type float interface {
|
||||
float32 | float64
|
||||
}
|
||||
|
||||
// ToNumberE casts any value to a [Number] type.
|
||||
func ToNumberE[T Number](i any) (T, error) {
|
||||
var t T
|
||||
|
||||
switch any(t).(type) {
|
||||
case int:
|
||||
return toNumberE[T](i, parseNumber[T])
|
||||
case int8:
|
||||
return toNumberE[T](i, parseNumber[T])
|
||||
case int16:
|
||||
return toNumberE[T](i, parseNumber[T])
|
||||
case int32:
|
||||
return toNumberE[T](i, parseNumber[T])
|
||||
case int64:
|
||||
return toNumberE[T](i, parseNumber[T])
|
||||
case uint:
|
||||
return toUnsignedNumberE[T](i, parseNumber[T])
|
||||
case uint8:
|
||||
return toUnsignedNumberE[T](i, parseNumber[T])
|
||||
case uint16:
|
||||
return toUnsignedNumberE[T](i, parseNumber[T])
|
||||
case uint32:
|
||||
return toUnsignedNumberE[T](i, parseNumber[T])
|
||||
case uint64:
|
||||
return toUnsignedNumberE[T](i, parseNumber[T])
|
||||
case float32:
|
||||
return toNumberE[T](i, parseNumber[T])
|
||||
case float64:
|
||||
return toNumberE[T](i, parseNumber[T])
|
||||
default:
|
||||
return 0, fmt.Errorf("unknown number type: %T", t)
|
||||
}
|
||||
}
|
||||
|
||||
// ToNumber casts any value to a [Number] type.
|
||||
func ToNumber[T Number](i any) T {
|
||||
v, _ := ToNumberE[T](i)
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
// toNumber's semantics differ from other "to" functions.
|
||||
// It returns false as the second parameter if the conversion fails.
|
||||
// This is to signal other callers that they should proceed with their own conversions.
|
||||
func toNumber[T Number](i any) (T, bool) {
|
||||
i, _ = indirect(i)
|
||||
|
||||
switch s := i.(type) {
|
||||
case T:
|
||||
return s, true
|
||||
case int:
|
||||
return T(s), true
|
||||
case int8:
|
||||
return T(s), true
|
||||
case int16:
|
||||
return T(s), true
|
||||
case int32:
|
||||
return T(s), true
|
||||
case int64:
|
||||
return T(s), true
|
||||
case uint:
|
||||
return T(s), true
|
||||
case uint8:
|
||||
return T(s), true
|
||||
case uint16:
|
||||
return T(s), true
|
||||
case uint32:
|
||||
return T(s), true
|
||||
case uint64:
|
||||
return T(s), true
|
||||
case float32:
|
||||
return T(s), true
|
||||
case float64:
|
||||
return T(s), true
|
||||
case bool:
|
||||
if s {
|
||||
return 1, true
|
||||
}
|
||||
|
||||
return 0, true
|
||||
case nil:
|
||||
return 0, true
|
||||
case time.Weekday:
|
||||
return T(s), true
|
||||
case time.Month:
|
||||
return T(s), true
|
||||
}
|
||||
|
||||
return 0, false
|
||||
}
|
||||
|
||||
func toNumberE[T Number](i any, parseFn func(string) (T, error)) (T, error) {
|
||||
n, ok := toNumber[T](i)
|
||||
if ok {
|
||||
return n, nil
|
||||
}
|
||||
|
||||
i, _ = indirect(i)
|
||||
|
||||
switch s := i.(type) {
|
||||
case string:
|
||||
if s == "" {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
v, err := parseFn(s)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf(errorMsgWith, i, i, n, err)
|
||||
}
|
||||
|
||||
return v, nil
|
||||
case json.Number:
|
||||
if s == "" {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
v, err := parseFn(string(s))
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf(errorMsgWith, i, i, n, err)
|
||||
}
|
||||
|
||||
return v, nil
|
||||
case float64EProvider:
|
||||
if _, ok := any(n).(float64); !ok {
|
||||
return 0, fmt.Errorf(errorMsg, i, i, n)
|
||||
}
|
||||
|
||||
v, err := s.Float64()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf(errorMsg, i, i, n)
|
||||
}
|
||||
|
||||
return T(v), nil
|
||||
case float64Provider:
|
||||
if _, ok := any(n).(float64); !ok {
|
||||
return 0, fmt.Errorf(errorMsg, i, i, n)
|
||||
}
|
||||
|
||||
return T(s.Float64()), nil
|
||||
default:
|
||||
if i, ok := resolveAlias(i); ok {
|
||||
return toNumberE(i, parseFn)
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf(errorMsg, i, i, n)
|
||||
}
|
||||
}
|
||||
|
||||
func toUnsignedNumber[T Number](i any) (T, bool, bool) {
|
||||
i, _ = indirect(i)
|
||||
|
||||
switch s := i.(type) {
|
||||
case T:
|
||||
return s, true, true
|
||||
case int:
|
||||
if s < 0 {
|
||||
return 0, false, false
|
||||
}
|
||||
|
||||
return T(s), true, true
|
||||
case int8:
|
||||
if s < 0 {
|
||||
return 0, false, false
|
||||
}
|
||||
|
||||
return T(s), true, true
|
||||
case int16:
|
||||
if s < 0 {
|
||||
return 0, false, false
|
||||
}
|
||||
|
||||
return T(s), true, true
|
||||
case int32:
|
||||
if s < 0 {
|
||||
return 0, false, false
|
||||
}
|
||||
|
||||
return T(s), true, true
|
||||
case int64:
|
||||
if s < 0 {
|
||||
return 0, false, false
|
||||
}
|
||||
|
||||
return T(s), true, true
|
||||
case uint:
|
||||
return T(s), true, true
|
||||
case uint8:
|
||||
return T(s), true, true
|
||||
case uint16:
|
||||
return T(s), true, true
|
||||
case uint32:
|
||||
return T(s), true, true
|
||||
case uint64:
|
||||
return T(s), true, true
|
||||
case float32:
|
||||
if s < 0 {
|
||||
return 0, false, false
|
||||
}
|
||||
|
||||
return T(s), true, true
|
||||
case float64:
|
||||
if s < 0 {
|
||||
return 0, false, false
|
||||
}
|
||||
|
||||
return T(s), true, true
|
||||
case bool:
|
||||
if s {
|
||||
return 1, true, true
|
||||
}
|
||||
|
||||
return 0, true, true
|
||||
case nil:
|
||||
return 0, true, true
|
||||
case time.Weekday:
|
||||
if s < 0 {
|
||||
return 0, false, false
|
||||
}
|
||||
|
||||
return T(s), true, true
|
||||
case time.Month:
|
||||
if s < 0 {
|
||||
return 0, false, false
|
||||
}
|
||||
|
||||
return T(s), true, true
|
||||
}
|
||||
|
||||
return 0, true, false
|
||||
}
|
||||
|
||||
func toUnsignedNumberE[T Number](i any, parseFn func(string) (T, error)) (T, error) {
|
||||
n, valid, ok := toUnsignedNumber[T](i)
|
||||
if ok {
|
||||
return n, nil
|
||||
}
|
||||
|
||||
i, _ = indirect(i)
|
||||
|
||||
if !valid {
|
||||
return 0, errNegativeNotAllowed
|
||||
}
|
||||
|
||||
switch s := i.(type) {
|
||||
case string:
|
||||
if s == "" {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
v, err := parseFn(s)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf(errorMsgWith, i, i, n, err)
|
||||
}
|
||||
|
||||
return v, nil
|
||||
case json.Number:
|
||||
if s == "" {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
v, err := parseFn(string(s))
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf(errorMsgWith, i, i, n, err)
|
||||
}
|
||||
|
||||
return v, nil
|
||||
case float64EProvider:
|
||||
if _, ok := any(n).(float64); !ok {
|
||||
return 0, fmt.Errorf(errorMsg, i, i, n)
|
||||
}
|
||||
|
||||
v, err := s.Float64()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf(errorMsg, i, i, n)
|
||||
}
|
||||
|
||||
if v < 0 {
|
||||
return 0, errNegativeNotAllowed
|
||||
}
|
||||
|
||||
return T(v), nil
|
||||
case float64Provider:
|
||||
if _, ok := any(n).(float64); !ok {
|
||||
return 0, fmt.Errorf(errorMsg, i, i, n)
|
||||
}
|
||||
|
||||
v := s.Float64()
|
||||
|
||||
if v < 0 {
|
||||
return 0, errNegativeNotAllowed
|
||||
}
|
||||
|
||||
return T(v), nil
|
||||
default:
|
||||
if i, ok := resolveAlias(i); ok {
|
||||
return toUnsignedNumberE(i, parseFn)
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf(errorMsg, i, i, n)
|
||||
}
|
||||
}
|
||||
|
||||
func parseNumber[T Number](s string) (T, error) {
|
||||
var t T
|
||||
|
||||
switch any(t).(type) {
|
||||
case int:
|
||||
v, err := parseInt[int](s)
|
||||
|
||||
return T(v), err
|
||||
case int8:
|
||||
v, err := parseInt[int8](s)
|
||||
|
||||
return T(v), err
|
||||
case int16:
|
||||
v, err := parseInt[int16](s)
|
||||
|
||||
return T(v), err
|
||||
case int32:
|
||||
v, err := parseInt[int32](s)
|
||||
|
||||
return T(v), err
|
||||
case int64:
|
||||
v, err := parseInt[int64](s)
|
||||
|
||||
return T(v), err
|
||||
case uint:
|
||||
v, err := parseUint[uint](s)
|
||||
|
||||
return T(v), err
|
||||
case uint8:
|
||||
v, err := parseUint[uint8](s)
|
||||
|
||||
return T(v), err
|
||||
case uint16:
|
||||
v, err := parseUint[uint16](s)
|
||||
|
||||
return T(v), err
|
||||
case uint32:
|
||||
v, err := parseUint[uint32](s)
|
||||
|
||||
return T(v), err
|
||||
case uint64:
|
||||
v, err := parseUint[uint64](s)
|
||||
|
||||
return T(v), err
|
||||
case float32:
|
||||
v, err := strconv.ParseFloat(s, 32)
|
||||
|
||||
return T(v), err
|
||||
case float64:
|
||||
v, err := strconv.ParseFloat(s, 64)
|
||||
|
||||
return T(v), err
|
||||
|
||||
default:
|
||||
return 0, fmt.Errorf("unknown number type: %T", t)
|
||||
}
|
||||
}
|
||||
|
||||
func parseInt[T integer](s string) (T, error) {
|
||||
v, err := strconv.ParseInt(trimDecimal(s), 0, 0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return T(v), nil
|
||||
}
|
||||
|
||||
func parseUint[T unsigned](s string) (T, error) {
|
||||
v, err := strconv.ParseUint(strings.TrimLeft(trimDecimal(s), "+"), 0, 0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return T(v), nil
|
||||
}
|
||||
|
||||
func parseFloat[T float](s string) (T, error) {
|
||||
var t T
|
||||
|
||||
var v any
|
||||
var err error
|
||||
|
||||
switch any(t).(type) {
|
||||
case float32:
|
||||
n, e := strconv.ParseFloat(s, 32)
|
||||
|
||||
v = float32(n)
|
||||
err = e
|
||||
case float64:
|
||||
n, e := strconv.ParseFloat(s, 64)
|
||||
|
||||
v = float64(n)
|
||||
err = e
|
||||
}
|
||||
|
||||
return v.(T), err
|
||||
}
|
||||
|
||||
// ToFloat64E casts an interface to a float64 type.
|
||||
func ToFloat64E(i any) (float64, error) {
|
||||
return toNumberE[float64](i, parseFloat[float64])
|
||||
}
|
||||
|
||||
// ToFloat32E casts an interface to a float32 type.
|
||||
func ToFloat32E(i any) (float32, error) {
|
||||
return toNumberE[float32](i, parseFloat[float32])
|
||||
}
|
||||
|
||||
// ToInt64E casts an interface to an int64 type.
|
||||
func ToInt64E(i any) (int64, error) {
|
||||
return toNumberE[int64](i, parseInt[int64])
|
||||
}
|
||||
|
||||
// ToInt32E casts an interface to an int32 type.
|
||||
func ToInt32E(i any) (int32, error) {
|
||||
return toNumberE[int32](i, parseInt[int32])
|
||||
}
|
||||
|
||||
// ToInt16E casts an interface to an int16 type.
|
||||
func ToInt16E(i any) (int16, error) {
|
||||
return toNumberE[int16](i, parseInt[int16])
|
||||
}
|
||||
|
||||
// ToInt8E casts an interface to an int8 type.
|
||||
func ToInt8E(i any) (int8, error) {
|
||||
return toNumberE[int8](i, parseInt[int8])
|
||||
}
|
||||
|
||||
// ToIntE casts an interface to an int type.
|
||||
func ToIntE(i any) (int, error) {
|
||||
return toNumberE[int](i, parseInt[int])
|
||||
}
|
||||
|
||||
// ToUintE casts an interface to a uint type.
|
||||
func ToUintE(i any) (uint, error) {
|
||||
return toUnsignedNumberE[uint](i, parseUint[uint])
|
||||
}
|
||||
|
||||
// ToUint64E casts an interface to a uint64 type.
|
||||
func ToUint64E(i any) (uint64, error) {
|
||||
return toUnsignedNumberE[uint64](i, parseUint[uint64])
|
||||
}
|
||||
|
||||
// ToUint32E casts an interface to a uint32 type.
|
||||
func ToUint32E(i any) (uint32, error) {
|
||||
return toUnsignedNumberE[uint32](i, parseUint[uint32])
|
||||
}
|
||||
|
||||
// ToUint16E casts an interface to a uint16 type.
|
||||
func ToUint16E(i any) (uint16, error) {
|
||||
return toUnsignedNumberE[uint16](i, parseUint[uint16])
|
||||
}
|
||||
|
||||
// ToUint8E casts an interface to a uint8 type.
|
||||
func ToUint8E(i any) (uint8, error) {
|
||||
return toUnsignedNumberE[uint8](i, parseUint[uint8])
|
||||
}
|
||||
|
||||
func trimZeroDecimal(s string) string {
|
||||
var foundZero bool
|
||||
for i := len(s); i > 0; i-- {
|
||||
switch s[i-1] {
|
||||
case '.':
|
||||
if foundZero {
|
||||
return s[:i-1]
|
||||
}
|
||||
case '0':
|
||||
foundZero = true
|
||||
default:
|
||||
return s
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
var stringNumberRe = regexp.MustCompile(`^([-+]?\d*)(\.\d*)?$`)
|
||||
|
||||
// see [BenchmarkDecimal] for details about the implementation
|
||||
func trimDecimal(s string) string {
|
||||
if !strings.Contains(s, ".") {
|
||||
return s
|
||||
}
|
||||
|
||||
matches := stringNumberRe.FindStringSubmatch(s)
|
||||
if matches != nil {
|
||||
// matches[1] is the captured integer part with sign
|
||||
s = matches[1]
|
||||
|
||||
// handle special cases
|
||||
switch s {
|
||||
case "-", "+":
|
||||
s += "0"
|
||||
case "":
|
||||
s = "0"
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
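Based on the parsing helpers above, numeric casts are fairly lenient: `trimDecimal` drops a fractional part before `strconv.ParseInt`, empty strings become zero, and negative input to an unsigned cast yields `errNegativeNotAllowed`. A short sketch of that behavior (illustrative only, values invented):

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// The fractional part of a numeric string is trimmed before ParseInt.
	fmt.Println(cast.ToInt("8.31")) // 8

	// Empty strings cast to the zero value without an error.
	v, err := cast.ToInt64E("")
	fmt.Println(v, err) // 0 <nil>

	// Negative values cannot be cast to unsigned types.
	_, err = cast.ToUintE(-5)
	fmt.Println(err) // unable to cast negative value

	// ToNumberE picks the right parser from the type parameter.
	f, _ := cast.ToNumberE[float32]("3.14")
	fmt.Println(f) // 3.14
}
```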
106 vendor/github.com/spf13/cast/slice.go (generated, vendored, Normal file)
@@ -0,0 +1,106 @@
|
||||
// Copyright © 2014 Steve Francia <spf@spf13.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cast
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ToSliceE casts any value to a []any type.
|
||||
func ToSliceE(i any) ([]any, error) {
|
||||
i, _ = indirect(i)
|
||||
|
||||
var s []any
|
||||
|
||||
switch v := i.(type) {
|
||||
case []any:
|
||||
// TODO: use slices.Clone
|
||||
return append(s, v...), nil
|
||||
case []map[string]any:
|
||||
for _, u := range v {
|
||||
s = append(s, u)
|
||||
}
|
||||
|
||||
return s, nil
|
||||
default:
|
||||
return s, fmt.Errorf(errorMsg, i, i, s)
|
||||
}
|
||||
}
|
||||
|
||||
func toSliceE[T Basic](i any) ([]T, error) {
|
||||
v, ok, err := toSliceEOk[T](i)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !ok {
|
||||
return nil, fmt.Errorf(errorMsg, i, i, []T{})
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func toSliceEOk[T Basic](i any) ([]T, bool, error) {
|
||||
i, _ = indirect(i)
|
||||
if i == nil {
|
||||
return nil, true, fmt.Errorf(errorMsg, i, i, []T{})
|
||||
}
|
||||
|
||||
switch v := i.(type) {
|
||||
case []T:
|
||||
// TODO: clone slice
|
||||
return v, true, nil
|
||||
}
|
||||
|
||||
kind := reflect.TypeOf(i).Kind()
|
||||
switch kind {
|
||||
case reflect.Slice, reflect.Array:
|
||||
s := reflect.ValueOf(i)
|
||||
a := make([]T, s.Len())
|
||||
|
||||
for j := 0; j < s.Len(); j++ {
|
||||
val, err := ToE[T](s.Index(j).Interface())
|
||||
if err != nil {
|
||||
return nil, true, fmt.Errorf(errorMsg, i, i, []T{})
|
||||
}
|
||||
|
||||
a[j] = val
|
||||
}
|
||||
|
||||
return a, true, nil
|
||||
default:
|
||||
return nil, false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// ToStringSliceE casts any value to a []string type.
|
||||
func ToStringSliceE(i any) ([]string, error) {
|
||||
if a, ok, err := toSliceEOk[string](i); ok {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var a []string
|
||||
|
||||
switch v := i.(type) {
|
||||
case string:
|
||||
return strings.Fields(v), nil
|
||||
case any:
|
||||
str, err := ToStringE(v)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(errorMsg, i, i, a)
|
||||
}
|
||||
|
||||
return []string{str}, nil
|
||||
default:
|
||||
return nil, fmt.Errorf(errorMsg, i, i, a)
|
||||
}
|
||||
}
|
||||
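`ToStringSliceE` above splits a bare string on whitespace, while other slices and arrays are converted element by element through `ToE`. An illustrative sketch with invented inputs:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// A plain string is split on whitespace via strings.Fields.
	fmt.Println(cast.ToStringSlice("a b c")) // [a b c]

	// Heterogeneous []any values are cast element by element.
	fmt.Println(cast.ToIntSlice([]any{1, "2", 3.0})) // [1 2 3]

	// Durations are parsed per element as well.
	fmt.Println(cast.ToDurationSlice([]string{"1s", "2m"})) // [1s 2m0s]
}
```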
116 vendor/github.com/spf13/cast/time.go (generated, vendored, Normal file)
@@ -0,0 +1,116 @@
|
||||
// Copyright © 2014 Steve Francia <spf@spf13.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cast
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cast/internal"
|
||||
)
|
||||
|
||||
// ToTimeE casts any value to a [time.Time] type.
|
||||
func ToTimeE(i any) (time.Time, error) {
|
||||
return ToTimeInDefaultLocationE(i, time.UTC)
|
||||
}
|
||||
|
||||
// ToTimeInDefaultLocationE casts an empty interface to [time.Time],
|
||||
// interpreting inputs without a timezone to be in the given location,
|
||||
// or the local timezone if nil.
|
||||
func ToTimeInDefaultLocationE(i any, location *time.Location) (tim time.Time, err error) {
|
||||
i, _ = indirect(i)
|
||||
|
||||
switch v := i.(type) {
|
||||
case time.Time:
|
||||
return v, nil
|
||||
case string:
|
||||
return StringToDateInDefaultLocation(v, location)
|
||||
case json.Number:
|
||||
// Originally this used ToInt64E, but adding string float conversion broke ToTime.
|
||||
// The behavior of ToTime would have changed if we had continued using it.
|
||||
// For now, using json.Number's own Int64 method should be good enough to preserve backwards compatibility.
|
||||
v = json.Number(trimZeroDecimal(string(v)))
|
||||
s, err1 := v.Int64()
|
||||
if err1 != nil {
|
||||
return time.Time{}, fmt.Errorf(errorMsg, i, i, time.Time{})
|
||||
}
|
||||
return time.Unix(s, 0), nil
|
||||
case int:
|
||||
return time.Unix(int64(v), 0), nil
|
||||
case int32:
|
||||
return time.Unix(int64(v), 0), nil
|
||||
case int64:
|
||||
return time.Unix(v, 0), nil
|
||||
case uint:
|
||||
return time.Unix(int64(v), 0), nil
|
||||
case uint32:
|
||||
return time.Unix(int64(v), 0), nil
|
||||
case uint64:
|
||||
return time.Unix(int64(v), 0), nil
|
||||
case nil:
|
||||
return time.Time{}, nil
|
||||
default:
|
||||
return time.Time{}, fmt.Errorf(errorMsg, i, i, time.Time{})
|
||||
}
|
||||
}
|
||||
|
||||
// ToDurationE casts any value to a [time.Duration] type.
|
||||
func ToDurationE(i any) (time.Duration, error) {
|
||||
i, _ = indirect(i)
|
||||
|
||||
switch s := i.(type) {
|
||||
case time.Duration:
|
||||
return s, nil
|
||||
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
|
||||
v, err := ToInt64E(s)
|
||||
if err != nil {
|
||||
// TODO: once there is better error handling, this should be easier
|
||||
return 0, errors.New(strings.ReplaceAll(err.Error(), " int64", "time.Duration"))
|
||||
}
|
||||
|
||||
return time.Duration(v), nil
|
||||
case float32, float64, float64EProvider, float64Provider:
|
||||
v, err := ToFloat64E(s)
|
||||
if err != nil {
|
||||
// TODO: once there is better error handling, this should be easier
|
||||
return 0, errors.New(strings.ReplaceAll(err.Error(), " float64", "time.Duration"))
|
||||
}
|
||||
|
||||
return time.Duration(v), nil
|
||||
case string:
|
||||
if !strings.ContainsAny(s, "nsuµmh") {
|
||||
return time.ParseDuration(s + "ns")
|
||||
}
|
||||
|
||||
return time.ParseDuration(s)
|
||||
case nil:
|
||||
return time.Duration(0), nil
|
||||
default:
|
||||
if i, ok := resolveAlias(i); ok {
|
||||
return ToDurationE(i)
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf(errorMsg, i, i, time.Duration(0))
|
||||
}
|
||||
}
|
||||
|
||||
// StringToDate attempts to parse a string into a [time.Time] type using a
|
||||
// predefined list of formats.
|
||||
//
|
||||
// If no suitable format is found, an error is returned.
|
||||
func StringToDate(s string) (time.Time, error) {
|
||||
return internal.ParseDateWith(s, time.UTC, internal.TimeFormats)
|
||||
}
|
||||
|
||||
// StringToDateInDefaultLocation attempts to parse a string into a [time.Time] type,
|
||||
// interpreting inputs without a timezone to be in the given location,
|
||||
// or the local timezone if nil.
|
||||
func StringToDateInDefaultLocation(s string, location *time.Location) (time.Time, error) {
|
||||
return internal.ParseDateWith(s, location, internal.TimeFormats)
|
||||
}
|
||||
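Putting the time helpers together: strings are tried against `internal.TimeFormats`, integers are treated as Unix seconds, and unit-less duration strings are read as nanoseconds. A usage sketch (not part of the vendored code; values invented):

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/cast"
)

func main() {
	// RFC 3339 strings parse directly; formats without a zone default to UTC.
	t := cast.ToTime("2006-01-02T15:04:05Z")
	fmt.Println(t.Year(), t.Location()) // 2006 UTC

	// Integers are treated as Unix timestamps in seconds.
	fmt.Println(cast.ToTime(0).UTC()) // 1970-01-01 00:00:00 +0000 UTC

	// A unit-less numeric string becomes nanoseconds, a suffixed one is parsed normally.
	fmt.Println(cast.ToDuration("100"))  // 100ns
	fmt.Println(cast.ToDuration("1h5m")) // 1h5m0s

	// An existing time.Duration passes straight through.
	fmt.Println(cast.ToDuration(1500 * time.Millisecond)) // 1.5s
}
```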
261 vendor/github.com/spf13/cast/zz_generated.go (generated, vendored, Normal file)
@@ -0,0 +1,261 @@
|
||||
// Code generated by cast generator. DO NOT EDIT.
|
||||
|
||||
package cast
|
||||
|
||||
import "time"
|
||||
|
||||
// ToBool casts any value to a(n) bool type.
|
||||
func ToBool(i any) bool {
|
||||
v, _ := ToBoolE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToString casts any value to a(n) string type.
|
||||
func ToString(i any) string {
|
||||
v, _ := ToStringE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToTime casts any value to a(n) time.Time type.
|
||||
func ToTime(i any) time.Time {
|
||||
v, _ := ToTimeE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToTimeInDefaultLocation casts any value to a(n) time.Time type.
|
||||
func ToTimeInDefaultLocation(i any, location *time.Location) time.Time {
|
||||
v, _ := ToTimeInDefaultLocationE(i, location)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToDuration casts any value to a(n) time.Duration type.
|
||||
func ToDuration(i any) time.Duration {
|
||||
v, _ := ToDurationE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToInt casts any value to a(n) int type.
|
||||
func ToInt(i any) int {
|
||||
v, _ := ToIntE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToInt8 casts any value to a(n) int8 type.
|
||||
func ToInt8(i any) int8 {
|
||||
v, _ := ToInt8E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToInt16 casts any value to a(n) int16 type.
|
||||
func ToInt16(i any) int16 {
|
||||
v, _ := ToInt16E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToInt32 casts any value to a(n) int32 type.
|
||||
func ToInt32(i any) int32 {
|
||||
v, _ := ToInt32E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToInt64 casts any value to a(n) int64 type.
|
||||
func ToInt64(i any) int64 {
|
||||
v, _ := ToInt64E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToUint casts any value to a(n) uint type.
|
||||
func ToUint(i any) uint {
|
||||
v, _ := ToUintE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToUint8 casts any value to a(n) uint8 type.
|
||||
func ToUint8(i any) uint8 {
|
||||
v, _ := ToUint8E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToUint16 casts any value to a(n) uint16 type.
|
||||
func ToUint16(i any) uint16 {
|
||||
v, _ := ToUint16E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToUint32 casts any value to a(n) uint32 type.
|
||||
func ToUint32(i any) uint32 {
|
||||
v, _ := ToUint32E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToUint64 casts any value to a(n) uint64 type.
|
||||
func ToUint64(i any) uint64 {
|
||||
v, _ := ToUint64E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToFloat32 casts any value to a(n) float32 type.
|
||||
func ToFloat32(i any) float32 {
|
||||
v, _ := ToFloat32E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToFloat64 casts any value to a(n) float64 type.
|
||||
func ToFloat64(i any) float64 {
|
||||
v, _ := ToFloat64E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToStringMapString casts any value to a(n) map[string]string type.
|
||||
func ToStringMapString(i any) map[string]string {
|
||||
v, _ := ToStringMapStringE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToStringMapStringSlice casts any value to a(n) map[string][]string type.
|
||||
func ToStringMapStringSlice(i any) map[string][]string {
|
||||
v, _ := ToStringMapStringSliceE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToStringMapBool casts any value to a(n) map[string]bool type.
|
||||
func ToStringMapBool(i any) map[string]bool {
|
||||
v, _ := ToStringMapBoolE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToStringMapInt casts any value to a(n) map[string]int type.
|
||||
func ToStringMapInt(i any) map[string]int {
|
||||
v, _ := ToStringMapIntE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToStringMapInt64 casts any value to a(n) map[string]int64 type.
|
||||
func ToStringMapInt64(i any) map[string]int64 {
|
||||
v, _ := ToStringMapInt64E(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToStringMap casts any value to a(n) map[string]any type.
|
||||
func ToStringMap(i any) map[string]any {
|
||||
v, _ := ToStringMapE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToSlice casts any value to a(n) []any type.
|
||||
func ToSlice(i any) []any {
|
||||
v, _ := ToSliceE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToBoolSlice casts any value to a(n) []bool type.
|
||||
func ToBoolSlice(i any) []bool {
|
||||
v, _ := ToBoolSliceE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToStringSlice casts any value to a(n) []string type.
|
||||
func ToStringSlice(i any) []string {
|
||||
v, _ := ToStringSliceE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToIntSlice casts any value to a(n) []int type.
|
||||
func ToIntSlice(i any) []int {
|
||||
v, _ := ToIntSliceE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToInt64Slice casts any value to a(n) []int64 type.
|
||||
func ToInt64Slice(i any) []int64 {
|
||||
v, _ := ToInt64SliceE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToUintSlice casts any value to a(n) []uint type.
|
||||
func ToUintSlice(i any) []uint {
|
||||
v, _ := ToUintSliceE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToFloat64Slice casts any value to a(n) []float64 type.
|
||||
func ToFloat64Slice(i any) []float64 {
|
||||
v, _ := ToFloat64SliceE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToDurationSlice casts any value to a(n) []time.Duration type.
|
||||
func ToDurationSlice(i any) []time.Duration {
|
||||
v, _ := ToDurationSliceE(i)
|
||||
return v
|
||||
}
|
||||
|
||||
// ToBoolSliceE casts any value to a(n) []bool type.
|
||||
func ToBoolSliceE(i any) ([]bool, error) {
|
||||
return toSliceE[bool](i)
|
||||
}
|
||||
|
||||
// ToDurationSliceE casts any value to a(n) []time.Duration type.
|
||||
func ToDurationSliceE(i any) ([]time.Duration, error) {
|
||||
return toSliceE[time.Duration](i)
|
||||
}
|
||||
|
||||
// ToIntSliceE casts any value to a(n) []int type.
|
||||
func ToIntSliceE(i any) ([]int, error) {
|
||||
return toSliceE[int](i)
|
||||
}
|
||||
|
||||
// ToInt8SliceE casts any value to a(n) []int8 type.
|
||||
func ToInt8SliceE(i any) ([]int8, error) {
|
||||
return toSliceE[int8](i)
|
||||
}
|
||||
|
||||
// ToInt16SliceE casts any value to a(n) []int16 type.
|
||||
func ToInt16SliceE(i any) ([]int16, error) {
|
||||
return toSliceE[int16](i)
|
||||
}
|
||||
|
||||
// ToInt32SliceE casts any value to a(n) []int32 type.
|
||||
func ToInt32SliceE(i any) ([]int32, error) {
|
||||
return toSliceE[int32](i)
|
||||
}
|
||||
|
||||
// ToInt64SliceE casts any value to a(n) []int64 type.
|
||||
func ToInt64SliceE(i any) ([]int64, error) {
|
||||
return toSliceE[int64](i)
|
||||
}
|
||||
|
||||
// ToUintSliceE casts any value to a(n) []uint type.
|
||||
func ToUintSliceE(i any) ([]uint, error) {
|
||||
return toSliceE[uint](i)
|
||||
}
|
||||
|
||||
// ToUint8SliceE casts any value to a(n) []uint8 type.
|
||||
func ToUint8SliceE(i any) ([]uint8, error) {
|
||||
return toSliceE[uint8](i)
|
||||
}
|
||||
|
||||
// ToUint16SliceE casts any value to a(n) []uint16 type.
|
||||
func ToUint16SliceE(i any) ([]uint16, error) {
|
||||
return toSliceE[uint16](i)
|
||||
}
|
||||
|
||||
// ToUint32SliceE casts any value to a(n) []uint32 type.
|
||||
func ToUint32SliceE(i any) ([]uint32, error) {
|
||||
return toSliceE[uint32](i)
|
||||
}
|
||||
|
||||
// ToUint64SliceE casts any value to a(n) []uint64 type.
|
||||
func ToUint64SliceE(i any) ([]uint64, error) {
|
||||
return toSliceE[uint64](i)
|
||||
}
|
||||
|
||||
// ToFloat32SliceE casts any value to a(n) []float32 type.
|
||||
func ToFloat32SliceE(i any) ([]float32, error) {
|
||||
return toSliceE[float32](i)
|
||||
}
|
||||
|
||||
// ToFloat64SliceE casts any value to a(n) []float64 type.
|
||||
func ToFloat64SliceE(i any) ([]float64, error) {
|
||||
return toSliceE[float64](i)
|
||||
}
|
||||
21 vendor/github.com/spf13/viper/.editorconfig (generated, vendored, Normal file)
@@ -0,0 +1,21 @@
root = true

[*]
charset = utf-8
end_of_line = lf
indent_size = 4
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true

[*.go]
indent_style = tab

[{Makefile,*.mk}]
indent_style = tab

[*.nix]
indent_size = 2

[.golangci.yaml]
indent_size = 2
8 vendor/github.com/spf13/viper/.gitignore (generated, vendored, Normal file)
@@ -0,0 +1,8 @@
/.devenv/
/.direnv/
/.idea/
/.pre-commit-config.yaml
/bin/
/build/
/var/
/vendor/
118 vendor/github.com/spf13/viper/.golangci.yaml (generated, vendored, Normal file)
@@ -0,0 +1,118 @@
|
||||
version: "2"
|
||||
|
||||
run:
|
||||
timeout: 5m
|
||||
|
||||
linters:
|
||||
enable:
|
||||
- bodyclose
|
||||
- dogsled
|
||||
- dupl
|
||||
- durationcheck
|
||||
- exhaustive
|
||||
- gocritic
|
||||
- godot
|
||||
- gomoddirectives
|
||||
- goprintffuncname
|
||||
- govet
|
||||
- importas
|
||||
- ineffassign
|
||||
- makezero
|
||||
- misspell
|
||||
- nakedret
|
||||
- nilerr
|
||||
- noctx
|
||||
- nolintlint
|
||||
- prealloc
|
||||
- predeclared
|
||||
- revive
|
||||
- rowserrcheck
|
||||
- sqlclosecheck
|
||||
- staticcheck
|
||||
- tparallel
|
||||
- unconvert
|
||||
- unparam
|
||||
- unused
|
||||
- wastedassign
|
||||
- whitespace
|
||||
|
||||
# fixme
|
||||
# - cyclop
|
||||
# - errcheck
|
||||
# - errorlint
|
||||
# - exhaustivestruct
|
||||
# - forbidigo
|
||||
# - forcetypeassert
|
||||
# - gochecknoglobals
|
||||
# - gochecknoinits
|
||||
# - gocognit
|
||||
# - goconst
|
||||
# - gocyclo
|
||||
# - gosec
|
||||
# - gosimple
|
||||
# - ifshort
|
||||
# - lll
|
||||
# - nlreturn
|
||||
# - paralleltest
|
||||
# - scopelint
|
||||
# - thelper
|
||||
# - wrapcheck
|
||||
|
||||
# unused
|
||||
# - depguard
|
||||
# - goheader
|
||||
# - gomodguard
|
||||
|
||||
# don't enable:
|
||||
# - asciicheck
|
||||
# - funlen
|
||||
# - godox
|
||||
# - goerr113
|
||||
# - gomnd
|
||||
# - interfacer
|
||||
# - maligned
|
||||
# - nestif
|
||||
# - testpackage
|
||||
# - wsl
|
||||
|
||||
exclusions:
|
||||
rules:
|
||||
- linters:
|
||||
- errcheck
|
||||
- noctx
|
||||
path: _test.go
|
||||
presets:
|
||||
- comments
|
||||
- std-error-handling
|
||||
|
||||
settings:
|
||||
misspell:
|
||||
locale: US
|
||||
nolintlint:
|
||||
allow-unused: false # report any unused nolint directives
|
||||
require-specific: false # don't require nolint directives to be specific about which linter is being skipped
|
||||
gocritic:
|
||||
# Enable multiple checks by tags. See "Tags" section in https://github.com/go-critic/go-critic#usage.
|
||||
enabled-tags:
|
||||
- diagnostic
|
||||
- experimental
|
||||
- opinionated
|
||||
- style
|
||||
disabled-checks:
|
||||
- importShadow
|
||||
- unnamedResult
|
||||
|
||||
formatters:
|
||||
enable:
|
||||
- gci
|
||||
- gofmt
|
||||
- gofumpt
|
||||
- goimports
|
||||
# - golines
|
||||
|
||||
settings:
|
||||
gci:
|
||||
sections:
|
||||
- standard
|
||||
- default
|
||||
- localmodule
|
||||
2 vendor/github.com/spf13/viper/.yamlignore (generated, vendored, Normal file)
@@ -0,0 +1,2 @@
# TODO: FIXME
/.github/
6 vendor/github.com/spf13/viper/.yamllint.yaml (generated, vendored, Normal file)
@@ -0,0 +1,6 @@
ignore-from-file: [.gitignore, .yamlignore]

extends: default

rules:
  line-length: disable
21 vendor/github.com/spf13/viper/LICENSE (generated, vendored, Normal file)
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Steve Francia
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
87 vendor/github.com/spf13/viper/Makefile (generated, vendored, Normal file)
@@ -0,0 +1,87 @@
|
||||
# A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html
|
||||
|
||||
OS = $(shell uname | tr A-Z a-z)
|
||||
export PATH := $(abspath bin/):${PATH}
|
||||
|
||||
# Build variables
|
||||
BUILD_DIR ?= build
|
||||
export CGO_ENABLED ?= 0
|
||||
export GOOS = $(shell go env GOOS)
|
||||
ifeq (${VERBOSE}, 1)
|
||||
ifeq ($(filter -v,${GOARGS}),)
|
||||
GOARGS += -v
|
||||
endif
|
||||
TEST_FORMAT = short-verbose
|
||||
endif
|
||||
|
||||
# Dependency versions
|
||||
GOTESTSUM_VERSION = 1.9.0
|
||||
GOLANGCI_VERSION = 1.53.3
|
||||
|
||||
# Add the ability to override some variables
|
||||
# Use with care
|
||||
-include override.mk
|
||||
|
||||
.PHONY: clear
|
||||
clear: ## Clear the working area and the project
|
||||
rm -rf bin/
|
||||
|
||||
.PHONY: check
|
||||
check: test lint ## Run tests and linters
|
||||
|
||||
|
||||
TEST_PKGS ?= ./...
|
||||
.PHONY: test
|
||||
test: TEST_FORMAT ?= short
|
||||
test: SHELL = /bin/bash
|
||||
test: export CGO_ENABLED=1
|
||||
test: bin/gotestsum ## Run tests
|
||||
@mkdir -p ${BUILD_DIR}
|
||||
bin/gotestsum --no-summary=skipped --junitfile ${BUILD_DIR}/coverage.xml --format ${TEST_FORMAT} -- -race -coverprofile=${BUILD_DIR}/coverage.txt -covermode=atomic $(filter-out -v,${GOARGS}) $(if ${TEST_PKGS},${TEST_PKGS},./...)
|
||||
|
||||
.PHONY: lint
|
||||
lint: lint-go lint-yaml
|
||||
lint: ## Run linters
|
||||
|
||||
.PHONY: lint-go
|
||||
lint-go:
|
||||
golangci-lint run $(if ${CI},--out-format github-actions,)
|
||||
|
||||
.PHONY: lint-yaml
|
||||
lint-yaml:
|
||||
yamllint $(if ${CI},-f github,) --no-warnings .
|
||||
|
||||
.PHONY: fmt
|
||||
fmt: ## Format code
|
||||
golangci-lint run --fix
|
||||
|
||||
deps: bin/golangci-lint bin/gotestsum yamllint
|
||||
deps: ## Install dependencies
|
||||
|
||||
bin/gotestsum:
|
||||
@mkdir -p bin
|
||||
curl -L https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_${OS}_amd64.tar.gz | tar -zOxf - gotestsum > ./bin/gotestsum && chmod +x ./bin/gotestsum
|
||||
|
||||
bin/golangci-lint:
|
||||
@mkdir -p bin
|
||||
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | bash -s -- v${GOLANGCI_VERSION}
|
||||
|
||||
.PHONY: yamllint
|
||||
yamllint:
|
||||
pip3 install --user yamllint
|
||||
|
||||
# Add custom targets here
|
||||
-include custom.mk
|
||||
|
||||
.PHONY: list
|
||||
list: ## List all make targets
|
||||
@${MAKE} -pRrn : -f $(MAKEFILE_LIST) 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | sort
|
||||
|
||||
.PHONY: help
|
||||
.DEFAULT_GOAL := help
|
||||
help:
|
||||
@grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
|
||||
|
||||
# Variable outputting/exporting rules
|
||||
var-%: ; @echo $($*)
|
||||
varexport-%: ; @echo $*=$($*)
|
||||
931 vendor/github.com/spf13/viper/README.md (generated, vendored, Normal file)
@@ -0,0 +1,931 @@
|
||||
> ## Viper v2 feedback
|
||||
> Viper is heading towards v2 and we would love to hear what _**you**_ would like to see in it. Share your thoughts here: https://forms.gle/R6faU74qPRPAzchZ9
|
||||
>
|
||||
> **Thank you!**
|
||||
|
||||

|
||||
|
||||
|
||||
[](https://github.com/avelino/awesome-go#configuration)
|
||||
[](https://repl.it/@sagikazarmark/Viper-example#main.go)
|
||||
|
||||
[](https://github.com/spf13/viper/actions?query=workflow%3ACI)
|
||||
[](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||
[](https://goreportcard.com/report/github.com/spf13/viper)
|
||||

|
||||
[](https://pkg.go.dev/mod/github.com/spf13/viper)
|
||||
|
||||
**Go configuration with fangs!**
|
||||
|
||||
Many Go projects are built using Viper including:
|
||||
|
||||
* [Hugo](http://gohugo.io)
|
||||
* [EMC RexRay](http://rexray.readthedocs.org/en/stable/)
|
||||
* [Imgur’s Incus](https://github.com/Imgur/incus)
|
||||
* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
|
||||
* [Docker Notary](https://github.com/docker/Notary)
|
||||
* [BloomApi](https://www.bloomapi.com/)
|
||||
* [doctl](https://github.com/digitalocean/doctl)
|
||||
* [Clairctl](https://github.com/jgsqware/clairctl)
|
||||
* [Mercure](https://mercure.rocks)
|
||||
* [Meshery](https://github.com/meshery/meshery)
|
||||
* [Bearer](https://github.com/bearer/bearer)
|
||||
* [Coder](https://github.com/coder/coder)
|
||||
* [Vitess](https://vitess.io/)
|
||||
|
||||
|
||||
## Install
|
||||
|
||||
```shell
|
||||
go get github.com/spf13/viper
|
||||
```
|
||||
|
||||
**Note:** Viper uses [Go Modules](https://go.dev/wiki/Modules) to manage dependencies.
|
||||
|
||||
|
||||
## What is Viper?
|
||||
|
||||
Viper is a complete configuration solution for Go applications including [12-Factor apps](https://12factor.net/#the_twelve_factors).
|
||||
It is designed to work within an application, and can handle all types of configuration needs
|
||||
and formats. It supports:
|
||||
|
||||
* setting defaults
|
||||
* reading from JSON, TOML, YAML, HCL, envfile and Java properties config files
|
||||
* live watching and re-reading of config files (optional)
|
||||
* reading from environment variables
|
||||
* reading from remote config systems (etcd or Consul), and watching changes
|
||||
* reading from command line flags
|
||||
* reading from buffer
|
||||
* setting explicit values
|
||||
|
||||
Viper can be thought of as a registry for all of your application's configuration needs.
|
||||
|
||||
|
||||
## Why Viper?
|
||||
|
||||
When building a modern application, you don’t want to worry about
|
||||
configuration file formats; you want to focus on building awesome software.
|
||||
Viper is here to help with that.
|
||||
|
||||
Viper does the following for you:
|
||||
|
||||
1. Find, load, and unmarshal a configuration file in JSON, TOML, YAML, HCL, INI, envfile or Java properties formats.
|
||||
2. Provide a mechanism to set default values for your different configuration options.
|
||||
3. Provide a mechanism to set override values for options specified through command line flags.
|
||||
4. Provide an alias system to easily rename parameters without breaking existing code.
|
||||
5. Make it easy to tell the difference between when a user has provided a command line or config file which is the same as the default.
|
||||
|
||||
Viper uses the following precedence order. Each item takes precedence over the item below it:
|
||||
|
||||
* explicit call to `Set`
|
||||
* flag
|
||||
* env
|
||||
* config
|
||||
* key/value store
|
||||
* default
|
||||
|
||||
**Important:** Viper configuration keys are case insensitive.
|
||||
There are ongoing discussions about making that optional.
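As a minimal sketch of that precedence (illustrative only; the key name, env prefix and values are invented), an explicit `Set` wins over a bound environment variable, which in turn wins over a default:

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/viper"
)

func main() {
	viper.SetDefault("port", 3000)

	os.Setenv("MYAPP_PORT", "8080")
	viper.SetEnvPrefix("myapp")
	viper.BindEnv("port")

	fmt.Println(viper.GetInt("port")) // 8080: the bound env var beats the default

	viper.Set("port", 9090)
	fmt.Println(viper.GetInt("port")) // 9090: an explicit Set beats everything else
}
```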
|
||||
|
||||
|
||||
## Putting Values into Viper
|
||||
|
||||
### Establishing Defaults
|
||||
|
||||
A good configuration system will support default values. A default value is not
|
||||
required for a key, but it’s useful in the event that a key hasn't been set via
|
||||
config file, environment variable, remote configuration or flag.
|
||||
|
||||
Examples:
|
||||
|
||||
```go
|
||||
viper.SetDefault("ContentDir", "content")
|
||||
viper.SetDefault("LayoutDir", "layouts")
|
||||
viper.SetDefault("Taxonomies", map[string]string{"tag": "tags", "category": "categories"})
|
||||
```
|
||||
|
||||
### Reading Config Files
|
||||
|
||||
Viper requires minimal configuration so it knows where to look for config files.
|
||||
Viper supports JSON, TOML, YAML, HCL, INI, envfile and Java Properties files. Viper can search multiple paths, but
|
||||
currently a single Viper instance only supports a single configuration file.
|
||||
Viper does not default to any configuration search paths, leaving that decision
to the application.
|
||||
|
||||
Here is an example of how to use Viper to search for and read a configuration file.
|
||||
None of the specific paths are required, but at least one path should be provided
|
||||
where a configuration file is expected.
|
||||
|
||||
```go
|
||||
viper.SetConfigName("config") // name of config file (without extension)
|
||||
viper.SetConfigType("yaml") // REQUIRED if the config file does not have the extension in the name
|
||||
viper.AddConfigPath("/etc/appname/") // path to look for the config file in
|
||||
viper.AddConfigPath("$HOME/.appname") // call multiple times to add many search paths
|
||||
viper.AddConfigPath(".") // optionally look for config in the working directory
|
||||
err := viper.ReadInConfig() // Find and read the config file
|
||||
if err != nil { // Handle errors reading the config file
|
||||
panic(fmt.Errorf("fatal error config file: %w", err))
|
||||
}
|
||||
```
|
||||
|
||||
You can handle the specific case where no config file is found like this:
|
||||
|
||||
```go
|
||||
if err := viper.ReadInConfig(); err != nil {
|
||||
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
|
||||
// Config file not found; ignore error if desired
|
||||
} else {
|
||||
// Config file was found but another error was produced
|
||||
}
|
||||
}
|
||||
|
||||
// Config file found and successfully parsed
|
||||
```
|
||||
|
||||
*NOTE [since 1.6]:* You can also use a file without an extension and specify the format programmatically. This is handy for configuration files that live in the user's home directory without an extension, such as `.bashrc`.
|
||||
|
||||
### Writing Config Files
|
||||
|
||||
Reading from config files is useful, but at times you want to store all modifications made at run time.
|
||||
For that, a bunch of commands are available, each with its own purpose:
|
||||
|
||||
* WriteConfig - writes the current viper configuration to the predefined path, if exists. Errors if no predefined path. Will overwrite the current config file, if it exists.
|
||||
* SafeWriteConfig - writes the current viper configuration to the predefined path. Errors if no predefined path. Will not overwrite the current config file, if it exists.
|
||||
* WriteConfigAs - writes the current viper configuration to the given filepath. Will overwrite the given file, if it exists.
|
||||
* SafeWriteConfigAs - writes the current viper configuration to the given filepath. Will not overwrite the given file, if it exists.
|
||||
|
||||
As a rule of thumb, everything marked safe won't overwrite an existing file; it only creates the file if it does not exist. The default behavior is to create or truncate.

A short example:
|
||||
|
||||
```go
|
||||
viper.WriteConfig() // writes current config to predefined path set by 'viper.AddConfigPath()' and 'viper.SetConfigName'
|
||||
viper.SafeWriteConfig()
|
||||
viper.WriteConfigAs("/path/to/my/.config")
|
||||
viper.SafeWriteConfigAs("/path/to/my/.config") // will error since it has already been written
|
||||
viper.SafeWriteConfigAs("/path/to/my/.other_config")
|
||||
```
|
||||
|
||||
### Watching and re-reading config files
|
||||
|
||||
Viper supports live reading of a config file while your application is running.

Gone are the days of needing to restart a server to have a config take effect;
viper-powered applications can pick up an update to a config file while running and
not miss a beat.
|
||||
|
||||
Simply tell the viper instance to `WatchConfig()`.
Optionally you can provide a function for Viper to run each time a change occurs.
|
||||
|
||||
**Make sure you add all of the configPaths prior to calling `WatchConfig()`**
|
||||
|
||||
```go
|
||||
viper.OnConfigChange(func(e fsnotify.Event) {
|
||||
fmt.Println("Config file changed:", e.Name)
|
||||
})
|
||||
viper.WatchConfig()
|
||||
```
|
||||
|
||||
### Reading Config from io.Reader
|
||||
|
||||
Viper predefines many configuration sources such as files, environment
|
||||
variables, flags, and remote K/V store, but you are not bound to them. You can
|
||||
also implement your own required configuration source and feed it to viper.
|
||||
|
||||
```go
|
||||
viper.SetConfigType("yaml") // or viper.SetConfigType("YAML")
|
||||
|
||||
// any approach to load this configuration into your program will work.
|
||||
var yamlExample = []byte(`
|
||||
Hacker: true
|
||||
name: steve
|
||||
hobbies:
|
||||
- skateboarding
|
||||
- snowboarding
|
||||
- go
|
||||
clothing:
|
||||
jacket: leather
|
||||
trousers: denim
|
||||
age: 35
|
||||
eyes : brown
|
||||
beard: true
|
||||
`)
|
||||
|
||||
viper.ReadConfig(bytes.NewBuffer(yamlExample))
|
||||
|
||||
viper.Get("name") // this would be "steve"
|
||||
```
|
||||
|
||||
### Setting Overrides
|
||||
|
||||
These could be from a command line flag, or from your own application logic.
|
||||
|
||||
```go
|
||||
viper.Set("Verbose", true)
|
||||
viper.Set("LogFile", LogFile)
|
||||
viper.Set("host.port", 5899) // set subset
|
||||
```
|
||||
|
||||
### Registering and Using Aliases
|
||||
|
||||
Aliases permit a single value to be referenced by multiple keys
|
||||
|
||||
```go
|
||||
viper.RegisterAlias("loud", "Verbose")
|
||||
|
||||
viper.Set("verbose", true) // same result as next line
|
||||
viper.Set("loud", true) // same result as prior line
|
||||
|
||||
viper.GetBool("loud") // true
|
||||
viper.GetBool("verbose") // true
|
||||
```
|
||||
|
||||
### Working with Environment Variables
|
||||
|
||||
Viper has full support for environment variables. This enables 12 factor
|
||||
applications out of the box. There are five methods that exist to aid working
|
||||
with ENV:
|
||||
|
||||
* `AutomaticEnv()`
|
||||
* `BindEnv(string...) : error`
|
||||
* `SetEnvPrefix(string)`
|
||||
* `SetEnvKeyReplacer(string...) *strings.Replacer`
|
||||
* `AllowEmptyEnv(bool)`
|
||||
|
||||
_When working with ENV variables, it’s important to recognize that Viper
|
||||
treats ENV variables as case sensitive._
|
||||
|
||||
Viper provides a mechanism to try to ensure that ENV variables are unique. By
|
||||
using `SetEnvPrefix`, you can tell Viper to use a prefix while reading from
|
||||
the environment variables. Both `BindEnv` and `AutomaticEnv` will use this
|
||||
prefix.
|
||||
|
||||
`BindEnv` takes one or more parameters. The first parameter is the key name, the
rest are the names of the environment variables to bind to this key. If more than
one is provided, they take precedence in the specified order. The name of
the environment variable is case sensitive. If the ENV variable name is not provided,
Viper assumes that the ENV variable matches the format prefix + "_" + the key name in ALL CAPS. When you explicitly provide the ENV variable name (the second parameter),
it **does not** automatically add the prefix. For example, if the second parameter is "id",
Viper will look for the ENV variable "ID".
|
||||
|
||||
One important thing to recognize when working with ENV variables is that the
|
||||
value will be read each time it is accessed. Viper does not fix the value when
|
||||
the `BindEnv` is called.
|
||||
|
||||
`AutomaticEnv` is a powerful helper, especially when combined with
`SetEnvPrefix`. When called, Viper checks for an environment variable any
time a `viper.Get` request is made, using a name matching the key uppercased
and prefixed with the `EnvPrefix` (if set).
|
||||
|
||||
`SetEnvKeyReplacer` allows you to use a `strings.Replacer` object to rewrite Env
keys to an extent. This is useful if you want to use `-` or similar in your
`Get()` calls, but want your environment variables to use `_` delimiters. An
example of using it can be found in `viper_test.go`.

Alternatively, you can use `EnvKeyReplacer` with the `NewWithOptions` factory function.
Unlike `SetEnvKeyReplacer`, it accepts a `StringReplacer` interface, allowing you to write custom string replacing logic.
|
||||
|
||||
By default empty environment variables are considered unset and will fall back to
|
||||
the next configuration source. To treat empty environment variables as set, use
|
||||
the `AllowEmptyEnv` method.
|
||||
|
||||
#### Env example
|
||||
|
||||
```go
|
||||
SetEnvPrefix("spf") // will be uppercased automatically
|
||||
BindEnv("id")
|
||||
|
||||
os.Setenv("SPF_ID", "13") // typically done outside of the app
|
||||
|
||||
id := Get("id") // 13
|
||||
```
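As a complementary sketch (the `db.host` key and the `SPF_DB_HOST` variable are illustrative), `SetEnvKeyReplacer` and `AllowEmptyEnv` combine with `AutomaticEnv` like this:

```go
v := viper.New()
v.SetEnvPrefix("spf")
v.SetEnvKeyReplacer(strings.NewReplacer("-", "_", ".", "_"))
v.AllowEmptyEnv(true) // treat empty-but-set variables as set
v.AutomaticEnv()

os.Setenv("SPF_DB_HOST", "localhost") // typically done outside of the app

host := v.GetString("db.host") // "localhost", resolved via the replacer
```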
|
||||
|
||||
### Working with Flags
|
||||
|
||||
Viper has the ability to bind to flags. Specifically, Viper supports `Pflags`
|
||||
as used in the [Cobra](https://github.com/spf13/cobra) library.
|
||||
|
||||
Like `BindEnv`, the value is not set when the binding method is called, but when
|
||||
it is accessed. This means you can bind as early as you want, even in an
|
||||
`init()` function.
|
||||
|
||||
For individual flags, the `BindPFlag()` method provides this functionality.
|
||||
|
||||
Example:
|
||||
|
||||
```go
|
||||
serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
|
||||
viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
|
||||
```
|
||||
|
||||
You can also bind an existing set of pflags (pflag.FlagSet):
|
||||
|
||||
Example:
|
||||
|
||||
```go
|
||||
pflag.Int("flagname", 1234, "help message for flagname")
|
||||
|
||||
pflag.Parse()
|
||||
viper.BindPFlags(pflag.CommandLine)
|
||||
|
||||
i := viper.GetInt("flagname") // retrieve values from viper instead of pflag
|
||||
```
|
||||
|
||||
The use of [pflag](https://github.com/spf13/pflag/) in Viper does not preclude
|
||||
the use of other packages that use the [flag](https://golang.org/pkg/flag/)
|
||||
package from the standard library. The pflag package can handle the flags
|
||||
defined for the flag package by importing these flags. This is accomplished
|
||||
by calling a convenience function provided by the pflag package called
|
||||
AddGoFlagSet().
|
||||
|
||||
Example:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
// using standard library "flag" package
|
||||
flag.Int("flagname", 1234, "help message for flagname")
|
||||
|
||||
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
|
||||
pflag.Parse()
|
||||
viper.BindPFlags(pflag.CommandLine)
|
||||
|
||||
i := viper.GetInt("flagname") // retrieve value from viper
|
||||
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
#### Flag interfaces
|
||||
|
||||
Viper provides two Go interfaces to bind other flag systems if you don’t use `Pflags`.
|
||||
|
||||
`FlagValue` represents a single flag. This is a very simple example of how to implement this interface:
|
||||
|
||||
```go
|
||||
type myFlag struct {}
|
||||
func (f myFlag) HasChanged() bool { return false }
|
||||
func (f myFlag) Name() string { return "my-flag-name" }
|
||||
func (f myFlag) ValueString() string { return "my-flag-value" }
|
||||
func (f myFlag) ValueType() string { return "string" }
|
||||
```
|
||||
|
||||
Once your flag implements this interface, you can simply tell Viper to bind it:
|
||||
|
||||
```go
|
||||
viper.BindFlagValue("my-flag-name", myFlag{})
|
||||
```
|
||||
|
||||
`FlagValueSet` represents a group of flags. This is a very simple example of how to implement this interface:
|
||||
|
||||
```go
|
||||
type myFlagSet struct {
|
||||
flags []myFlag
|
||||
}
|
||||
|
||||
func (f myFlagSet) VisitAll(fn func(FlagValue)) {
|
||||
	for _, flag := range f.flags {
|
||||
fn(flag)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Once your flag set implements this interface, you can simply tell Viper to bind it:
|
||||
|
||||
```go
|
||||
fSet := myFlagSet{
|
||||
flags: []myFlag{myFlag{}, myFlag{}},
|
||||
}
|
||||
viper.BindFlagValues("my-flags", fSet)
|
||||
```
|
||||
|
||||
### Remote Key/Value Store Support
|
||||
|
||||
To enable remote support in Viper, do a blank import of the `viper/remote`
|
||||
package:
|
||||
|
||||
`import _ "github.com/spf13/viper/remote"`
|
||||
|
||||
Viper will read a config string (as JSON, TOML, YAML, HCL or envfile) retrieved from a path
|
||||
in a Key/Value store such as etcd or Consul. These values take precedence over
|
||||
default values, but are overridden by configuration values retrieved from disk,
|
||||
flags, or environment variables.
|
||||
|
||||
Viper supports multiple hosts. To use, pass a list of endpoints separated by `;`. For example `http://127.0.0.1:4001;http://127.0.0.1:4002`.
|
||||
|
||||
Viper uses [crypt](https://github.com/sagikazarmark/crypt) to retrieve
|
||||
configuration from the K/V store, which means that you can store your
|
||||
configuration values encrypted and have them automatically decrypted if you have
|
||||
the correct gpg keyring. Encryption is optional.
|
||||
|
||||
You can use remote configuration in conjunction with local configuration, or
|
||||
independently of it.
|
||||
|
||||
`crypt` has a command-line helper that you can use to put configurations in your
|
||||
K/V store. `crypt` defaults to etcd on http://127.0.0.1:4001.
|
||||
|
||||
```bash
|
||||
$ go get github.com/sagikazarmark/crypt/bin/crypt
|
||||
$ crypt set -plaintext /config/hugo.json /Users/hugo/settings/config.json
|
||||
```
|
||||
|
||||
Confirm that your value was set:
|
||||
|
||||
```bash
|
||||
$ crypt get -plaintext /config/hugo.json
|
||||
```
|
||||
|
||||
See the `crypt` documentation for examples of how to set encrypted values, or
|
||||
how to use Consul.
|
||||
|
||||
### Remote Key/Value Store Example - Unencrypted
|
||||
|
||||
#### etcd
|
||||
```go
|
||||
viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001","/config/hugo.json")
|
||||
viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv"
|
||||
err := viper.ReadRemoteConfig()
|
||||
```
|
||||
|
||||
#### etcd3
|
||||
```go
|
||||
viper.AddRemoteProvider("etcd3", "http://127.0.0.1:4001","/config/hugo.json")
|
||||
viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv"
|
||||
err := viper.ReadRemoteConfig()
|
||||
```
|
||||
|
||||
#### Consul
|
||||
You need to set a key in the Consul key/value store with a JSON value containing your desired config.
|
||||
For example, create a Consul key/value store key `MY_CONSUL_KEY` with value:
|
||||
|
||||
```json
|
||||
{
|
||||
"port": 8080,
|
||||
"hostname": "myhostname.com"
|
||||
}
|
||||
```
|
||||
|
||||
```go
|
||||
viper.AddRemoteProvider("consul", "localhost:8500", "MY_CONSUL_KEY")
|
||||
viper.SetConfigType("json") // Need to explicitly set this to json
|
||||
err := viper.ReadRemoteConfig()
|
||||
|
||||
fmt.Println(viper.Get("port")) // 8080
|
||||
fmt.Println(viper.Get("hostname")) // myhostname.com
|
||||
```
|
||||
|
||||
#### Firestore
|
||||
|
||||
```go
|
||||
viper.AddRemoteProvider("firestore", "google-cloud-project-id", "collection/document")
|
||||
viper.SetConfigType("json") // Config's format: "json", "toml", "yaml", "yml"
|
||||
err := viper.ReadRemoteConfig()
|
||||
```
|
||||
|
||||
Of course, you can also use `AddSecureRemoteProvider` (see the encrypted example below).
|
||||
|
||||
|
||||
#### NATS
|
||||
|
||||
```go
|
||||
viper.AddRemoteProvider("nats", "nats://127.0.0.1:4222", "myapp.config")
|
||||
viper.SetConfigType("json")
|
||||
err := viper.ReadRemoteConfig()
|
||||
```
|
||||
|
||||
### Remote Key/Value Store Example - Encrypted
|
||||
|
||||
```go
|
||||
viper.AddSecureRemoteProvider("etcd","http://127.0.0.1:4001","/config/hugo.json","/etc/secrets/mykeyring.gpg")
|
||||
viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv"
|
||||
err := viper.ReadRemoteConfig()
|
||||
```
|
||||
|
||||
### Watching Changes in etcd - Unencrypted
|
||||
|
||||
```go
|
||||
// alternatively, you can create a new viper instance.
|
||||
var runtime_viper = viper.New()
|
||||
|
||||
runtime_viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/hugo.yml")
|
||||
runtime_viper.SetConfigType("yaml") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv"
|
||||
|
||||
// read from remote config the first time.
|
||||
err := runtime_viper.ReadRemoteConfig()
|
||||
|
||||
// unmarshal config
|
||||
runtime_viper.Unmarshal(&runtime_conf)
|
||||
|
||||
// open a goroutine to watch remote changes forever
|
||||
go func(){
|
||||
for {
|
||||
time.Sleep(time.Second * 5) // delay after each request
|
||||
|
||||
// currently, only tested with etcd support
|
||||
err := runtime_viper.WatchRemoteConfig()
|
||||
if err != nil {
|
||||
log.Errorf("unable to read remote config: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// unmarshal new config into our runtime config struct. you can also use channel
|
||||
// to implement a signal to notify the system of the changes
|
||||
runtime_viper.Unmarshal(&runtime_conf)
|
||||
}
|
||||
}()
|
||||
```
|
||||
|
||||
## Getting Values From Viper
|
||||
|
||||
In Viper, there are a few ways to get a value depending on the value’s type.
|
||||
The following functions and methods exist:
|
||||
|
||||
* `Get(key string) : any`
|
||||
* `GetBool(key string) : bool`
|
||||
* `GetFloat64(key string) : float64`
|
||||
* `GetInt(key string) : int`
|
||||
* `GetIntSlice(key string) : []int`
|
||||
* `GetString(key string) : string`
|
||||
* `GetStringMap(key string) : map[string]any`
|
||||
* `GetStringMapString(key string) : map[string]string`
|
||||
* `GetStringSlice(key string) : []string`
|
||||
* `GetTime(key string) : time.Time`
|
||||
* `GetDuration(key string) : time.Duration`
|
||||
* `IsSet(key string) : bool`
|
||||
* `AllSettings() : map[string]any`
|
||||
|
||||
One important thing to recognize is that each Get function will return a zero
|
||||
value if it’s not found. To check if a given key exists, the `IsSet()` method
|
||||
has been provided.
|
||||
|
||||
The zero value will also be returned if the value is set, but fails to parse
|
||||
as the requested type.
|
||||
|
||||
Example:
|
||||
```go
|
||||
viper.GetString("logfile") // case-insensitive Setting & Getting
|
||||
if viper.GetBool("verbose") {
|
||||
fmt.Println("verbose enabled")
|
||||
}
|
||||
```
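To distinguish an unset key from one explicitly set to the zero value, check `IsSet` first; a tiny sketch:

```go
if !viper.IsSet("logfile") {
	fmt.Println("no logfile configured, logging to stdout")
}
```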
|
||||
### Accessing nested keys
|
||||
|
||||
The accessor methods also accept formatted paths to deeply nested keys. For
|
||||
example, if the following JSON file is loaded:
|
||||
|
||||
```json
|
||||
{
|
||||
"host": {
|
||||
"address": "localhost",
|
||||
"port": 5799
|
||||
},
|
||||
"datastore": {
|
||||
"metric": {
|
||||
"host": "127.0.0.1",
|
||||
"port": 3099
|
||||
},
|
||||
"warehouse": {
|
||||
"host": "198.0.0.1",
|
||||
"port": 2112
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
Viper can access a nested field by passing a `.` delimited path of keys:
|
||||
|
||||
```go
|
||||
GetString("datastore.metric.host") // (returns "127.0.0.1")
|
||||
```
|
||||
|
||||
This obeys the precedence rules established above; the search for the path
|
||||
will cascade through the remaining configuration registries until found.
|
||||
|
||||
For example, given this configuration file, both `datastore.metric.host` and
|
||||
`datastore.metric.port` are already defined (and may be overridden). If in addition
|
||||
`datastore.metric.protocol` was defined in the defaults, Viper would also find it.
|
||||
|
||||
However, if `datastore.metric` was overridden (by a flag, an environment variable,
the `Set()` method, …) with an immediate value, then all sub-keys of
`datastore.metric` become undefined; they are “shadowed” by the higher-priority
configuration level.
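A small sketch of that shadowing behavior, reusing the configuration above (the override value is illustrative):

```go
viper.GetString("datastore.metric.host") // "127.0.0.1", from the config file

// overriding the parent key with an immediate value...
viper.Set("datastore.metric", "redis://metrics.local")

// ...shadows all of its former sub-keys
viper.GetString("datastore.metric.host") // "" (undefined)
```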
|
||||
|
||||
Viper can access array indices by using numbers in the path. For example:
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"host": {
|
||||
"address": "localhost",
|
||||
"ports": [
|
||||
5799,
|
||||
6029
|
||||
]
|
||||
},
|
||||
"datastore": {
|
||||
"metric": {
|
||||
"host": "127.0.0.1",
|
||||
"port": 3099
|
||||
},
|
||||
"warehouse": {
|
||||
"host": "198.0.0.1",
|
||||
"port": 2112
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
GetInt("host.ports.1") // returns 6029
|
||||
|
||||
```
|
||||
|
||||
Lastly, if there exists a key that matches the delimited key path, its value
|
||||
will be returned instead. E.g.
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"datastore.metric.host": "0.0.0.0",
|
||||
"host": {
|
||||
"address": "localhost",
|
||||
"port": 5799
|
||||
},
|
||||
"datastore": {
|
||||
"metric": {
|
||||
"host": "127.0.0.1",
|
||||
"port": 3099
|
||||
},
|
||||
"warehouse": {
|
||||
"host": "198.0.0.1",
|
||||
"port": 2112
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
GetString("datastore.metric.host") // returns "0.0.0.0"
|
||||
```
|
||||
|
||||
### Extracting a sub-tree
|
||||
|
||||
When developing reusable modules, it's often useful to extract a subset of the configuration
|
||||
and pass it to a module. This way the module can be instantiated more than once, with different configurations.
|
||||
|
||||
For example, an application might use multiple different cache stores for different purposes:
|
||||
|
||||
```yaml
|
||||
cache:
|
||||
cache1:
|
||||
max-items: 100
|
||||
item-size: 64
|
||||
cache2:
|
||||
max-items: 200
|
||||
item-size: 80
|
||||
```
|
||||
|
||||
We could pass the cache name to a module (eg. `NewCache("cache1")`),
but that would require awkward key concatenation when accessing config values and would couple the module to the global config.
|
||||
|
||||
So instead of doing that let's pass a Viper instance to the constructor that represents a subset of the configuration:
|
||||
|
||||
```go
|
||||
cache1Config := viper.Sub("cache.cache1")
|
||||
if cache1Config == nil { // Sub returns nil if the key cannot be found
|
||||
panic("cache configuration not found")
|
||||
}
|
||||
|
||||
cache1 := NewCache(cache1Config)
|
||||
```
|
||||
|
||||
**Note:** Always check the return value of `Sub`. It returns `nil` if a key cannot be found.
|
||||
|
||||
Internally, the `NewCache` function can address `max-items` and `item-size` keys directly:
|
||||
|
||||
```go
|
||||
func NewCache(v *Viper) *Cache {
|
||||
return &Cache{
|
||||
MaxItems: v.GetInt("max-items"),
|
||||
ItemSize: v.GetInt("item-size"),
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The resulting code is easy to test, since it's decoupled from the main config structure,
|
||||
and easier to reuse (for the same reason).
|
||||
|
||||
|
||||
### Unmarshaling
|
||||
|
||||
You also have the option of unmarshaling all values, or a specific value, into a struct, map,
etc.
|
||||
|
||||
There are two methods to do this:
|
||||
|
||||
* `Unmarshal(rawVal any) : error`
|
||||
* `UnmarshalKey(key string, rawVal any) : error`
|
||||
|
||||
Example:
|
||||
|
||||
```go
|
||||
type config struct {
|
||||
Port int
|
||||
Name string
|
||||
PathMap string `mapstructure:"path_map"`
|
||||
}
|
||||
|
||||
var C config
|
||||
|
||||
err := viper.Unmarshal(&C)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to decode into struct, %v", err)
|
||||
}
|
||||
```
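`UnmarshalKey` works the same way for a single section of the configuration; a brief sketch, reusing the `datastore.metric` key from the nested-keys example:

```go
type metricConfig struct {
	Host string
	Port int
}

var mc metricConfig

if err := viper.UnmarshalKey("datastore.metric", &mc); err != nil {
	log.Fatalf("unable to decode metric config: %v", err)
}
```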
|
||||
|
||||
If you want to unmarshal configuration where the keys themselves contain a dot (the default key delimiter),
|
||||
you have to change the delimiter:
|
||||
|
||||
```go
|
||||
v := viper.NewWithOptions(viper.KeyDelimiter("::"))
|
||||
|
||||
v.SetDefault("chart::values", map[string]any{
|
||||
"ingress": map[string]any{
|
||||
"annotations": map[string]any{
|
||||
"traefik.frontend.rule.type": "PathPrefix",
|
||||
"traefik.ingress.kubernetes.io/ssl-redirect": "true",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
type config struct {
|
||||
Chart struct{
|
||||
Values map[string]any
|
||||
}
|
||||
}
|
||||
|
||||
var C config
|
||||
|
||||
v.Unmarshal(&C)
|
||||
```
|
||||
|
||||
Viper also supports unmarshaling into embedded structs:
|
||||
|
||||
```go
|
||||
/*
|
||||
Example config:
|
||||
|
||||
module:
|
||||
enabled: true
|
||||
token: 89h3f98hbwf987h3f98wenf89ehf
|
||||
*/
|
||||
type config struct {
|
||||
Module struct {
|
||||
Enabled bool
|
||||
|
||||
moduleConfig `mapstructure:",squash"`
|
||||
}
|
||||
}
|
||||
|
||||
// moduleConfig could be in a module specific package
|
||||
type moduleConfig struct {
|
||||
Token string
|
||||
}
|
||||
|
||||
var C config
|
||||
|
||||
err := viper.Unmarshal(&C)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to decode into struct, %v", err)
|
||||
}
|
||||
```
|
||||
|
||||
Viper uses [github.com/go-viper/mapstructure](https://github.com/go-viper/mapstructure) under the hood for unmarshaling values; the library uses `mapstructure` struct tags by default.
|
||||
|
||||
### Decoding custom formats
|
||||
|
||||
A frequently requested feature for Viper is adding more value formats and decoders.
|
||||
For example, parsing character-separated strings (split on dot, comma, semicolon, etc.) into slices.
|
||||
|
||||
This is already available in Viper using mapstructure decode hooks.
|
||||
|
||||
Read more about the details in [this blog post](https://sagikazarmark.hu/blog/decoding-custom-formats-with-viper/).
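As a rough sketch of that technique (the hook choice here is just an example, and passing `DecodeHook` replaces Viper's default hook set), a decode hook can split semicolon-separated strings into slices during unmarshaling:

```go
import "github.com/go-viper/mapstructure/v2"

type config struct {
	Tags []string // populated from a "tag1;tag2" style string
}

var c config

err := viper.Unmarshal(&c, viper.DecodeHook(
	mapstructure.StringToSliceHookFunc(";"),
))
if err != nil {
	log.Fatalf("unable to decode into struct: %v", err)
}
```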
|
||||
|
||||
### Marshalling to string
|
||||
|
||||
You may need to marshal all the settings held in viper into a string rather than write them to a file.
|
||||
You can use your favorite format's marshaller with the config returned by `AllSettings()`.
|
||||
|
||||
```go
|
||||
import (
|
||||
yaml "go.yaml.in/yaml/v3"
|
||||
// ...
|
||||
)
|
||||
|
||||
func yamlStringSettings() string {
|
||||
c := viper.AllSettings()
|
||||
bs, err := yaml.Marshal(c)
|
||||
if err != nil {
|
||||
log.Fatalf("unable to marshal config to YAML: %v", err)
|
||||
}
|
||||
return string(bs)
|
||||
}
|
||||
```
|
||||
|
||||
## Viper or Vipers?
|
||||
|
||||
Viper comes with a global instance (singleton) out of the box.
|
||||
|
||||
Although it makes setting up configuration easy,
|
||||
using it is generally discouraged as it makes testing harder and can lead to unexpected behavior.
|
||||
|
||||
The best practice is to initialize a Viper instance and pass that around when necessary.
|
||||
|
||||
The global instance _MAY_ be deprecated in the future.
|
||||
See [#1855](https://github.com/spf13/viper/issues/1855) for more details.
|
||||
|
||||
### Working with multiple vipers
|
||||
|
||||
You can also create many different vipers for use in your application. Each will
|
||||
have its own unique set of configurations and values. Each can read from a
|
||||
different config file, key value store, etc. All of the functions that the viper
package supports are mirrored as methods on a viper instance.
|
||||
|
||||
Example:
|
||||
|
||||
```go
|
||||
x := viper.New()
|
||||
y := viper.New()
|
||||
|
||||
x.SetDefault("ContentDir", "content")
|
||||
y.SetDefault("ContentDir", "foobar")
|
||||
|
||||
//...
|
||||
```
|
||||
|
||||
When working with multiple vipers, it is up to the user to keep track of the
|
||||
different vipers.
|
||||
|
||||
|
||||
## Q & A
|
||||
|
||||
### Why is it called “Viper”?
|
||||
|
||||
A: Viper is designed to be a [companion](http://en.wikipedia.org/wiki/Viper_(G.I._Joe))
|
||||
to [Cobra](https://github.com/spf13/cobra). While both can operate completely
|
||||
independently, together they make a powerful pair to handle much of your
|
||||
application foundation needs.
|
||||
|
||||
### Why is it called “Cobra”?
|
||||
|
||||
Is there a better name for a [commander](http://en.wikipedia.org/wiki/Cobra_Commander)?
|
||||
|
||||
### Does Viper support case sensitive keys?
|
||||
|
||||
**tl;dr:** No.
|
||||
|
||||
Viper merges configuration from various sources, many of which are either case insensitive or use different casing than the rest of the sources (eg. env vars).
In order to provide the best experience when using multiple sources, the decision has been made to make all keys case insensitive.

There have been several attempts to implement case sensitivity, but unfortunately it's not that trivial. We might take a stab at implementing it in [Viper v2](https://github.com/spf13/viper/issues/772), but despite the initial noise, it does not seem to be requested that much.
|
||||
|
||||
You can vote for case sensitivity by filling out this feedback form: https://forms.gle/R6faU74qPRPAzchZ9
|
||||
|
||||
### Is it safe to concurrently read and write to a viper?
|
||||
|
||||
No, you will need to synchronize access to the viper yourself (for example by using the `sync` package). Concurrent reads and writes can cause a panic.
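A minimal sketch of one way to do that, wrapping a Viper instance behind a mutex (the `safeConfig` type is illustrative):

```go
type safeConfig struct {
	mu sync.RWMutex
	v  *viper.Viper
}

func (c *safeConfig) Set(key string, value any) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.v.Set(key, value)
}

func (c *safeConfig) GetString(key string) string {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.v.GetString(key)
}
```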
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
See [TROUBLESHOOTING.md](TROUBLESHOOTING.md).
|
||||
|
||||
## Development
|
||||
|
||||
**For an optimal developer experience, it is recommended to install [Nix](https://nixos.org/download.html) and [direnv](https://direnv.net/docs/installation.html).**
|
||||
|
||||
_Alternatively, install [Go](https://go.dev/dl/) on your computer then run `make deps` to install the rest of the dependencies._
|
||||
|
||||
Run the test suite:
|
||||
|
||||
```shell
|
||||
make test
|
||||
```
|
||||
|
||||
Run linters:
|
||||
|
||||
```shell
|
||||
make lint # pass -j option to run them in parallel
|
||||
```
|
||||
|
||||
Some linter violations can automatically be fixed:
|
||||
|
||||
```shell
|
||||
make fmt
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
The project is licensed under the [MIT License](LICENSE).
|
||||
32
vendor/github.com/spf13/viper/TROUBLESHOOTING.md
generated
vendored
Normal file
32
vendor/github.com/spf13/viper/TROUBLESHOOTING.md
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
# Troubleshooting
|
||||
|
||||
## Unmarshaling doesn't work
|
||||
|
||||
The most common reason for this issue is improper use of struct tags (eg. `yaml` or `json`). Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default. Please refer to the library's documentation for using other struct tags.
|
||||
|
||||
## Cannot find package
|
||||
|
||||
Viper installation seems to fail a lot lately with the following (or a similar) error:
|
||||
|
||||
```
|
||||
cannot find package "github.com/hashicorp/hcl/tree/hcl1" in any of:
|
||||
/usr/local/Cellar/go/1.15.7_1/libexec/src/github.com/hashicorp/hcl/tree/hcl1 (from $GOROOT)
|
||||
/Users/user/go/src/github.com/hashicorp/hcl/tree/hcl1 (from $GOPATH)
|
||||
```
|
||||
|
||||
As the error message suggests, Go tries to look up dependencies in `GOPATH` mode (as it's commonly called) from the `GOPATH`.
|
||||
Viper opted to use [Go Modules](https://go.dev/wiki/Modules) to manage its dependencies. While in many cases the two methods are interchangeable, once a dependency releases new (major) versions, `GOPATH` mode is no longer able to decide which version to use, so it'll either use one that's already present or pick a version (usually the `master` branch).
|
||||
|
||||
The solution is easy: switch to using Go Modules.
|
||||
Please refer to the [wiki](https://go.dev/wiki/Modules) on how to do that.
|
||||
|
||||
**tl;dr:** `export GO111MODULE=on`
|
||||
|
||||
## Unquoted 'y' and 'n' characters get replaced with _true_ and _false_ when reading a YAML file
|
||||
|
||||
This is a YAML 1.1 feature according to [go-yaml/yaml#740](https://github.com/go-yaml/yaml/issues/740).
|
||||
|
||||
Potential solutions are:
|
||||
|
||||
1. Quoting values resolved as boolean
|
||||
1. Upgrading to YAML v3 (for the time being this is possible by passing the `viper_yaml3` tag to your build)
|
||||
147
vendor/github.com/spf13/viper/UPGRADE.md
generated
vendored
Normal file
147
vendor/github.com/spf13/viper/UPGRADE.md
generated
vendored
Normal file
@@ -0,0 +1,147 @@
|
||||
# Update Log
|
||||
|
||||
**This document details any major updates required to use new features or improvements in Viper.**
|
||||
|
||||
## v1.20.x
|
||||
|
||||
### New file searching API
|
||||
|
||||
Viper now includes a new file searching API that allows users to customize how Viper looks for config files.
|
||||
|
||||
Viper accepts a custom [`Finder`](https://pkg.go.dev/github.com/spf13/viper#Finder) interface implementation:
|
||||
|
||||
```go
|
||||
// Finder looks for files and directories in an [afero.Fs] filesystem.
|
||||
type Finder interface {
|
||||
Find(fsys afero.Fs) ([]string, error)
|
||||
}
|
||||
```
|
||||
|
||||
It is supposed to return a list of paths to config files.
|
||||
|
||||
The default implementation uses [github.com/sagikazarmark/locafero](https://github.com/sagikazarmark/locafero) under the hood.
|
||||
|
||||
You can supply your own implementation using `WithFinder`:
|
||||
|
||||
```go
|
||||
v := viper.NewWithOptions(
|
||||
viper.WithFinder(&MyFinder{}),
|
||||
)
|
||||
```
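The `MyFinder` above is a placeholder; a minimal sketch that satisfies the `Finder` interface by returning a fixed path (a real implementation would inspect `fsys`):

```go
// MyFinder is a hypothetical Finder that returns a fixed set of config files.
type MyFinder struct{}

func (f *MyFinder) Find(fsys afero.Fs) ([]string, error) {
	// A real implementation would walk fsys; this sketch returns a static path.
	return []string{"/etc/myapp/config.yaml"}, nil
}
```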
|
||||
|
||||
For more information, check out the [Finder examples](https://pkg.go.dev/github.com/spf13/viper#Finder)
|
||||
and the [documentation](https://pkg.go.dev/github.com/sagikazarmark/locafero) for the locafero package.
|
||||
|
||||
### New encoding API
|
||||
|
||||
Viper now allows customizing the encoding layer by providing an API for encoding and decoding configuration data:
|
||||
|
||||
```go
|
||||
// Encoder encodes Viper's internal data structures into a byte representation.
|
||||
// It's primarily used for encoding a map[string]any into a file format.
|
||||
type Encoder interface {
|
||||
Encode(v map[string]any) ([]byte, error)
|
||||
}
|
||||
|
||||
// Decoder decodes the contents of a byte slice into Viper's internal data structures.
|
||||
// It's primarily used for decoding contents of a file into a map[string]any.
|
||||
type Decoder interface {
|
||||
Decode(b []byte, v map[string]any) error
|
||||
}
|
||||
|
||||
// Codec combines [Encoder] and [Decoder] interfaces.
|
||||
type Codec interface {
|
||||
Encoder
|
||||
Decoder
|
||||
}
|
||||
```
|
||||
|
||||
By default, Viper includes the following codecs:
|
||||
|
||||
- JSON
|
||||
- TOML
|
||||
- YAML
|
||||
- Dotenv
|
||||
|
||||
The rest of the codecs are moved to [github.com/go-viper/encoding](https://github.com/go-viper/encoding)
|
||||
|
||||
Customizing the encoding layer is possible by providing a custom registry of codecs:
|
||||
|
||||
- [Encoder](https://pkg.go.dev/github.com/spf13/viper#Encoder) -> [EncoderRegistry](https://pkg.go.dev/github.com/spf13/viper#EncoderRegistry)
|
||||
- [Decoder](https://pkg.go.dev/github.com/spf13/viper#Decoder) -> [DecoderRegistry](https://pkg.go.dev/github.com/spf13/viper#DecoderRegistry)
|
||||
- [Codec](https://pkg.go.dev/github.com/spf13/viper#Codec) -> [CodecRegistry](https://pkg.go.dev/github.com/spf13/viper#CodecRegistry)
|
||||
|
||||
You can supply the registry of codecs to Viper using the appropriate `With*Registry` function:
|
||||
|
||||
```go
|
||||
codecRegistry := viper.NewCodecRegistry()
|
||||
|
||||
codecRegistry.RegisterCodec("myformat", &MyCodec{})
|
||||
|
||||
v := viper.NewWithOptions(
|
||||
viper.WithCodecRegistry(codecRegistry),
|
||||
)
|
||||
```
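The `MyCodec` above is likewise a placeholder; a minimal sketch of a codec that satisfies the `Codec` interface by delegating to `encoding/json`:

```go
// MyCodec is a hypothetical Codec backed by encoding/json.
type MyCodec struct{}

func (MyCodec) Encode(v map[string]any) ([]byte, error) {
	return json.Marshal(v)
}

func (MyCodec) Decode(b []byte, v map[string]any) error {
	return json.Unmarshal(b, &v)
}
```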
|
||||
|
||||
### BREAKING: "github.com/mitchellh/mapstructure" dependency replaced
|
||||
|
||||
The original [mapstructure](https://github.com/mitchellh/mapstructure) has been [archived](https://github.com/mitchellh/mapstructure/issues/349) and was replaced with a [fork](https://github.com/go-viper/mapstructure) maintained by Viper ([#1723](https://github.com/spf13/viper/pull/1723)).
|
||||
|
||||
As a result, the package import path needs to be changed in cases where `mapstructure` is directly referenced in your code.
|
||||
|
||||
For example, when providing a custom decoder config:
|
||||
|
||||
```go
|
||||
err := viper.Unmarshal(&appConfig, func(config *mapstructure.DecoderConfig) {
|
||||
config.TagName = "yaml"
|
||||
})
|
||||
```
|
||||
|
||||
The change is fairly straightforward, just replace all occurrences of the import path `github.com/mitchellh/mapstructure` with `github.com/go-viper/mapstructure/v2`:
|
||||
|
||||
```diff
|
||||
- import "github.com/mitchellh/mapstructure"
|
||||
+ import "github.com/go-viper/mapstructure/v2"
|
||||
```
|
||||
|
||||
### BREAKING: HCL, Java properties, INI removed from core
|
||||
|
||||
In order to reduce third-party dependencies, Viper dropped support for the following formats from the core:
|
||||
|
||||
- HCL
|
||||
- Java properties
|
||||
- INI
|
||||
|
||||
You can still use these formats though by importing them from [github.com/go-viper/encoding](https://github.com/go-viper/encoding):
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/go-viper/encoding/hcl"
|
||||
"github.com/go-viper/encoding/javaproperties"
|
||||
"github.com/go-viper/encoding/ini"
|
||||
)
|
||||
|
||||
codecRegistry := viper.NewCodecRegistry()
|
||||
|
||||
{
|
||||
codec := hcl.Codec{}
|
||||
|
||||
codecRegistry.RegisterCodec("hcl", codec)
|
||||
codecRegistry.RegisterCodec("tfvars", codec)
|
||||
|
||||
}
|
||||
|
||||
{
|
||||
codec := &javaproperties.Codec{}
|
||||
|
||||
codecRegistry.RegisterCodec("properties", codec)
|
||||
codecRegistry.RegisterCodec("props", codec)
|
||||
codecRegistry.RegisterCodec("prop", codec)
|
||||
}
|
||||
|
||||
codecRegistry.RegisterCodec("ini", ini.Codec{})
|
||||
|
||||
v := viper.NewWithOptions(
|
||||
viper.WithCodecRegistry(codecRegistry),
|
||||
)
|
||||
```
|
||||
181
vendor/github.com/spf13/viper/encoding.go
generated
vendored
Normal file
181
vendor/github.com/spf13/viper/encoding.go
generated
vendored
Normal file
@@ -0,0 +1,181 @@
|
||||
package viper
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/spf13/viper/internal/encoding/dotenv"
|
||||
"github.com/spf13/viper/internal/encoding/json"
|
||||
"github.com/spf13/viper/internal/encoding/toml"
|
||||
"github.com/spf13/viper/internal/encoding/yaml"
|
||||
)
|
||||
|
||||
// Encoder encodes Viper's internal data structures into a byte representation.
|
||||
// It's primarily used for encoding a map[string]any into a file format.
|
||||
type Encoder interface {
|
||||
Encode(v map[string]any) ([]byte, error)
|
||||
}
|
||||
|
||||
// Decoder decodes the contents of a byte slice into Viper's internal data structures.
|
||||
// It's primarily used for decoding contents of a file into a map[string]any.
|
||||
type Decoder interface {
|
||||
Decode(b []byte, v map[string]any) error
|
||||
}
|
||||
|
||||
// Codec combines [Encoder] and [Decoder] interfaces.
|
||||
type Codec interface {
|
||||
Encoder
|
||||
Decoder
|
||||
}
|
||||
|
||||
// TODO: consider adding specific errors for not found scenarios
|
||||
|
||||
// EncoderRegistry returns an [Encoder] for a given format.
|
||||
//
|
||||
// Format is case-insensitive.
|
||||
//
|
||||
// [EncoderRegistry] returns an error if no [Encoder] is registered for the format.
|
||||
type EncoderRegistry interface {
|
||||
Encoder(format string) (Encoder, error)
|
||||
}
|
||||
|
||||
// DecoderRegistry returns an [Decoder] for a given format.
|
||||
//
|
||||
// Format is case-insensitive.
|
||||
//
|
||||
// [DecoderRegistry] returns an error if no [Decoder] is registered for the format.
|
||||
type DecoderRegistry interface {
|
||||
Decoder(format string) (Decoder, error)
|
||||
}
|
||||
|
||||
// [CodecRegistry] combines [EncoderRegistry] and [DecoderRegistry] interfaces.
|
||||
type CodecRegistry interface {
|
||||
EncoderRegistry
|
||||
DecoderRegistry
|
||||
}
|
||||
|
||||
// WithEncoderRegistry sets a custom [EncoderRegistry].
|
||||
func WithEncoderRegistry(r EncoderRegistry) Option {
|
||||
return optionFunc(func(v *Viper) {
|
||||
if r == nil {
|
||||
return
|
||||
}
|
||||
|
||||
v.encoderRegistry = r
|
||||
})
|
||||
}
|
||||
|
||||
// WithDecoderRegistry sets a custom [DecoderRegistry].
|
||||
func WithDecoderRegistry(r DecoderRegistry) Option {
|
||||
return optionFunc(func(v *Viper) {
|
||||
if r == nil {
|
||||
return
|
||||
}
|
||||
|
||||
v.decoderRegistry = r
|
||||
})
|
||||
}
|
||||
|
||||
// WithCodecRegistry sets a custom [EncoderRegistry] and [DecoderRegistry].
|
||||
func WithCodecRegistry(r CodecRegistry) Option {
|
||||
return optionFunc(func(v *Viper) {
|
||||
if r == nil {
|
||||
return
|
||||
}
|
||||
|
||||
v.encoderRegistry = r
|
||||
v.decoderRegistry = r
|
||||
})
|
||||
}
|
||||
|
||||
// DefaultCodecRegistry is a simple implementation of [CodecRegistry] that allows registering custom [Codec]s.
|
||||
type DefaultCodecRegistry struct {
|
||||
codecs map[string]Codec
|
||||
|
||||
mu sync.RWMutex
|
||||
once sync.Once
|
||||
}
|
||||
|
||||
// NewCodecRegistry returns a new [CodecRegistry], ready to accept custom [Codec]s.
|
||||
func NewCodecRegistry() *DefaultCodecRegistry {
|
||||
r := &DefaultCodecRegistry{}
|
||||
|
||||
r.init()
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *DefaultCodecRegistry) init() {
|
||||
r.once.Do(func() {
|
||||
r.codecs = map[string]Codec{}
|
||||
})
|
||||
}
|
||||
|
||||
// RegisterCodec registers a custom [Codec].
|
||||
//
|
||||
// Format is case-insensitive.
|
||||
func (r *DefaultCodecRegistry) RegisterCodec(format string, codec Codec) error {
|
||||
r.init()
|
||||
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
r.codecs[strings.ToLower(format)] = codec
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Encoder implements the [EncoderRegistry] interface.
|
||||
//
|
||||
// Format is case-insensitive.
|
||||
func (r *DefaultCodecRegistry) Encoder(format string) (Encoder, error) {
|
||||
encoder, ok := r.codec(format)
|
||||
if !ok {
|
||||
return nil, errors.New("encoder not found for this format")
|
||||
}
|
||||
|
||||
return encoder, nil
|
||||
}
|
||||
|
||||
// Decoder implements the [DecoderRegistry] interface.
|
||||
//
|
||||
// Format is case-insensitive.
|
||||
func (r *DefaultCodecRegistry) Decoder(format string) (Decoder, error) {
|
||||
decoder, ok := r.codec(format)
|
||||
if !ok {
|
||||
return nil, errors.New("decoder not found for this format")
|
||||
}
|
||||
|
||||
return decoder, nil
|
||||
}
|
||||
|
||||
func (r *DefaultCodecRegistry) codec(format string) (Codec, bool) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
format = strings.ToLower(format)
|
||||
|
||||
if r.codecs != nil {
|
||||
codec, ok := r.codecs[format]
|
||||
if ok {
|
||||
return codec, true
|
||||
}
|
||||
}
|
||||
|
||||
switch format {
|
||||
case "yaml", "yml":
|
||||
return yaml.Codec{}, true
|
||||
|
||||
case "json":
|
||||
return json.Codec{}, true
|
||||
|
||||
case "toml":
|
||||
return toml.Codec{}, true
|
||||
|
||||
case "dotenv", "env":
|
||||
return &dotenv.Codec{}, true
|
||||
}
|
||||
|
||||
return nil, false
|
||||
}
|
||||
8
vendor/github.com/spf13/viper/experimental.go
generated
vendored
Normal file
8
vendor/github.com/spf13/viper/experimental.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
package viper
|
||||
|
||||
// ExperimentalBindStruct tells Viper to use the new bind struct feature.
|
||||
func ExperimentalBindStruct() Option {
|
||||
return optionFunc(func(v *Viper) {
|
||||
v.experimentalBindStruct = true
|
||||
})
|
||||
}
|
||||
104
vendor/github.com/spf13/viper/file.go
generated
vendored
Normal file
104
vendor/github.com/spf13/viper/file.go
generated
vendored
Normal file
@@ -0,0 +1,104 @@
|
||||
package viper
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/sagikazarmark/locafero"
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
// ExperimentalFinder tells Viper to use the new Finder interface for finding configuration files.
|
||||
func ExperimentalFinder() Option {
|
||||
return optionFunc(func(v *Viper) {
|
||||
v.experimentalFinder = true
|
||||
})
|
||||
}
|
||||
|
||||
// Search for a config file.
|
||||
func (v *Viper) findConfigFile() (string, error) {
|
||||
finder := v.finder
|
||||
|
||||
if finder == nil && v.experimentalFinder {
|
||||
var names []string
|
||||
|
||||
if v.configType != "" {
|
||||
names = locafero.NameWithOptionalExtensions(v.configName, SupportedExts...)
|
||||
} else {
|
||||
names = locafero.NameWithExtensions(v.configName, SupportedExts...)
|
||||
}
|
||||
|
||||
finder = locafero.Finder{
|
||||
Paths: v.configPaths,
|
||||
Names: names,
|
||||
Type: locafero.FileTypeFile,
|
||||
}
|
||||
}
|
||||
|
||||
if finder != nil {
|
||||
return v.findConfigFileWithFinder(finder)
|
||||
}
|
||||
|
||||
return v.findConfigFileOld()
|
||||
}
|
||||
|
||||
func (v *Viper) findConfigFileWithFinder(finder Finder) (string, error) {
|
||||
results, err := finder.Find(v.fs)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if len(results) == 0 {
|
||||
return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)}
|
||||
}
|
||||
|
||||
// We call clean on the final result to ensure that the path is in its canonical form.
|
||||
// This is mostly for consistent path handling and to make sure tests pass.
|
||||
return results[0], nil
|
||||
}
|
||||
|
||||
// Search all configPaths for any config file.
|
||||
// Returns the first path that exists (and is a config file).
|
||||
func (v *Viper) findConfigFileOld() (string, error) {
|
||||
v.logger.Info("searching for config in paths", "paths", v.configPaths)
|
||||
|
||||
for _, cp := range v.configPaths {
|
||||
file := v.searchInPath(cp)
|
||||
if file != "" {
|
||||
return file, nil
|
||||
}
|
||||
}
|
||||
return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)}
|
||||
}
|
||||
|
||||
func (v *Viper) searchInPath(in string) (filename string) {
|
||||
v.logger.Debug("searching for config in path", "path", in)
|
||||
for _, ext := range SupportedExts {
|
||||
v.logger.Debug("checking if file exists", "file", filepath.Join(in, v.configName+"."+ext))
|
||||
if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b {
|
||||
v.logger.Debug("found file", "file", filepath.Join(in, v.configName+"."+ext))
|
||||
return filepath.Join(in, v.configName+"."+ext)
|
||||
}
|
||||
}
|
||||
|
||||
if v.configType != "" {
|
||||
if b, _ := exists(v.fs, filepath.Join(in, v.configName)); b {
|
||||
return filepath.Join(in, v.configName)
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// exists checks if file exists.
|
||||
func exists(fs afero.Fs, path string) (bool, error) {
|
||||
stat, err := fs.Stat(path)
|
||||
if err == nil {
|
||||
return !stat.IsDir(), nil
|
||||
}
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
55
vendor/github.com/spf13/viper/finder.go
generated
vendored
Normal file
55
vendor/github.com/spf13/viper/finder.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
package viper
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
// WithFinder sets a custom [Finder].
|
||||
func WithFinder(f Finder) Option {
|
||||
return optionFunc(func(v *Viper) {
|
||||
if f == nil {
|
||||
return
|
||||
}
|
||||
|
||||
v.finder = f
|
||||
})
|
||||
}
|
||||
|
||||
// Finder looks for files and directories in an [afero.Fs] filesystem.
|
||||
type Finder interface {
|
||||
Find(fsys afero.Fs) ([]string, error)
|
||||
}
|
||||
|
||||
// Finders combines multiple finders into one.
|
||||
func Finders(finders ...Finder) Finder {
|
||||
return &combinedFinder{finders: finders}
|
||||
}
|
||||
|
||||
// combinedFinder is a Finder that combines multiple finders.
|
||||
type combinedFinder struct {
|
||||
finders []Finder
|
||||
}
|
||||
|
||||
// Find implements the [Finder] interface.
|
||||
func (c *combinedFinder) Find(fsys afero.Fs) ([]string, error) {
|
||||
var results []string
|
||||
var errs []error
|
||||
|
||||
for _, finder := range c.finders {
|
||||
if finder == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
r, err := finder.Find(fsys)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
}
|
||||
|
||||
results = append(results, r...)
|
||||
}
|
||||
|
||||
return results, errors.Join(errs...)
|
||||
}
|
||||
57
vendor/github.com/spf13/viper/flags.go
generated
vendored
Normal file
57
vendor/github.com/spf13/viper/flags.go
generated
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
package viper
|
||||
|
||||
import "github.com/spf13/pflag"
|
||||
|
||||
// FlagValueSet is an interface that users can implement
|
||||
// to bind a set of flags to viper.
|
||||
type FlagValueSet interface {
|
||||
VisitAll(fn func(FlagValue))
|
||||
}
|
||||
|
||||
// FlagValue is an interface that users can implement
|
||||
// to bind different flags to viper.
|
||||
type FlagValue interface {
|
||||
HasChanged() bool
|
||||
Name() string
|
||||
ValueString() string
|
||||
ValueType() string
|
||||
}
|
||||
|
||||
// pflagValueSet is a wrapper around *pflag.ValueSet
|
||||
// that implements FlagValueSet.
|
||||
type pflagValueSet struct {
|
||||
flags *pflag.FlagSet
|
||||
}
|
||||
|
||||
// VisitAll iterates over all *pflag.Flag inside the *pflag.FlagSet.
|
||||
func (p pflagValueSet) VisitAll(fn func(flag FlagValue)) {
|
||||
p.flags.VisitAll(func(flag *pflag.Flag) {
|
||||
fn(pflagValue{flag})
|
||||
})
|
||||
}
|
||||
|
||||
// pflagValue is a wrapper around *pflag.flag
|
||||
// that implements FlagValue.
|
||||
type pflagValue struct {
|
||||
flag *pflag.Flag
|
||||
}
|
||||
|
||||
// HasChanged returns whether the flag has changes or not.
|
||||
func (p pflagValue) HasChanged() bool {
|
||||
return p.flag.Changed
|
||||
}
|
||||
|
||||
// Name returns the name of the flag.
|
||||
func (p pflagValue) Name() string {
|
||||
return p.flag.Name
|
||||
}
|
||||
|
||||
// ValueString returns the value of the flag as a string.
|
||||
func (p pflagValue) ValueString() string {
|
||||
return p.flag.Value.String()
|
||||
}
|
||||
|
||||
// ValueType returns the type of the flag as a string.
|
||||
func (p pflagValue) ValueType() string {
|
||||
return p.flag.Value.Type()
|
||||
}
|
||||
255
vendor/github.com/spf13/viper/flake.lock
generated
vendored
Normal file
255
vendor/github.com/spf13/viper/flake.lock
generated
vendored
Normal file
@@ -0,0 +1,255 @@
|
||||
{
|
||||
"nodes": {
|
||||
"cachix": {
|
||||
"inputs": {
|
||||
"devenv": [
|
||||
"devenv"
|
||||
],
|
||||
"flake-compat": [
|
||||
"devenv"
|
||||
],
|
||||
"git-hooks": [
|
||||
"devenv",
|
||||
"git-hooks"
|
||||
],
|
||||
"nixpkgs": [
|
||||
"devenv",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1748883665,
|
||||
"narHash": "sha256-R0W7uAg+BLoHjMRMQ8+oiSbTq8nkGz5RDpQ+ZfxxP3A=",
|
||||
"owner": "cachix",
|
||||
"repo": "cachix",
|
||||
"rev": "f707778d902af4d62d8dd92c269f8e70de09acbe",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "cachix",
|
||||
"ref": "latest",
|
||||
"repo": "cachix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"devenv": {
|
||||
"inputs": {
|
||||
"cachix": "cachix",
|
||||
"flake-compat": "flake-compat",
|
||||
"git-hooks": "git-hooks",
|
||||
"nix": "nix",
|
||||
"nixpkgs": "nixpkgs"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1755257397,
|
||||
"narHash": "sha256-VU+OHexL2y6y7yrpEc6bZvYYwoQg6aZK1b4YxT0yZCk=",
|
||||
"owner": "cachix",
|
||||
"repo": "devenv",
|
||||
"rev": "6f9c3d4722aa253631644329f7bda60b1d3d1b97",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "cachix",
|
||||
"repo": "devenv",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-compat": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1747046372,
|
||||
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-parts": {
|
||||
"inputs": {
|
||||
"nixpkgs-lib": [
|
||||
"devenv",
|
||||
"nix",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1733312601,
|
||||
"narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-parts_2": {
|
||||
"inputs": {
|
||||
"nixpkgs-lib": "nixpkgs-lib"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1754487366,
|
||||
"narHash": "sha256-pHYj8gUBapuUzKV/kN/tR3Zvqc7o6gdFB9XKXIp1SQ8=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"rev": "af66ad14b28a127c5c0f3bbb298218fc63528a18",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"git-hooks": {
|
||||
"inputs": {
|
||||
"flake-compat": [
|
||||
"devenv",
|
||||
"flake-compat"
|
||||
],
|
||||
"gitignore": "gitignore",
|
||||
"nixpkgs": [
|
||||
"devenv",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1750779888,
|
||||
"narHash": "sha256-wibppH3g/E2lxU43ZQHC5yA/7kIKLGxVEnsnVK1BtRg=",
|
||||
"owner": "cachix",
|
||||
"repo": "git-hooks.nix",
|
||||
"rev": "16ec914f6fb6f599ce988427d9d94efddf25fe6d",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "cachix",
|
||||
"repo": "git-hooks.nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"gitignore": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"devenv",
|
||||
"git-hooks",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1709087332,
|
||||
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "gitignore.nix",
|
||||
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "hercules-ci",
|
||||
"repo": "gitignore.nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix": {
|
||||
"inputs": {
|
||||
"flake-compat": [
|
||||
"devenv",
|
||||
"flake-compat"
|
||||
],
|
||||
"flake-parts": "flake-parts",
|
||||
"git-hooks-nix": [
|
||||
"devenv",
|
||||
"git-hooks"
|
||||
],
|
||||
"nixpkgs": [
|
||||
"devenv",
|
||||
"nixpkgs"
|
||||
],
|
||||
"nixpkgs-23-11": [
|
||||
"devenv"
|
||||
],
|
||||
"nixpkgs-regression": [
|
||||
"devenv"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1755029779,
|
||||
"narHash": "sha256-3+GHIYGg4U9XKUN4rg473frIVNn8YD06bjwxKS1IPrU=",
|
||||
"owner": "cachix",
|
||||
"repo": "nix",
|
||||
"rev": "b0972b0eee6726081d10b1199f54de6d2917f861",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "cachix",
|
||||
"ref": "devenv-2.30",
|
||||
"repo": "nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1750441195,
|
||||
"narHash": "sha256-yke+pm+MdgRb6c0dPt8MgDhv7fcBbdjmv1ZceNTyzKg=",
|
||||
"owner": "cachix",
|
||||
"repo": "devenv-nixpkgs",
|
||||
"rev": "0ceffe312871b443929ff3006960d29b120dc627",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "cachix",
|
||||
"ref": "rolling",
|
||||
"repo": "devenv-nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs-lib": {
|
||||
"locked": {
|
||||
"lastModified": 1753579242,
|
||||
"narHash": "sha256-zvaMGVn14/Zz8hnp4VWT9xVnhc8vuL3TStRqwk22biA=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"rev": "0f36c44e01a6129be94e3ade315a5883f0228a6e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs_2": {
|
||||
"locked": {
|
||||
"lastModified": 1755268003,
|
||||
"narHash": "sha256-nNaeJjo861wFR0tjHDyCnHs1rbRtrMgxAKMoig9Sj/w=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "32f313e49e42f715491e1ea7b306a87c16fe0388",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixpkgs-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"devenv": "devenv",
|
||||
"flake-parts": "flake-parts_2",
|
||||
"nixpkgs": "nixpkgs_2"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
||||
61
vendor/github.com/spf13/viper/flake.nix
generated
vendored
Normal file
61
vendor/github.com/spf13/viper/flake.nix
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
{
|
||||
description = "Viper";
|
||||
|
||||
inputs = {
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
|
||||
flake-parts.url = "github:hercules-ci/flake-parts";
|
||||
devenv.url = "github:cachix/devenv";
|
||||
};
|
||||
|
||||
outputs =
|
||||
inputs@{ flake-parts, ... }:
|
||||
flake-parts.lib.mkFlake { inherit inputs; } {
|
||||
imports = [
|
||||
inputs.devenv.flakeModule
|
||||
];
|
||||
|
||||
systems = [
|
||||
"x86_64-linux"
|
||||
"x86_64-darwin"
|
||||
"aarch64-darwin"
|
||||
];
|
||||
|
||||
perSystem =
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
devenv.shells = {
|
||||
default = {
|
||||
languages = {
|
||||
go.enable = true;
|
||||
};
|
||||
|
||||
git-hooks.hooks = {
|
||||
nixpkgs-fmt.enable = true;
|
||||
yamllint.enable = true;
|
||||
};
|
||||
|
||||
packages = with pkgs; [
|
||||
gnumake
|
||||
|
||||
golangci-lint
|
||||
yamllint
|
||||
];
|
||||
|
||||
scripts = {
|
||||
versions.exec = ''
|
||||
go version
|
||||
golangci-lint version
|
||||
'';
|
||||
};
|
||||
|
||||
enterShell = ''
|
||||
versions
|
||||
'';
|
||||
|
||||
# https://github.com/cachix/devenv/issues/528#issuecomment-1556108767
|
||||
containers = pkgs.lib.mkForce { };
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
61
vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go
generated
vendored
Normal file
61
vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
package dotenv
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/subosito/gotenv"
|
||||
)
|
||||
|
||||
const keyDelimiter = "_"
|
||||
|
||||
// Codec implements the encoding.Encoder and encoding.Decoder interfaces for encoding data containing environment variables
|
||||
// (commonly called as dotenv format).
|
||||
type Codec struct{}
|
||||
|
||||
func (Codec) Encode(v map[string]any) ([]byte, error) {
|
||||
flattened := map[string]any{}
|
||||
|
||||
flattened = flattenAndMergeMap(flattened, v, "", keyDelimiter)
|
||||
|
||||
keys := make([]string, 0, len(flattened))
|
||||
|
||||
for key := range flattened {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
|
||||
sort.Strings(keys)
|
||||
|
||||
var buf bytes.Buffer
|
||||
|
||||
for _, key := range keys {
|
||||
_, err := buf.WriteString(fmt.Sprintf("%v=%v\n", strings.ToUpper(key), flattened[key]))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func (Codec) Decode(b []byte, v map[string]any) error {
|
||||
var buf bytes.Buffer
|
||||
|
||||
_, err := buf.Write(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
env, err := gotenv.StrictParse(&buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for key, value := range env {
|
||||
v[key] = value
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
41
vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go
generated
vendored
Normal file
41
vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
package dotenv
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cast"
|
||||
)
|
||||
|
||||
// flattenAndMergeMap recursively flattens the given map into a new map
|
||||
// Code is based on the function with the same name in the main package.
|
||||
// TODO: move it to a common place.
|
||||
func flattenAndMergeMap(shadow, m map[string]any, prefix, delimiter string) map[string]any {
|
||||
if shadow != nil && prefix != "" && shadow[prefix] != nil {
|
||||
// prefix is shadowed => nothing more to flatten
|
||||
return shadow
|
||||
}
|
||||
if shadow == nil {
|
||||
shadow = make(map[string]any)
|
||||
}
|
||||
|
||||
var m2 map[string]any
|
||||
if prefix != "" {
|
||||
prefix += delimiter
|
||||
}
|
||||
for k, val := range m {
|
||||
fullKey := prefix + k
|
||||
switch val := val.(type) {
|
||||
case map[string]any:
|
||||
m2 = val
|
||||
case map[any]any:
|
||||
m2 = cast.ToStringMap(val)
|
||||
default:
|
||||
// immediate value
|
||||
shadow[strings.ToLower(fullKey)] = val
|
||||
continue
|
||||
}
|
||||
// recursively merge to shadow map
|
||||
shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter)
|
||||
}
|
||||
return shadow
|
||||
}
|
||||
17
vendor/github.com/spf13/viper/internal/encoding/json/codec.go
generated
vendored
Normal file
17
vendor/github.com/spf13/viper/internal/encoding/json/codec.go
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
package json
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
// Codec implements the encoding.Encoder and encoding.Decoder interfaces for JSON encoding.
|
||||
type Codec struct{}
|
||||
|
||||
func (Codec) Encode(v map[string]any) ([]byte, error) {
|
||||
// TODO: expose prefix and indent in the Codec as setting?
|
||||
return json.MarshalIndent(v, "", " ")
|
||||
}
|
||||
|
||||
func (Codec) Decode(b []byte, v map[string]any) error {
|
||||
return json.Unmarshal(b, &v)
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff.