Added initial support for Live DBHub.io databases

Justin Clift
2023-03-26 18:36:03 +11:00
parent d370c48723
commit c9d72b144f
813 changed files with 214 additions and 324174 deletions

dbhub.go (108 changed lines)

@@ -14,15 +14,16 @@ import (
)
const (
version = "0.0.2"
version = "0.1.0"
)
// New creates a new DBHub.io connection object. It doesn't connect to DBHub.io to do this. Connection only occurs
// when subsequent functions (eg Query()) are called.
func New(key string) (Connection, error) {
c := Connection{
APIKey: key,
Server: "https://api.dbhub.io",
APIKey: key,
Server: "https://api.dbhub.io",
VerifyServerCert: true,
}
return c, nil
}
@@ -37,6 +38,11 @@ func (c *Connection) ChangeServer(s string) {
c.Server = s
}
// ChangeVerifyServerCert changes whether to verify the server provided https certificate. Useful for testing and development.
func (c *Connection) ChangeVerifyServerCert(b bool) {
c.VerifyServerCert = b
}
// Branches returns a list of all available branches of a database along with the name of the default branch
func (c Connection) Branches(dbOwner, dbName string) (branches map[string]com.BranchEntry, defaultBranch string, err error) {
// Prepare the API parameters
@@ -45,7 +51,7 @@ func (c Connection) Branches(dbOwner, dbName string) (branches map[string]com.Br
// Fetch the list of branches and the default branch
var response com.BranchListResponseContainer
queryUrl := c.Server + "/v1/branches"
err = sendRequestJSON(queryUrl, data, &response)
err = sendRequestJSON(queryUrl, c.VerifyServerCert, data, &response)
// Extract information for return values
branches = response.Branches
@@ -61,7 +67,7 @@ func (c Connection) Columns(dbOwner, dbName string, ident Identifier, table stri
// Fetch the list of columns
queryUrl := c.Server + "/v1/columns"
err = sendRequestJSON(queryUrl, data, &columns)
err = sendRequestJSON(queryUrl, c.VerifyServerCert, data, &columns)
return
}
@@ -72,11 +78,11 @@ func (c Connection) Commits(dbOwner, dbName string) (commits map[string]com.Comm
// Fetch the commits
queryUrl := c.Server + "/v1/commits"
err = sendRequestJSON(queryUrl, data, &commits)
err = sendRequestJSON(queryUrl, c.VerifyServerCert, data, &commits)
return
}
// Databases returns the list of databases in your account
// Databases returns the list of standard databases in your account
func (c Connection) Databases() (databases []string, err error) {
// Prepare the API parameters
data := url.Values{}
@@ -84,7 +90,20 @@ func (c Connection) Databases() (databases []string, err error) {
// Fetch the list of databases
queryUrl := c.Server + "/v1/databases"
err = sendRequestJSON(queryUrl, data, &databases)
err = sendRequestJSON(queryUrl, c.VerifyServerCert, data, &databases)
return
}
// DatabasesLive returns the list of Live databases in your account
func (c Connection) DatabasesLive() (databases []string, err error) {
// Prepare the API parameters
data := url.Values{}
data.Set("apikey", c.APIKey)
data.Set("live", "true")
// Fetch the list of databases
queryUrl := c.Server + "/v1/databases"
err = sendRequestJSON(queryUrl, c.VerifyServerCert, data, &databases)
return
}
@@ -95,7 +114,7 @@ func (c Connection) Delete(dbName string) (err error) {
// Delete the database
queryUrl := c.Server + "/v1/delete"
err = sendRequestJSON(queryUrl, data, nil)
err = sendRequestJSON(queryUrl, c.VerifyServerCert, data, nil)
if err != nil && err.Error() == "no rows in result set" { // Feels like a dodgy workaround
err = fmt.Errorf("Unknown database\n")
}
@@ -146,7 +165,7 @@ func (c Connection) Diff(dbOwnerA, dbNameA string, identA Identifier, dbOwnerB,
// Fetch the diffs
queryUrl := c.Server + "/v1/diff"
err = sendRequestJSON(queryUrl, data, &diffs)
err = sendRequestJSON(queryUrl, c.VerifyServerCert, data, &diffs)
return
}
@@ -157,13 +176,30 @@ func (c Connection) Download(dbOwner, dbName string, ident Identifier) (db io.Re
// Fetch the database file
queryUrl := c.Server + "/v1/download"
db, err = sendRequest(queryUrl, data)
db, err = sendRequest(queryUrl, c.VerifyServerCert, data)
if err != nil {
return
}
return
}
// Execute executes a SQL statement (INSERT, UPDATE, DELETE) on the chosen database.
func (c Connection) Execute(dbOwner, dbName string, sql string) (rowsChanged int, err error) {
// Prepare the API parameters
data := c.PrepareVals(dbOwner, dbName, Identifier{})
data.Set("sql", base64.StdEncoding.EncodeToString([]byte(sql)))
// Run the query on the remote database
var execResponse com.ExecuteResponseContainer
queryUrl := c.Server + "/v1/execute"
err = sendRequestJSON(queryUrl, c.VerifyServerCert, data, &execResponse)
if err != nil {
return
}
rowsChanged = execResponse.RowsChanged
return
}
// Indexes returns the list of indexes present in the database, along with the table they belong to
func (c Connection) Indexes(dbOwner, dbName string, ident Identifier) (idx []com.APIJSONIndex, err error) {
// Prepare the API parameters
@@ -171,7 +207,7 @@ func (c Connection) Indexes(dbOwner, dbName string, ident Identifier) (idx []com
// Fetch the list of indexes
queryUrl := c.Server + "/v1/indexes"
err = sendRequestJSON(queryUrl, data, &idx)
err = sendRequestJSON(queryUrl, c.VerifyServerCert, data, &idx)
return
}
@@ -182,11 +218,11 @@ func (c Connection) Metadata(dbOwner, dbName string) (meta com.MetadataResponseC
// Fetch the list of databases
queryUrl := c.Server + "/v1/metadata"
err = sendRequestJSON(queryUrl, data, &meta)
err = sendRequestJSON(queryUrl, c.VerifyServerCert, data, &meta)
return
}
// PrepareVals creates a url.Values container holding the API key, database owner, name, and database identifier. The
// PrepareVals creates an url.Values container holding the API key, database owner, name, and database identifier. The
// url.Values container is then used for the requests to DBHub.io.
func (c Connection) PrepareVals(dbOwner, dbName string, ident Identifier) (data url.Values) {
// Prepare the API parameters
@@ -226,7 +262,7 @@ func (c Connection) Query(dbOwner, dbName string, ident Identifier, blobBase64 b
// Run the query on the remote database
var returnedData []com.DataRow
queryUrl := c.Server + "/v1/query"
err = sendRequestJSON(queryUrl, data, &returnedData)
err = sendRequestJSON(queryUrl, c.VerifyServerCert, data, &returnedData)
if err != nil {
return
}
@@ -272,7 +308,7 @@ func (c Connection) Releases(dbOwner, dbName string) (releases map[string]com.Re
// Fetch the releases
queryUrl := c.Server + "/v1/releases"
err = sendRequestJSON(queryUrl, data, &releases)
err = sendRequestJSON(queryUrl, c.VerifyServerCert, data, &releases)
return
}
@@ -283,7 +319,7 @@ func (c Connection) Tables(dbOwner, dbName string, ident Identifier) (tbl []stri
// Fetch the list of tables
queryUrl := c.Server + "/v1/tables"
err = sendRequestJSON(queryUrl, data, &tbl)
err = sendRequestJSON(queryUrl, c.VerifyServerCert, data, &tbl)
return
}
@@ -294,7 +330,7 @@ func (c Connection) Tags(dbOwner, dbName string) (tags map[string]com.TagEntry,
// Fetch the tags
queryUrl := c.Server + "/v1/tags"
err = sendRequestJSON(queryUrl, data, &tags)
err = sendRequestJSON(queryUrl, c.VerifyServerCert, data, &tags)
return
}
@@ -305,11 +341,11 @@ func (c Connection) Views(dbOwner, dbName string, ident Identifier) (views []str
// Fetch the list of views
queryUrl := c.Server + "/v1/views"
err = sendRequestJSON(queryUrl, data, &views)
err = sendRequestJSON(queryUrl, c.VerifyServerCert, data, &views)
return
}
// Upload uploads a new database, or a new revision of a database
// Upload uploads a new standard database, or a new revision of a database
func (c Connection) Upload(dbName string, info UploadInformation, dbBytes *[]byte) (err error) {
// Prepare the API parameters
data := c.PrepareVals("", dbName, info.Ident)
@@ -357,7 +393,35 @@ func (c Connection) Upload(dbName string, info UploadInformation, dbBytes *[]byt
// Upload the database
var body io.ReadCloser
queryUrl := c.Server + "/v1/upload"
body, err = sendUpload(queryUrl, &data, dbBytes)
body, err = sendUpload(queryUrl, c.VerifyServerCert, &data, dbBytes)
if body != nil {
defer body.Close()
}
if err != nil {
if body != nil {
// If there's useful error info in the returned JSON, return that as the error message
var z JSONError
err = json.NewDecoder(body).Decode(&z)
if err != nil {
return
}
err = fmt.Errorf("%s", z.Msg)
}
}
return
}
// UploadLive uploads a new Live database
func (c Connection) UploadLive(dbName string, dbBytes *[]byte) (err error) {
// Prepare the API parameters
data := c.PrepareVals("", dbName, Identifier{})
data.Del("dbowner") // The upload function always stores the database in the account of the API key user
data.Set("live", "true")
// Upload the database
var body io.ReadCloser
queryUrl := c.Server + "/v1/upload"
body, err = sendUpload(queryUrl, c.VerifyServerCert, &data, dbBytes)
if body != nil {
defer body.Close()
}
@@ -382,6 +446,6 @@ func (c Connection) Webpage(dbOwner, dbName string) (webPage com.WebpageResponse
// Fetch the releases
queryUrl := c.Server + "/v1/webpage"
err = sendRequestJSON(queryUrl, data, &webPage)
err = sendRequestJSON(queryUrl, c.VerifyServerCert, data, &webPage)
return
}
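For orientation, here is a minimal usage sketch of the Live database support added above (ChangeVerifyServerCert, DatabasesLive, Execute, UploadLive). The API key source, file name, owner, and table names below are illustrative assumptions, not part of this commit.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/sqlitebrowser/go-dbhub"
)

func main() {
	// Connect using an API key (read from an environment variable in this sketch)
	db, err := dbhub.New(os.Getenv("DBHUB_API_KEY"))
	if err != nil {
		log.Fatal(err)
	}

	// Optional: point at a local development server and skip TLS verification while testing
	// db.ChangeServer("https://localhost:9444")
	// db.ChangeVerifyServerCert(false)

	// Upload a new Live database ("mydata.sqlite" is an illustrative file name)
	raw, err := os.ReadFile("mydata.sqlite")
	if err != nil {
		log.Fatal(err)
	}
	if err = db.UploadLive("mydata.sqlite", &raw); err != nil {
		log.Fatal(err)
	}

	// List the Live databases in the account
	liveDBs, err := db.DatabasesLive()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("Live databases:", liveDBs)

	// Run an INSERT on the Live database ("someuser" and "table1" are placeholders)
	rowsChanged, err := db.Execute("someuser", "mydata.sqlite", "INSERT INTO table1 (Name) VALUES ('Bob')")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d row(s) changed\n", rowsChanged)
}
```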

go.mod (43 changed lines)

@@ -1,6 +1,6 @@
module github.com/sqlitebrowser/go-dbhub
go 1.14
go 1.17
replace (
github.com/Sirupsen/logrus v1.0.5 => github.com/sirupsen/logrus v1.0.5
@@ -10,4 +10,43 @@ replace (
github.com/Sirupsen/logrus v1.6.0 => github.com/sirupsen/logrus v1.6.0
)
require github.com/sqlitebrowser/dbhub.io v0.0.14
require github.com/sqlitebrowser/dbhub.io v0.1.1
require (
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/aquilax/truncate v1.0.0 // indirect
github.com/aymerick/douceur v0.2.0 // indirect
github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b // indirect
github.com/go-ini/ini v1.56.0 // indirect
github.com/go-playground/locales v0.13.0 // indirect
github.com/go-playground/universal-translator v0.17.0 // indirect
github.com/go-playground/validator/v10 v10.3.0 // indirect
github.com/gopherjs/gopherjs v1.17.2 // indirect
github.com/gorilla/css v1.0.0 // indirect
github.com/gwenn/gosqlite v0.0.0-20200521090053-24878be1a237 // indirect
github.com/gwenn/yacr v0.0.0-20200112083327-bbe82c1f4d60 // indirect
github.com/jackc/pgx v2.11.0+incompatible // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/leodido/go-urn v1.2.0 // indirect
github.com/microcosm-cc/bluemonday v1.0.16 // indirect
github.com/minio/minio-go v6.0.14+incompatible // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/rabbitmq/amqp091-go v1.7.0 // indirect
github.com/segmentio/ksuid v1.0.3 // indirect
github.com/sergi/go-diff v1.1.0 // indirect
github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636 // indirect
github.com/shurcooL/highlight_diff v0.0.0-20181222201841-111da2e7d480 // indirect
github.com/shurcooL/highlight_go v0.0.0-20191220051317-782971ddf21b // indirect
github.com/shurcooL/octicon v0.0.0-20191102190552-cbb32d6a785c // indirect
github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
github.com/smartystreets/assertions v1.13.0 // indirect
github.com/smtp2go-oss/smtp2go-go v1.0.2 // indirect
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d // indirect
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e // indirect
github.com/sqlitebrowser/blackfriday v9.0.0+incompatible // indirect
github.com/sqlitebrowser/github_flavored_markdown v0.0.0-20190120045821-b8cf8f054e47 // indirect
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b // indirect
golang.org/x/net v0.7.0 // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/text v0.8.0 // indirect
)

go.sum (157 changed lines)

@@ -1,14 +1,13 @@
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Freeaqingme/dkim v0.0.0-20160606200812-57418d2fbdcd h1:OXAdgeC8/sNLxo6w6GjeW/yTU0iLrcv6BZXzbsgHZLY=
github.com/Freeaqingme/dkim v0.0.0-20160606200812-57418d2fbdcd/go.mod h1:2jaO1ZDxGuwzrfRhIe1QKvws/FY3SJ9f1EMNLQLTa0g=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/aquilax/truncate v1.0.0 h1:UgIGS8U/aZ4JyOJ2h3xcF5cSQ06+gGBnjxH2RUHJe0U=
github.com/aquilax/truncate v1.0.0/go.mod h1:BeMESIDMlvlS3bmg4BVvBbbZUNwWtS8uzYPAKXwwhLw=
github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b h1:L/QXpzIa3pOvUGt1D1lA5KjYhPBAN/3iWdP7xeFS9F0=
github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
github.com/bradleypeabody/gorilla-sessions-memcache v0.0.0-20181103040241-659414f458e1/go.mod h1:dkChI7Tbtx7H1Tj7TqGSZMOeGpMP5gLHtjroHd4agiI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -23,48 +22,20 @@ github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+
github.com/go-playground/validator/v10 v10.3.0 h1:nZU+7q+yJoFmwvNgv/LnPUkwPal62+b2xXj0AU1Es7o=
github.com/go-playground/validator/v10 v10.3.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY=
github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c=
github.com/gwenn/gosqlite v0.0.0-20200521090053-24878be1a237 h1:RY/HYWiI6K/HL8ePZYpgvMGf4wX3/FgGxgE8kPh8vDM=
github.com/gwenn/gosqlite v0.0.0-20200521090053-24878be1a237/go.mod h1:WBYs9HfQGOYDCz7rFwMk7aHkbTTB0cUkQe3pZQARvIg=
github.com/gwenn/yacr v0.0.0-20200110180258-a66d8c42d0ff/go.mod h1:5SNcBGxZ5OaJAMJCSI/x3V7SGsvXqbwnwP/sHZLgYsw=
github.com/gwenn/yacr v0.0.0-20200112083327-bbe82c1f4d60 h1:JX4Yy6S9U/f3Jix82M58NLIAFeW/UjBFVrnYn5GS4X8=
github.com/gwenn/yacr v0.0.0-20200112083327-bbe82c1f4d60/go.mod h1:Ps/gikIXcn2rRmeP0HQ9EvUYJrfrjAi51Wg8acsrkP0=
github.com/hectane/go-attest v0.1.2 h1:HzAy7PhILWGy3Vr6WsTFG4k+8csZ7rYsgB9zO3HD+RE=
github.com/hectane/go-attest v0.1.2/go.mod h1:gxESRl/EHIwBfuD9xidptRiaabPkeyoDYGkIkQoOlm4=
github.com/hectane/go-nonblockingchan v0.1.0 h1:w5dFzLYim23KoK64xqfA0iSMNMA8ruLXvGkyXlZBDFY=
github.com/hectane/go-nonblockingchan v0.1.0/go.mod h1:Ztuu6NIB+3zEHbsCEXcynf5a4B49/PofiBiQUGDGbRw=
github.com/hectane/hectane v0.3.2 h1:UyJMWFHtKNiK40cJvfzi/a3PuUTF0nf6kFRULt/joYc=
github.com/hectane/hectane v0.3.2/go.mod h1:enmgBsfbzuxwQ9OLZeWYMWJt27wcegpNZORlhfw77p0=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
github.com/jackc/pgx v2.11.0+incompatible h1:IgFLUrzrhJj8mxbK44ZYExGVnjtfV4+TOkerb/XERV8=
github.com/jackc/pgx v2.11.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kennygrant/sanitize v1.2.4 h1:gN25/otpP5vAsO2djbMhF/LQX6R7+O1TB4yv8NzpJ3o=
github.com/kennygrant/sanitize v1.2.4/go.mod h1:LGsjYYtgxbetdg5owWB2mpgUL6e2nfw2eObZ0u0qvak=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -74,35 +45,27 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/memcachier/mc v2.0.1+incompatible/go.mod h1:7bkvFE61leUBvXz+yxsOnGBQSZpBSPIMUQSmmSHvuXc=
github.com/microcosm-cc/bluemonday v1.0.2 h1:5lPfLTTAvAbtS0VqT+94yOtFnGfUWYyx0+iToC3Os3s=
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
github.com/microcosm-cc/bluemonday v1.0.16 h1:kHmAq2t7WPWLjiGvzKa5o3HzSfahUKiOq7fAPUiMNIc=
github.com/microcosm-cc/bluemonday v1.0.16/go.mod h1:Z0r70sCuXHig8YpBzCc5eGHAap2K7e/u082ZUpDRRqM=
github.com/minio/minio-go v6.0.14+incompatible h1:fnV+GD28LeqdN6vT2XdGKW8Qe/IfjJDswNVuni6km9o=
github.com/minio/minio-go v6.0.14+incompatible/go.mod h1:7guKYtitv8dktvNUGrhzmNlA5wrAABTQXCoesZdFQO8=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/rabbitmq/amqp091-go v1.7.0 h1:V5CF5qPem5OGSnEo8BoSbsDGwejg6VUJsKEdneaoTUo=
github.com/rabbitmq/amqp091-go v1.7.0/go.mod h1:wfClAtY0C7bOHxd3GjmF26jEHn+rR/0B3+YV+Vn9/NI=
github.com/russross/blackfriday v2.0.0+incompatible h1:cBXrhZNUf9C+La9/YpS+UHpUT8YD6Td9ZMSU9APFcsk=
github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/segmentio/ksuid v1.0.3 h1:FoResxvleQwYiPAVKe1tMUlEirodZqlqglIuFsdDntY=
github.com/segmentio/ksuid v1.0.3/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470 h1:qb9IthCFBmROJ6YBS31BEMeSYjOscSiG+EO+JVNTz64=
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
github.com/shurcooL/go v0.0.0-20190704215121-7189cc372560 h1:SpaoQDTgpo2YZkvmr2mtgloFFfPTjtLMlZkQtNAPQik=
github.com/shurcooL/go v0.0.0-20190704215121-7189cc372560/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636 h1:aSISeOcal5irEhJd1M+IrApc0PdcN7e7Aj4yuEnOrfQ=
github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041 h1:llrF3Fs4018ePo4+G/HV/uQUqEI1HMDjCeOf2V6puPc=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/highlight_diff v0.0.0-20181222201841-111da2e7d480 h1:KaKXZldeYH73dpQL+Nr38j1r5BgpAYQjYvENOUpIZDQ=
github.com/shurcooL/highlight_diff v0.0.0-20181222201841-111da2e7d480/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
github.com/shurcooL/highlight_go v0.0.0-20191220051317-782971ddf21b h1:rBIwpb5ggtqf0uZZY5BPs1sL7njUMM7I8qD2jiou70E=
@@ -111,72 +74,74 @@ github.com/shurcooL/octicon v0.0.0-20191102190552-cbb32d6a785c h1:p3w+lTqXulfa3a
github.com/shurcooL/octicon v0.0.0-20191102190552-cbb32d6a785c/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.13.0 h1:Dx1kYM01xsSqKPno3aqLnrwac2LetPvN23diwyr69Qs=
github.com/smartystreets/assertions v1.13.0/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrxfxaaSlJazyFLYVFx8=
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8=
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smtp2go-oss/smtp2go-go v1.0.2 h1:vXkqx9kyoQIuetyV3nm40b+OZevihhgb78X4vA/u2fs=
github.com/smtp2go-oss/smtp2go-go v1.0.2/go.mod h1:lkv36awQXRBWAvnd517FFESKvne8465KCu90lPThcEY=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d h1:yKm7XZV6j9Ev6lojP2XaIshpT4ymkqhMeSghO5Ps00E=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e h1:qpG93cPwA5f7s/ZPBJnGOYQNK/vKsaDaseuKT5Asee8=
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
github.com/sqlitebrowser/blackfriday v9.0.0+incompatible h1:ddH/UyzasooYgGIblVU4R8DdmBuJ7QXLvSqX/0chZv4=
github.com/sqlitebrowser/blackfriday v9.0.0+incompatible/go.mod h1:/zga9sqpWzcewuI83AO5JZwe9+6F9GgPDdqqdNNEL/0=
github.com/sqlitebrowser/dbhub.io v0.0.14 h1:M//NHuEpEiTj5itk95UL/CzhskkIG32BnT9Aob9l/2M=
github.com/sqlitebrowser/dbhub.io v0.0.14/go.mod h1:GgZi8wkjdRGkkUVgYNuZp5i+Yps3jGt/jjRuLKRi6Po=
github.com/sqlitebrowser/dbhub.io v0.1.1 h1:Lnm29SWWAc4pcE+d5tA58QTY9/2pnH546FR+aYT5gio=
github.com/sqlitebrowser/dbhub.io v0.1.1/go.mod h1:YD/BP8fgop8s3yRSLA7kjx4N+pWYU6Z353On+ZGrm58=
github.com/sqlitebrowser/github_flavored_markdown v0.0.0-20190120045821-b8cf8f054e47 h1:s0+Ea95n1LrsKh6rtclU/9Qb2/5ofvnfnR7gDDiFTw8=
github.com/sqlitebrowser/github_flavored_markdown v0.0.0-20190120045821-b8cf8f054e47/go.mod h1:8vPIKi5FslxCXEgfQxrFtWfdclGy6VWAc9NA1ZTYCJg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 h1:cg5LA/zNPRzIXIWSCxQW10Rvpy94aQh3LT/ShoCpkHw=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b h1:Qwe1rC8PSniVfAFPFJeyUkB+zcysC3RgJBAGk7eqBEU=
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/inconshreveable/log15.v2 v2.0.0-20200109203555-b30bc20e4fd1 h1:iiHuQZCNgYPmFQxd3BBN/Nc5+dAwzZuq5y40s20oQw0=
gopkg.in/inconshreveable/log15.v2 v2.0.0-20200109203555-b30bc20e4fd1/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
gopkg.in/ini.v1 v1.46.0 h1:VeDZbLYGaupuvIrsYCEOe/L/2Pcs5n7hdO1ZTjporag=
gopkg.in/ini.v1 v1.46.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

http.go (29 changed lines)

@@ -2,6 +2,7 @@ package dbhub
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
@@ -12,10 +13,10 @@ import (
)
// sendRequestJSON sends a request to DBHub.io, formatting the returned result as JSON
func sendRequestJSON(queryUrl string, data url.Values, returnStructure interface{}) (err error) {
func sendRequestJSON(queryUrl string, verifyServerCert bool, data url.Values, returnStructure interface{}) (err error) {
// Send the request
var body io.ReadCloser
body, err = sendRequest(queryUrl, data)
body, err = sendRequest(queryUrl, verifyServerCert, data)
if body != nil {
defer body.Close()
}
@@ -44,10 +45,18 @@ func sendRequestJSON(queryUrl string, data url.Values, returnStructure interface
// sendRequest sends a request to DBHub.io. It exists because http.PostForm() doesn't seem to have a way of changing
// header values.
func sendRequest(queryUrl string, data url.Values) (body io.ReadCloser, err error) {
func sendRequest(queryUrl string, verifyServerCert bool, data url.Values) (body io.ReadCloser, err error) {
// Disable verification of the server https cert, if we've been told to
var client http.Client
if !verifyServerCert {
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client = http.Client{Transport: tr}
}
var req *http.Request
var resp *http.Response
var client http.Client
req, err = http.NewRequest(http.MethodPost, queryUrl, strings.NewReader(data.Encode()))
if err != nil {
return
@@ -73,7 +82,7 @@ func sendRequest(queryUrl string, data url.Values) (body io.ReadCloser, err erro
}
// sendUpload uploads a database to DBHub.io. It exists because the DBHub.io upload end point requires multi-part data
func sendUpload(queryUrl string, data *url.Values, dbBytes *[]byte) (body io.ReadCloser, err error) {
func sendUpload(queryUrl string, verifyServerCert bool, data *url.Values, dbBytes *[]byte) (body io.ReadCloser, err error) {
// Prepare the database file byte stream
var buf bytes.Buffer
w := multipart.NewWriter(&buf)
@@ -108,10 +117,18 @@ func sendUpload(queryUrl string, data *url.Values, dbBytes *[]byte) (body io.Rea
return
}
// Disable verification of the server https cert, if we've been told to
var client http.Client
if !verifyServerCert {
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client = http.Client{Transport: tr}
}
// Prepare the request
var req *http.Request
var resp *http.Response
var client http.Client
req, err = http.NewRequest(http.MethodPost, queryUrl, &buf)
if err != nil {
return
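Viewed in isolation, the conditional TLS-verification pattern that sendRequest() and sendUpload() now share looks like the sketch below; the helper function name is an illustration, not something this commit introduces.

```go
package dbhub

import (
	"crypto/tls"
	"net/http"
)

// newHTTPClient returns a default http.Client when certificate verification is
// wanted, and one whose Transport skips verification when it is not.
func newHTTPClient(verifyServerCert bool) http.Client {
	var client http.Client
	if !verifyServerCert {
		client = http.Client{
			Transport: &http.Transport{
				TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
			},
		}
	}
	return client
}
```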


@@ -4,8 +4,9 @@ import "time"
// Connection is a simple container holding the API key and address of the DBHub.io server
type Connection struct {
APIKey string `json:"api_key"`
Server string `json:"server"`
APIKey string `json:"api_key"`
Server string `json:"server"`
VerifyServerCert bool `json:"verify_certificate"`
}
// Identifier holds information used to identify a specific commit, tag, release, or the head of a specific branch
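One consequence of the Connection struct change above is that a marshalled Connection now includes the new field under the "verify_certificate" key, as in this small sketch (the API key value is a placeholder):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/sqlitebrowser/go-dbhub"
)

func main() {
	c := dbhub.Connection{
		APIKey:           "some-api-key",
		Server:           "https://api.dbhub.io",
		VerifyServerCert: true,
	}
	out, _ := json.Marshal(c)
	fmt.Println(string(out))
	// Output: {"api_key":"some-api-key","server":"https://api.dbhub.io","verify_certificate":true}
}
```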


@@ -1,5 +0,0 @@
TAGS
tags
.*.swp
tomlcheck/tomlcheck
toml.test


@@ -1,15 +0,0 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4
- 1.5
- 1.6
- tip
install:
- go install ./...
- go get github.com/BurntSushi/toml-test
script:
- export PATH="$PATH:$HOME/gopath/bin"
- make test


@@ -1,3 +0,0 @@
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)


@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2013 TOML authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


@@ -1,19 +0,0 @@
install:
go install ./...
test: install
go test -v
toml-test toml-test-decoder
toml-test -encoder toml-test-encoder
fmt:
gofmt -w *.go */*.go
colcheck *.go */*.go
tags:
find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
push:
git push origin master
git push github master


@@ -1,218 +0,0 @@
## TOML parser and encoder for Go with reflection
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml`
packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.)
Spec: https://github.com/toml-lang/toml
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
Documentation: https://godoc.org/github.com/BurntSushi/toml
Installation:
```bash
go get github.com/BurntSushi/toml
```
Try the toml validator:
```bash
go get github.com/BurntSushi/toml/cmd/tomlv
tomlv some-toml-file.toml
```
[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
### Testing
This package passes all tests in
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
and the encoder.
### Examples
This package works similarly to how the Go standard library handles `XML`
and `JSON`. Namely, data is loaded into Go values via reflection.
For the simplest example, consider some TOML file as just a list of keys
and values:
```toml
Age = 25
Cats = [ "Cauchy", "Plato" ]
Pi = 3.14
Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z
```
Which could be defined in Go as:
```go
type Config struct {
Age int
Cats []string
Pi float64
Perfection []int
DOB time.Time // requires `import time`
}
```
And then decoded with:
```go
var conf Config
if _, err := toml.Decode(tomlData, &conf); err != nil {
// handle error
}
```
You can also use struct tags if your struct field name doesn't map to a TOML
key value directly:
```toml
some_key_NAME = "wat"
```
```go
type TOML struct {
ObscureKey string `toml:"some_key_NAME"`
}
```
### Using the `encoding.TextUnmarshaler` interface
Here's an example that automatically parses duration strings into
`time.Duration` values:
```toml
[[song]]
name = "Thunder Road"
duration = "4m49s"
[[song]]
name = "Stairway to Heaven"
duration = "8m03s"
```
Which can be decoded with:
```go
type song struct {
Name string
Duration duration
}
type songs struct {
Song []song
}
var favorites songs
if _, err := toml.Decode(blob, &favorites); err != nil {
log.Fatal(err)
}
for _, s := range favorites.Song {
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
}
```
And you'll also need a `duration` type that satisfies the
`encoding.TextUnmarshaler` interface:
```go
type duration struct {
time.Duration
}
func (d *duration) UnmarshalText(text []byte) error {
var err error
d.Duration, err = time.ParseDuration(string(text))
return err
}
```
### More complex usage
Here's an example of how to load the example from the official spec page:
```toml
# This is a TOML document. Boom.
title = "TOML Example"
[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true
[servers]
# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
]
```
And the corresponding Go types are:
```go
type tomlConfig struct {
Title string
Owner ownerInfo
DB database `toml:"database"`
Servers map[string]server
Clients clients
}
type ownerInfo struct {
Name string
Org string `toml:"organization"`
Bio string
DOB time.Time
}
type database struct {
Server string
Ports []int
ConnMax int `toml:"connection_max"`
Enabled bool
}
type server struct {
IP string
DC string
}
type clients struct {
Data [][]interface{}
Hosts []string
}
```
Note that a case insensitive match will be tried if an exact match can't be
found.
A working example of the above can be found in `_examples/example.{go,toml}`.


@@ -1,509 +0,0 @@
package toml
import (
"fmt"
"io"
"io/ioutil"
"math"
"reflect"
"strings"
"time"
)
func e(format string, args ...interface{}) error {
return fmt.Errorf("toml: "+format, args...)
}
// Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves.
type Unmarshaler interface {
UnmarshalTOML(interface{}) error
}
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
func Unmarshal(p []byte, v interface{}) error {
_, err := Decode(string(p), v)
return err
}
// Primitive is a TOML value that hasn't been decoded into a Go value.
// When using the various `Decode*` functions, the type `Primitive` may
// be given to any value, and its decoding will be delayed.
//
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
//
// The underlying representation of a `Primitive` value is subject to change.
// Do not rely on it.
//
// N.B. Primitive values are still parsed, so using them will only avoid
// the overhead of reflection. They can be useful when you don't know the
// exact type of TOML data until run time.
type Primitive struct {
undecoded interface{}
context Key
}
// DEPRECATED!
//
// Use MetaData.PrimitiveDecode instead.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
md := MetaData{decoded: make(map[string]bool)}
return md.unify(primValue.undecoded, rvalue(v))
}
// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
//
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
md.context = primValue.context
defer func() { md.context = nil }()
return md.unify(primValue.undecoded, rvalue(v))
}
// Decode will decode the contents of `data` in TOML format into a pointer
// `v`.
//
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
// used interchangeably.)
//
// TOML arrays of tables correspond to either a slice of structs or a slice
// of maps.
//
// TOML datetimes correspond to Go `time.Time` values.
//
// All other TOML types (float, string, int, bool and array) correspond
// to the obvious Go types.
//
// An exception to the above rules is if a type implements the
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
// (floats, strings, integers, booleans and datetimes) will be converted to
// a byte string and given to the value's UnmarshalText method. See the
// Unmarshaler example for a demonstration with time duration strings.
//
// Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go
// struct. The special `toml` struct tag may be used to map TOML keys to
// struct fields that don't match the key name exactly. (See the example.)
// A case insensitive match to struct names will be tried if an exact match
// can't be found.
//
// The mapping between TOML values and Go values is loose. That is, there
// may exist TOML values that cannot be placed into your representation, and
// there may be parts of your representation that do not correspond to
// TOML values. This loose mapping can be made stricter by using the IsDefined
// and/or Undecoded methods on the MetaData returned.
//
// This decoder will not handle cyclic types. If a cyclic type is passed,
// `Decode` will not terminate.
func Decode(data string, v interface{}) (MetaData, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
}
if rv.IsNil() {
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
}
p, err := parse(data)
if err != nil {
return MetaData{}, err
}
md := MetaData{
p.mapping, p.types, p.ordered,
make(map[string]bool, len(p.ordered)), nil,
}
return md, md.unify(p.mapping, indirect(rv))
}
// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at `fpath` and decode it for you.
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadFile(fpath)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// DecodeReader is just like Decode, except it will consume all bytes
// from the reader and decode it for you.
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadAll(r)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
// Special case. Look for a `Primitive` value.
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
// Save the undecoded data and the key context into the primitive
// value.
context := make(Key, len(md.context))
copy(context, md.context)
rv.Set(reflect.ValueOf(Primitive{
undecoded: data,
context: context,
}))
return nil
}
// Special case. Unmarshaler Interface support.
if rv.CanAddr() {
if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
return v.UnmarshalTOML(data)
}
}
// Special case. Handle time.Time values specifically.
// TODO: Remove this code when we decide to drop support for Go 1.1.
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
// interfaces.
if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
return md.unifyDatetime(data, rv)
}
// Special case. Look for a value satisfying the TextUnmarshaler interface.
if v, ok := rv.Interface().(TextUnmarshaler); ok {
return md.unifyText(data, v)
}
// BUG(burntsushi)
// The behavior here is incorrect whenever a Go type satisfies the
// encoding.TextUnmarshaler interface but also corresponds to a TOML
// hash or array. In particular, the unmarshaler should only be applied
// to primitive TOML values. But at this point, it will be applied to
// all kinds of values and produce an incorrect error whenever those values
// are hashes or arrays (including arrays of tables).
k := rv.Kind()
// laziness
if k >= reflect.Int && k <= reflect.Uint64 {
return md.unifyInt(data, rv)
}
switch k {
case reflect.Ptr:
elem := reflect.New(rv.Type().Elem())
err := md.unify(data, reflect.Indirect(elem))
if err != nil {
return err
}
rv.Set(elem)
return nil
case reflect.Struct:
return md.unifyStruct(data, rv)
case reflect.Map:
return md.unifyMap(data, rv)
case reflect.Array:
return md.unifyArray(data, rv)
case reflect.Slice:
return md.unifySlice(data, rv)
case reflect.String:
return md.unifyString(data, rv)
case reflect.Bool:
return md.unifyBool(data, rv)
case reflect.Interface:
// we only support empty interfaces.
if rv.NumMethod() > 0 {
return e("unsupported type %s", rv.Type())
}
return md.unifyAnything(data, rv)
case reflect.Float32:
fallthrough
case reflect.Float64:
return md.unifyFloat64(data, rv)
}
return e("unsupported type %s", rv.Kind())
}
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
tmap, ok := mapping.(map[string]interface{})
if !ok {
if mapping == nil {
return nil
}
return e("type mismatch for %s: expected table but found %T",
rv.Type().String(), mapping)
}
for key, datum := range tmap {
var f *field
fields := cachedTypeFields(rv.Type())
for i := range fields {
ff := &fields[i]
if ff.name == key {
f = ff
break
}
if f == nil && strings.EqualFold(ff.name, key) {
f = ff
}
}
if f != nil {
subv := rv
for _, i := range f.index {
subv = indirect(subv.Field(i))
}
if isUnifiable(subv) {
md.decoded[md.context.add(key).String()] = true
md.context = append(md.context, key)
if err := md.unify(datum, subv); err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
} else if f.name != "" {
// Bad user! No soup for you!
return e("cannot write unexported field %s.%s",
rv.Type().String(), f.name)
}
}
}
return nil
}
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
tmap, ok := mapping.(map[string]interface{})
if !ok {
if tmap == nil {
return nil
}
return badtype("map", mapping)
}
if rv.IsNil() {
rv.Set(reflect.MakeMap(rv.Type()))
}
for k, v := range tmap {
md.decoded[md.context.add(k).String()] = true
md.context = append(md.context, k)
rvkey := indirect(reflect.New(rv.Type().Key()))
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
if err := md.unify(v, rvval); err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
rvkey.SetString(k)
rv.SetMapIndex(rvkey, rvval)
}
return nil
}
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
}
sliceLen := datav.Len()
if sliceLen != rv.Len() {
return e("expected array length %d; got TOML array of length %d",
rv.Len(), sliceLen)
}
return md.unifySliceArray(datav, rv)
}
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
}
n := datav.Len()
if rv.IsNil() || rv.Cap() < n {
rv.Set(reflect.MakeSlice(rv.Type(), n, n))
}
rv.SetLen(n)
return md.unifySliceArray(datav, rv)
}
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
sliceLen := data.Len()
for i := 0; i < sliceLen; i++ {
v := data.Index(i).Interface()
sliceval := indirect(rv.Index(i))
if err := md.unify(v, sliceval); err != nil {
return err
}
}
return nil
}
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
if _, ok := data.(time.Time); ok {
rv.Set(reflect.ValueOf(data))
return nil
}
return badtype("time.Time", data)
}
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
if s, ok := data.(string); ok {
rv.SetString(s)
return nil
}
return badtype("string", data)
}
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
if num, ok := data.(float64); ok {
switch rv.Kind() {
case reflect.Float32:
fallthrough
case reflect.Float64:
rv.SetFloat(num)
default:
panic("bug")
}
return nil
}
return badtype("float", data)
}
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
if num, ok := data.(int64); ok {
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
switch rv.Kind() {
case reflect.Int, reflect.Int64:
// No bounds checking necessary.
case reflect.Int8:
if num < math.MinInt8 || num > math.MaxInt8 {
return e("value %d is out of range for int8", num)
}
case reflect.Int16:
if num < math.MinInt16 || num > math.MaxInt16 {
return e("value %d is out of range for int16", num)
}
case reflect.Int32:
if num < math.MinInt32 || num > math.MaxInt32 {
return e("value %d is out of range for int32", num)
}
}
rv.SetInt(num)
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
unum := uint64(num)
switch rv.Kind() {
case reflect.Uint, reflect.Uint64:
// No bounds checking necessary.
case reflect.Uint8:
if num < 0 || unum > math.MaxUint8 {
return e("value %d is out of range for uint8", num)
}
case reflect.Uint16:
if num < 0 || unum > math.MaxUint16 {
return e("value %d is out of range for uint16", num)
}
case reflect.Uint32:
if num < 0 || unum > math.MaxUint32 {
return e("value %d is out of range for uint32", num)
}
}
rv.SetUint(unum)
} else {
panic("unreachable")
}
return nil
}
return badtype("integer", data)
}
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
if b, ok := data.(bool); ok {
rv.SetBool(b)
return nil
}
return badtype("boolean", data)
}
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
rv.Set(reflect.ValueOf(data))
return nil
}
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
var s string
switch sdata := data.(type) {
case TextMarshaler:
text, err := sdata.MarshalText()
if err != nil {
return err
}
s = string(text)
case fmt.Stringer:
s = sdata.String()
case string:
s = sdata
case bool:
s = fmt.Sprintf("%v", sdata)
case int64:
s = fmt.Sprintf("%d", sdata)
case float64:
s = fmt.Sprintf("%f", sdata)
default:
return badtype("primitive (string-like)", data)
}
if err := v.UnmarshalText([]byte(s)); err != nil {
return err
}
return nil
}
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
func rvalue(v interface{}) reflect.Value {
return indirect(reflect.ValueOf(v))
}
// indirect returns the value pointed to by a pointer.
// Pointers are followed until the value is not a pointer.
// New values are allocated for each nil pointer.
//
// An exception to this rule is if the value satisfies an interface of
// interest to us (like encoding.TextUnmarshaler).
func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr {
if v.CanSet() {
pv := v.Addr()
if _, ok := pv.Interface().(TextUnmarshaler); ok {
return pv
}
}
return v
}
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
return indirect(reflect.Indirect(v))
}
func isUnifiable(rv reflect.Value) bool {
if rv.CanSet() {
return true
}
if _, ok := rv.Interface().(TextUnmarshaler); ok {
return true
}
return false
}
func badtype(expected string, data interface{}) error {
return e("cannot load TOML value of type %T into a Go %s", data, expected)
}

View File

@@ -1,121 +0,0 @@
package toml
import "strings"
// MetaData allows access to meta information about TOML data that may not
// be inferrable via reflection. In particular, whether a key has been defined
// and the TOML type of a key.
type MetaData struct {
mapping map[string]interface{}
types map[string]tomlType
keys []Key
decoded map[string]bool
context Key // Used only during decoding.
}
// IsDefined returns true if the key given exists in the TOML data. The key
// should be specified hierarchically. e.g.,
//
// // access the TOML key 'a.b.c'
// IsDefined("a", "b", "c")
//
// IsDefined will return false if an empty key is given. Keys are case sensitive.
func (md *MetaData) IsDefined(key ...string) bool {
if len(key) == 0 {
return false
}
var hash map[string]interface{}
var ok bool
var hashOrVal interface{} = md.mapping
for _, k := range key {
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
return false
}
if hashOrVal, ok = hash[k]; !ok {
return false
}
}
return true
}
// Type returns a string representation of the type of the key specified.
//
// Type will return the empty string if given an empty key or a key that
// does not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
fullkey := strings.Join(key, ".")
if typ, ok := md.types[fullkey]; ok {
return typ.typeString()
}
return ""
}
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
// to get values of this type.
type Key []string
func (k Key) String() string {
return strings.Join(k, ".")
}
func (k Key) maybeQuotedAll() string {
var ss []string
for i := range k {
ss = append(ss, k.maybeQuoted(i))
}
return strings.Join(ss, ".")
}
func (k Key) maybeQuoted(i int) string {
quote := false
for _, c := range k[i] {
if !isBareKeyChar(c) {
quote = true
break
}
}
if quote {
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
}
return k[i]
}
func (k Key) add(piece string) Key {
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece
return newKey
}
// Keys returns a slice of every key in the TOML data, including key groups.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific.
//
// The list will have the same order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
func (md *MetaData) Keys() []Key {
return md.keys
}
// Undecoded returns all keys that have not been decoded in the order in which
// they appear in the original TOML document.
//
// This includes keys that haven't been decoded because of a Primitive value.
// Once the Primitive value is decoded, the keys will be considered decoded.
//
// Also note that decoding into an empty interface will result in no decoding,
// and so no keys will be considered decoded.
//
// In this sense, the Undecoded keys correspond to keys in the TOML document
// that do not have a concrete type in your representation.
func (md *MetaData) Undecoded() []Key {
undecoded := make([]Key, 0, len(md.keys))
for _, key := range md.keys {
if !md.decoded[key.String()] {
undecoded = append(undecoded, key)
}
}
return undecoded
}
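// Editor's sketch, not part of the original vendored file: one way the MetaData
// helpers above (IsDefined, Type, Keys, Undecoded) might be used after decoding.
// It assumes the package-level Decode function from decode.go (outside this
// excerpt) has the signature Decode(data string, v interface{}) (MetaData, error).
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//
//		"github.com/BurntSushi/toml"
//	)
//
//	func main() {
//		var cfg struct {
//			Title string `toml:"title"`
//		}
//		doc := "title = \"demo\"\n\n[owner]\nname = \"someone\"\n"
//		md, err := toml.Decode(doc, &cfg)
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(md.IsDefined("owner", "name")) // true
//		fmt.Println(md.Type("title"))              // String
//		fmt.Println(md.Undecoded())                // keys with no matching struct field
//	}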

View File

@@ -1,27 +0,0 @@
/*
Package toml provides facilities for decoding and encoding TOML configuration
files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.
The specification implemented: https://github.com/toml-lang/toml
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
whether a file is a valid TOML document. It can also be used to print the
type of each key in a TOML document.
Testing
There are two important types of tests used for this package. The first is
contained inside '*_test.go' files and uses the standard Go unit testing
framework. These tests are primarily devoted to holistically testing the
decoder and encoder.
The second type of testing is used to verify the implementation's adherence
to the TOML specification. These tests have been factored into their own
project: https://github.com/BurntSushi/toml-test
The tests are kept in a separate project so that they can be used by any
implementation of TOML; the test suite itself is language agnostic.
*/
package toml

View File

@@ -1,568 +0,0 @@
package toml
import (
"bufio"
"errors"
"fmt"
"io"
"reflect"
"sort"
"strconv"
"strings"
"time"
)
type tomlEncodeError struct{ error }
var (
errArrayMixedElementTypes = errors.New(
"toml: cannot encode array with mixed element types")
errArrayNilElement = errors.New(
"toml: cannot encode array with nil element")
errNonString = errors.New(
"toml: cannot encode a map with non-string key type")
errAnonNonStruct = errors.New(
"toml: cannot encode an anonymous field that is not a struct")
errArrayNoTable = errors.New(
"toml: TOML array element cannot contain a table")
errNoKey = errors.New(
"toml: top-level values must be Go maps or structs")
errAnything = errors.New("") // used in testing
)
var quotedReplacer = strings.NewReplacer(
"\t", "\\t",
"\n", "\\n",
"\r", "\\r",
"\"", "\\\"",
"\\", "\\\\",
)
// Encoder controls the encoding of Go values to a TOML document to some
// io.Writer.
//
// The indentation level can be controlled with the Indent field.
type Encoder struct {
// A single indentation level. By default it is two spaces.
Indent string
// hasWritten is whether we have written any output to w yet.
hasWritten bool
w *bufio.Writer
}
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
// given. By default, a single indentation level is 2 spaces.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
w: bufio.NewWriter(w),
Indent: " ",
}
}
// Encode writes a TOML representation of the Go value to the underlying
// io.Writer. If the value given cannot be encoded to a valid TOML document,
// then an error is returned.
//
// The mapping between Go values and TOML values should be precisely the same
// as for the Decode* functions. Similarly, the TextMarshaler interface is
// supported by encoding the resulting bytes as strings. (If you want to write
// arbitrary binary data then you will need to use something like base64 since
// TOML does not have any binary types.)
//
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
// sub-hashes are encoded first.
//
// If a Go map is encoded, then its keys are sorted alphabetically for
// deterministic output. More control over this behavior may be provided if
// there is demand for it.
//
// Encoding Go values without a corresponding TOML representation---like map
// types with non-string keys---will cause an error to be returned. Similarly
// for mixed arrays/slices, arrays/slices with nil elements, embedded
// non-struct types and nested slices containing maps or structs.
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
// and so is []map[string][]string.)
func (enc *Encoder) Encode(v interface{}) error {
rv := eindirect(reflect.ValueOf(v))
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
return err
}
return enc.w.Flush()
}
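// Editor's sketch, not part of the original vendored file: minimal use of the
// Encoder defined above. It relies only on NewEncoder, Encode, and the `toml`
// struct tags handled by getOptions further down; the import path follows the
// github.com/BurntSushi/toml repository mentioned in doc.go.
//
//	package main
//
//	import (
//		"bytes"
//		"fmt"
//		"log"
//
//		"github.com/BurntSushi/toml"
//	)
//
//	type service struct {
//		Name  string `toml:"name"`
//		Ports []int  `toml:"ports"`
//	}
//
//	func main() {
//		var buf bytes.Buffer
//		svc := service{Name: "api", Ports: []int{8080, 8443}}
//		if err := toml.NewEncoder(&buf).Encode(svc); err != nil {
//			log.Fatal(err)
//		}
//		fmt.Print(buf.String())
//		// Output:
//		// name = "api"
//		// ports = [8080, 8443]
//	}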
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
defer func() {
if r := recover(); r != nil {
if terr, ok := r.(tomlEncodeError); ok {
err = terr.error
return
}
panic(r)
}
}()
enc.encode(key, rv)
return nil
}
func (enc *Encoder) encode(key Key, rv reflect.Value) {
// Special case. Time needs to be in ISO8601 format.
// Special case. If we can marshal the type to text, then we use that.
// Basically, this prevents the encoder from handling these types as
// generic structs (or whatever the underlying type of a TextMarshaler is).
switch rv.Interface().(type) {
case time.Time, TextMarshaler:
enc.keyEqElement(key, rv)
return
}
k := rv.Kind()
switch k {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64,
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
enc.keyEqElement(key, rv)
case reflect.Array, reflect.Slice:
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
enc.eArrayOfTables(key, rv)
} else {
enc.keyEqElement(key, rv)
}
case reflect.Interface:
if rv.IsNil() {
return
}
enc.encode(key, rv.Elem())
case reflect.Map:
if rv.IsNil() {
return
}
enc.eTable(key, rv)
case reflect.Ptr:
if rv.IsNil() {
return
}
enc.encode(key, rv.Elem())
case reflect.Struct:
enc.eTable(key, rv)
default:
panic(e("unsupported type for key '%s': %s", key, k))
}
}
// eElement encodes any value that can be an array element (primitives and
// arrays).
func (enc *Encoder) eElement(rv reflect.Value) {
switch v := rv.Interface().(type) {
case time.Time:
// Special case time.Time as a primitive. Has to come before
// TextMarshaler below because time.Time implements
// encoding.TextMarshaler, but we need to always use UTC.
enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
return
case TextMarshaler:
// Special case. Use text marshaler if it's available for this value.
if s, err := v.MarshalText(); err != nil {
encPanic(err)
} else {
enc.writeQuoted(string(s))
}
return
}
switch rv.Kind() {
case reflect.Bool:
enc.wf(strconv.FormatBool(rv.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64:
enc.wf(strconv.FormatInt(rv.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16,
reflect.Uint32, reflect.Uint64:
enc.wf(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
case reflect.Float64:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
case reflect.Interface:
enc.eElement(rv.Elem())
case reflect.String:
enc.writeQuoted(rv.String())
default:
panic(e("unexpected primitive type: %s", rv.Kind()))
}
}
// By the TOML spec, all floats must have a decimal point with at least one
// digit on either side.
func floatAddDecimal(fstr string) string {
if !strings.Contains(fstr, ".") {
return fstr + ".0"
}
return fstr
}
func (enc *Encoder) writeQuoted(s string) {
enc.wf("\"%s\"", quotedReplacer.Replace(s))
}
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
length := rv.Len()
enc.wf("[")
for i := 0; i < length; i++ {
elem := rv.Index(i)
enc.eElement(elem)
if i != length-1 {
enc.wf(", ")
}
}
enc.wf("]")
}
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
if len(key) == 0 {
encPanic(errNoKey)
}
for i := 0; i < rv.Len(); i++ {
trv := rv.Index(i)
if isNil(trv) {
continue
}
panicIfInvalidKey(key)
enc.newline()
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
enc.eMapOrStruct(key, trv)
}
}
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
panicIfInvalidKey(key)
if len(key) == 1 {
// Output an extra newline between top-level tables.
// (The newline isn't written if nothing else has been written though.)
enc.newline()
}
if len(key) > 0 {
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
}
enc.eMapOrStruct(key, rv)
}
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
switch rv := eindirect(rv); rv.Kind() {
case reflect.Map:
enc.eMap(key, rv)
case reflect.Struct:
enc.eStruct(key, rv)
default:
panic("eMapOrStruct: unhandled reflect.Value Kind: " + rv.Kind().String())
}
}
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
rt := rv.Type()
if rt.Key().Kind() != reflect.String {
encPanic(errNonString)
}
// Sort keys so that we have deterministic output. And write keys directly
// underneath this key first, before writing sub-structs or sub-maps.
var mapKeysDirect, mapKeysSub []string
for _, mapKey := range rv.MapKeys() {
k := mapKey.String()
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
mapKeysSub = append(mapKeysSub, k)
} else {
mapKeysDirect = append(mapKeysDirect, k)
}
}
var writeMapKeys = func(mapKeys []string) {
sort.Strings(mapKeys)
for _, mapKey := range mapKeys {
mrv := rv.MapIndex(reflect.ValueOf(mapKey))
if isNil(mrv) {
// Don't write anything for nil fields.
continue
}
enc.encode(key.add(mapKey), mrv)
}
}
writeMapKeys(mapKeysDirect)
writeMapKeys(mapKeysSub)
}
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
// Write keys for fields directly under this key first, because if we write
// a field that creates a new table, then all keys under it will be in that
// table (not the one we're writing here).
rt := rv.Type()
var fieldsDirect, fieldsSub [][]int
var addFields func(rt reflect.Type, rv reflect.Value, start []int)
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
for i := 0; i < rt.NumField(); i++ {
f := rt.Field(i)
// skip unexported fields
if f.PkgPath != "" && !f.Anonymous {
continue
}
frv := rv.Field(i)
if f.Anonymous {
t := f.Type
switch t.Kind() {
case reflect.Struct:
// Treat anonymous struct fields with
// tag names as though they are not
// anonymous, like encoding/json does.
if getOptions(f.Tag).name == "" {
addFields(t, frv, f.Index)
continue
}
case reflect.Ptr:
if t.Elem().Kind() == reflect.Struct &&
getOptions(f.Tag).name == "" {
if !frv.IsNil() {
addFields(t.Elem(), frv.Elem(), f.Index)
}
continue
}
// Fall through to the normal field encoding logic below
// for non-struct anonymous fields.
}
}
if typeIsHash(tomlTypeOfGo(frv)) {
fieldsSub = append(fieldsSub, append(start, f.Index...))
} else {
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
}
}
}
addFields(rt, rv, nil)
var writeFields = func(fields [][]int) {
for _, fieldIndex := range fields {
sft := rt.FieldByIndex(fieldIndex)
sf := rv.FieldByIndex(fieldIndex)
if isNil(sf) {
// Don't write anything for nil fields.
continue
}
opts := getOptions(sft.Tag)
if opts.skip {
continue
}
keyName := sft.Name
if opts.name != "" {
keyName = opts.name
}
if opts.omitempty && isEmpty(sf) {
continue
}
if opts.omitzero && isZero(sf) {
continue
}
enc.encode(key.add(keyName), sf)
}
}
writeFields(fieldsDirect)
writeFields(fieldsSub)
}
// tomlTypeOfGo returns the TOML type of a Go value. The returned type may be
// nil, which means no concrete TOML type could be found (for example, for a
// nil value). It is also used to determine whether the types of array elements
// are mixed, which is forbidden; a nil value is illegal as an array element.
func tomlTypeOfGo(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() {
return nil
}
switch rv.Kind() {
case reflect.Bool:
return tomlBool
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64:
return tomlInteger
case reflect.Float32, reflect.Float64:
return tomlFloat
case reflect.Array, reflect.Slice:
if typeEqual(tomlHash, tomlArrayType(rv)) {
return tomlArrayHash
}
return tomlArray
case reflect.Ptr, reflect.Interface:
return tomlTypeOfGo(rv.Elem())
case reflect.String:
return tomlString
case reflect.Map:
return tomlHash
case reflect.Struct:
switch rv.Interface().(type) {
case time.Time:
return tomlDatetime
case TextMarshaler:
return tomlString
default:
return tomlHash
}
default:
panic("unexpected reflect.Kind: " + rv.Kind().String())
}
}
// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
// slice). This function may also panic if it finds a type that cannot be
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
// nested arrays of tables).
func tomlArrayType(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
return nil
}
firstType := tomlTypeOfGo(rv.Index(0))
if firstType == nil {
encPanic(errArrayNilElement)
}
rvlen := rv.Len()
for i := 1; i < rvlen; i++ {
elem := rv.Index(i)
switch elemType := tomlTypeOfGo(elem); {
case elemType == nil:
encPanic(errArrayNilElement)
case !typeEqual(firstType, elemType):
encPanic(errArrayMixedElementTypes)
}
}
// If we have a nested array, then we must make sure that the nested
// array contains ONLY primitives.
// This checks arbitrarily nested arrays.
if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
nest := tomlArrayType(eindirect(rv.Index(0)))
if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
encPanic(errArrayNoTable)
}
}
return firstType
}
type tagOptions struct {
skip bool // "-"
name string
omitempty bool
omitzero bool
}
func getOptions(tag reflect.StructTag) tagOptions {
t := tag.Get("toml")
if t == "-" {
return tagOptions{skip: true}
}
var opts tagOptions
parts := strings.Split(t, ",")
opts.name = parts[0]
for _, s := range parts[1:] {
switch s {
case "omitempty":
opts.omitempty = true
case "omitzero":
opts.omitzero = true
}
}
return opts
}
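// Editor's note, illustrative and not part of the original vendored file: the
// tag options parsed above mirror encoding/json. A hypothetical struct using
// each of them:
//
//	type conn struct {
//		Host     string   `toml:"host"`              // rename the key
//		Retries  int      `toml:"retries,omitzero"`  // skip when the value is zero
//		Labels   []string `toml:"labels,omitempty"`  // skip when empty
//		Password string   `toml:"-"`                 // never encoded
//	}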
func isZero(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return rv.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return rv.Uint() == 0
case reflect.Float32, reflect.Float64:
return rv.Float() == 0.0
}
return false
}
func isEmpty(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
return rv.Len() == 0
case reflect.Bool:
return !rv.Bool()
}
return false
}
func (enc *Encoder) newline() {
if enc.hasWritten {
enc.wf("\n")
}
}
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
if len(key) == 0 {
encPanic(errNoKey)
}
panicIfInvalidKey(key)
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.eElement(val)
enc.newline()
}
func (enc *Encoder) wf(format string, v ...interface{}) {
if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
encPanic(err)
}
enc.hasWritten = true
}
func (enc *Encoder) indentStr(key Key) string {
return strings.Repeat(enc.Indent, len(key)-1)
}
func encPanic(err error) {
panic(tomlEncodeError{err})
}
func eindirect(v reflect.Value) reflect.Value {
switch v.Kind() {
case reflect.Ptr, reflect.Interface:
return eindirect(v.Elem())
default:
return v
}
}
func isNil(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return rv.IsNil()
default:
return false
}
}
func panicIfInvalidKey(key Key) {
for _, k := range key {
if len(k) == 0 {
encPanic(e("Key '%s' is not a valid table name. Key names "+
"cannot be empty.", key.maybeQuotedAll()))
}
}
}
func isValidKeyName(s string) bool {
return len(s) != 0
}

View File

@@ -1,19 +0,0 @@
// +build go1.2
package toml
// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces.
import (
"encoding"
)
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler

View File

@@ -1,18 +0,0 @@
// +build !go1.2
package toml
// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1.
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
MarshalText() (text []byte, err error)
}
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
UnmarshalText(text []byte) error
}

View File

@@ -1,953 +0,0 @@
package toml
import (
"fmt"
"strings"
"unicode"
"unicode/utf8"
)
type itemType int
const (
itemError itemType = iota
itemNIL // used in the parser to indicate no type
itemEOF
itemText
itemString
itemRawString
itemMultilineString
itemRawMultilineString
itemBool
itemInteger
itemFloat
itemDatetime
itemArray // the start of an array
itemArrayEnd
itemTableStart
itemTableEnd
itemArrayTableStart
itemArrayTableEnd
itemKeyStart
itemCommentStart
itemInlineTableStart
itemInlineTableEnd
)
const (
eof = 0
comma = ','
tableStart = '['
tableEnd = ']'
arrayTableStart = '['
arrayTableEnd = ']'
tableSep = '.'
keySep = '='
arrayStart = '['
arrayEnd = ']'
commentStart = '#'
stringStart = '"'
stringEnd = '"'
rawStringStart = '\''
rawStringEnd = '\''
inlineTableStart = '{'
inlineTableEnd = '}'
)
type stateFn func(lx *lexer) stateFn
type lexer struct {
input string
start int
pos int
line int
state stateFn
items chan item
// Allow for backing up up to three runes.
// This is necessary because TOML contains 3-rune tokens (""" and ''').
prevWidths [3]int
nprev int // how many of prevWidths are in use
// If we emit an eof, we can still back up, but it is not OK to call
// next again.
atEOF bool
// A stack of state functions used to maintain context.
// The idea is to reuse parts of the state machine in various places.
// For example, values can appear at the top level or within arbitrarily
// nested arrays. The last state on the stack is used after a value has
// been lexed. Similarly for comments.
stack []stateFn
}
type item struct {
typ itemType
val string
line int
}
func (lx *lexer) nextItem() item {
for {
select {
case item := <-lx.items:
return item
default:
lx.state = lx.state(lx)
}
}
}
func lex(input string) *lexer {
lx := &lexer{
input: input,
state: lexTop,
line: 1,
items: make(chan item, 10),
stack: make([]stateFn, 0, 10),
}
return lx
}
func (lx *lexer) push(state stateFn) {
lx.stack = append(lx.stack, state)
}
func (lx *lexer) pop() stateFn {
if len(lx.stack) == 0 {
return lx.errorf("BUG in lexer: no states to pop")
}
last := lx.stack[len(lx.stack)-1]
lx.stack = lx.stack[0 : len(lx.stack)-1]
return last
}
func (lx *lexer) current() string {
return lx.input[lx.start:lx.pos]
}
func (lx *lexer) emit(typ itemType) {
lx.items <- item{typ, lx.current(), lx.line}
lx.start = lx.pos
}
func (lx *lexer) emitTrim(typ itemType) {
lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
lx.start = lx.pos
}
func (lx *lexer) next() (r rune) {
if lx.atEOF {
panic("next called after EOF")
}
if lx.pos >= len(lx.input) {
lx.atEOF = true
return eof
}
if lx.input[lx.pos] == '\n' {
lx.line++
}
lx.prevWidths[2] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[0]
if lx.nprev < 3 {
lx.nprev++
}
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
lx.prevWidths[0] = w
lx.pos += w
return r
}
// ignore skips over the pending input before this point.
func (lx *lexer) ignore() {
lx.start = lx.pos
}
// backup steps back one rune. Can be called only twice between calls to next.
func (lx *lexer) backup() {
if lx.atEOF {
lx.atEOF = false
return
}
if lx.nprev < 1 {
panic("backed up too far")
}
w := lx.prevWidths[0]
lx.prevWidths[0] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[2]
lx.nprev--
lx.pos -= w
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
lx.line--
}
}
// accept consumes the next rune if it's equal to `valid`.
func (lx *lexer) accept(valid rune) bool {
if lx.next() == valid {
return true
}
lx.backup()
return false
}
// peek returns but does not consume the next rune in the input.
func (lx *lexer) peek() rune {
r := lx.next()
lx.backup()
return r
}
// skip ignores all input that matches the given predicate.
func (lx *lexer) skip(pred func(rune) bool) {
for {
r := lx.next()
if pred(r) {
continue
}
lx.backup()
lx.ignore()
return
}
}
// errorf stops all lexing by emitting an error and returning `nil`.
// Note that any value that is a character is escaped if it's a special
// character (newlines, tabs, etc.).
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
lx.items <- item{
itemError,
fmt.Sprintf(format, values...),
lx.line,
}
return nil
}
// lexTop consumes elements at the top level of TOML data.
func lexTop(lx *lexer) stateFn {
r := lx.next()
if isWhitespace(r) || isNL(r) {
return lexSkip(lx, lexTop)
}
switch r {
case commentStart:
lx.push(lexTop)
return lexCommentStart
case tableStart:
return lexTableStart
case eof:
if lx.pos > lx.start {
return lx.errorf("unexpected EOF")
}
lx.emit(itemEOF)
return nil
}
// At this point, the only valid item can be a key, so we back up
// and let the key lexer do the rest.
lx.backup()
lx.push(lexTopEnd)
return lexKeyStart
}
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
// or a table.) It must see only whitespace, and will turn back to lexTop
// upon a newline. If it sees EOF, it will quit the lexer successfully.
func lexTopEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case r == commentStart:
// a comment will read to a newline for us.
lx.push(lexTop)
return lexCommentStart
case isWhitespace(r):
return lexTopEnd
case isNL(r):
lx.ignore()
return lexTop
case r == eof:
lx.emit(itemEOF)
return nil
}
return lx.errorf("expected a top-level item to end with a newline, "+
"comment, or EOF, but got %q instead", r)
}
// lexTableStart lexes the beginning of a table. Namely, it makes sure that
// it starts with a character other than '.' and ']'.
// It assumes that '[' has already been consumed.
// It also handles the case that this is an item in an array of tables.
// e.g., '[[name]]'.
func lexTableStart(lx *lexer) stateFn {
if lx.peek() == arrayTableStart {
lx.next()
lx.emit(itemArrayTableStart)
lx.push(lexArrayTableEnd)
} else {
lx.emit(itemTableStart)
lx.push(lexTableEnd)
}
return lexTableNameStart
}
func lexTableEnd(lx *lexer) stateFn {
lx.emit(itemTableEnd)
return lexTopEnd
}
func lexArrayTableEnd(lx *lexer) stateFn {
if r := lx.next(); r != arrayTableEnd {
return lx.errorf("expected end of table array name delimiter %q, "+
"but got %q instead", arrayTableEnd, r)
}
lx.emit(itemArrayTableEnd)
return lexTopEnd
}
func lexTableNameStart(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.peek(); {
case r == tableEnd || r == eof:
return lx.errorf("unexpected end of table name " +
"(table names cannot be empty)")
case r == tableSep:
return lx.errorf("unexpected table separator " +
"(table names cannot be empty)")
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.push(lexTableNameEnd)
return lexValue // reuse string lexing
default:
return lexBareTableName
}
}
// lexBareTableName lexes the name of a table. It assumes that at least one
// valid character for the table has already been read.
func lexBareTableName(lx *lexer) stateFn {
r := lx.next()
if isBareKeyChar(r) {
return lexBareTableName
}
lx.backup()
lx.emit(itemText)
return lexTableNameEnd
}
// lexTableNameEnd reads the end of a piece of a table name, optionally
// consuming whitespace.
func lexTableNameEnd(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.next(); {
case isWhitespace(r):
return lexTableNameEnd
case r == tableSep:
lx.ignore()
return lexTableNameStart
case r == tableEnd:
return lx.pop()
default:
return lx.errorf("expected '.' or ']' to end table name, "+
"but got %q instead", r)
}
}
// lexKeyStart consumes a key name up until the first non-whitespace character.
// lexKeyStart will ignore whitespace.
func lexKeyStart(lx *lexer) stateFn {
r := lx.peek()
switch {
case r == keySep:
return lx.errorf("unexpected key separator %q", keySep)
case isWhitespace(r) || isNL(r):
lx.next()
return lexSkip(lx, lexKeyStart)
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.emit(itemKeyStart)
lx.push(lexKeyEnd)
return lexValue // reuse string lexing
default:
lx.ignore()
lx.emit(itemKeyStart)
return lexBareKey
}
}
// lexBareKey consumes the text of a bare key. Assumes that the first character
// (which is not whitespace) has not yet been consumed.
func lexBareKey(lx *lexer) stateFn {
switch r := lx.next(); {
case isBareKeyChar(r):
return lexBareKey
case isWhitespace(r):
lx.backup()
lx.emit(itemText)
return lexKeyEnd
case r == keySep:
lx.backup()
lx.emit(itemText)
return lexKeyEnd
default:
return lx.errorf("bare keys cannot contain %q", r)
}
}
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
// separator).
func lexKeyEnd(lx *lexer) stateFn {
switch r := lx.next(); {
case r == keySep:
return lexSkip(lx, lexValue)
case isWhitespace(r):
return lexSkip(lx, lexKeyEnd)
default:
return lx.errorf("expected key separator %q, but got %q instead",
keySep, r)
}
}
// lexValue starts the consumption of a value anywhere a value is expected.
// lexValue will ignore whitespace.
// After a value is lexed, the last state on the stack is popped and returned.
func lexValue(lx *lexer) stateFn {
// We allow whitespace to precede a value, but NOT newlines.
// In array syntax, the array states are responsible for ignoring newlines.
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexValue)
case isDigit(r):
lx.backup() // avoid an extra state and use the same as above
return lexNumberOrDateStart
}
switch r {
case arrayStart:
lx.ignore()
lx.emit(itemArray)
return lexArrayValue
case inlineTableStart:
lx.ignore()
lx.emit(itemInlineTableStart)
return lexInlineTableValue
case stringStart:
if lx.accept(stringStart) {
if lx.accept(stringStart) {
lx.ignore() // Ignore """
return lexMultilineString
}
lx.backup()
}
lx.ignore() // ignore the '"'
return lexString
case rawStringStart:
if lx.accept(rawStringStart) {
if lx.accept(rawStringStart) {
lx.ignore() // Ignore '''
return lexMultilineRawString
}
lx.backup()
}
lx.ignore() // ignore the "'"
return lexRawString
case '+', '-':
return lexNumberStart
case '.': // special error case, be kind to users
return lx.errorf("floats must start with a digit, not '.'")
}
if unicode.IsLetter(r) {
// Be permissive here; lexBool will give a nice error if the
// user wrote something like
// x = foo
// (i.e. not 'true' or 'false' but is something else word-like.)
lx.backup()
return lexBool
}
return lx.errorf("expected value but found %q instead", r)
}
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
// have already been consumed. All whitespace and newlines are ignored.
func lexArrayValue(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValue)
case r == commentStart:
lx.push(lexArrayValue)
return lexCommentStart
case r == comma:
return lx.errorf("unexpected comma")
case r == arrayEnd:
// NOTE(caleb): The spec isn't clear about whether you can have
// a trailing comma or not, so we'll allow it.
return lexArrayEnd
}
lx.backup()
lx.push(lexArrayValueEnd)
return lexValue
}
// lexArrayValueEnd consumes everything between the end of an array value and
// the next value (or the end of the array): it ignores whitespace and newlines
// and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValueEnd)
case r == commentStart:
lx.push(lexArrayValueEnd)
return lexCommentStart
case r == comma:
lx.ignore()
return lexArrayValue // move on to the next value
case r == arrayEnd:
return lexArrayEnd
}
return lx.errorf(
"expected a comma or array terminator %q, but got %q instead",
arrayEnd, r,
)
}
// lexArrayEnd finishes the lexing of an array.
// It assumes that a ']' has just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemArrayEnd)
return lx.pop()
}
// lexInlineTableValue consumes one key/value pair in an inline table.
// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
func lexInlineTableValue(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValue)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValue)
return lexCommentStart
case r == comma:
return lx.errorf("unexpected comma")
case r == inlineTableEnd:
return lexInlineTableEnd
}
lx.backup()
lx.push(lexInlineTableValueEnd)
return lexKeyStart
}
// lexInlineTableValueEnd consumes everything between the end of an inline table
// key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValueEnd)
return lexCommentStart
case r == comma:
lx.ignore()
return lexInlineTableValue
case r == inlineTableEnd:
return lexInlineTableEnd
}
return lx.errorf("expected a comma or an inline table terminator %q, "+
"but got %q instead", inlineTableEnd, r)
}
// lexInlineTableEnd finishes the lexing of an inline table.
// It assumes that a '}' has just been consumed.
func lexInlineTableEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemInlineTableEnd)
return lx.pop()
}
// lexString consumes the inner contents of a string. It assumes that the
// beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r):
return lx.errorf("strings cannot contain newlines")
case r == '\\':
lx.push(lexString)
return lexStringEscape
case r == stringEnd:
lx.backup()
lx.emit(itemString)
lx.next()
lx.ignore()
return lx.pop()
}
return lexString
}
// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
switch lx.next() {
case eof:
return lx.errorf("unexpected EOF")
case '\\':
return lexMultilineStringEscape
case stringEnd:
if lx.accept(stringEnd) {
if lx.accept(stringEnd) {
lx.backup()
lx.backup()
lx.backup()
lx.emit(itemMultilineString)
lx.next()
lx.next()
lx.next()
lx.ignore()
return lx.pop()
}
lx.backup()
}
}
return lexMultilineString
}
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
// It assumes that the beginning "'" has already been consumed and ignored.
func lexRawString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r):
return lx.errorf("strings cannot contain newlines")
case r == rawStringEnd:
lx.backup()
lx.emit(itemRawString)
lx.next()
lx.ignore()
return lx.pop()
}
return lexRawString
}
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning "'''" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
switch lx.next() {
case eof:
return lx.errorf("unexpected EOF")
case rawStringEnd:
if lx.accept(rawStringEnd) {
if lx.accept(rawStringEnd) {
lx.backup()
lx.backup()
lx.backup()
lx.emit(itemRawMultilineString)
lx.next()
lx.next()
lx.next()
lx.ignore()
return lx.pop()
}
lx.backup()
}
}
return lexMultilineRawString
}
// lexMultilineStringEscape consumes an escaped character. It assumes that the
// preceding '\\' has already been consumed.
func lexMultilineStringEscape(lx *lexer) stateFn {
// Handle the special case first:
if isNL(lx.next()) {
return lexMultilineString
}
lx.backup()
lx.push(lexMultilineString)
return lexStringEscape(lx)
}
func lexStringEscape(lx *lexer) stateFn {
r := lx.next()
switch r {
case 'b':
fallthrough
case 't':
fallthrough
case 'n':
fallthrough
case 'f':
fallthrough
case 'r':
fallthrough
case '"':
fallthrough
case '\\':
return lx.pop()
case 'u':
return lexShortUnicodeEscape
case 'U':
return lexLongUnicodeEscape
}
return lx.errorf("invalid escape character %q; only the following "+
"escape characters are allowed: "+
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
}
func lexShortUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 4; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf(`expected four hexadecimal digits after '\u', `+
"but got %q instead", lx.current())
}
}
return lx.pop()
}
func lexLongUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 8; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf(`expected eight hexadecimal digits after '\U', `+
"but got %q instead", lx.current())
}
}
return lx.pop()
}
// lexNumberOrDateStart consumes either an integer, a float, or datetime.
func lexNumberOrDateStart(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumberOrDate
}
switch r {
case '_':
return lexNumber
case 'e', 'E':
return lexFloat
case '.':
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("expected a digit but got %q", r)
}
// lexNumberOrDate consumes either an integer, float or datetime.
func lexNumberOrDate(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumberOrDate
}
switch r {
case '-':
return lexDatetime
case '_':
return lexNumber
case '.', 'e', 'E':
return lexFloat
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexDatetime consumes a Datetime, to a first approximation.
// The parser validates that it matches one of the accepted formats.
func lexDatetime(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexDatetime
}
switch r {
case '-', 'T', ':', '.', 'Z', '+':
return lexDatetime
}
lx.backup()
lx.emit(itemDatetime)
return lx.pop()
}
// lexNumberStart consumes either an integer or a float. It assumes that a sign
// has already been read, but that *no* digits have been consumed.
// lexNumberStart will move to the appropriate integer or float states.
func lexNumberStart(lx *lexer) stateFn {
// We MUST see a digit. Even floats have to start with a digit.
r := lx.next()
if !isDigit(r) {
if r == '.' {
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("expected a digit but got %q", r)
}
return lexNumber
}
// lexNumber consumes an integer or a float after seeing the first digit.
func lexNumber(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumber
}
switch r {
case '_':
return lexNumber
case '.', 'e', 'E':
return lexFloat
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexFloat consumes the elements of a float. It allows any sequence of
// float-like characters, so floats emitted by the lexer are only a first
// approximation and must be validated by the parser.
func lexFloat(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexFloat
}
switch r {
case '_', '.', '-', '+', 'e', 'E':
return lexFloat
}
lx.backup()
lx.emit(itemFloat)
return lx.pop()
}
// lexBool consumes a bool string: 'true' or 'false'.
func lexBool(lx *lexer) stateFn {
var rs []rune
for {
r := lx.next()
if !unicode.IsLetter(r) {
lx.backup()
break
}
rs = append(rs, r)
}
s := string(rs)
switch s {
case "true", "false":
lx.emit(itemBool)
return lx.pop()
}
return lx.errorf("expected value but found %q instead", s)
}
// lexCommentStart begins the lexing of a comment. It will emit
// itemCommentStart and consume no characters, passing control to lexComment.
func lexCommentStart(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemCommentStart)
return lexComment
}
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
// It will consume *up to* the first newline character, and pass control
// back to the last state on the stack.
func lexComment(lx *lexer) stateFn {
r := lx.peek()
if isNL(r) || r == eof {
lx.emit(itemText)
return lx.pop()
}
lx.next()
return lexComment
}
// lexSkip ignores all slurped input and moves on to the next state.
func lexSkip(lx *lexer, nextState stateFn) stateFn {
return func(lx *lexer) stateFn {
lx.ignore()
return nextState
}
}
// isWhitespace returns true if `r` is a whitespace character according
// to the spec.
func isWhitespace(r rune) bool {
return r == '\t' || r == ' '
}
func isNL(r rune) bool {
return r == '\n' || r == '\r'
}
func isDigit(r rune) bool {
return r >= '0' && r <= '9'
}
func isHexadecimal(r rune) bool {
return (r >= '0' && r <= '9') ||
(r >= 'a' && r <= 'f') ||
(r >= 'A' && r <= 'F')
}
func isBareKeyChar(r rune) bool {
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
r == '_' ||
r == '-'
}
func (itype itemType) String() string {
switch itype {
case itemError:
return "Error"
case itemNIL:
return "NIL"
case itemEOF:
return "EOF"
case itemText:
return "Text"
case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
return "String"
case itemBool:
return "Bool"
case itemInteger:
return "Integer"
case itemFloat:
return "Float"
case itemDatetime:
return "DateTime"
case itemTableStart:
return "TableStart"
case itemTableEnd:
return "TableEnd"
case itemKeyStart:
return "KeyStart"
case itemArray:
return "Array"
case itemArrayEnd:
return "ArrayEnd"
case itemCommentStart:
return "CommentStart"
}
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
}
func (item item) String() string {
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
}
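// Editor's sketch, not part of the original vendored file: driving the lexer
// directly. Everything it uses (lex, nextItem, item, itemError, itemEOF) is
// defined in this file, so the sketch would live in a *_test.go file inside
// package toml, with the standard "testing" import.
//
//	func TestLexSmoke(t *testing.T) {
//		lx := lex("answer = 42 # the answer\n")
//		for {
//			it := lx.nextItem()
//			if it.typ == itemError {
//				t.Fatalf("lex error: %s", it.val)
//			}
//			if it.typ == itemEOF {
//				break
//			}
//			t.Logf("%s", it) // e.g. (KeyStart, ), (Text, answer), (Integer, 42), ...
//		}
//	}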

View File

@@ -1,592 +0,0 @@
package toml
import (
"fmt"
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
)
type parser struct {
mapping map[string]interface{}
types map[string]tomlType
lx *lexer
// A list of keys in the order that they appear in the TOML data.
ordered []Key
// the full key for the current hash in scope
context Key
// the base key name for everything except hashes
currentKey string
// rough approximation of line number
approxLine int
// A map of 'key.group.names' to whether they were created implicitly.
implicits map[string]bool
}
type parseError string
func (pe parseError) Error() string {
return string(pe)
}
func parse(data string) (p *parser, err error) {
defer func() {
if r := recover(); r != nil {
var ok bool
if err, ok = r.(parseError); ok {
return
}
panic(r)
}
}()
p = &parser{
mapping: make(map[string]interface{}),
types: make(map[string]tomlType),
lx: lex(data),
ordered: make([]Key, 0),
implicits: make(map[string]bool),
}
for {
item := p.next()
if item.typ == itemEOF {
break
}
p.topLevel(item)
}
return p, nil
}
func (p *parser) panicf(format string, v ...interface{}) {
msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
p.approxLine, p.current(), fmt.Sprintf(format, v...))
panic(parseError(msg))
}
func (p *parser) next() item {
it := p.lx.nextItem()
if it.typ == itemError {
p.panicf("%s", it.val)
}
return it
}
func (p *parser) bug(format string, v ...interface{}) {
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
func (p *parser) expect(typ itemType) item {
it := p.next()
p.assertEqual(typ, it.typ)
return it
}
func (p *parser) assertEqual(expected, got itemType) {
if expected != got {
p.bug("Expected '%s' but got '%s'.", expected, got)
}
}
func (p *parser) topLevel(item item) {
switch item.typ {
case itemCommentStart:
p.approxLine = item.line
p.expect(itemText)
case itemTableStart:
kg := p.next()
p.approxLine = kg.line
var key Key
for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
key = append(key, p.keyString(kg))
}
p.assertEqual(itemTableEnd, kg.typ)
p.establishContext(key, false)
p.setType("", tomlHash)
p.ordered = append(p.ordered, key)
case itemArrayTableStart:
kg := p.next()
p.approxLine = kg.line
var key Key
for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
key = append(key, p.keyString(kg))
}
p.assertEqual(itemArrayTableEnd, kg.typ)
p.establishContext(key, true)
p.setType("", tomlArrayHash)
p.ordered = append(p.ordered, key)
case itemKeyStart:
kname := p.next()
p.approxLine = kname.line
p.currentKey = p.keyString(kname)
val, typ := p.value(p.next())
p.setValue(p.currentKey, val)
p.setType(p.currentKey, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
p.currentKey = ""
default:
p.bug("Unexpected type at top level: %s", item.typ)
}
}
// Gets a string for a key (or part of a key in a table name).
func (p *parser) keyString(it item) string {
switch it.typ {
case itemText:
return it.val
case itemString, itemMultilineString,
itemRawString, itemRawMultilineString:
s, _ := p.value(it)
return s.(string)
default:
p.bug("Unexpected key type: %s", it.typ)
panic("unreachable")
}
}
// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface.
func (p *parser) value(it item) (interface{}, tomlType) {
switch it.typ {
case itemString:
return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
case itemMultilineString:
trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
case itemRawString:
return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString:
return stripFirstNewline(it.val), p.typeOfPrimitive(it)
case itemBool:
switch it.val {
case "true":
return true, p.typeOfPrimitive(it)
case "false":
return false, p.typeOfPrimitive(it)
}
p.bug("Expected boolean value, but got '%s'.", it.val)
case itemInteger:
if !numUnderscoresOK(it.val) {
p.panicf("Invalid integer %q: underscores must be surrounded by digits",
it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseInt(val, 10, 64)
if err != nil {
// Distinguish integer values. Normally, it'd be a bug if the lexer
// provides an invalid integer, but it's possible that the number is
// out of range of valid values (which the lexer cannot determine).
// So mark the former as a bug but the latter as a legitimate user
// error.
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Integer '%s' is out of the range of 64-bit "+
"signed integers.", it.val)
} else {
p.bug("Expected integer value, but got '%s'.", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemFloat:
parts := strings.FieldsFunc(it.val, func(r rune) bool {
switch r {
case '.', 'e', 'E':
return true
}
return false
})
for _, part := range parts {
if !numUnderscoresOK(part) {
p.panicf("Invalid float %q: underscores must be "+
"surrounded by digits", it.val)
}
}
if !numPeriodsOK(it.val) {
// As a special case, numbers like '123.' or '1.e2',
// which are valid as far as Go/strconv are concerned,
// must be rejected because TOML says that a fractional
// part consists of '.' followed by 1+ digits.
p.panicf("Invalid float %q: '.' must be followed "+
"by one or more digits", it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseFloat(val, 64)
if err != nil {
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Float '%s' is out of the range of 64-bit "+
"IEEE-754 floating-point numbers.", it.val)
} else {
p.panicf("Invalid float value: %q", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemDatetime:
var t time.Time
var ok bool
var err error
for _, format := range []string{
"2006-01-02T15:04:05Z07:00",
"2006-01-02T15:04:05",
"2006-01-02",
} {
t, err = time.ParseInLocation(format, it.val, time.Local)
if err == nil {
ok = true
break
}
}
if !ok {
p.panicf("Invalid TOML Datetime: %q.", it.val)
}
return t, p.typeOfPrimitive(it)
case itemArray:
array := make([]interface{}, 0)
types := make([]tomlType, 0)
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
val, typ := p.value(it)
array = append(array, val)
types = append(types, typ)
}
return array, p.typeOfArray(types)
case itemInlineTableStart:
var (
hash = make(map[string]interface{})
outerContext = p.context
outerKey = p.currentKey
)
p.context = append(p.context, p.currentKey)
p.currentKey = ""
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
if it.typ != itemKeyStart {
p.bug("Expected key start but instead found %q, around line %d",
it.val, p.approxLine)
}
// retrieve key
k := p.next()
p.approxLine = k.line
kname := p.keyString(k)
// retrieve value
p.currentKey = kname
val, typ := p.value(p.next())
// make sure we keep metadata up to date
p.setType(kname, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
hash[kname] = val
}
p.context = outerContext
p.currentKey = outerKey
return hash, tomlHash
}
p.bug("Unexpected value type: %s", it.typ)
panic("unreachable")
}
// numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores.
func numUnderscoresOK(s string) bool {
accept := false
for _, r := range s {
if r == '_' {
if !accept {
return false
}
accept = false
continue
}
accept = true
}
return accept
}
// numPeriodsOK checks whether every period in s is followed by a digit.
func numPeriodsOK(s string) bool {
period := false
for _, r := range s {
if period && !isDigit(r) {
return false
}
period = r == '.'
}
return !period
}
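// Editor's sketch, not part of the original vendored file: concrete cases for
// the two validation helpers above, written as an internal test (both functions
// are unexported) in a *_test.go file with the standard "testing" import.
//
//	func TestNumberChecks(t *testing.T) {
//		// Underscores must not lead, trail, or touch another underscore.
//		for in, want := range map[string]bool{
//			"1_000": true, "_1000": false, "1000_": false, "1__0": false,
//		} {
//			if got := numUnderscoresOK(in); got != want {
//				t.Errorf("numUnderscoresOK(%q) = %v, want %v", in, got, want)
//			}
//		}
//		// Every '.' must be followed by at least one digit.
//		for in, want := range map[string]bool{
//			"3.14": true, "123.": false, "1.e2": false,
//		} {
//			if got := numPeriodsOK(in); got != want {
//				t.Errorf("numPeriodsOK(%q) = %v, want %v", in, got, want)
//			}
//		}
//	}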
// establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
func (p *parser) establishContext(key Key, array bool) {
var ok bool
// Always start at the top level and drill down for our context.
hashContext := p.mapping
keyContext := make(Key, 0)
// We only need implicit hashes for key[0:-1]
for _, k := range key[0 : len(key)-1] {
_, ok = hashContext[k]
keyContext = append(keyContext, k)
// No key? Make an implicit hash and move on.
if !ok {
p.addImplicit(keyContext)
hashContext[k] = make(map[string]interface{})
}
// If the hash context is actually an array of tables, then set
// the hash context to the last element in that array.
//
// Otherwise, it better be a table, since this MUST be a key group (by
// virtue of it not being the last element in a key).
switch t := hashContext[k].(type) {
case []map[string]interface{}:
hashContext = t[len(t)-1]
case map[string]interface{}:
hashContext = t
default:
p.panicf("Key '%s' was already created as a hash.", keyContext)
}
}
p.context = keyContext
if array {
// If this is the first element for this array, then allocate a new
// list of tables for it.
k := key[len(key)-1]
if _, ok := hashContext[k]; !ok {
hashContext[k] = make([]map[string]interface{}, 0, 5)
}
// Add a new table. But make sure the key hasn't already been used
// for something else.
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
hashContext[k] = append(hash, make(map[string]interface{}))
} else {
p.panicf("Key '%s' was already created and cannot be used as "+
"an array.", keyContext)
}
} else {
p.setValue(key[len(key)-1], make(map[string]interface{}))
}
p.context = append(p.context, key[len(key)-1])
}
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, accounting for
// implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
var tmpHash interface{}
var ok bool
hash := p.mapping
keyContext := make(Key, 0)
for _, k := range p.context {
keyContext = append(keyContext, k)
if tmpHash, ok = hash[k]; !ok {
p.bug("Context for key '%s' has not been established.", keyContext)
}
switch t := tmpHash.(type) {
case []map[string]interface{}:
// The context is a table of hashes. Pick the most recent table
// defined as the current hash.
hash = t[len(t)-1]
case map[string]interface{}:
hash = t
default:
p.bug("Expected hash to have type 'map[string]interface{}', but "+
"it has '%T' instead.", tmpHash)
}
}
keyContext = append(keyContext, key)
if _, ok := hash[key]; ok {
// Typically, if the given key has already been set, then we have
// to raise an error since duplicate keys are disallowed. However,
// it's possible that a key was previously defined implicitly. In this
// case, it is allowed to be redefined concretely. (See the
// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
//
// But we have to make sure to stop marking it as an implicit. (So that
// another redefinition provokes an error.)
//
// Note that since it has already been defined (as a hash), we don't
// want to overwrite it. So our business is done.
if p.isImplicit(keyContext) {
p.removeImplicit(keyContext)
return
}
// Otherwise, we have a concrete key trying to override a previous
// key, which is *always* wrong.
p.panicf("Key '%s' has already been defined.", keyContext)
}
hash[key] = value
}
// setType sets the type of a particular value at a given key.
// It should be called immediately AFTER setValue.
//
// Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables).
func (p *parser) setType(key string, typ tomlType) {
keyContext := make(Key, 0, len(p.context)+1)
for _, k := range p.context {
keyContext = append(keyContext, k)
}
if len(key) > 0 { // allow type setting for hashes
keyContext = append(keyContext, key)
}
p.types[keyContext.String()] = typ
}
// addImplicit sets the given Key as having been created implicitly.
func (p *parser) addImplicit(key Key) {
p.implicits[key.String()] = true
}
// removeImplicit stops tagging the given key as having been implicitly
// created.
func (p *parser) removeImplicit(key Key) {
p.implicits[key.String()] = false
}
// isImplicit returns true if the key group pointed to by the key was created
// implicitly.
func (p *parser) isImplicit(key Key) bool {
return p.implicits[key.String()]
}
// current returns the full key name of the current context.
func (p *parser) current() string {
if len(p.currentKey) == 0 {
return p.context.String()
}
if len(p.context) == 0 {
return p.currentKey
}
return fmt.Sprintf("%s.%s", p.context, p.currentKey)
}
func stripFirstNewline(s string) string {
if len(s) == 0 || s[0] != '\n' {
return s
}
return s[1:]
}
func stripEscapedWhitespace(s string) string {
esc := strings.Split(s, "\\\n")
if len(esc) > 1 {
for i := 1; i < len(esc); i++ {
esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
}
}
return strings.Join(esc, "")
}
func (p *parser) replaceEscapes(str string) string {
var replaced []rune
s := []byte(str)
r := 0
for r < len(s) {
if s[r] != '\\' {
c, size := utf8.DecodeRune(s[r:])
r += size
replaced = append(replaced, c)
continue
}
r += 1
if r >= len(s) {
p.bug("Escape sequence at end of string.")
return ""
}
switch s[r] {
default:
p.bug("Expected valid escape code after \\, but got %q.", s[r])
return ""
case 'b':
replaced = append(replaced, rune(0x0008))
r += 1
case 't':
replaced = append(replaced, rune(0x0009))
r += 1
case 'n':
replaced = append(replaced, rune(0x000A))
r += 1
case 'f':
replaced = append(replaced, rune(0x000C))
r += 1
case 'r':
replaced = append(replaced, rune(0x000D))
r += 1
case '"':
replaced = append(replaced, rune(0x0022))
r += 1
case '\\':
replaced = append(replaced, rune(0x005C))
r += 1
case 'u':
// At this point, we know we have a Unicode escape of the form
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
replaced = append(replaced, escaped)
r += 5
case 'U':
// At this point, we know we have a Unicode escape of the form
// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
replaced = append(replaced, escaped)
r += 9
}
}
return string(replaced)
}
func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
s := string(bs)
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
if err != nil {
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
"lexer claims it's OK: %s", s, err)
}
if !utf8.ValidRune(rune(hex)) {
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
}
return rune(hex)
}
func isStringType(ty itemType) bool {
return ty == itemString || ty == itemMultilineString ||
ty == itemRawString || ty == itemRawMultilineString
}

View File

@@ -1 +0,0 @@
au BufWritePost *.go silent!make tags > /dev/null 2>&1

View File

@@ -1,91 +0,0 @@
package toml
// tomlType represents any Go type that corresponds to a TOML type.
// While the first draft of the TOML spec has a simplistic type system that
// probably doesn't need this level of sophistication, we seem to be moving
// toward adding real composite types.
type tomlType interface {
typeString() string
}
// typeEqual accepts any two types and returns true if they are equal.
func typeEqual(t1, t2 tomlType) bool {
if t1 == nil || t2 == nil {
return false
}
return t1.typeString() == t2.typeString()
}
func typeIsHash(t tomlType) bool {
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
}
type tomlBaseType string
func (btype tomlBaseType) typeString() string {
return string(btype)
}
func (btype tomlBaseType) String() string {
return btype.typeString()
}
var (
tomlInteger tomlBaseType = "Integer"
tomlFloat tomlBaseType = "Float"
tomlDatetime tomlBaseType = "Datetime"
tomlString tomlBaseType = "String"
tomlBool tomlBaseType = "Bool"
tomlArray tomlBaseType = "Array"
tomlHash tomlBaseType = "Hash"
tomlArrayHash tomlBaseType = "ArrayHash"
)
// typeOfPrimitive returns a tomlType of any primitive value in TOML.
// Primitive values are: Integer, Float, Datetime, String and Bool.
//
// Passing a lexer item other than the following will cause a BUG message
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
switch lexItem.typ {
case itemInteger:
return tomlInteger
case itemFloat:
return tomlFloat
case itemDatetime:
return tomlDatetime
case itemString:
return tomlString
case itemMultilineString:
return tomlString
case itemRawString:
return tomlString
case itemRawMultilineString:
return tomlString
case itemBool:
return tomlBool
}
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
panic("unreachable")
}
// typeOfArray returns a tomlType for an array given a list of types of its
// values.
//
// In the current spec, if an array is homogeneous, then its type is always
// "Array". If the array is not homogeneous, an error is generated.
func (p *parser) typeOfArray(types []tomlType) tomlType {
// Empty arrays are cool.
if len(types) == 0 {
return tomlArray
}
theType := types[0]
for _, t := range types[1:] {
if !typeEqual(theType, t) {
p.panicf("Array contains values of type '%s' and '%s', but "+
"arrays must be homogeneous.", theType, t)
}
}
return tomlArray
}
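
The homogeneity rule typeOfArray enforces can be illustrated with a small standalone check (an illustrative sketch only; it works on plain type-name strings rather than the parser's tomlType):

```go
package main

import "fmt"

// allSameType applies the rule described above: every element of a TOML
// array must share a single type, and empty arrays always pass.
func allSameType(types []string) error {
	if len(types) == 0 {
		return nil
	}
	first := types[0]
	for _, t := range types[1:] {
		if t != first {
			return fmt.Errorf("array contains values of type %q and %q, but arrays must be homogeneous", first, t)
		}
	}
	return nil
}

func main() {
	fmt.Println(allSameType([]string{"Integer", "Integer"})) // <nil>
	fmt.Println(allSameType([]string{"Integer", "String"}))  // error
}
```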

View File

@@ -1,242 +0,0 @@
package toml
// Struct field handling is adapted from code in encoding/json:
//
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the Go distribution.
import (
"reflect"
"sort"
"sync"
)
// A field represents a single field found in a struct.
type field struct {
name string // the name of the field (`toml` tag included)
tag bool // whether field has a `toml` tag
index []int // represents the depth of an anonymous field
typ reflect.Type // the type of the field
}
// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from toml tag", then
// breaking ties with index sequence.
type byName []field
func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that TOML should recognize for the given
// type. The algorithm is breadth-first search over the set of structs to
// include - the top struct and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
if sf.PkgPath != "" && !sf.Anonymous { // unexported
continue
}
opts := getOptions(sf.Tag)
if opts.skip {
continue
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Record found field and index sequence.
if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := opts.name != ""
name := opts.name
if name == "" {
name = sf.Name
}
fields = append(fields, field{name, tagged, index, ft})
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
f := field{name: ft.Name(), index: index, typ: ft}
next = append(next, f)
}
}
}
}
sort.Sort(byName(fields))
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with TOML tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(byIndex(fields))
return fields
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// TOML tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
if f.tag {
if tagged >= 0 {
// Multiple tagged fields at the same level: conflict.
// Return no field.
return field{}, false
}
tagged = i
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
var fieldCache struct {
sync.RWMutex
m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
fieldCache.RLock()
f := fieldCache.m[t]
fieldCache.RUnlock()
if f != nil {
return f
}
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = typeFields(t)
if f == nil {
f = []field{}
}
fieldCache.Lock()
if fieldCache.m == nil {
fieldCache.m = map[reflect.Type][]field{}
}
fieldCache.m[t] = f
fieldCache.Unlock()
return f
}
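
As a rough illustration of the tag handling typeFields relies on, the sketch below shows how a `toml` struct tag overrides the Go field name. The `tomlFieldName` helper is hypothetical and ignores tag options such as `omitempty`:

```go
package main

import (
	"fmt"
	"reflect"
)

// tomlFieldName returns the name TOML (de)serialization would use for a
// struct field: the `toml` tag if present, otherwise the Go field name.
func tomlFieldName(sf reflect.StructField) string {
	if tag := sf.Tag.Get("toml"); tag != "" && tag != "-" {
		return tag
	}
	return sf.Name
}

func main() {
	type Server struct {
		Host string `toml:"host"`
		Port int
	}
	t := reflect.TypeOf(Server{})
	for i := 0; i < t.NumField(); i++ {
		fmt.Println(tomlFieldName(t.Field(i))) // host, then Port
	}
}
```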

View File

@@ -1,22 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe

View File

@@ -1,3 +0,0 @@
guard 'shell' do
watch(/(.*).go/) {|m| `go test` }
end

View File

@@ -1,3 +0,0 @@
DKIM signing for Go
You can find the package docs [here](http://godoc.org/github.com/eaigner/dkim)

View File

@@ -1,131 +0,0 @@
package dkim
import (
"crypto"
_ "crypto/sha256"
"fmt"
"strconv"
"strings"
"time"
)
var (
minRequired = []string{
VersionKey,
AlgorithmKey,
DomainKey,
SelectorKey,
CanonicalizationKey,
QueryMethodKey,
TimestampKey,
}
keyOrder = []string{
VersionKey,
AlgorithmKey,
CanonicalizationKey,
DomainKey,
QueryMethodKey,
SelectorKey,
TimestampKey,
BodyHashKey,
FieldsKey,
CopiedFieldsKey,
AUIDKey,
BodyLengthKey,
SignatureDataKey,
}
)
type Conf map[string]string
const (
VersionKey = "v"
AlgorithmKey = "a"
DomainKey = "d"
SelectorKey = "s"
CanonicalizationKey = "c"
QueryMethodKey = "q"
BodyLengthKey = "l"
TimestampKey = "t"
ExpireKey = "x"
FieldsKey = "h"
BodyHashKey = "bh"
SignatureDataKey = "b"
AUIDKey = "i"
CopiedFieldsKey = "z"
)
const (
AlgorithmSHA256 = "rsa-sha256"
DefaultVersion = "1"
DefaultCanonicalization = "relaxed/simple"
DefaultQueryMethod = "dns/txt"
)
func NewConf(domain string, selector string) (Conf, error) {
if domain == "" {
return nil, fmt.Errorf("domain invalid")
}
if selector == "" {
return nil, fmt.Errorf("selector invalid")
}
return Conf{
VersionKey: DefaultVersion,
AlgorithmKey: AlgorithmSHA256,
DomainKey: domain,
SelectorKey: selector,
CanonicalizationKey: DefaultCanonicalization,
QueryMethodKey: DefaultQueryMethod,
TimestampKey: strconv.FormatInt(time.Now().Unix(), 10),
FieldsKey: Empty,
BodyHashKey: Empty,
SignatureDataKey: Empty,
}, nil
}
func (c Conf) Validate() error {
for _, key := range minRequired {
if _, ok := c[key]; !ok {
return fmt.Errorf("key '%s' missing", key)
}
}
return nil
}
func (c Conf) Algorithm() string {
if algorithm := c[AlgorithmKey]; algorithm != Empty {
return algorithm
}
return AlgorithmSHA256
}
func (c Conf) Hash() crypto.Hash {
if c.Algorithm() == AlgorithmSHA256 {
return crypto.SHA256
}
panic("algorithm not implemented")
}
func (c Conf) RelaxedHeader() bool {
if strings.HasPrefix(strings.ToLower(c[CanonicalizationKey]), "relaxed") {
return true
}
return false
}
func (c Conf) RelaxedBody() bool {
if strings.HasSuffix(strings.ToLower(c[CanonicalizationKey]), "/relaxed") {
return true
}
return false
}
func (c Conf) String() string {
pairs := make([]string, 0, len(keyOrder))
for _, key := range keyOrder {
if value, ok := c[key]; ok {
pairs = append(pairs, fmt.Sprintf("%s=%s", key, value))
}
}
return strings.Join(pairs, "; ")
}
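
A short usage sketch of the configuration type above, assuming the package is imported from github.com/eaigner/dkim; the domain and selector values are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/eaigner/dkim"
)

func main() {
	// Build a signing configuration for a placeholder domain and selector.
	conf, err := dkim.NewConf("example.com", "mail")
	if err != nil {
		log.Fatal(err)
	}
	if err := conf.Validate(); err != nil {
		log.Fatal(err)
	}
	// String() renders the keys in canonical order, e.g.
	// v=1; a=rsa-sha256; c=relaxed/simple; d=example.com; ...
	fmt.Println(conf.String())
}
```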

View File

@@ -1,211 +0,0 @@
package dkim
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"errors"
"fmt"
"net/mail"
"regexp"
"strings"
)
const (
CRLF = "\r\n"
DOUBLE_CRLF = "\r\n\r\n"
Empty = ""
WSP = " "
)
var (
CRLF_AS_BYTE = []byte{'\r', '\n'}
WSP_AS_BYTE = []byte{' '}
)
var (
headerRelaxRx = regexp.MustCompile(`\s+`)
singleWspRx = regexp.MustCompile(`[ \t]+`)
)
const (
SignatureHeaderKey = "DKIM-Signature"
)
var StdSignableHeaders = []string{
"Cc",
"Content-Type",
"Date",
"From",
"Reply-To",
"Subject",
"To",
SignatureHeaderKey,
}
type DKIM struct {
signableHeaders []string
conf Conf
privateKey *rsa.PrivateKey
msgBody []byte
}
func New(conf Conf, keyPEM []byte) (d *DKIM, err error) {
err = conf.Validate()
if err != nil {
return
}
if len(keyPEM) == 0 {
return nil, errors.New("invalid key PEM data")
}
der, _ := pem.Decode(keyPEM)
key, err := x509.ParsePKCS1PrivateKey(der.Bytes)
if err != nil {
return nil, err
}
return NewByKey(conf, key), nil
}
func NewByKey(conf Conf, key *rsa.PrivateKey) *DKIM {
dkim := &DKIM{
signableHeaders: StdSignableHeaders,
conf: conf,
}
dkim.privateKey = key
return dkim
}
var (
rxWsCompress = regexp.MustCompile(`[ \t]+`)
rxWsCRLF = regexp.MustCompile(` \r\n`)
)
func (d *DKIM) canonicalBody(msg *mail.Message) []byte {
body := d.msgBody
if d.conf.RelaxedBody() {
if len(d.msgBody) == 0 {
return nil
}
// Reduce WSP sequences to single WSP
body = singleWspRx.ReplaceAll(body, WSP_AS_BYTE)
body = rxWsCompress.ReplaceAll(body, []byte(" "))
// Ignore all whitespace at end of lines.
// Implementations MUST NOT remove the CRLF
// at the end of the line
body = rxWsCRLF.ReplaceAll(body, []byte("\r\n"))
} else {
if len(body) == 0 {
return CRLF_AS_BYTE
}
}
// Ignore all empty lines at the end of the message body
for i := len(body) - 1; i >= 0; i-- {
if body[i] != '\r' && body[i] != '\n' && body[i] != ' ' {
body = body[:i+1]
break
}
}
return append(body, CRLF_AS_BYTE...)
}
func (d *DKIM) canonicalBodyHash(msg *mail.Message) []byte {
b := d.canonicalBody(msg)
digest := d.conf.Hash().New()
digest.Write([]byte(b))
return digest.Sum(nil)
}
func (d *DKIM) signableHeaderBlock(msg *mail.Message) string {
signableHeaderList := make(mail.Header)
signableHeaderKeys := make([]string, 0)
for _, k := range d.signableHeaders {
if v := msg.Header[k]; len(v) != 0 {
signableHeaderList[k] = v
signableHeaderKeys = append(signableHeaderKeys, k)
}
}
d.conf[BodyHashKey] = base64.StdEncoding.EncodeToString(d.canonicalBodyHash(msg))
d.conf[FieldsKey] = strings.Join(signableHeaderKeys, ":")
signableHeaderList[SignatureHeaderKey] = []string{d.conf.String()}
signableHeaderKeys = append(signableHeaderKeys, SignatureHeaderKey)
relax := d.conf.RelaxedHeader()
canonical := make([]string, 0, len(signableHeaderKeys))
for _, key := range signableHeaderKeys {
value := signableHeaderList[key][0]
if relax {
value = headerRelaxRx.ReplaceAllString(value, WSP)
key = strings.ToLower(key)
}
canonical = append(canonical, fmt.Sprintf("%s:%s", key, strings.TrimSpace(value)))
}
// According to RFC6376 http://tools.ietf.org/html/rfc6376#section-3.7
// the DKIM header must be inserted without a trailing <CRLF>.
// That's why we have to trim the space from the canonical header.
return strings.TrimSpace(strings.Join(canonical, CRLF) + CRLF)
}
func (d *DKIM) signature(msg *mail.Message) (string, error) {
block := d.signableHeaderBlock(msg)
hash := d.conf.Hash()
digest := hash.New()
digest.Write([]byte(block))
sig, err := rsa.SignPKCS1v15(rand.Reader, d.privateKey, hash, digest.Sum(nil))
if err != nil {
return Empty, err
}
return base64.StdEncoding.EncodeToString(sig), nil
}
func (d *DKIM) Sign(eml []byte) ([]byte, error) {
msg, err := readEML(eml)
if err != nil {
return eml, err
}
d.readBody(msg)
sig, err := d.signature(msg)
if err != nil {
return eml, err
}
d.conf[SignatureDataKey] = sig
// Append the signature header. Keep in mind these are raw values,
// so we add a <SP> character before the key-value list
/* msg.Header[SignatureHeaderKey] = []string{d.conf.String()} */
buf := new(bytes.Buffer)
for key := range msg.Header {
buf.WriteString(key)
buf.WriteString(": ")
buf.WriteString(msg.Header.Get(key))
buf.WriteString(CRLF)
}
buf.WriteString(SignatureHeaderKey)
buf.WriteString(":")
buf.WriteString(d.conf.String())
buf.WriteString(DOUBLE_CRLF)
buf.Write(d.msgBody)
return buf.Bytes(), nil
}
func (d *DKIM) readBody(msg *mail.Message) {
body := new(bytes.Buffer)
body.ReadFrom(msg.Body)
d.msgBody = body.Bytes()
}
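
End-to-end, signing a message with this package looks roughly like the sketch below; the key and message file paths are placeholders, and the private key is expected to be a PKCS#1 RSA key in PEM form:

```go
package main

import (
	"log"
	"os"

	"github.com/eaigner/dkim"
)

func main() {
	keyPEM, err := os.ReadFile("dkim-private.pem") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	eml, err := os.ReadFile("message.eml") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	conf, err := dkim.NewConf("example.com", "mail")
	if err != nil {
		log.Fatal(err)
	}
	d, err := dkim.New(conf, keyPEM)
	if err != nil {
		log.Fatal(err)
	}
	// Sign returns the original message with a DKIM-Signature header added.
	signed, err := d.Sign(eml)
	if err != nil {
		log.Fatal(err)
	}
	os.Stdout.Write(signed)
}
```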

View File

@@ -1,11 +0,0 @@
package dkim
import (
"bytes"
"net/mail"
)
func readEML(eml []byte) (m *mail.Message, err error) {
r := bytes.NewReader(eml)
return mail.ReadMessage(r)
}

View File

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,718 +0,0 @@
/*
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package memcache provides a client for the memcached cache server.
package memcache
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"net"
"strconv"
"strings"
"sync"
"time"
)
// Similar to:
// https://godoc.org/google.golang.org/appengine/memcache
var (
// ErrCacheMiss means that a Get failed because the item wasn't present.
ErrCacheMiss = errors.New("memcache: cache miss")
// ErrCASConflict means that a CompareAndSwap call failed due to the
// cached value being modified between the Get and the CompareAndSwap.
// If the cached value was simply evicted rather than replaced,
// ErrNotStored will be returned instead.
ErrCASConflict = errors.New("memcache: compare-and-swap conflict")
// ErrNotStored means that a conditional write operation (i.e. Add or
// CompareAndSwap) failed because the condition was not satisfied.
ErrNotStored = errors.New("memcache: item not stored")
// ErrServerError means that a server error occurred.
ErrServerError = errors.New("memcache: server error")
// ErrNoStats means that no statistics were available.
ErrNoStats = errors.New("memcache: no statistics available")
// ErrMalformedKey is returned when an invalid key is used.
// Keys must be at maximum 250 bytes long and not
// contain whitespace or control characters.
ErrMalformedKey = errors.New("malformed: key is too long or contains invalid characters")
// ErrNoServers is returned when no servers are configured or available.
ErrNoServers = errors.New("memcache: no servers configured or available")
)
const (
// DefaultTimeout is the default socket read/write timeout.
DefaultTimeout = 100 * time.Millisecond
// DefaultMaxIdleConns is the default maximum number of idle connections
// kept for any single address.
DefaultMaxIdleConns = 2
)
const buffered = 8 // arbitrary buffered channel size, for readability
// resumableError returns true if err is only a protocol-level cache error.
// This is used to determine whether or not a server connection should
// be re-used or not. If an error occurs, by default we don't reuse the
// connection, unless it was just a cache error.
func resumableError(err error) bool {
switch err {
case ErrCacheMiss, ErrCASConflict, ErrNotStored, ErrMalformedKey:
return true
}
return false
}
func legalKey(key string) bool {
if len(key) > 250 {
return false
}
for i := 0; i < len(key); i++ {
if key[i] <= ' ' || key[i] == 0x7f {
return false
}
}
return true
}
var (
crlf = []byte("\r\n")
space = []byte(" ")
resultOK = []byte("OK\r\n")
resultStored = []byte("STORED\r\n")
resultNotStored = []byte("NOT_STORED\r\n")
resultExists = []byte("EXISTS\r\n")
resultNotFound = []byte("NOT_FOUND\r\n")
resultDeleted = []byte("DELETED\r\n")
resultEnd = []byte("END\r\n")
resultOk = []byte("OK\r\n")
resultTouched = []byte("TOUCHED\r\n")
resultClientErrorPrefix = []byte("CLIENT_ERROR ")
versionPrefix = []byte("VERSION")
)
// New returns a memcache client using the provided server(s)
// with equal weight. If a server is listed multiple times,
// it gets a proportional amount of weight.
func New(server ...string) *Client {
ss := new(ServerList)
ss.SetServers(server...)
return NewFromSelector(ss)
}
// NewFromSelector returns a new Client using the provided ServerSelector.
func NewFromSelector(ss ServerSelector) *Client {
return &Client{selector: ss}
}
// Client is a memcache client.
// It is safe for unlocked use by multiple concurrent goroutines.
type Client struct {
// Timeout specifies the socket read/write timeout.
// If zero, DefaultTimeout is used.
Timeout time.Duration
// MaxIdleConns specifies the maximum number of idle connections that will
// be maintained per address. If less than one, DefaultMaxIdleConns will be
// used.
//
// Consider your expected traffic rates and latency carefully. This should
// be set to a number higher than your peak parallel requests.
MaxIdleConns int
selector ServerSelector
lk sync.Mutex
freeconn map[string][]*conn
}
// Item is an item to be got or stored in a memcached server.
type Item struct {
// Key is the Item's key (250 bytes maximum).
Key string
// Value is the Item's value.
Value []byte
// Flags are server-opaque flags whose semantics are entirely
// up to the app.
Flags uint32
// Expiration is the cache expiration time, in seconds: either a relative
// time from now (up to 1 month), or an absolute Unix epoch time.
// Zero means the Item has no expiration time.
Expiration int32
// Compare and swap ID.
casid uint64
}
// conn is a connection to a server.
type conn struct {
nc net.Conn
rw *bufio.ReadWriter
addr net.Addr
c *Client
}
// release returns this connection back to the client's free pool
func (cn *conn) release() {
cn.c.putFreeConn(cn.addr, cn)
}
func (cn *conn) extendDeadline() {
cn.nc.SetDeadline(time.Now().Add(cn.c.netTimeout()))
}
// condRelease releases this connection if the error pointed to by err
// is nil (not an error) or is only a protocol level error (e.g. a
// cache miss). The purpose is to not recycle TCP connections that
// are bad.
func (cn *conn) condRelease(err *error) {
if *err == nil || resumableError(*err) {
cn.release()
} else {
cn.nc.Close()
}
}
func (c *Client) putFreeConn(addr net.Addr, cn *conn) {
c.lk.Lock()
defer c.lk.Unlock()
if c.freeconn == nil {
c.freeconn = make(map[string][]*conn)
}
freelist := c.freeconn[addr.String()]
if len(freelist) >= c.maxIdleConns() {
cn.nc.Close()
return
}
c.freeconn[addr.String()] = append(freelist, cn)
}
func (c *Client) getFreeConn(addr net.Addr) (cn *conn, ok bool) {
c.lk.Lock()
defer c.lk.Unlock()
if c.freeconn == nil {
return nil, false
}
freelist, ok := c.freeconn[addr.String()]
if !ok || len(freelist) == 0 {
return nil, false
}
cn = freelist[len(freelist)-1]
c.freeconn[addr.String()] = freelist[:len(freelist)-1]
return cn, true
}
func (c *Client) netTimeout() time.Duration {
if c.Timeout != 0 {
return c.Timeout
}
return DefaultTimeout
}
func (c *Client) maxIdleConns() int {
if c.MaxIdleConns > 0 {
return c.MaxIdleConns
}
return DefaultMaxIdleConns
}
// ConnectTimeoutError is the error type used when it takes
// too long to connect to the desired host. This level of
// detail can generally be ignored.
type ConnectTimeoutError struct {
Addr net.Addr
}
func (cte *ConnectTimeoutError) Error() string {
return "memcache: connect timeout to " + cte.Addr.String()
}
func (c *Client) dial(addr net.Addr) (net.Conn, error) {
type connError struct {
cn net.Conn
err error
}
nc, err := net.DialTimeout(addr.Network(), addr.String(), c.netTimeout())
if err == nil {
return nc, nil
}
if ne, ok := err.(net.Error); ok && ne.Timeout() {
return nil, &ConnectTimeoutError{addr}
}
return nil, err
}
func (c *Client) getConn(addr net.Addr) (*conn, error) {
cn, ok := c.getFreeConn(addr)
if ok {
cn.extendDeadline()
return cn, nil
}
nc, err := c.dial(addr)
if err != nil {
return nil, err
}
cn = &conn{
nc: nc,
addr: addr,
rw: bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)),
c: c,
}
cn.extendDeadline()
return cn, nil
}
func (c *Client) onItem(item *Item, fn func(*Client, *bufio.ReadWriter, *Item) error) error {
addr, err := c.selector.PickServer(item.Key)
if err != nil {
return err
}
cn, err := c.getConn(addr)
if err != nil {
return err
}
defer cn.condRelease(&err)
if err = fn(c, cn.rw, item); err != nil {
return err
}
return nil
}
func (c *Client) FlushAll() error {
return c.selector.Each(c.flushAllFromAddr)
}
// Get gets the item for the given key. ErrCacheMiss is returned for a
// memcache cache miss. The key must be at most 250 bytes in length.
func (c *Client) Get(key string) (item *Item, err error) {
err = c.withKeyAddr(key, func(addr net.Addr) error {
return c.getFromAddr(addr, []string{key}, func(it *Item) { item = it })
})
if err == nil && item == nil {
err = ErrCacheMiss
}
return
}
// Touch updates the expiry for the given key. The seconds parameter is either
// a Unix timestamp or, if seconds is less than 1 month, the number of seconds
// into the future at which time the item will expire. Zero means the item has
// no expiration time. ErrCacheMiss is returned if the key is not in the cache.
// The key must be at most 250 bytes in length.
func (c *Client) Touch(key string, seconds int32) (err error) {
return c.withKeyAddr(key, func(addr net.Addr) error {
return c.touchFromAddr(addr, []string{key}, seconds)
})
}
func (c *Client) withKeyAddr(key string, fn func(net.Addr) error) (err error) {
if !legalKey(key) {
return ErrMalformedKey
}
addr, err := c.selector.PickServer(key)
if err != nil {
return err
}
return fn(addr)
}
func (c *Client) withAddrRw(addr net.Addr, fn func(*bufio.ReadWriter) error) (err error) {
cn, err := c.getConn(addr)
if err != nil {
return err
}
defer cn.condRelease(&err)
return fn(cn.rw)
}
func (c *Client) withKeyRw(key string, fn func(*bufio.ReadWriter) error) error {
return c.withKeyAddr(key, func(addr net.Addr) error {
return c.withAddrRw(addr, fn)
})
}
func (c *Client) getFromAddr(addr net.Addr, keys []string, cb func(*Item)) error {
return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
if _, err := fmt.Fprintf(rw, "gets %s\r\n", strings.Join(keys, " ")); err != nil {
return err
}
if err := rw.Flush(); err != nil {
return err
}
if err := parseGetResponse(rw.Reader, cb); err != nil {
return err
}
return nil
})
}
// flushAllFromAddr send the flush_all command to the given addr
func (c *Client) flushAllFromAddr(addr net.Addr) error {
return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
if _, err := fmt.Fprintf(rw, "flush_all\r\n"); err != nil {
return err
}
if err := rw.Flush(); err != nil {
return err
}
line, err := rw.ReadSlice('\n')
if err != nil {
return err
}
switch {
case bytes.Equal(line, resultOk):
break
default:
return fmt.Errorf("memcache: unexpected response line from flush_all: %q", string(line))
}
return nil
})
}
// ping sends the version command to the given addr
func (c *Client) ping(addr net.Addr) error {
return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
if _, err := fmt.Fprintf(rw, "version\r\n"); err != nil {
return err
}
if err := rw.Flush(); err != nil {
return err
}
line, err := rw.ReadSlice('\n')
if err != nil {
return err
}
switch {
case bytes.HasPrefix(line, versionPrefix):
break
default:
return fmt.Errorf("memcache: unexpected response line from ping: %q", string(line))
}
return nil
})
}
func (c *Client) touchFromAddr(addr net.Addr, keys []string, expiration int32) error {
return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
for _, key := range keys {
if _, err := fmt.Fprintf(rw, "touch %s %d\r\n", key, expiration); err != nil {
return err
}
if err := rw.Flush(); err != nil {
return err
}
line, err := rw.ReadSlice('\n')
if err != nil {
return err
}
switch {
case bytes.Equal(line, resultTouched):
break
case bytes.Equal(line, resultNotFound):
return ErrCacheMiss
default:
return fmt.Errorf("memcache: unexpected response line from touch: %q", string(line))
}
}
return nil
})
}
// GetMulti is a batch version of Get. The returned map from keys to
// items may have fewer elements than the input slice, due to memcache
// cache misses. Each key must be at most 250 bytes in length.
// If no error is returned, the returned map will also be non-nil.
func (c *Client) GetMulti(keys []string) (map[string]*Item, error) {
var lk sync.Mutex
m := make(map[string]*Item)
addItemToMap := func(it *Item) {
lk.Lock()
defer lk.Unlock()
m[it.Key] = it
}
keyMap := make(map[net.Addr][]string)
for _, key := range keys {
if !legalKey(key) {
return nil, ErrMalformedKey
}
addr, err := c.selector.PickServer(key)
if err != nil {
return nil, err
}
keyMap[addr] = append(keyMap[addr], key)
}
ch := make(chan error, buffered)
for addr, keys := range keyMap {
go func(addr net.Addr, keys []string) {
ch <- c.getFromAddr(addr, keys, addItemToMap)
}(addr, keys)
}
var err error
for range keyMap {
if ge := <-ch; ge != nil {
err = ge
}
}
return m, err
}
// parseGetResponse reads a GET response from r and calls cb for each
// read and allocated Item
func parseGetResponse(r *bufio.Reader, cb func(*Item)) error {
for {
line, err := r.ReadSlice('\n')
if err != nil {
return err
}
if bytes.Equal(line, resultEnd) {
return nil
}
it := new(Item)
size, err := scanGetResponseLine(line, it)
if err != nil {
return err
}
it.Value = make([]byte, size+2)
_, err = io.ReadFull(r, it.Value)
if err != nil {
it.Value = nil
return err
}
if !bytes.HasSuffix(it.Value, crlf) {
it.Value = nil
return fmt.Errorf("memcache: corrupt get result read")
}
it.Value = it.Value[:size]
cb(it)
}
}
// scanGetResponseLine populates it and returns the declared size of the item.
// It does not read the bytes of the item.
func scanGetResponseLine(line []byte, it *Item) (size int, err error) {
pattern := "VALUE %s %d %d %d\r\n"
dest := []interface{}{&it.Key, &it.Flags, &size, &it.casid}
if bytes.Count(line, space) == 3 {
pattern = "VALUE %s %d %d\r\n"
dest = dest[:3]
}
n, err := fmt.Sscanf(string(line), pattern, dest...)
if err != nil || n != len(dest) {
return -1, fmt.Errorf("memcache: unexpected line in get response: %q", line)
}
return size, nil
}
// Set writes the given item, unconditionally.
func (c *Client) Set(item *Item) error {
return c.onItem(item, (*Client).set)
}
func (c *Client) set(rw *bufio.ReadWriter, item *Item) error {
return c.populateOne(rw, "set", item)
}
// Add writes the given item, if no value already exists for its
// key. ErrNotStored is returned if that condition is not met.
func (c *Client) Add(item *Item) error {
return c.onItem(item, (*Client).add)
}
func (c *Client) add(rw *bufio.ReadWriter, item *Item) error {
return c.populateOne(rw, "add", item)
}
// Replace writes the given item, but only if the server *does*
// already hold data for this key
func (c *Client) Replace(item *Item) error {
return c.onItem(item, (*Client).replace)
}
func (c *Client) replace(rw *bufio.ReadWriter, item *Item) error {
return c.populateOne(rw, "replace", item)
}
// CompareAndSwap writes the given item that was previously returned
// by Get, if the value was neither modified or evicted between the
// Get and the CompareAndSwap calls. The item's Key should not change
// between calls but all other item fields may differ. ErrCASConflict
// is returned if the value was modified in between the
// calls. ErrNotStored is returned if the value was evicted in between
// the calls.
func (c *Client) CompareAndSwap(item *Item) error {
return c.onItem(item, (*Client).cas)
}
func (c *Client) cas(rw *bufio.ReadWriter, item *Item) error {
return c.populateOne(rw, "cas", item)
}
func (c *Client) populateOne(rw *bufio.ReadWriter, verb string, item *Item) error {
if !legalKey(item.Key) {
return ErrMalformedKey
}
var err error
if verb == "cas" {
_, err = fmt.Fprintf(rw, "%s %s %d %d %d %d\r\n",
verb, item.Key, item.Flags, item.Expiration, len(item.Value), item.casid)
} else {
_, err = fmt.Fprintf(rw, "%s %s %d %d %d\r\n",
verb, item.Key, item.Flags, item.Expiration, len(item.Value))
}
if err != nil {
return err
}
if _, err = rw.Write(item.Value); err != nil {
return err
}
if _, err := rw.Write(crlf); err != nil {
return err
}
if err := rw.Flush(); err != nil {
return err
}
line, err := rw.ReadSlice('\n')
if err != nil {
return err
}
switch {
case bytes.Equal(line, resultStored):
return nil
case bytes.Equal(line, resultNotStored):
return ErrNotStored
case bytes.Equal(line, resultExists):
return ErrCASConflict
case bytes.Equal(line, resultNotFound):
return ErrCacheMiss
}
return fmt.Errorf("memcache: unexpected response line from %q: %q", verb, string(line))
}
func writeReadLine(rw *bufio.ReadWriter, format string, args ...interface{}) ([]byte, error) {
_, err := fmt.Fprintf(rw, format, args...)
if err != nil {
return nil, err
}
if err := rw.Flush(); err != nil {
return nil, err
}
line, err := rw.ReadSlice('\n')
return line, err
}
func writeExpectf(rw *bufio.ReadWriter, expect []byte, format string, args ...interface{}) error {
line, err := writeReadLine(rw, format, args...)
if err != nil {
return err
}
switch {
case bytes.Equal(line, resultOK):
return nil
case bytes.Equal(line, expect):
return nil
case bytes.Equal(line, resultNotStored):
return ErrNotStored
case bytes.Equal(line, resultExists):
return ErrCASConflict
case bytes.Equal(line, resultNotFound):
return ErrCacheMiss
}
return fmt.Errorf("memcache: unexpected response line: %q", string(line))
}
// Delete deletes the item with the provided key. The error ErrCacheMiss is
// returned if the item didn't already exist in the cache.
func (c *Client) Delete(key string) error {
return c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
return writeExpectf(rw, resultDeleted, "delete %s\r\n", key)
})
}
// DeleteAll deletes all items in the cache.
func (c *Client) DeleteAll() error {
return c.withKeyRw("", func(rw *bufio.ReadWriter) error {
return writeExpectf(rw, resultDeleted, "flush_all\r\n")
})
}
// Ping checks all instances if they are alive. Returns error if any
// of them is down.
func (c *Client) Ping() error {
return c.selector.Each(c.ping)
}
// Increment atomically increments key by delta. The return value is
// the new value after being incremented or an error. If the value
// didn't exist in memcached the error is ErrCacheMiss. The value in
// memcached must be a decimal number, or an error will be returned.
// On 64-bit overflow, the new value wraps around.
func (c *Client) Increment(key string, delta uint64) (newValue uint64, err error) {
return c.incrDecr("incr", key, delta)
}
// Decrement atomically decrements key by delta. The return value is
// the new value after being decremented or an error. If the value
// didn't exist in memcached the error is ErrCacheMiss. The value in
// memcached must be a decimal number, or an error will be returned.
// On underflow, the new value is capped at zero and does not wrap
// around.
func (c *Client) Decrement(key string, delta uint64) (newValue uint64, err error) {
return c.incrDecr("decr", key, delta)
}
func (c *Client) incrDecr(verb, key string, delta uint64) (uint64, error) {
var val uint64
err := c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
line, err := writeReadLine(rw, "%s %s %d\r\n", verb, key, delta)
if err != nil {
return err
}
switch {
case bytes.Equal(line, resultNotFound):
return ErrCacheMiss
case bytes.HasPrefix(line, resultClientErrorPrefix):
errMsg := line[len(resultClientErrorPrefix) : len(line)-2]
return errors.New("memcache: client error: " + string(errMsg))
}
val, err = strconv.ParseUint(string(line[:len(line)-2]), 10, 64)
if err != nil {
return err
}
return nil
})
return val, err
}
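
Typical use of the client above follows the pattern sketched here (the server address is a placeholder for a local memcached instance):

```go
package main

import (
	"fmt"
	"log"

	"github.com/bradfitz/gomemcache/memcache"
)

func main() {
	mc := memcache.New("127.0.0.1:11211") // placeholder address

	// Store a value for 60 seconds, then read it back.
	err := mc.Set(&memcache.Item{Key: "greeting", Value: []byte("hello"), Expiration: 60})
	if err != nil {
		log.Fatal(err)
	}

	it, err := mc.Get("greeting")
	if err == memcache.ErrCacheMiss {
		fmt.Println("not cached")
		return
	} else if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s = %s\n", it.Key, it.Value)
}
```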

View File

@@ -1,129 +0,0 @@
/*
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package memcache
import (
"hash/crc32"
"net"
"strings"
"sync"
)
// ServerSelector is the interface that selects a memcache server
// as a function of the item's key.
//
// All ServerSelector implementations must be safe for concurrent use
// by multiple goroutines.
type ServerSelector interface {
// PickServer returns the server address that a given item
// should be sharded onto.
PickServer(key string) (net.Addr, error)
Each(func(net.Addr) error) error
}
// ServerList is a simple ServerSelector. Its zero value is usable.
type ServerList struct {
mu sync.RWMutex
addrs []net.Addr
}
// staticAddr caches the Network() and String() values from any net.Addr.
type staticAddr struct {
ntw, str string
}
func newStaticAddr(a net.Addr) net.Addr {
return &staticAddr{
ntw: a.Network(),
str: a.String(),
}
}
func (s *staticAddr) Network() string { return s.ntw }
func (s *staticAddr) String() string { return s.str }
// SetServers changes a ServerList's set of servers at runtime and is
// safe for concurrent use by multiple goroutines.
//
// Each server is given equal weight. A server is given more weight
// if it's listed multiple times.
//
// SetServers returns an error if any of the server names fail to
// resolve. No attempt is made to connect to the server. If any error
// is returned, no changes are made to the ServerList.
func (ss *ServerList) SetServers(servers ...string) error {
naddr := make([]net.Addr, len(servers))
for i, server := range servers {
if strings.Contains(server, "/") {
addr, err := net.ResolveUnixAddr("unix", server)
if err != nil {
return err
}
naddr[i] = newStaticAddr(addr)
} else {
tcpaddr, err := net.ResolveTCPAddr("tcp", server)
if err != nil {
return err
}
naddr[i] = newStaticAddr(tcpaddr)
}
}
ss.mu.Lock()
defer ss.mu.Unlock()
ss.addrs = naddr
return nil
}
// Each iterates over each server calling the given function
func (ss *ServerList) Each(f func(net.Addr) error) error {
ss.mu.RLock()
defer ss.mu.RUnlock()
for _, a := range ss.addrs {
if err := f(a); err != nil {
return err
}
}
return nil
}
// keyBufPool returns []byte buffers for use by PickServer's call to
// crc32.ChecksumIEEE to avoid allocations. (but doesn't avoid the
// copies, which at least are bounded in size and small)
var keyBufPool = sync.Pool{
New: func() interface{} {
b := make([]byte, 256)
return &b
},
}
func (ss *ServerList) PickServer(key string) (net.Addr, error) {
ss.mu.RLock()
defer ss.mu.RUnlock()
if len(ss.addrs) == 0 {
return nil, ErrNoServers
}
if len(ss.addrs) == 1 {
return ss.addrs[0], nil
}
bufp := keyBufPool.Get().(*[]byte)
n := copy(*bufp, key)
cs := crc32.ChecksumIEEE((*bufp)[:n])
keyBufPool.Put(bufp)
return ss.addrs[cs%uint32(len(ss.addrs))], nil
}
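
The selector above spreads keys across servers by hashing the key with CRC-32 and taking the result modulo the server count. A small sketch of using ServerList directly (addresses are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"github.com/bradfitz/gomemcache/memcache"
)

func main() {
	var ss memcache.ServerList
	if err := ss.SetServers("10.0.0.1:11211", "10.0.0.2:11211"); err != nil {
		log.Fatal(err)
	}
	for _, key := range []string{"alpha", "beta", "gamma"} {
		addr, err := ss.PickServer(key)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(key, "->", addr) // each key consistently maps to one server
	}
	// A pre-built selector can back a Client directly.
	_ = memcache.NewFromSelector(&ss)
}
```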

View File

@@ -1,6 +0,0 @@
testdata/conf_out.ini
ini.sublime-project
ini.sublime-workspace
testdata/conf_reflect.ini
.idea
/.vscode

191
vendor/github.com/go-ini/ini/LICENSE generated vendored
View File

@@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright 2014 Unknwon
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,15 +0,0 @@
.PHONY: build test bench vet coverage
build: vet bench
test:
go test -v -cover -race
bench:
go test -v -cover -test.bench=. -test.benchmem
vet:
go vet
coverage:
go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out

View File

@@ -1,43 +0,0 @@
# INI
[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/go-ini/ini/Go?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=workflow%3AGo)
[![codecov](https://img.shields.io/codecov/c/github/go-ini/ini/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/go-ini/ini)
[![GoDoc](https://img.shields.io/badge/GoDoc-Reference-blue?style=for-the-badge&logo=go)](https://pkg.go.dev/github.com/go-ini/ini?tab=doc)
[![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini)
![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
Package ini provides INI file read and write functionality in Go.
## Features
- Load from multiple data sources (file, `[]byte`, `io.Reader` and `io.ReadCloser`) with overwrites.
- Read with recursion values.
- Read with parent-child sections.
- Read with auto-increment key names.
- Read with multiple-line values.
- Read with tons of helper methods.
- Read and convert values to Go types.
- Read and **WRITE** comments of sections and keys.
- Manipulate sections, keys and comments with ease.
- Keep sections and keys in order as you parse and save.
## Installation
The minimum requirement of Go is **1.6**.
```sh
$ go get gopkg.in/ini.v1
```
Please add `-u` flag to update in the future.
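For a quick sanity check after installing, here is a minimal usage sketch (not part of this repository): the `my.ini` file and its `app_name` and `port` keys are hypothetical, and the import path is the `gopkg.in/ini.v1` path shown above.
```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	// Load an INI file from disk (file name and keys are made up for illustration).
	cfg, err := ini.Load("my.ini")
	if err != nil {
		log.Fatalf("failed to read file: %v", err)
	}

	// Read a string from the unnamed default section, and an int with a fallback.
	appName := cfg.Section("").Key("app_name").String()
	port := cfg.Section("server").Key("port").MustInt(8080)

	fmt.Println(appName, port)
}
```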
## Getting Help
- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started)
- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
- Mainland China mirror: https://ini.unknwon.cn
## License
This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.

View File

@@ -1,9 +0,0 @@
coverage:
range: "60...95"
status:
project:
default:
threshold: 1%
comment:
layout: 'diff, files'

View File

@@ -1,76 +0,0 @@
// Copyright 2019 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
)
var (
_ dataSource = (*sourceFile)(nil)
_ dataSource = (*sourceData)(nil)
_ dataSource = (*sourceReadCloser)(nil)
)
// dataSource is an interface that returns object which can be read and closed.
type dataSource interface {
ReadCloser() (io.ReadCloser, error)
}
// sourceFile represents an object that contains content on the local file system.
type sourceFile struct {
name string
}
func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
return os.Open(s.name)
}
// sourceData represents an object that contains content in memory.
type sourceData struct {
data []byte
}
func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(s.data)), nil
}
// sourceReadCloser represents an input stream with Close method.
type sourceReadCloser struct {
reader io.ReadCloser
}
func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
return s.reader, nil
}
func parseDataSource(source interface{}) (dataSource, error) {
switch s := source.(type) {
case string:
return sourceFile{s}, nil
case []byte:
return &sourceData{s}, nil
case io.ReadCloser:
return &sourceReadCloser{s}, nil
case io.Reader:
return &sourceReadCloser{ioutil.NopCloser(s)}, nil
default:
return nil, fmt.Errorf("error parsing data source: unknown type %q", s)
}
}
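As a rough illustration of the data-source handling above, the public `ini.Load` entry point (defined later in this diff) accepts any mix of these source types in one call; the sample data below is made up, and later sources overwrite earlier ones.
```go
package main

import (
	"fmt"
	"log"
	"strings"

	"gopkg.in/ini.v1"
)

func main() {
	// A []byte becomes a sourceData, an io.Reader becomes a sourceReadCloser,
	// and a string would be treated as a file path (sourceFile).
	raw := []byte("[app]\ndebug = false\n")
	reader := strings.NewReader("[app]\ndebug = true\n")

	cfg, err := ini.Load(raw, reader) // later sources overwrite earlier ones
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Section("app").Key("debug").MustBool(false)) // true
}
```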

View File

@@ -1,25 +0,0 @@
// Copyright 2019 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
const (
// Deprecated: Use "DefaultSection" instead.
DEFAULT_SECTION = DefaultSection
)
var (
// Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
AllCapsUnderscore = SnackCase
)

View File

@@ -1,34 +0,0 @@
// Copyright 2016 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"fmt"
)
// ErrDelimiterNotFound indicates that no key-value delimiter was found where one was expected.
type ErrDelimiterNotFound struct {
Line string
}
// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound.
func IsErrDelimiterNotFound(err error) bool {
_, ok := err.(ErrDelimiterNotFound)
return ok
}
func (err ErrDelimiterNotFound) Error() string {
return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
}

509
vendor/github.com/go-ini/ini/file.go generated vendored
View File

@@ -1,509 +0,0 @@
// Copyright 2017 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"sync"
)
// File represents a combination of one or more INI files in memory.
type File struct {
options LoadOptions
dataSources []dataSource
// Should make things safe, but sometimes doesn't matter.
BlockMode bool
lock sync.RWMutex
// To keep data in order.
sectionList []string
// To keep track of the index of a section with same name.
// This meta list is only used when non-unique section names are allowed.
sectionIndexes []int
// Actual data is stored here.
sections map[string][]*Section
NameMapper
ValueMapper
}
// newFile initializes File object with given data sources.
func newFile(dataSources []dataSource, opts LoadOptions) *File {
if len(opts.KeyValueDelimiters) == 0 {
opts.KeyValueDelimiters = "=:"
}
if len(opts.KeyValueDelimiterOnWrite) == 0 {
opts.KeyValueDelimiterOnWrite = "="
}
return &File{
BlockMode: true,
dataSources: dataSources,
sections: make(map[string][]*Section),
options: opts,
}
}
// Empty returns an empty file object.
func Empty(opts ...LoadOptions) *File {
var opt LoadOptions
if len(opts) > 0 {
opt = opts[0]
}
// Ignore error here, we are sure our data is good.
f, _ := LoadSources(opt, []byte(""))
return f
}
// NewSection creates a new section.
func (f *File) NewSection(name string) (*Section, error) {
if len(name) == 0 {
return nil, errors.New("empty section name")
}
if f.options.Insensitive && name != DefaultSection {
name = strings.ToLower(name)
}
if f.BlockMode {
f.lock.Lock()
defer f.lock.Unlock()
}
if !f.options.AllowNonUniqueSections && inSlice(name, f.sectionList) {
return f.sections[name][0], nil
}
f.sectionList = append(f.sectionList, name)
// NOTE: Append to indexes must happen before appending to sections,
// otherwise index will have off-by-one problem.
f.sectionIndexes = append(f.sectionIndexes, len(f.sections[name]))
sec := newSection(f, name)
f.sections[name] = append(f.sections[name], sec)
return sec, nil
}
// NewRawSection creates a new section with an unparseable body.
func (f *File) NewRawSection(name, body string) (*Section, error) {
section, err := f.NewSection(name)
if err != nil {
return nil, err
}
section.isRawSection = true
section.rawBody = body
return section, nil
}
// NewSections creates a list of sections.
func (f *File) NewSections(names ...string) (err error) {
for _, name := range names {
if _, err = f.NewSection(name); err != nil {
return err
}
}
return nil
}
// GetSection returns section by given name.
func (f *File) GetSection(name string) (*Section, error) {
secs, err := f.SectionsByName(name)
if err != nil {
return nil, err
}
return secs[0], err
}
// SectionsByName returns all sections with given name.
func (f *File) SectionsByName(name string) ([]*Section, error) {
if len(name) == 0 {
name = DefaultSection
}
if f.options.Insensitive {
name = strings.ToLower(name)
}
if f.BlockMode {
f.lock.RLock()
defer f.lock.RUnlock()
}
secs := f.sections[name]
if len(secs) == 0 {
return nil, fmt.Errorf("section %q does not exist", name)
}
return secs, nil
}
// Section assumes named section exists and returns a zero-value when not.
func (f *File) Section(name string) *Section {
sec, err := f.GetSection(name)
if err != nil {
// Note: It's OK here because the only possible error is empty section name,
// but if it's empty, this piece of code won't be executed.
sec, _ = f.NewSection(name)
return sec
}
return sec
}
// SectionWithIndex assumes named section exists and returns a new section when not.
func (f *File) SectionWithIndex(name string, index int) *Section {
secs, err := f.SectionsByName(name)
if err != nil || len(secs) <= index {
// NOTE: It's OK here because the only possible error is empty section name,
// but if it's empty, this piece of code won't be executed.
newSec, _ := f.NewSection(name)
return newSec
}
return secs[index]
}
// Sections returns a list of Section stored in the current instance.
func (f *File) Sections() []*Section {
if f.BlockMode {
f.lock.RLock()
defer f.lock.RUnlock()
}
sections := make([]*Section, len(f.sectionList))
for i, name := range f.sectionList {
sections[i] = f.sections[name][f.sectionIndexes[i]]
}
return sections
}
// ChildSections returns a list of child sections of given section name.
func (f *File) ChildSections(name string) []*Section {
return f.Section(name).ChildSections()
}
// SectionStrings returns list of section names.
func (f *File) SectionStrings() []string {
list := make([]string, len(f.sectionList))
copy(list, f.sectionList)
return list
}
// DeleteSection deletes a section or all sections with given name.
func (f *File) DeleteSection(name string) {
secs, err := f.SectionsByName(name)
if err != nil {
return
}
for i := 0; i < len(secs); i++ {
// For non-unique sections, it is always needed to remove the first one so
// in the next iteration, the subsequent section continues to have index 0.
// Ignoring the error as index 0 never returns an error.
_ = f.DeleteSectionWithIndex(name, 0)
}
}
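A small sketch of how a caller might drive the section-management methods above (Empty, NewSection, SectionStrings, DeleteSection); the section and key names are hypothetical, and NewKey comes from section.go, which is not part of this excerpt.
```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	f := ini.Empty()

	// Create a named section and a key inside it.
	sec, err := f.NewSection("database")
	if err != nil {
		log.Fatal(err)
	}
	// NewKey is defined in section.go (not shown in this diff).
	if _, err := sec.NewKey("host", "127.0.0.1"); err != nil {
		log.Fatal(err)
	}

	fmt.Println(f.SectionStrings()) // [DEFAULT database]

	// Remove every section with this name again.
	f.DeleteSection("database")
	fmt.Println(f.SectionStrings()) // [DEFAULT]
}
```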
// DeleteSectionWithIndex deletes a section with given name and index.
func (f *File) DeleteSectionWithIndex(name string, index int) error {
if !f.options.AllowNonUniqueSections && index != 0 {
return fmt.Errorf("delete section with non-zero index is only allowed when non-unique sections is enabled")
}
if len(name) == 0 {
name = DefaultSection
}
if f.options.Insensitive {
name = strings.ToLower(name)
}
if f.BlockMode {
f.lock.Lock()
defer f.lock.Unlock()
}
// Count occurrences of the sections
occurrences := 0
sectionListCopy := make([]string, len(f.sectionList))
copy(sectionListCopy, f.sectionList)
for i, s := range sectionListCopy {
if s != name {
continue
}
if occurrences == index {
if len(f.sections[name]) <= 1 {
delete(f.sections, name) // The last one in the map
} else {
f.sections[name] = append(f.sections[name][:index], f.sections[name][index+1:]...)
}
// Fix section lists
f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
f.sectionIndexes = append(f.sectionIndexes[:i], f.sectionIndexes[i+1:]...)
} else if occurrences > index {
// Fix the indices of all following sections with this name.
f.sectionIndexes[i-1]--
}
occurrences++
}
return nil
}
func (f *File) reload(s dataSource) error {
r, err := s.ReadCloser()
if err != nil {
return err
}
defer r.Close()
return f.parse(r)
}
// Reload reloads and parses all data sources.
func (f *File) Reload() (err error) {
for _, s := range f.dataSources {
if err = f.reload(s); err != nil {
// In loose mode, we create an empty default section for nonexistent files.
if os.IsNotExist(err) && f.options.Loose {
_ = f.parse(bytes.NewBuffer(nil))
continue
}
return err
}
}
return nil
}
// Append appends one or more data sources and reloads automatically.
func (f *File) Append(source interface{}, others ...interface{}) error {
ds, err := parseDataSource(source)
if err != nil {
return err
}
f.dataSources = append(f.dataSources, ds)
for _, s := range others {
ds, err = parseDataSource(s)
if err != nil {
return err
}
f.dataSources = append(f.dataSources, ds)
}
return f.Reload()
}
func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
equalSign := DefaultFormatLeft + f.options.KeyValueDelimiterOnWrite + DefaultFormatRight
if PrettyFormat || PrettyEqual {
equalSign = fmt.Sprintf(" %s ", f.options.KeyValueDelimiterOnWrite)
}
// Use buffer to make sure target is safe until finish encoding.
buf := bytes.NewBuffer(nil)
for i, sname := range f.sectionList {
sec := f.SectionWithIndex(sname, f.sectionIndexes[i])
if len(sec.Comment) > 0 {
// Support multiline comments
lines := strings.Split(sec.Comment, LineBreak)
for i := range lines {
if lines[i][0] != '#' && lines[i][0] != ';' {
lines[i] = "; " + lines[i]
} else {
lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
}
if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
return nil, err
}
}
}
if i > 0 || DefaultHeader {
if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
return nil, err
}
} else {
// Write nothing if default section is empty
if len(sec.keyList) == 0 {
continue
}
}
if sec.isRawSection {
if _, err := buf.WriteString(sec.rawBody); err != nil {
return nil, err
}
if PrettySection {
// Put a line between sections
if _, err := buf.WriteString(LineBreak); err != nil {
return nil, err
}
}
continue
}
// Count and generate alignment length and buffer spaces using the
// longest key. Keys may be modified if they contain certain characters so
// we need to take that into account in our calculation.
alignLength := 0
if PrettyFormat {
for _, kname := range sec.keyList {
keyLength := len(kname)
// First case will surround key by ` and second by """
if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) {
keyLength += 2
} else if strings.Contains(kname, "`") {
keyLength += 6
}
if keyLength > alignLength {
alignLength = keyLength
}
}
}
alignSpaces := bytes.Repeat([]byte(" "), alignLength)
KeyList:
for _, kname := range sec.keyList {
key := sec.Key(kname)
if len(key.Comment) > 0 {
if len(indent) > 0 && sname != DefaultSection {
buf.WriteString(indent)
}
// Support multiline comments
lines := strings.Split(key.Comment, LineBreak)
for i := range lines {
if lines[i][0] != '#' && lines[i][0] != ';' {
lines[i] = "; " + strings.TrimSpace(lines[i])
} else {
lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
}
if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
return nil, err
}
}
}
if len(indent) > 0 && sname != DefaultSection {
buf.WriteString(indent)
}
switch {
case key.isAutoIncrement:
kname = "-"
case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters):
kname = "`" + kname + "`"
case strings.Contains(kname, "`"):
kname = `"""` + kname + `"""`
}
for _, val := range key.ValueWithShadows() {
if _, err := buf.WriteString(kname); err != nil {
return nil, err
}
if key.isBooleanType {
if kname != sec.keyList[len(sec.keyList)-1] {
buf.WriteString(LineBreak)
}
continue KeyList
}
// Write out alignment spaces before "=" sign
if PrettyFormat {
buf.Write(alignSpaces[:alignLength-len(kname)])
}
// In case key value contains "\n", "`", "\"", "#" or ";"
if strings.ContainsAny(val, "\n`") {
val = `"""` + val + `"""`
} else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
val = "`" + val + "`"
}
if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil {
return nil, err
}
}
for _, val := range key.nestedValues {
if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil {
return nil, err
}
}
}
if PrettySection {
// Put a line between sections
if _, err := buf.WriteString(LineBreak); err != nil {
return nil, err
}
}
}
return buf, nil
}
// WriteToIndent writes content into io.Writer with given indention.
// If PrettyFormat has been set to be true,
// it will align "=" sign with spaces under each section.
func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) {
buf, err := f.writeToBuffer(indent)
if err != nil {
return 0, err
}
return buf.WriteTo(w)
}
// WriteTo writes file content into io.Writer.
func (f *File) WriteTo(w io.Writer) (int64, error) {
return f.WriteToIndent(w, "")
}
// SaveToIndent writes content to file system with given value indention.
func (f *File) SaveToIndent(filename, indent string) error {
// Note: Because we are truncating with os.Create,
// it's safer to save to a temporary file location and rename after we're done.
buf, err := f.writeToBuffer(indent)
if err != nil {
return err
}
return ioutil.WriteFile(filename, buf.Bytes(), 0666)
}
// SaveTo writes content to file system.
func (f *File) SaveTo(filename string) error {
return f.SaveToIndent(filename, "")
}
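To round out the write path above, a sketch of serialising a file both to an io.Writer and to disk; the section, key, and output file name are hypothetical, and NewKey again comes from section.go, which is not shown here.
```go
package main

import (
	"log"
	"os"

	"gopkg.in/ini.v1"
)

func main() {
	f := ini.Empty()
	if _, err := f.Section("server").NewKey("port", "9090"); err != nil {
		log.Fatal(err)
	}

	// Write to any io.Writer, e.g. stdout, using the buffer built by writeToBuffer...
	if _, err := f.WriteTo(os.Stdout); err != nil {
		log.Fatal(err)
	}
	// ...or persist to disk via SaveTo, which ultimately calls ioutil.WriteFile.
	if err := f.SaveTo("my.ini"); err != nil {
		log.Fatal(err)
	}
}
```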

View File

@@ -1,24 +0,0 @@
// Copyright 2019 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
func inSlice(str string, s []string) bool {
for _, v := range s {
if str == v {
return true
}
}
return false
}

168
vendor/github.com/go-ini/ini/ini.go generated vendored
View File

@@ -1,168 +0,0 @@
// +build go1.6
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
// Package ini provides INI file read and write functionality in Go.
package ini
import (
"os"
"regexp"
"runtime"
"strings"
)
const (
// DefaultSection is the name of default section. You can use this constant or the string literal.
// In most of cases, an empty string is all you need to access the section.
DefaultSection = "DEFAULT"
// Maximum allowed depth when recursively substituting variable names.
depthValues = 99
)
var (
// LineBreak is the delimiter to determine or compose a new line.
// This variable will be changed to "\r\n" automatically on Windows at package init time.
LineBreak = "\n"
// Variable regexp pattern: %(variable)s
varPattern = regexp.MustCompile(`%\(([^)]+)\)s`)
// DefaultHeader explicitly writes default section header.
DefaultHeader = false
// PrettySection indicates whether to put a line between sections.
PrettySection = true
// PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output
// or reduce all possible spaces for compact format.
PrettyFormat = true
// PrettyEqual places spaces around "=" sign even when PrettyFormat is false.
PrettyEqual = false
// DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled.
DefaultFormatLeft = ""
// DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled.
DefaultFormatRight = ""
)
var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test")
func init() {
if runtime.GOOS == "windows" && !inTest {
LineBreak = "\r\n"
}
}
// LoadOptions contains all customized options used for load data source(s).
type LoadOptions struct {
// Loose indicates whether the parser should ignore nonexistent files or return error.
Loose bool
// Insensitive indicates whether the parser forces all section and key names to lowercase.
Insensitive bool
// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
IgnoreContinuation bool
// IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value.
IgnoreInlineComment bool
// SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
SkipUnrecognizableLines bool
// AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing.
// This type of keys are mostly used in my.cnf.
AllowBooleanKeys bool
// AllowShadows indicates whether to keep track of keys with same name under same section.
AllowShadows bool
// AllowNestedValues indicates whether to allow AWS-like nested values.
// Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values
AllowNestedValues bool
// AllowPythonMultilineValues indicates whether to allow Python-like multi-line values.
// Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure
// Relevant quote: Values can also span multiple lines, as long as they are indented deeper
// than the first line of the value.
AllowPythonMultilineValues bool
// SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value.
// Docs: https://docs.python.org/2/library/configparser.html
// Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names.
// In the latter case, they need to be preceded by a whitespace character to be recognized as a comment.
SpaceBeforeInlineComment bool
// UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format
// when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value"
UnescapeValueDoubleQuotes bool
// UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format
// when value is NOT surrounded by any quotes.
// Note: UNSTABLE, behavior might change to only unescape inside double quotes, but that may not be necessary at all.
UnescapeValueCommentSymbols bool
// UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise
// conform to key/value pairs. Specify the names of those blocks here.
UnparseableSections []string
// KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:".
KeyValueDelimiters string
// KeyValueDelimiterOnWrite is the delimiter used to separate key and value in output. By default, it is "=".
KeyValueDelimiterOnWrite string
// PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes).
PreserveSurroundedQuote bool
// DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values).
DebugFunc DebugFunc
// ReaderBufferSize is the buffer size of the reader in bytes.
ReaderBufferSize int
// AllowNonUniqueSections indicates whether to allow sections with the same name multiple times.
AllowNonUniqueSections bool
}
// DebugFunc is the type of function called to log parse events.
type DebugFunc func(message string)
// LoadSources allows caller to apply customized options for loading from data source(s).
func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
sources := make([]dataSource, len(others)+1)
sources[0], err = parseDataSource(source)
if err != nil {
return nil, err
}
for i := range others {
sources[i+1], err = parseDataSource(others[i])
if err != nil {
return nil, err
}
}
f := newFile(sources, opts)
if err = f.Reload(); err != nil {
return nil, err
}
return f, nil
}
// Load loads and parses from INI data sources.
// Arguments can be mixed of file name with string type, or raw data in []byte.
// It will return error if list contains nonexistent files.
func Load(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{}, source, others...)
}
// LooseLoad has exactly same functionality as Load function
// except it ignores nonexistent files instead of returning error.
func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{Loose: true}, source, others...)
}
// InsensitiveLoad has exactly same functionality as Load function
// except it forces all section and key names to be lowercased.
func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{Insensitive: true}, source, others...)
}
// ShadowLoad has exactly same functionality as Load function
// except it allows having shadow keys.
func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
}
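The convenience loaders above only differ in the LoadOptions they hand to LoadSources; a sketch of choosing between them follows (the file names are hypothetical).
```go
package main

import (
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	// Plain Load: returns an error if my.ini does not exist.
	if _, err := ini.Load("my.ini"); err != nil {
		log.Printf("load: %v", err)
	}

	// LooseLoad: nonexistent files are treated as empty instead of failing.
	if _, err := ini.LooseLoad("my.ini", "missing.ini"); err != nil {
		log.Printf("loose load: %v", err)
	}

	// Equivalent to InsensitiveLoad, spelled out via LoadSources.
	if _, err := ini.LoadSources(ini.LoadOptions{Insensitive: true}, "my.ini"); err != nil {
		log.Printf("insensitive load: %v", err)
	}
}
```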

829
vendor/github.com/go-ini/ini/key.go generated vendored
View File

@@ -1,829 +0,0 @@
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"bytes"
"errors"
"fmt"
"strconv"
"strings"
"time"
)
// Key represents a key under a section.
type Key struct {
s *Section
Comment string
name string
value string
isAutoIncrement bool
isBooleanType bool
isShadow bool
shadows []*Key
nestedValues []string
}
// newKey simply returns a key object with given values.
func newKey(s *Section, name, val string) *Key {
return &Key{
s: s,
name: name,
value: val,
}
}
func (k *Key) addShadow(val string) error {
if k.isShadow {
return errors.New("cannot add shadow to another shadow key")
} else if k.isAutoIncrement || k.isBooleanType {
return errors.New("cannot add shadow to auto-increment or boolean key")
}
// Deduplicate shadows based on their values.
if k.value == val {
return nil
}
for i := range k.shadows {
if k.shadows[i].value == val {
return nil
}
}
shadow := newKey(k.s, k.name, val)
shadow.isShadow = true
k.shadows = append(k.shadows, shadow)
return nil
}
// AddShadow adds a new shadow key to itself.
func (k *Key) AddShadow(val string) error {
if !k.s.f.options.AllowShadows {
return errors.New("shadow key is not allowed")
}
return k.addShadow(val)
}
func (k *Key) addNestedValue(val string) error {
if k.isAutoIncrement || k.isBooleanType {
return errors.New("cannot add nested value to auto-increment or boolean key")
}
k.nestedValues = append(k.nestedValues, val)
return nil
}
// AddNestedValue adds a nested value to the key.
func (k *Key) AddNestedValue(val string) error {
if !k.s.f.options.AllowNestedValues {
return errors.New("nested value is not allowed")
}
return k.addNestedValue(val)
}
// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
type ValueMapper func(string) string
// Name returns name of key.
func (k *Key) Name() string {
return k.name
}
// Value returns raw value of key for performance purpose.
func (k *Key) Value() string {
return k.value
}
// ValueWithShadows returns raw values of key and its shadows if any.
func (k *Key) ValueWithShadows() []string {
if len(k.shadows) == 0 {
return []string{k.value}
}
vals := make([]string, len(k.shadows)+1)
vals[0] = k.value
for i := range k.shadows {
vals[i+1] = k.shadows[i].value
}
return vals
}
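A sketch of how shadow keys surface through Value and ValueWithShadows when a file is loaded with ShadowLoad (shown earlier in this diff); the sample data is made up.
```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	// Repeated keys in one section become shadow values when AllowShadows is enabled.
	data := []byte("[ssh]\nProxyCommand = cmd-1\nProxyCommand = cmd-2\n")

	f, err := ini.ShadowLoad(data)
	if err != nil {
		log.Fatal(err)
	}

	key := f.Section("ssh").Key("ProxyCommand")
	fmt.Println(key.Value())            // cmd-1 (the first value only)
	fmt.Println(key.ValueWithShadows()) // [cmd-1 cmd-2]
}
```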
// NestedValues returns nested values stored in the key.
// The returned value may be nil if no nested values are stored in the key.
func (k *Key) NestedValues() []string {
return k.nestedValues
}
// transformValue takes a raw value and transforms to its final string.
func (k *Key) transformValue(val string) string {
if k.s.f.ValueMapper != nil {
val = k.s.f.ValueMapper(val)
}
// Fail-fast if no indicate char found for recursive value
if !strings.Contains(val, "%") {
return val
}
for i := 0; i < depthValues; i++ {
vr := varPattern.FindString(val)
if len(vr) == 0 {
break
}
// Take off leading '%(' and trailing ')s'.
noption := vr[2 : len(vr)-2]
// Search in the same section.
// If not found or found the key itself, then search again in default section.
nk, err := k.s.GetKey(noption)
if err != nil || k == nk {
nk, _ = k.s.f.Section("").GetKey(noption)
if nk == nil {
// Stop when no results found in the default section,
// and returns the value as-is.
break
}
}
// Substitute by new value and take off leading '%(' and trailing ')s'.
val = strings.Replace(val, vr, nk.value, -1)
}
return val
}
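To show what the %(name)s substitution in transformValue does in practice, a small sketch with made-up data: the variable is resolved in the same section first, then in the default section.
```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	data := []byte(`NAME = ini
[author]
NAME = Unknwon
GITHUB = https://github.com/%(NAME)s
`)
	f, err := ini.Load(data)
	if err != nil {
		log.Fatal(err)
	}
	// %(NAME)s resolves within the [author] section first, so this prints
	// https://github.com/Unknwon rather than https://github.com/ini.
	fmt.Println(f.Section("author").Key("GITHUB").String())
}
```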
// String returns string representation of value.
func (k *Key) String() string {
return k.transformValue(k.value)
}
// Validate accepts a validate function which can
// return a modified result as the key value.
func (k *Key) Validate(fn func(string) string) string {
return fn(k.String())
}
// parseBool returns the boolean value represented by the string.
//
// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
// Any other value returns an error.
func parseBool(str string) (value bool, err error) {
switch str {
case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
return true, nil
case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
return false, nil
}
return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
}
// Bool returns bool type value.
func (k *Key) Bool() (bool, error) {
return parseBool(k.String())
}
// Float64 returns float64 type value.
func (k *Key) Float64() (float64, error) {
return strconv.ParseFloat(k.String(), 64)
}
// Int returns int type value.
func (k *Key) Int() (int, error) {
v, err := strconv.ParseInt(k.String(), 0, 64)
return int(v), err
}
// Int64 returns int64 type value.
func (k *Key) Int64() (int64, error) {
return strconv.ParseInt(k.String(), 0, 64)
}
// Uint returns uint type value.
func (k *Key) Uint() (uint, error) {
u, e := strconv.ParseUint(k.String(), 0, 64)
return uint(u), e
}
// Uint64 returns uint64 type value.
func (k *Key) Uint64() (uint64, error) {
return strconv.ParseUint(k.String(), 0, 64)
}
// Duration returns time.Duration type value.
func (k *Key) Duration() (time.Duration, error) {
return time.ParseDuration(k.String())
}
// TimeFormat parses with given format and returns time.Time type value.
func (k *Key) TimeFormat(format string) (time.Time, error) {
return time.Parse(format, k.String())
}
// Time parses with RFC3339 format and returns time.Time type value.
func (k *Key) Time() (time.Time, error) {
return k.TimeFormat(time.RFC3339)
}
// MustString returns default value if key value is empty.
func (k *Key) MustString(defaultVal string) string {
val := k.String()
if len(val) == 0 {
k.value = defaultVal
return defaultVal
}
return val
}
// MustBool always returns value without error,
// it returns false if error occurs.
func (k *Key) MustBool(defaultVal ...bool) bool {
val, err := k.Bool()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatBool(defaultVal[0])
return defaultVal[0]
}
return val
}
// MustFloat64 always returns value without error,
// it returns 0.0 if error occurs.
func (k *Key) MustFloat64(defaultVal ...float64) float64 {
val, err := k.Float64()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
return defaultVal[0]
}
return val
}
// MustInt always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustInt(defaultVal ...int) int {
val, err := k.Int()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
return defaultVal[0]
}
return val
}
// MustInt64 always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustInt64(defaultVal ...int64) int64 {
val, err := k.Int64()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatInt(defaultVal[0], 10)
return defaultVal[0]
}
return val
}
// MustUint always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustUint(defaultVal ...uint) uint {
val, err := k.Uint()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
return defaultVal[0]
}
return val
}
// MustUint64 always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
val, err := k.Uint64()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatUint(defaultVal[0], 10)
return defaultVal[0]
}
return val
}
// MustDuration always returns value without error,
// it returns zero value if error occurs.
func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
val, err := k.Duration()
if len(defaultVal) > 0 && err != nil {
k.value = defaultVal[0].String()
return defaultVal[0]
}
return val
}
// MustTimeFormat always parses with given format and returns value without error,
// it returns zero value if error occurs.
func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
val, err := k.TimeFormat(format)
if len(defaultVal) > 0 && err != nil {
k.value = defaultVal[0].Format(format)
return defaultVal[0]
}
return val
}
// MustTime always parses with RFC3339 format and returns value without error,
// it returns zero value if error occurs.
func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
return k.MustTimeFormat(time.RFC3339, defaultVal...)
}
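A sketch contrasting the strict accessors with the Must* helpers above: on invalid input the Must* variants fall back to the supplied default and also rewrite the stored value. The sample data is made up.
```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	data := []byte("[server]\nport = not-a-number\n")
	f, err := ini.Load(data)
	if err != nil {
		log.Fatal(err)
	}

	key := f.Section("server").Key("port")

	// Int() reports the parse error...
	if _, err := key.Int(); err != nil {
		fmt.Println("parse error:", err)
	}
	// ...while MustInt falls back to the default and replaces the stored value.
	fmt.Println(key.MustInt(8080)) // 8080
	fmt.Println(key.String())      // 8080
}
```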
// In always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) In(defaultVal string, candidates []string) string {
val := k.String()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InFloat64 always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
val := k.MustFloat64()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InInt always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InInt(defaultVal int, candidates []int) int {
val := k.MustInt()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InInt64 always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
val := k.MustInt64()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InUint always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
val := k.MustUint()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InUint64 always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
val := k.MustUint64()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InTimeFormat always parses with given format and returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
val := k.MustTimeFormat(format)
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InTime always parses with RFC3339 format and returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
}
// RangeFloat64 checks if value is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
val := k.MustFloat64()
if val < min || val > max {
return defaultVal
}
return val
}
// RangeInt checks if value is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeInt(defaultVal, min, max int) int {
val := k.MustInt()
if val < min || val > max {
return defaultVal
}
return val
}
// RangeInt64 checks if value is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
val := k.MustInt64()
if val < min || val > max {
return defaultVal
}
return val
}
// RangeTimeFormat checks if value with given format is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
val := k.MustTimeFormat(format)
if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
return defaultVal
}
return val
}
// RangeTime checks if value with RFC3339 format is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
}
// Strings returns list of string divided by given delimiter.
func (k *Key) Strings(delim string) []string {
str := k.String()
if len(str) == 0 {
return []string{}
}
runes := []rune(str)
vals := make([]string, 0, 2)
var buf bytes.Buffer
escape := false
idx := 0
for {
if escape {
escape = false
if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) {
buf.WriteRune('\\')
}
buf.WriteRune(runes[idx])
} else {
if runes[idx] == '\\' {
escape = true
} else if strings.HasPrefix(string(runes[idx:]), delim) {
idx += len(delim) - 1
vals = append(vals, strings.TrimSpace(buf.String()))
buf.Reset()
} else {
buf.WriteRune(runes[idx])
}
}
idx++
if idx == len(runes) {
break
}
}
if buf.Len() > 0 {
vals = append(vals, strings.TrimSpace(buf.String()))
}
return vals
}
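A sketch of the delimiter handling in Strings above, including the backslash escape for a literal delimiter; the data is made up.
```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	// The backslash before the third comma escapes the delimiter.
	data := []byte("peers = alpha, beta, gamma\\, inc\n")
	f, err := ini.Load(data)
	if err != nil {
		log.Fatal(err)
	}

	vals := f.Section("").Key("peers").Strings(",")
	fmt.Println(len(vals)) // 3
	fmt.Println(vals)      // [alpha beta gamma, inc]
}
```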
// StringsWithShadows returns list of string divided by given delimiter.
// Shadows will also be appended if any.
func (k *Key) StringsWithShadows(delim string) []string {
vals := k.ValueWithShadows()
results := make([]string, 0, len(vals)*2)
for i := range vals {
if len(vals) == 0 {
continue
}
results = append(results, strings.Split(vals[i], delim)...)
}
for i := range results {
results[i] = k.transformValue(strings.TrimSpace(results[i]))
}
return results
}
// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Float64s(delim string) []float64 {
vals, _ := k.parseFloat64s(k.Strings(delim), true, false)
return vals
}
// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Ints(delim string) []int {
vals, _ := k.parseInts(k.Strings(delim), true, false)
return vals
}
// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Int64s(delim string) []int64 {
vals, _ := k.parseInt64s(k.Strings(delim), true, false)
return vals
}
// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Uints(delim string) []uint {
vals, _ := k.parseUints(k.Strings(delim), true, false)
return vals
}
// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Uint64s(delim string) []uint64 {
vals, _ := k.parseUint64s(k.Strings(delim), true, false)
return vals
}
// Bools returns list of bool divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Bools(delim string) []bool {
vals, _ := k.parseBools(k.Strings(delim), true, false)
return vals
}
// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
func (k *Key) TimesFormat(format, delim string) []time.Time {
vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false)
return vals
}
// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
func (k *Key) Times(delim string) []time.Time {
return k.TimesFormat(time.RFC3339, delim)
}
// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then
// it will not be included to result list.
func (k *Key) ValidFloat64s(delim string) []float64 {
vals, _ := k.parseFloat64s(k.Strings(delim), false, false)
return vals
}
// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
// not be included to result list.
func (k *Key) ValidInts(delim string) []int {
vals, _ := k.parseInts(k.Strings(delim), false, false)
return vals
}
// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
// then it will not be included to result list.
func (k *Key) ValidInt64s(delim string) []int64 {
vals, _ := k.parseInt64s(k.Strings(delim), false, false)
return vals
}
// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer,
// then it will not be included to result list.
func (k *Key) ValidUints(delim string) []uint {
vals, _ := k.parseUints(k.Strings(delim), false, false)
return vals
}
// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
// integer, then it will not be included to result list.
func (k *Key) ValidUint64s(delim string) []uint64 {
vals, _ := k.parseUint64s(k.Strings(delim), false, false)
return vals
}
// ValidBools returns list of bool divided by given delimiter. If some value is not a bool, then it
// will not be included to result list.
func (k *Key) ValidBools(delim string) []bool {
vals, _ := k.parseBools(k.Strings(delim), false, false)
return vals
}
// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false)
return vals
}
// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
func (k *Key) ValidTimes(delim string) []time.Time {
return k.ValidTimesFormat(time.RFC3339, delim)
}
// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
return k.parseFloat64s(k.Strings(delim), false, true)
}
// StrictInts returns list of int divided by given delimiter or error on first invalid input.
func (k *Key) StrictInts(delim string) ([]int, error) {
return k.parseInts(k.Strings(delim), false, true)
}
// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictInt64s(delim string) ([]int64, error) {
return k.parseInt64s(k.Strings(delim), false, true)
}
// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
func (k *Key) StrictUints(delim string) ([]uint, error) {
return k.parseUints(k.Strings(delim), false, true)
}
// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
return k.parseUint64s(k.Strings(delim), false, true)
}
// StrictBools returns list of bool divided by given delimiter or error on first invalid input.
func (k *Key) StrictBools(delim string) ([]bool, error) {
return k.parseBools(k.Strings(delim), false, true)
}
// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
// or error on first invalid input.
func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
return k.parseTimesFormat(format, k.Strings(delim), false, true)
}
// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
// or error on first invalid input.
func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
return k.StrictTimesFormat(time.RFC3339, delim)
}
// parseBools transforms strings to bools.
func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) {
vals := make([]bool, 0, len(strs))
parser := func(str string) (interface{}, error) {
val, err := parseBool(str)
return val, err
}
rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
if err == nil {
for _, val := range rawVals {
vals = append(vals, val.(bool))
}
}
return vals, err
}
// parseFloat64s transforms strings to float64s.
func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
vals := make([]float64, 0, len(strs))
parser := func(str string) (interface{}, error) {
val, err := strconv.ParseFloat(str, 64)
return val, err
}
rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
if err == nil {
for _, val := range rawVals {
vals = append(vals, val.(float64))
}
}
return vals, err
}
// parseInts transforms strings to ints.
func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
vals := make([]int, 0, len(strs))
parser := func(str string) (interface{}, error) {
val, err := strconv.ParseInt(str, 0, 64)
return val, err
}
rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
if err == nil {
for _, val := range rawVals {
vals = append(vals, int(val.(int64)))
}
}
return vals, err
}
// parseInt64s transforms strings to int64s.
func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
vals := make([]int64, 0, len(strs))
parser := func(str string) (interface{}, error) {
val, err := strconv.ParseInt(str, 0, 64)
return val, err
}
rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
if err == nil {
for _, val := range rawVals {
vals = append(vals, val.(int64))
}
}
return vals, err
}
// parseUints transforms strings to uints.
func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) {
vals := make([]uint, 0, len(strs))
parser := func(str string) (interface{}, error) {
val, err := strconv.ParseUint(str, 0, 64)
return val, err
}
rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
if err == nil {
for _, val := range rawVals {
vals = append(vals, uint(val.(uint64)))
}
}
return vals, err
}
// parseUint64s transforms strings to uint64s.
func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
vals := make([]uint64, 0, len(strs))
parser := func(str string) (interface{}, error) {
val, err := strconv.ParseUint(str, 0, 64)
return val, err
}
rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
if err == nil {
for _, val := range rawVals {
vals = append(vals, val.(uint64))
}
}
return vals, err
}
type Parser func(str string) (interface{}, error)
// parseTimesFormat transforms strings to times in given format.
func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
vals := make([]time.Time, 0, len(strs))
parser := func(str string) (interface{}, error) {
val, err := time.Parse(format, str)
return val, err
}
rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
if err == nil {
for _, val := range rawVals {
vals = append(vals, val.(time.Time))
}
}
return vals, err
}
// doParse transforms strings to different types
func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) {
vals := make([]interface{}, 0, len(strs))
for _, str := range strs {
val, err := parser(str)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// SetValue changes key value.
func (k *Key) SetValue(v string) {
if k.s.f.BlockMode {
k.s.f.lock.Lock()
defer k.s.f.lock.Unlock()
}
k.value = v
k.s.keysHash[k.name] = v
}
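Finally, a sketch of updating a value in place with SetValue; with BlockMode left at its default of true, the write is guarded by the file's lock. The section and key are hypothetical.
```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	data := []byte("[app]\nmode = debug\n")
	f, err := ini.Load(data)
	if err != nil {
		log.Fatal(err)
	}

	key := f.Section("app").Key("mode")
	key.SetValue("release")   // takes the file lock while BlockMode is true
	fmt.Println(key.String()) // release
}
```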

View File

@@ -1,535 +0,0 @@
// Copyright 2015 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"bufio"
"bytes"
"fmt"
"io"
"regexp"
"strconv"
"strings"
"unicode"
)
const minReaderBufferSize = 4096
var pythonMultiline = regexp.MustCompile(`^([\t\f ]+)(.*)`)
type parserOptions struct {
IgnoreContinuation bool
IgnoreInlineComment bool
AllowPythonMultilineValues bool
SpaceBeforeInlineComment bool
UnescapeValueDoubleQuotes bool
UnescapeValueCommentSymbols bool
PreserveSurroundedQuote bool
DebugFunc DebugFunc
ReaderBufferSize int
}
type parser struct {
buf *bufio.Reader
options parserOptions
isEOF bool
count int
comment *bytes.Buffer
}
func (p *parser) debug(format string, args ...interface{}) {
if p.options.DebugFunc != nil {
p.options.DebugFunc(fmt.Sprintf(format, args...))
}
}
func newParser(r io.Reader, opts parserOptions) *parser {
size := opts.ReaderBufferSize
if size < minReaderBufferSize {
size = minReaderBufferSize
}
return &parser{
buf: bufio.NewReaderSize(r, size),
options: opts,
count: 1,
comment: &bytes.Buffer{},
}
}
// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format.
// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
func (p *parser) BOM() error {
mask, err := p.buf.Peek(2)
if err != nil && err != io.EOF {
return err
} else if len(mask) < 2 {
return nil
}
switch {
case mask[0] == 254 && mask[1] == 255:
fallthrough
case mask[0] == 255 && mask[1] == 254:
_, err = p.buf.Read(mask)
if err != nil {
return err
}
case mask[0] == 239 && mask[1] == 187:
mask, err := p.buf.Peek(3)
if err != nil && err != io.EOF {
return err
} else if len(mask) < 3 {
return nil
}
if mask[2] == 191 {
_, err = p.buf.Read(mask)
if err != nil {
return err
}
}
}
return nil
}
func (p *parser) readUntil(delim byte) ([]byte, error) {
data, err := p.buf.ReadBytes(delim)
if err != nil {
if err == io.EOF {
p.isEOF = true
} else {
return nil, err
}
}
return data, nil
}
func cleanComment(in []byte) ([]byte, bool) {
i := bytes.IndexAny(in, "#;")
if i == -1 {
return nil, false
}
return in[i:], true
}
func readKeyName(delimiters string, in []byte) (string, int, error) {
line := string(in)
// Check if key name surrounded by quotes.
var keyQuote string
if line[0] == '"' {
if len(line) > 6 && string(line[0:3]) == `"""` {
keyQuote = `"""`
} else {
keyQuote = `"`
}
} else if line[0] == '`' {
keyQuote = "`"
}
// Get out key name
var endIdx int
if len(keyQuote) > 0 {
startIdx := len(keyQuote)
// FIXME: fail case -> """"""name"""=value
pos := strings.Index(line[startIdx:], keyQuote)
if pos == -1 {
return "", -1, fmt.Errorf("missing closing key quote: %s", line)
}
pos += startIdx
// Find key-value delimiter
i := strings.IndexAny(line[pos+startIdx:], delimiters)
if i < 0 {
return "", -1, ErrDelimiterNotFound{line}
}
endIdx = pos + i
return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
}
endIdx = strings.IndexAny(line, delimiters)
if endIdx < 0 {
return "", -1, ErrDelimiterNotFound{line}
}
return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
}
func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
for {
data, err := p.readUntil('\n')
if err != nil {
return "", err
}
next := string(data)
pos := strings.LastIndex(next, valQuote)
if pos > -1 {
val += next[:pos]
comment, has := cleanComment([]byte(next[pos:]))
if has {
p.comment.Write(bytes.TrimSpace(comment))
}
break
}
val += next
if p.isEOF {
return "", fmt.Errorf("missing closing key quote from %q to %q", line, next)
}
}
return val, nil
}
func (p *parser) readContinuationLines(val string) (string, error) {
for {
data, err := p.readUntil('\n')
if err != nil {
return "", err
}
next := strings.TrimSpace(string(data))
if len(next) == 0 {
break
}
val += next
if val[len(val)-1] != '\\' {
break
}
val = val[:len(val)-1]
}
return val, nil
}
// hasSurroundedQuote check if and only if the first and last characters
// are quotes \" or \'.
// It returns false if any other parts also contain same kind of quotes.
func hasSurroundedQuote(in string, quote byte) bool {
return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote &&
strings.IndexByte(in[1:], quote) == len(in)-2
}
func (p *parser) readValue(in []byte, bufferSize int) (string, error) {
line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
if len(line) == 0 {
if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' {
return p.readPythonMultilines(line, bufferSize)
}
return "", nil
}
var valQuote string
if len(line) > 3 && string(line[0:3]) == `"""` {
valQuote = `"""`
} else if line[0] == '`' {
valQuote = "`"
} else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' {
valQuote = `"`
}
if len(valQuote) > 0 {
startIdx := len(valQuote)
pos := strings.LastIndex(line[startIdx:], valQuote)
// Check for multi-line value
if pos == -1 {
return p.readMultilines(line, line[startIdx:], valQuote)
}
if p.options.UnescapeValueDoubleQuotes && valQuote == `"` {
return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil
}
return line[startIdx : pos+startIdx], nil
}
lastChar := line[len(line)-1]
// Won't be able to reach here if value only contains whitespace
line = strings.TrimSpace(line)
trimmedLastChar := line[len(line)-1]
// Check continuation lines when desired
if !p.options.IgnoreContinuation && trimmedLastChar == '\\' {
return p.readContinuationLines(line[:len(line)-1])
}
// Check if ignore inline comment
if !p.options.IgnoreInlineComment {
var i int
if p.options.SpaceBeforeInlineComment {
i = strings.Index(line, " #")
if i == -1 {
i = strings.Index(line, " ;")
}
} else {
i = strings.IndexAny(line, "#;")
}
if i > -1 {
p.comment.WriteString(line[i:])
line = strings.TrimSpace(line[:i])
}
}
// Trim single and double quotes
if (hasSurroundedQuote(line, '\'') ||
hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote {
line = line[1 : len(line)-1]
} else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols {
if strings.Contains(line, `\;`) {
line = strings.Replace(line, `\;`, ";", -1)
}
if strings.Contains(line, `\#`) {
line = strings.Replace(line, `\#`, "#", -1)
}
} else if p.options.AllowPythonMultilineValues && lastChar == '\n' {
return p.readPythonMultilines(line, bufferSize)
}
return line, nil
}
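
A sketch of the value quoting handled by readValue: wrapping a value in backticks keeps '#' and ';' from being split off as an inline comment (same assumed import path; the credentials are made up):

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1" // assumed upstream path of this vendored copy
)

func main() {
	src := []byte("[credentials]\npassword = `p#ss;word`\nplain = p#ss;word\n")

	cfg, err := ini.Load(src)
	if err != nil {
		log.Fatal(err)
	}
	sec := cfg.Section("credentials")
	fmt.Println(sec.Key("password").String()) // p#ss;word  (backticks preserve it)
	fmt.Println(sec.Key("plain").String())    // p          (rest is treated as an inline comment)
}
```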
func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) {
parserBufferPeekResult, _ := p.buf.Peek(bufferSize)
peekBuffer := bytes.NewBuffer(parserBufferPeekResult)
indentSize := 0
for {
peekData, peekErr := peekBuffer.ReadBytes('\n')
if peekErr != nil {
if peekErr == io.EOF {
p.debug("readPythonMultilines: io.EOF, peekData: %q, line: %q", string(peekData), line)
return line, nil
}
p.debug("readPythonMultilines: failed to peek with error: %v", peekErr)
return "", peekErr
}
p.debug("readPythonMultilines: parsing %q", string(peekData))
peekMatches := pythonMultiline.FindStringSubmatch(string(peekData))
p.debug("readPythonMultilines: matched %d parts", len(peekMatches))
for n, v := range peekMatches {
p.debug(" %d: %q", n, v)
}
// Return if not a Python multiline value.
if len(peekMatches) != 3 {
p.debug("readPythonMultilines: end of value, got: %q", line)
return line, nil
}
// Determine indent size and line prefix.
currentIndentSize := len(peekMatches[1])
if indentSize < 1 {
indentSize = currentIndentSize
p.debug("readPythonMultilines: indent size is %d", indentSize)
}
// Make sure each line is indented at least as far as first line.
if currentIndentSize < indentSize {
p.debug("readPythonMultilines: end of value, current indent: %d, expected indent: %d, line: %q", currentIndentSize, indentSize, line)
return line, nil
}
// Advance the parser reader (buffer) in-sync with the peek buffer.
_, err := p.buf.Discard(len(peekData))
if err != nil {
p.debug("readPythonMultilines: failed to skip to the end, returning error")
return "", err
}
// Handle indented empty line.
line += "\n" + peekMatches[1][indentSize:] + peekMatches[2]
}
}
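
A sketch of the Python-style multi-line values handled above; they only apply when LoadOptions.AllowPythonMultilineValues is set (same assumed import path):

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1" // assumed upstream path of this vendored copy
)

func main() {
	// Indented follow-up lines are folded into the value, dedented by the
	// indentation of the first continuation line.
	src := []byte("[multiline]\ndescription = first line\n    second line\n    third line\n")

	cfg, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: true}, src)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Section("multiline").Key("description").String())
	// Expected:
	// first line
	// second line
	// third line
}
```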
// parse parses data through an io.Reader.
func (f *File) parse(reader io.Reader) (err error) {
p := newParser(reader, parserOptions{
IgnoreContinuation: f.options.IgnoreContinuation,
IgnoreInlineComment: f.options.IgnoreInlineComment,
AllowPythonMultilineValues: f.options.AllowPythonMultilineValues,
SpaceBeforeInlineComment: f.options.SpaceBeforeInlineComment,
UnescapeValueDoubleQuotes: f.options.UnescapeValueDoubleQuotes,
UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols,
PreserveSurroundedQuote: f.options.PreserveSurroundedQuote,
DebugFunc: f.options.DebugFunc,
ReaderBufferSize: f.options.ReaderBufferSize,
})
if err = p.BOM(); err != nil {
return fmt.Errorf("BOM: %v", err)
}
// Ignore error because default section name is never empty string.
name := DefaultSection
if f.options.Insensitive {
name = strings.ToLower(DefaultSection)
}
section, _ := f.NewSection(name)
// This "last" is not strictly equivalent to "previous one" if current key is not the first nested key
var isLastValueEmpty bool
var lastRegularKey *Key
var line []byte
var inUnparseableSection bool
// NOTE: Iterate and increase `currentPeekSize` until
// the size of the parser buffer is found.
// TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`.
parserBufferSize := 0
// NOTE: Peek 4kb at a time.
currentPeekSize := minReaderBufferSize
if f.options.AllowPythonMultilineValues {
for {
peekBytes, _ := p.buf.Peek(currentPeekSize)
peekBytesLength := len(peekBytes)
if parserBufferSize >= peekBytesLength {
break
}
currentPeekSize *= 2
parserBufferSize = peekBytesLength
}
}
for !p.isEOF {
line, err = p.readUntil('\n')
if err != nil {
return err
}
if f.options.AllowNestedValues &&
isLastValueEmpty && len(line) > 0 {
if line[0] == ' ' || line[0] == '\t' {
err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line)))
if err != nil {
return err
}
continue
}
}
line = bytes.TrimLeftFunc(line, unicode.IsSpace)
if len(line) == 0 {
continue
}
// Comments
if line[0] == '#' || line[0] == ';' {
// Note: we do not care about the trailing line break here; it is needed
// when appending the next comment line, so it is cleaned up once at the
// end when the comment is set on a value.
p.comment.Write(line)
continue
}
// Section
if line[0] == '[' {
// Read to the next ']' (TODO: support quoted strings)
closeIdx := bytes.LastIndexByte(line, ']')
if closeIdx == -1 {
return fmt.Errorf("unclosed section: %s", line)
}
name := string(line[1:closeIdx])
section, err = f.NewSection(name)
if err != nil {
return err
}
comment, has := cleanComment(line[closeIdx+1:])
if has {
p.comment.Write(comment)
}
section.Comment = strings.TrimSpace(p.comment.String())
// Reset auto-counter and comments
p.comment.Reset()
p.count = 1
inUnparseableSection = false
for i := range f.options.UnparseableSections {
if f.options.UnparseableSections[i] == name ||
(f.options.Insensitive && strings.EqualFold(f.options.UnparseableSections[i], name)) {
inUnparseableSection = true
continue
}
}
continue
}
if inUnparseableSection {
section.isRawSection = true
section.rawBody += string(line)
continue
}
kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line)
if err != nil {
// Treat as boolean key when desired, and whole line is key name.
if IsErrDelimiterNotFound(err) {
switch {
case f.options.AllowBooleanKeys:
kname, err := p.readValue(line, parserBufferSize)
if err != nil {
return err
}
key, err := section.NewBooleanKey(kname)
if err != nil {
return err
}
key.Comment = strings.TrimSpace(p.comment.String())
p.comment.Reset()
continue
case f.options.SkipUnrecognizableLines:
continue
}
}
return err
}
// Auto increment.
isAutoIncr := false
if kname == "-" {
isAutoIncr = true
kname = "#" + strconv.Itoa(p.count)
p.count++
}
value, err := p.readValue(line[offset:], parserBufferSize)
if err != nil {
return err
}
isLastValueEmpty = len(value) == 0
key, err := section.NewKey(kname, value)
if err != nil {
return err
}
key.isAutoIncrement = isAutoIncr
key.Comment = strings.TrimSpace(p.comment.String())
p.comment.Reset()
lastRegularKey = key
}
return nil
}
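
A sketch of two behaviours implemented in parse above: boolean keys (lines without a delimiter, when AllowBooleanKeys is set) and auto-increment "-" keys, which become #1, #2, ... per section (same assumed import path; the section contents are made up):

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1" // assumed upstream path of this vendored copy
)

func main() {
	src := []byte("[features]\nskip-host-cache\nskip-name-resolve\n\n" +
		"[mirrors]\n- = https://mirror-a.example\n- = https://mirror-b.example\n")

	cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, src)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Section("features").Key("skip-name-resolve").MustBool(false)) // true
	fmt.Println(cfg.Section("mirrors").KeyStrings())                              // [#1 #2]
}
```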

View File

@@ -1,256 +0,0 @@
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"errors"
"fmt"
"strings"
)
// Section represents a config section.
type Section struct {
f *File
Comment string
name string
keys map[string]*Key
keyList []string
keysHash map[string]string
isRawSection bool
rawBody string
}
func newSection(f *File, name string) *Section {
return &Section{
f: f,
name: name,
keys: make(map[string]*Key),
keyList: make([]string, 0, 10),
keysHash: make(map[string]string),
}
}
// Name returns name of Section.
func (s *Section) Name() string {
return s.name
}
// Body returns rawBody of Section if the section was marked as unparseable.
// It still follows the other rules of the INI format surrounding leading/trailing whitespace.
func (s *Section) Body() string {
return strings.TrimSpace(s.rawBody)
}
// SetBody updates body content only if section is raw.
func (s *Section) SetBody(body string) {
if !s.isRawSection {
return
}
s.rawBody = body
}
// NewKey creates a new key to given section.
func (s *Section) NewKey(name, val string) (*Key, error) {
if len(name) == 0 {
return nil, errors.New("error creating new key: empty key name")
} else if s.f.options.Insensitive {
name = strings.ToLower(name)
}
if s.f.BlockMode {
s.f.lock.Lock()
defer s.f.lock.Unlock()
}
if inSlice(name, s.keyList) {
if s.f.options.AllowShadows {
if err := s.keys[name].addShadow(val); err != nil {
return nil, err
}
} else {
s.keys[name].value = val
s.keysHash[name] = val
}
return s.keys[name], nil
}
s.keyList = append(s.keyList, name)
s.keys[name] = newKey(s, name, val)
s.keysHash[name] = val
return s.keys[name], nil
}
// NewBooleanKey creates a new boolean type key to given section.
func (s *Section) NewBooleanKey(name string) (*Key, error) {
key, err := s.NewKey(name, "true")
if err != nil {
return nil, err
}
key.isBooleanType = true
return key, nil
}
// GetKey returns key in section by given name.
func (s *Section) GetKey(name string) (*Key, error) {
if s.f.BlockMode {
s.f.lock.RLock()
}
if s.f.options.Insensitive {
name = strings.ToLower(name)
}
key := s.keys[name]
if s.f.BlockMode {
s.f.lock.RUnlock()
}
if key == nil {
// Check if it is a child-section.
sname := s.name
for {
if i := strings.LastIndex(sname, "."); i > -1 {
sname = sname[:i]
sec, err := s.f.GetSection(sname)
if err != nil {
continue
}
return sec.GetKey(name)
}
break
}
return nil, fmt.Errorf("error when getting key of section %q: key %q not exists", s.name, name)
}
return key, nil
}
// HasKey returns true if section contains a key with given name.
func (s *Section) HasKey(name string) bool {
key, _ := s.GetKey(name)
return key != nil
}
// Deprecated: Use "HasKey" instead.
func (s *Section) Haskey(name string) bool {
return s.HasKey(name)
}
// HasValue returns true if section contains given raw value.
func (s *Section) HasValue(value string) bool {
if s.f.BlockMode {
s.f.lock.RLock()
defer s.f.lock.RUnlock()
}
for _, k := range s.keys {
if value == k.value {
return true
}
}
return false
}
// Key assumes named Key exists in section and returns a zero-value when not.
func (s *Section) Key(name string) *Key {
key, err := s.GetKey(name)
if err != nil {
// It's OK here because the only possible error is empty key name,
// but if it's empty, this piece of code won't be executed.
key, _ = s.NewKey(name, "")
return key
}
return key
}
// Keys returns list of keys of section.
func (s *Section) Keys() []*Key {
keys := make([]*Key, len(s.keyList))
for i := range s.keyList {
keys[i] = s.Key(s.keyList[i])
}
return keys
}
// ParentKeys returns list of keys of parent section.
func (s *Section) ParentKeys() []*Key {
var parentKeys []*Key
sname := s.name
for {
if i := strings.LastIndex(sname, "."); i > -1 {
sname = sname[:i]
sec, err := s.f.GetSection(sname)
if err != nil {
continue
}
parentKeys = append(parentKeys, sec.Keys()...)
} else {
break
}
}
return parentKeys
}
// KeyStrings returns list of key names of section.
func (s *Section) KeyStrings() []string {
list := make([]string, len(s.keyList))
copy(list, s.keyList)
return list
}
// KeysHash returns keys hash consisting of names and values.
func (s *Section) KeysHash() map[string]string {
if s.f.BlockMode {
s.f.lock.RLock()
defer s.f.lock.RUnlock()
}
hash := map[string]string{}
for key, value := range s.keysHash {
hash[key] = value
}
return hash
}
// DeleteKey deletes a key from section.
func (s *Section) DeleteKey(name string) {
if s.f.BlockMode {
s.f.lock.Lock()
defer s.f.lock.Unlock()
}
for i, k := range s.keyList {
if k == name {
s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
delete(s.keys, name)
delete(s.keysHash, name)
return
}
}
}
// ChildSections returns a list of child sections of current section.
// For example, "[parent.child1]" and "[parent.child12]" are child sections
// of section "[parent]".
func (s *Section) ChildSections() []*Section {
prefix := s.name + "."
children := make([]*Section, 0, 3)
for _, name := range s.f.sectionList {
if strings.HasPrefix(name, prefix) {
children = append(children, s.f.sections[name]...)
}
}
return children
}
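
A short sketch of the Section API above, including the parent-key fallback in GetKey and ChildSections (same assumed import path; section and key names are made up):

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1" // assumed upstream path of this vendored copy
)

func main() {
	cfg := ini.Empty()

	parent, err := cfg.NewSection("server")
	if err != nil {
		log.Fatal(err)
	}
	if _, err = parent.NewKey("bind", "0.0.0.0"); err != nil {
		log.Fatal(err)
	}
	child, err := cfg.NewSection("server.tls")
	if err != nil {
		log.Fatal(err)
	}
	if _, err = child.NewKey("cert", "/etc/certs/fullchain.pem"); err != nil {
		log.Fatal(err)
	}

	fmt.Println(child.Key("bind").String()) // 0.0.0.0 (GetKey falls back to the parent [server])
	for _, c := range parent.ChildSections() {
		fmt.Println(c.Name(), c.KeyStrings()) // server.tls [cert]
	}
}
```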

View File

@@ -1,724 +0,0 @@
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"bytes"
"errors"
"fmt"
"reflect"
"strings"
"time"
"unicode"
)
// NameMapper represents a ini tag name mapper.
type NameMapper func(string) string
// Built-in name getters.
var (
// SnackCase converts to format SNACK_CASE.
SnackCase NameMapper = func(raw string) string {
newstr := make([]rune, 0, len(raw))
for i, chr := range raw {
if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
if i > 0 {
newstr = append(newstr, '_')
}
}
newstr = append(newstr, unicode.ToUpper(chr))
}
return string(newstr)
}
// TitleUnderscore converts to format title_underscore.
TitleUnderscore NameMapper = func(raw string) string {
newstr := make([]rune, 0, len(raw))
for i, chr := range raw {
if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
if i > 0 {
newstr = append(newstr, '_')
}
chr -= 'A' - 'a'
}
newstr = append(newstr, chr)
}
return string(newstr)
}
)
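
A sketch of how a NameMapper is applied when mapping keys onto struct fields: with TitleUnderscore, the field PackageName is looked up as package_name (same assumed import path; the struct and values are made up):

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1" // assumed upstream path of this vendored copy
)

type Env struct {
	PackageName string
	GoVersion   string
}

func main() {
	var env Env
	// TitleUnderscore maps PackageName -> package_name, GoVersion -> go_version.
	err := ini.MapToWithMapper(&env, ini.TitleUnderscore,
		[]byte("package_name = ini\ngo_version = 1.20\n"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", env) // {PackageName:ini GoVersion:1.20}
}
```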
func (s *Section) parseFieldName(raw, actual string) string {
if len(actual) > 0 {
return actual
}
if s.f.NameMapper != nil {
return s.f.NameMapper(raw)
}
return raw
}
func parseDelim(actual string) string {
if len(actual) > 0 {
return actual
}
return ","
}
var reflectTime = reflect.TypeOf(time.Now()).Kind()
// setSliceWithProperType sets proper values to slice based on its type.
func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
var strs []string
if allowShadow {
strs = key.StringsWithShadows(delim)
} else {
strs = key.Strings(delim)
}
numVals := len(strs)
if numVals == 0 {
return nil
}
var vals interface{}
var err error
sliceOf := field.Type().Elem().Kind()
switch sliceOf {
case reflect.String:
vals = strs
case reflect.Int:
vals, err = key.parseInts(strs, true, false)
case reflect.Int64:
vals, err = key.parseInt64s(strs, true, false)
case reflect.Uint:
vals, err = key.parseUints(strs, true, false)
case reflect.Uint64:
vals, err = key.parseUint64s(strs, true, false)
case reflect.Float64:
vals, err = key.parseFloat64s(strs, true, false)
case reflect.Bool:
vals, err = key.parseBools(strs, true, false)
case reflectTime:
vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false)
default:
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
}
if err != nil && isStrict {
return err
}
slice := reflect.MakeSlice(field.Type(), numVals, numVals)
for i := 0; i < numVals; i++ {
switch sliceOf {
case reflect.String:
slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
case reflect.Int:
slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
case reflect.Int64:
slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
case reflect.Uint:
slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
case reflect.Uint64:
slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
case reflect.Float64:
slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
case reflect.Bool:
slice.Index(i).Set(reflect.ValueOf(vals.([]bool)[i]))
case reflectTime:
slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
}
}
field.Set(slice)
return nil
}
func wrapStrictError(err error, isStrict bool) error {
if isStrict {
return err
}
return nil
}
// setWithProperType sets proper value to field based on its type,
// but it does not return error for failing parsing,
// because we want to use default value that is already assigned to struct.
func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
vt := t
isPtr := t.Kind() == reflect.Ptr
if isPtr {
vt = t.Elem()
}
switch vt.Kind() {
case reflect.String:
stringVal := key.String()
if isPtr {
field.Set(reflect.ValueOf(&stringVal))
} else if len(stringVal) > 0 {
field.SetString(key.String())
}
case reflect.Bool:
boolVal, err := key.Bool()
if err != nil {
return wrapStrictError(err, isStrict)
}
if isPtr {
field.Set(reflect.ValueOf(&boolVal))
} else {
field.SetBool(boolVal)
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
// ParseDuration will not return err for `0`, so check the type name
if vt.Name() == "Duration" {
durationVal, err := key.Duration()
if err != nil {
if intVal, err := key.Int64(); err == nil {
field.SetInt(intVal)
return nil
}
return wrapStrictError(err, isStrict)
}
if isPtr {
field.Set(reflect.ValueOf(&durationVal))
} else if int64(durationVal) > 0 {
field.Set(reflect.ValueOf(durationVal))
}
return nil
}
intVal, err := key.Int64()
if err != nil {
return wrapStrictError(err, isStrict)
}
if isPtr {
pv := reflect.New(t.Elem())
pv.Elem().SetInt(intVal)
field.Set(pv)
} else {
field.SetInt(intVal)
}
// byte is an alias for uint8, so supporting uint8 breaks support for byte
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
durationVal, err := key.Duration()
// Skip zero value
if err == nil && uint64(durationVal) > 0 {
if isPtr {
field.Set(reflect.ValueOf(&durationVal))
} else {
field.Set(reflect.ValueOf(durationVal))
}
return nil
}
uintVal, err := key.Uint64()
if err != nil {
return wrapStrictError(err, isStrict)
}
if isPtr {
pv := reflect.New(t.Elem())
pv.Elem().SetUint(uintVal)
field.Set(pv)
} else {
field.SetUint(uintVal)
}
case reflect.Float32, reflect.Float64:
floatVal, err := key.Float64()
if err != nil {
return wrapStrictError(err, isStrict)
}
if isPtr {
pv := reflect.New(t.Elem())
pv.Elem().SetFloat(floatVal)
field.Set(pv)
} else {
field.SetFloat(floatVal)
}
case reflectTime:
timeVal, err := key.Time()
if err != nil {
return wrapStrictError(err, isStrict)
}
if isPtr {
field.Set(reflect.ValueOf(&timeVal))
} else {
field.Set(reflect.ValueOf(timeVal))
}
case reflect.Slice:
return setSliceWithProperType(key, field, delim, allowShadow, isStrict)
default:
return fmt.Errorf("unsupported type %q", t)
}
return nil
}
func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool) {
opts := strings.SplitN(tag, ",", 4)
rawName = opts[0]
if len(opts) > 1 {
omitEmpty = opts[1] == "omitempty"
}
if len(opts) > 2 {
allowShadow = opts[2] == "allowshadow"
}
if len(opts) > 3 {
allowNonUnique = opts[3] == "nonunique"
}
return rawName, omitEmpty, allowShadow, allowNonUnique
}
func (s *Section) mapToField(val reflect.Value, isStrict bool) error {
if val.Kind() == reflect.Ptr {
val = val.Elem()
}
typ := val.Type()
for i := 0; i < typ.NumField(); i++ {
field := val.Field(i)
tpField := typ.Field(i)
tag := tpField.Tag.Get("ini")
if tag == "-" {
continue
}
rawName, _, allowShadow, allowNonUnique := parseTagOptions(tag)
fieldName := s.parseFieldName(tpField.Name, rawName)
if len(fieldName) == 0 || !field.CanSet() {
continue
}
isStruct := tpField.Type.Kind() == reflect.Struct
isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct
isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
if isAnonymous {
field.Set(reflect.New(tpField.Type.Elem()))
}
if isAnonymous || isStruct || isStructPtr {
if sec, err := s.f.GetSection(fieldName); err == nil {
// Only set the field to non-nil struct value if we have a section for it.
// Otherwise, we end up with a non-nil struct ptr even though there is no data.
if isStructPtr && field.IsNil() {
field.Set(reflect.New(tpField.Type.Elem()))
}
if err = sec.mapToField(field, isStrict); err != nil {
return fmt.Errorf("map to field %q: %v", fieldName, err)
}
continue
}
}
// Map non-unique sections
if allowNonUnique && tpField.Type.Kind() == reflect.Slice {
newField, err := s.mapToSlice(fieldName, field, isStrict)
if err != nil {
return fmt.Errorf("map to slice %q: %v", fieldName, err)
}
field.Set(newField)
continue
}
if key, err := s.GetKey(fieldName); err == nil {
delim := parseDelim(tpField.Tag.Get("delim"))
if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil {
return fmt.Errorf("set field %q: %v", fieldName, err)
}
}
}
return nil
}
// mapToSlice maps all sections with the same name and returns the new value.
// The type of the Value must be a slice.
func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (reflect.Value, error) {
secs, err := s.f.SectionsByName(secName)
if err != nil {
return reflect.Value{}, err
}
typ := val.Type().Elem()
for _, sec := range secs {
elem := reflect.New(typ)
if err = sec.mapToField(elem, isStrict); err != nil {
return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err)
}
val = reflect.Append(val, elem.Elem())
}
return val, nil
}
// mapTo maps a section to object v.
func (s *Section) mapTo(v interface{}, isStrict bool) error {
typ := reflect.TypeOf(v)
val := reflect.ValueOf(v)
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
val = val.Elem()
} else {
return errors.New("not a pointer to a struct")
}
if typ.Kind() == reflect.Slice {
newField, err := s.mapToSlice(s.name, val, isStrict)
if err != nil {
return err
}
val.Set(newField)
return nil
}
return s.mapToField(val, isStrict)
}
// MapTo maps section to given struct.
func (s *Section) MapTo(v interface{}) error {
return s.mapTo(v, false)
}
// StrictMapTo maps section to given struct in strict mode,
// which returns all possible error including value parsing error.
func (s *Section) StrictMapTo(v interface{}) error {
return s.mapTo(v, true)
}
// MapTo maps file to given struct.
func (f *File) MapTo(v interface{}) error {
return f.Section("").MapTo(v)
}
// StrictMapTo maps file to given struct in strict mode,
// which returns all possible error including value parsing error.
func (f *File) StrictMapTo(v interface{}) error {
return f.Section("").StrictMapTo(v)
}
// MapToWithMapper maps data sources to given struct with name mapper.
func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
cfg, err := Load(source, others...)
if err != nil {
return err
}
cfg.NameMapper = mapper
return cfg.MapTo(v)
}
// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode,
// which returns all possible error including value parsing error.
func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
cfg, err := Load(source, others...)
if err != nil {
return err
}
cfg.NameMapper = mapper
return cfg.StrictMapTo(v)
}
// MapTo maps data sources to given struct.
func MapTo(v, source interface{}, others ...interface{}) error {
return MapToWithMapper(v, nil, source, others...)
}
// StrictMapTo maps data sources to given struct in strict mode,
// which returns all possible error including value parsing error.
func StrictMapTo(v, source interface{}, others ...interface{}) error {
return StrictMapToWithMapper(v, nil, source, others...)
}
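
A sketch of the MapTo path: keys from the default section are decoded into tagged struct fields, with the delim tag controlling how slices are split (same assumed import path; the config content is made up):

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1" // assumed upstream path of this vendored copy
)

type ServerConfig struct {
	Name  string `ini:"name"`
	Ports []int  `ini:"ports" delim:","`
	Debug bool   `ini:"debug"`
}

func main() {
	var sc ServerConfig
	err := ini.MapTo(&sc, []byte("name = dbhub\nports = 80,443\ndebug = true\n"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", sc) // {Name:dbhub Ports:[80 443] Debug:true}
}
```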
// reflectSliceWithProperType does the opposite thing as setSliceWithProperType.
func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error {
slice := field.Slice(0, field.Len())
if field.Len() == 0 {
return nil
}
sliceOf := field.Type().Elem().Kind()
if allowShadow {
var keyWithShadows *Key
for i := 0; i < field.Len(); i++ {
var val string
switch sliceOf {
case reflect.String:
val = slice.Index(i).String()
case reflect.Int, reflect.Int64:
val = fmt.Sprint(slice.Index(i).Int())
case reflect.Uint, reflect.Uint64:
val = fmt.Sprint(slice.Index(i).Uint())
case reflect.Float64:
val = fmt.Sprint(slice.Index(i).Float())
case reflect.Bool:
val = fmt.Sprint(slice.Index(i).Bool())
case reflectTime:
val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339)
default:
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
}
if i == 0 {
keyWithShadows = newKey(key.s, key.name, val)
} else {
_ = keyWithShadows.AddShadow(val)
}
}
key = keyWithShadows
return nil
}
var buf bytes.Buffer
for i := 0; i < field.Len(); i++ {
switch sliceOf {
case reflect.String:
buf.WriteString(slice.Index(i).String())
case reflect.Int, reflect.Int64:
buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
case reflect.Uint, reflect.Uint64:
buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
case reflect.Float64:
buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
case reflect.Bool:
buf.WriteString(fmt.Sprint(slice.Index(i).Bool()))
case reflectTime:
buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
default:
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
}
buf.WriteString(delim)
}
key.SetValue(buf.String()[:buf.Len()-len(delim)])
return nil
}
// reflectWithProperType does the opposite thing as setWithProperType.
func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error {
switch t.Kind() {
case reflect.String:
key.SetValue(field.String())
case reflect.Bool:
key.SetValue(fmt.Sprint(field.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
key.SetValue(fmt.Sprint(field.Int()))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
key.SetValue(fmt.Sprint(field.Uint()))
case reflect.Float32, reflect.Float64:
key.SetValue(fmt.Sprint(field.Float()))
case reflectTime:
key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
case reflect.Slice:
return reflectSliceWithProperType(key, field, delim, allowShadow)
case reflect.Ptr:
if !field.IsNil() {
return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow)
}
default:
return fmt.Errorf("unsupported type %q", t)
}
return nil
}
// CR: copied from encoding/json/encode.go with modifications of time.Time support.
// TODO: add more test coverage.
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
case reflectTime:
t, ok := v.Interface().(time.Time)
return ok && t.IsZero()
}
return false
}
// StructReflector is the interface implemented by struct types that can extract themselves into INI objects.
type StructReflector interface {
ReflectINIStruct(*File) error
}
func (s *Section) reflectFrom(val reflect.Value) error {
if val.Kind() == reflect.Ptr {
val = val.Elem()
}
typ := val.Type()
for i := 0; i < typ.NumField(); i++ {
if !val.Field(i).CanInterface() {
continue
}
field := val.Field(i)
tpField := typ.Field(i)
tag := tpField.Tag.Get("ini")
if tag == "-" {
continue
}
rawName, omitEmpty, allowShadow, allowNonUnique := parseTagOptions(tag)
if omitEmpty && isEmptyValue(field) {
continue
}
if r, ok := field.Interface().(StructReflector); ok {
return r.ReflectINIStruct(s.f)
}
fieldName := s.parseFieldName(tpField.Name, rawName)
if len(fieldName) == 0 || !field.CanSet() {
continue
}
if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
(tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
// Note: The only error here is section doesn't exist.
sec, err := s.f.GetSection(fieldName)
if err != nil {
// Note: fieldName can never be empty here, ignore error.
sec, _ = s.f.NewSection(fieldName)
}
// Add comment from comment tag
if len(sec.Comment) == 0 {
sec.Comment = tpField.Tag.Get("comment")
}
if err = sec.reflectFrom(field); err != nil {
return fmt.Errorf("reflect from field %q: %v", fieldName, err)
}
continue
}
if allowNonUnique && tpField.Type.Kind() == reflect.Slice {
slice := field.Slice(0, field.Len())
if field.Len() == 0 {
return nil
}
sliceOf := field.Type().Elem().Kind()
for i := 0; i < field.Len(); i++ {
if sliceOf != reflect.Struct && sliceOf != reflect.Ptr {
return fmt.Errorf("field %q is not a slice of pointer or struct", fieldName)
}
sec, err := s.f.NewSection(fieldName)
if err != nil {
return err
}
// Add comment from comment tag
if len(sec.Comment) == 0 {
sec.Comment = tpField.Tag.Get("comment")
}
if err := sec.reflectFrom(slice.Index(i)); err != nil {
return fmt.Errorf("reflect from field %q: %v", fieldName, err)
}
}
continue
}
// Note: Same reason as section.
key, err := s.GetKey(fieldName)
if err != nil {
key, _ = s.NewKey(fieldName, "")
}
// Add comment from comment tag
if len(key.Comment) == 0 {
key.Comment = tpField.Tag.Get("comment")
}
delim := parseDelim(tpField.Tag.Get("delim"))
if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil {
return fmt.Errorf("reflect field %q: %v", fieldName, err)
}
}
return nil
}
// ReflectFrom reflects section from given struct. It overwrites existing ones.
func (s *Section) ReflectFrom(v interface{}) error {
typ := reflect.TypeOf(v)
val := reflect.ValueOf(v)
if s.name != DefaultSection && s.f.options.AllowNonUniqueSections &&
(typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr) {
// Clear sections to make sure none exists before adding the new ones
s.f.DeleteSection(s.name)
if typ.Kind() == reflect.Ptr {
sec, err := s.f.NewSection(s.name)
if err != nil {
return err
}
return sec.reflectFrom(val.Elem())
}
slice := val.Slice(0, val.Len())
sliceOf := val.Type().Elem().Kind()
if sliceOf != reflect.Ptr {
return fmt.Errorf("not a slice of pointers")
}
for i := 0; i < slice.Len(); i++ {
sec, err := s.f.NewSection(s.name)
if err != nil {
return err
}
err = sec.reflectFrom(slice.Index(i))
if err != nil {
return fmt.Errorf("reflect from %dth field: %v", i, err)
}
}
return nil
}
if typ.Kind() == reflect.Ptr {
val = val.Elem()
} else {
return errors.New("not a pointer to a struct")
}
return s.reflectFrom(val)
}
// ReflectFrom reflects file from given struct.
func (f *File) ReflectFrom(v interface{}) error {
return f.Section("").ReflectFrom(v)
}
// ReflectFromWithMapper reflects data sources from given struct with name mapper.
func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
cfg.NameMapper = mapper
return cfg.ReflectFrom(v)
}
// ReflectFrom reflects data sources from given struct.
func ReflectFrom(cfg *File, v interface{}) error {
return ReflectFromWithMapper(cfg, v, nil)
}
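
A sketch going the other way with ReflectFrom: struct fields, together with the ini, comment, delim and omitempty tag options parsed above, are written back into an INI file (same assumed import path; the struct is made up):

```go
package main

import (
	"log"
	"os"

	"gopkg.in/ini.v1" // assumed upstream path of this vendored copy
)

type Author struct {
	Name      string   `comment:"Author name"`
	Website   string   `ini:"site,omitempty"`
	Languages []string `delim:"|"`
}

func main() {
	cfg := ini.Empty()
	a := &Author{Name: "Unknwon", Languages: []string{"Go", "SQL"}}

	// omitempty drops the empty Website field; the comment tag becomes a key comment.
	if err := ini.ReflectFrom(cfg, a); err != nil {
		log.Fatal(err)
	}
	if _, err := cfg.WriteTo(os.Stdout); err != nil {
		log.Fatal(err)
	}
}
```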

View File

@@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

View File

@@ -1,26 +0,0 @@
language: go
go:
- 1.13.1
- tip
matrix:
allow_failures:
- go: tip
notifications:
email:
recipients: dean.karn@gmail.com
on_success: change
on_failure: always
before_install:
- go install github.com/mattn/goveralls
# Only clone the most recent commit.
git:
depth: 1
script:
- go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./...
after_success: |
goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN

View File

@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2016 Go Playground
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,172 +0,0 @@
## locales
<img align="right" src="https://raw.githubusercontent.com/go-playground/locales/master/logo.png">![Project status](https://img.shields.io/badge/version-0.13.0-green.svg)
[![Build Status](https://travis-ci.org/go-playground/locales.svg?branch=master)](https://travis-ci.org/go-playground/locales)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/locales)](https://goreportcard.com/report/github.com/go-playground/locales)
[![GoDoc](https://godoc.org/github.com/go-playground/locales?status.svg)](https://godoc.org/github.com/go-playground/locales)
![License](https://img.shields.io/dub/l/vibe-d.svg)
[![Gitter](https://badges.gitter.im/go-playground/locales.svg)](https://gitter.im/go-playground/locales?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
Locales is a set of locales generated from the [Unicode CLDR Project](http://cldr.unicode.org/) which can be used independently or within
an i18n package; these were built for use with, but not exclusive to, [Universal Translator](https://github.com/go-playground/universal-translator).
Features
--------
- [x] Rules generated from the latest [CLDR](http://cldr.unicode.org/index/downloads) data, v31.0.1
- [x] Contains Cardinal, Ordinal and Range Plural Rules
- [x] Contains Month, Weekday and Timezone translations built in
- [x] Contains Date & Time formatting functions
- [x] Contains Number, Currency, Accounting and Percent formatting functions
- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere )
Full Tests
--------------------
I could sure use your help adding tests for every locale; it is a huge undertaking and I just don't have the free time to do it all at the moment.
Any help would be **greatly appreciated!** Please see this [issue](https://github.com/go-playground/locales/issues/1) for details.
Installation
-----------
Use go get
```shell
go get github.com/go-playground/locales
```
NOTES
--------
You'll notice most return types are []byte. This is because most of the time the results will be concatenated with a larger body
of text, and returning bytes can avoid some allocations if you are already appending to a byte slice; otherwise just cast to string.
Usage
-------
```go
package main
import (
"fmt"
"time"
"github.com/go-playground/locales/currency"
"github.com/go-playground/locales/en_CA"
)
func main() {
loc, _ := time.LoadLocation("America/Toronto")
datetime := time.Date(2016, 02, 03, 9, 0, 1, 0, loc)
l := en_CA.New()
// Dates
fmt.Println(l.FmtDateFull(datetime))
fmt.Println(l.FmtDateLong(datetime))
fmt.Println(l.FmtDateMedium(datetime))
fmt.Println(l.FmtDateShort(datetime))
// Times
fmt.Println(l.FmtTimeFull(datetime))
fmt.Println(l.FmtTimeLong(datetime))
fmt.Println(l.FmtTimeMedium(datetime))
fmt.Println(l.FmtTimeShort(datetime))
// Months Wide
fmt.Println(l.MonthWide(time.January))
fmt.Println(l.MonthWide(time.February))
fmt.Println(l.MonthWide(time.March))
// ...
// Months Abbreviated
fmt.Println(l.MonthAbbreviated(time.January))
fmt.Println(l.MonthAbbreviated(time.February))
fmt.Println(l.MonthAbbreviated(time.March))
// ...
// Months Narrow
fmt.Println(l.MonthNarrow(time.January))
fmt.Println(l.MonthNarrow(time.February))
fmt.Println(l.MonthNarrow(time.March))
// ...
// Weekdays Wide
fmt.Println(l.WeekdayWide(time.Sunday))
fmt.Println(l.WeekdayWide(time.Monday))
fmt.Println(l.WeekdayWide(time.Tuesday))
// ...
// Weekdays Abbreviated
fmt.Println(l.WeekdayAbbreviated(time.Sunday))
fmt.Println(l.WeekdayAbbreviated(time.Monday))
fmt.Println(l.WeekdayAbbreviated(time.Tuesday))
// ...
// Weekdays Short
fmt.Println(l.WeekdayShort(time.Sunday))
fmt.Println(l.WeekdayShort(time.Monday))
fmt.Println(l.WeekdayShort(time.Tuesday))
// ...
// Weekdays Narrow
fmt.Println(l.WeekdayNarrow(time.Sunday))
fmt.Println(l.WeekdayNarrow(time.Monday))
fmt.Println(l.WeekdayNarrow(time.Tuesday))
// ...
var f64 float64
f64 = -10356.4523
// Number
fmt.Println(l.FmtNumber(f64, 2))
// Currency
fmt.Println(l.FmtCurrency(f64, 2, currency.CAD))
fmt.Println(l.FmtCurrency(f64, 2, currency.USD))
// Accounting
fmt.Println(l.FmtAccounting(f64, 2, currency.CAD))
fmt.Println(l.FmtAccounting(f64, 2, currency.USD))
f64 = 78.12
// Percent
fmt.Println(l.FmtPercent(f64, 0))
// Plural Rules for locale, so you know what rules you must cover
fmt.Println(l.PluralsCardinal())
fmt.Println(l.PluralsOrdinal())
// Cardinal Plural Rules
fmt.Println(l.CardinalPluralRule(1, 0))
fmt.Println(l.CardinalPluralRule(1.0, 0))
fmt.Println(l.CardinalPluralRule(1.0, 1))
fmt.Println(l.CardinalPluralRule(3, 0))
// Ordinal Plural Rules
fmt.Println(l.OrdinalPluralRule(21, 0)) // 21st
fmt.Println(l.OrdinalPluralRule(22, 0)) // 22nd
fmt.Println(l.OrdinalPluralRule(33, 0)) // 33rd
fmt.Println(l.OrdinalPluralRule(34, 0)) // 34th
// Range Plural Rules
fmt.Println(l.RangePluralRule(1, 0, 1, 0)) // 1-1
fmt.Println(l.RangePluralRule(1, 0, 2, 0)) // 1-2
fmt.Println(l.RangePluralRule(5, 0, 8, 0)) // 5-8
}
```
NOTES:
-------
These rules were generated from the [Unicode CLDR Project](http://cldr.unicode.org/). If you encounter any issues,
I strongly encourage contributing to the CLDR project to get the locale information corrected; the next time
these locales are regenerated, the fix will come with them.
I do however realize that time constraints are often important, so there are two options:
1. Create your own locale: copy, paste and modify, and ensure it complies with the `Translator` interface.
2. Add an exception in the locale generation code directly; once regenerated, the fix will be in place.
Please do not make fixes inside the locale files, they WILL get overwritten when the locales are regenerated.
License
------
Distributed under MIT License, please see license file in code for more details.

View File

@@ -1,308 +0,0 @@
package currency
// Type is the currency type associated with the locales currency enum
type Type int
// locale currencies
const (
ADP Type = iota
AED
AFA
AFN
ALK
ALL
AMD
ANG
AOA
AOK
AON
AOR
ARA
ARL
ARM
ARP
ARS
ATS
AUD
AWG
AZM
AZN
BAD
BAM
BAN
BBD
BDT
BEC
BEF
BEL
BGL
BGM
BGN
BGO
BHD
BIF
BMD
BND
BOB
BOL
BOP
BOV
BRB
BRC
BRE
BRL
BRN
BRR
BRZ
BSD
BTN
BUK
BWP
BYB
BYN
BYR
BZD
CAD
CDF
CHE
CHF
CHW
CLE
CLF
CLP
CNH
CNX
CNY
COP
COU
CRC
CSD
CSK
CUC
CUP
CVE
CYP
CZK
DDM
DEM
DJF
DKK
DOP
DZD
ECS
ECV
EEK
EGP
ERN
ESA
ESB
ESP
ETB
EUR
FIM
FJD
FKP
FRF
GBP
GEK
GEL
GHC
GHS
GIP
GMD
GNF
GNS
GQE
GRD
GTQ
GWE
GWP
GYD
HKD
HNL
HRD
HRK
HTG
HUF
IDR
IEP
ILP
ILR
ILS
INR
IQD
IRR
ISJ
ISK
ITL
JMD
JOD
JPY
KES
KGS
KHR
KMF
KPW
KRH
KRO
KRW
KWD
KYD
KZT
LAK
LBP
LKR
LRD
LSL
LTL
LTT
LUC
LUF
LUL
LVL
LVR
LYD
MAD
MAF
MCF
MDC
MDL
MGA
MGF
MKD
MKN
MLF
MMK
MNT
MOP
MRO
MTL
MTP
MUR
MVP
MVR
MWK
MXN
MXP
MXV
MYR
MZE
MZM
MZN
NAD
NGN
NIC
NIO
NLG
NOK
NPR
NZD
OMR
PAB
PEI
PEN
PES
PGK
PHP
PKR
PLN
PLZ
PTE
PYG
QAR
RHD
ROL
RON
RSD
RUB
RUR
RWF
SAR
SBD
SCR
SDD
SDG
SDP
SEK
SGD
SHP
SIT
SKK
SLL
SOS
SRD
SRG
SSP
STD
STN
SUR
SVC
SYP
SZL
THB
TJR
TJS
TMM
TMT
TND
TOP
TPE
TRL
TRY
TTD
TWD
TZS
UAH
UAK
UGS
UGX
USD
USN
USS
UYI
UYP
UYU
UZS
VEB
VEF
VND
VNN
VUV
WST
XAF
XAG
XAU
XBA
XBB
XBC
XBD
XCD
XDR
XEU
XFO
XFU
XOF
XPD
XPF
XPT
XRE
XSU
XTS
XUA
XXX
YDD
YER
YUD
YUM
YUN
YUR
ZAL
ZAR
ZMK
ZMW
ZRN
ZRZ
ZWD
ZWL
ZWR
)

View File

@@ -1,5 +0,0 @@
module github.com/go-playground/locales
go 1.13
require golang.org/x/text v0.3.2

View File

@@ -1,3 +0,0 @@
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

Binary file not shown (image removed, previously 36 KiB).

View File

@@ -1,293 +0,0 @@
package locales
import (
"strconv"
"time"
"github.com/go-playground/locales/currency"
)
// // ErrBadNumberValue is returned when the number passed for
// // plural rule determination cannot be parsed
// type ErrBadNumberValue struct {
// NumberValue string
// InnerError error
// }
// // Error returns ErrBadNumberValue error string
// func (e *ErrBadNumberValue) Error() string {
// return fmt.Sprintf("Invalid Number Value '%s' %s", e.NumberValue, e.InnerError)
// }
// var _ error = new(ErrBadNumberValue)
// PluralRule denotes the type of plural rules
type PluralRule int
// PluralRule's
const (
PluralRuleUnknown PluralRule = iota
PluralRuleZero // zero
PluralRuleOne // one - singular
PluralRuleTwo // two - dual
PluralRuleFew // few - paucal
PluralRuleMany // many - also used for fractions if they have a separate class
PluralRuleOther // other - required—general plural form—also used if the language only has a single form
)
const (
pluralsString = "UnknownZeroOneTwoFewManyOther"
)
// Translator encapsulates an instance of a locale
// NOTE: some values are returned as a []byte just in case the caller
// wishes to add more and can help avoid allocations; otherwise just cast as string
type Translator interface {
// The following Functions are for overriding, debugging or developing
// with a Translator Locale
// Locale returns the string value of the translator
Locale() string
// returns an array of cardinal plural rules associated
// with this translator
PluralsCardinal() []PluralRule
// returns an array of ordinal plural rules associated
// with this translator
PluralsOrdinal() []PluralRule
// returns an array of range plural rules associated
// with this translator
PluralsRange() []PluralRule
// returns the cardinal PluralRule given 'num' and digits/precision of 'v' for locale
CardinalPluralRule(num float64, v uint64) PluralRule
// returns the ordinal PluralRule given 'num' and digits/precision of 'v' for locale
OrdinalPluralRule(num float64, v uint64) PluralRule
// returns the ordinal PluralRule given 'num1', 'num2' and digits/precision of 'v1' and 'v2' for locale
RangePluralRule(num1 float64, v1 uint64, num2 float64, v2 uint64) PluralRule
// returns the locales abbreviated month given the 'month' provided
MonthAbbreviated(month time.Month) string
// returns the locales abbreviated months
MonthsAbbreviated() []string
// returns the locales narrow month given the 'month' provided
MonthNarrow(month time.Month) string
// returns the locales narrow months
MonthsNarrow() []string
// returns the locales wide month given the 'month' provided
MonthWide(month time.Month) string
// returns the locales wide months
MonthsWide() []string
// returns the locales abbreviated weekday given the 'weekday' provided
WeekdayAbbreviated(weekday time.Weekday) string
// returns the locales abbreviated weekdays
WeekdaysAbbreviated() []string
// returns the locales narrow weekday given the 'weekday' provided
WeekdayNarrow(weekday time.Weekday) string
// WeekdaysNarrow returns the locales narrow weekdays
WeekdaysNarrow() []string
// returns the locales short weekday given the 'weekday' provided
WeekdayShort(weekday time.Weekday) string
// returns the locales short weekdays
WeekdaysShort() []string
// returns the locales wide weekday given the 'weekday' provided
WeekdayWide(weekday time.Weekday) string
// returns the locales wide weekdays
WeekdaysWide() []string
// The following functions are common formatting functions for the Translator's locale
// returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v'
FmtNumber(num float64, v uint64) string
// returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v'
// NOTE: 'num' passed into FmtPercent is assumed to be in percent already
FmtPercent(num float64, v uint64) string
// returns the currency representation of 'num' with digits/precision of 'v' for locale
FmtCurrency(num float64, v uint64, currency currency.Type) string
// returns the currency representation of 'num' with digits/precision of 'v' for locale
// in accounting notation.
FmtAccounting(num float64, v uint64, currency currency.Type) string
// returns the short date representation of 't' for locale
FmtDateShort(t time.Time) string
// returns the medium date representation of 't' for locale
FmtDateMedium(t time.Time) string
// returns the long date representation of 't' for locale
FmtDateLong(t time.Time) string
// returns the full date representation of 't' for locale
FmtDateFull(t time.Time) string
// returns the short time representation of 't' for locale
FmtTimeShort(t time.Time) string
// returns the medium time representation of 't' for locale
FmtTimeMedium(t time.Time) string
// returns the long time representation of 't' for locale
FmtTimeLong(t time.Time) string
// returns the full time representation of 't' for locale
FmtTimeFull(t time.Time) string
}
// String returns the string value of PluralRule
func (p PluralRule) String() string {
switch p {
case PluralRuleZero:
return pluralsString[7:11]
case PluralRuleOne:
return pluralsString[11:14]
case PluralRuleTwo:
return pluralsString[14:17]
case PluralRuleFew:
return pluralsString[17:20]
case PluralRuleMany:
return pluralsString[20:24]
case PluralRuleOther:
return pluralsString[24:]
default:
return pluralsString[:7]
}
}
//
// Precision Notes:
//
// must specify a precision >= 0, and here is why https://play.golang.org/p/LyL90U0Vyh
//
// v := float64(3.141)
// i := float64(int64(v))
//
// fmt.Println(v - i)
//
// or
//
// s := strconv.FormatFloat(v-i, 'f', -1, 64)
// fmt.Println(s)
//
// these will not print what you'd expect: 0.14100000000000001
// and so this library requires a precision to be specified, or
// inaccurate plural rules could be applied.
//
//
//
// n - absolute value of the source number (integer and decimals).
// i - integer digits of n.
// v - number of visible fraction digits in n, with trailing zeros.
// w - number of visible fraction digits in n, without trailing zeros.
// f - visible fractional digits in n, with trailing zeros.
// t - visible fractional digits in n, without trailing zeros.
//
//
// Func(num float64, v uint64) // v = digits/precision and prevents -1 as a special case, as this can lead to very unexpected behaviour; see the precision notes above.
//
// n := math.Abs(num)
// i := int64(n)
// v := v
//
//
// w := strconv.FormatFloat(num-float64(i), 'f', int(v), 64) // then parse backwards on the string until there are no more zeros...
// f := strconv.FormatFloat(n, 'f', int(v), 64) // then turn everything after the decimal into an int64
// t := strconv.FormatFloat(n, 'f', int(v), 64) // then parse backwards on the string until there are no more zeros...
//
//
//
// General Inclusion Rules
// - v will always be available inherently
// - all require n
// - w requires i
//
// W returns the number of visible fraction digits in N, without trailing zeros.
func W(n float64, v uint64) (w int64) {
s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
// will either be '0' or '0.xxxx', so if the length is 1 then w will be zero
// otherwise need to parse
if len(s) != 1 {
s = s[2:]
end := len(s) + 1
for i := end; i >= 0; i-- {
if s[i] != '0' {
end = i + 1
break
}
}
w = int64(len(s[:end]))
}
return
}
// F returns the visible fractional digits in N, with trailing zeros.
func F(n float64, v uint64) (f int64) {
s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
// will either be '0' or '0.xxxx', so if the length is 1 then f will be zero
// otherwise need to parse
if len(s) != 1 {
// ignoring error, because it can't fail as we generated
// the string internally from a real number
f, _ = strconv.ParseInt(s[2:], 10, 64)
}
return
}
// T returns the visible fractional digits in N, without trailing zeros.
func T(n float64, v uint64) (t int64) {
s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
// will either be '0' or '0.xxxx', so if the length is 1 then t will be zero
// otherwise need to parse
if len(s) != 1 {
s = s[2:]
end := len(s) + 1
for i := end; i >= 0; i-- {
if s[i] != '0' {
end = i + 1
break
}
}
// ignoring error, because it can't fail as we generated
// the string internally from a real number
t, _ = strconv.ParseInt(s[:end], 10, 64)
}
return
}
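
A worked illustration of the CLDR operands described in the comment block above, computed directly with the standard library rather than the package helpers (the sample number and precision are arbitrary):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	num, v := 1.23, 3 // source number and requested precision

	i := int64(num)                                         // integer digits: 1
	frac := strconv.FormatFloat(num-float64(i), 'f', v, 64) // "0.230"
	f := strings.TrimPrefix(frac, "0.")                     // "230": visible fraction digits, with trailing zeros
	t := strings.TrimRight(f, "0")                          // "23":  visible fraction digits, without trailing zeros

	fmt.Printf("n=%v i=%d v=%d w=%d f=%s t=%s\n", num, i, v, len(t), f, t)
	// Expected: n=1.23 i=1 v=3 w=2 f=230 t=23
}
```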

View File

@@ -1,25 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
*.coverprofile

View File

@@ -1,27 +0,0 @@
language: go
go:
- 1.13.4
- tip
matrix:
allow_failures:
- go: tip
notifications:
email:
recipients: dean.karn@gmail.com
on_success: change
on_failure: always
before_install:
- go install github.com/mattn/goveralls
# Only clone the most recent commit.
git:
depth: 1
script:
- go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./...
after_success: |
[ $TRAVIS_GO_VERSION = 1.13.4 ] &&
goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN

View File

@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2016 Go Playground
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,89 +0,0 @@
## universal-translator
<img align="right" src="https://raw.githubusercontent.com/go-playground/universal-translator/master/logo.png">![Project status](https://img.shields.io/badge/version-0.17.0-green.svg)
[![Build Status](https://travis-ci.org/go-playground/universal-translator.svg?branch=master)](https://travis-ci.org/go-playground/universal-translator)
[![Coverage Status](https://coveralls.io/repos/github/go-playground/universal-translator/badge.svg)](https://coveralls.io/github/go-playground/universal-translator)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/universal-translator)](https://goreportcard.com/report/github.com/go-playground/universal-translator)
[![GoDoc](https://godoc.org/github.com/go-playground/universal-translator?status.svg)](https://godoc.org/github.com/go-playground/universal-translator)
![License](https://img.shields.io/dub/l/vibe-d.svg)
[![Gitter](https://badges.gitter.im/go-playground/universal-translator.svg)](https://gitter.im/go-playground/universal-translator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
Universal Translator is an i18n Translator for Go/Golang using CLDR data + pluralization rules
Why another i18n library?
--------------------------
Because none of the plural rules out there seem to be correct, including the previous implementation of this package,
I took it upon myself to create [locales](https://github.com/go-playground/locales) for everyone to use; this package
is a thin wrapper around [locales](https://github.com/go-playground/locales) that stores and translates text for
use in your applications.
Features
--------
- [x] Rules generated from the [CLDR](http://cldr.unicode.org/index/downloads) data, v30.0.3
- [x] Contains Cardinal, Ordinal and Range Plural Rules
- [x] Contains Month, Weekday and Timezone translations built in
- [x] Contains Date & Time formatting functions
- [x] Contains Number, Currency, Accounting and Percent formatting functions
- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere )
- [x] Support loading translations from files
- [x] Exporting translations to file(s), mainly for getting them professionally translated
- [ ] Code generation for translation files -> Go code, i.e. after they have been professionally translated
- [ ] Tests for all languages, I need help with this, please see [here](https://github.com/go-playground/locales/issues/1)
Installation
-----------
Use go get
```shell
go get github.com/go-playground/universal-translator
```
Usage & Documentation
-------
Please see https://godoc.org/github.com/go-playground/universal-translator for usage docs
##### Examples:
- [Basic](https://github.com/go-playground/universal-translator/tree/master/_examples/basic)
- [Full - no files](https://github.com/go-playground/universal-translator/tree/master/_examples/full-no-files)
- [Full - with files](https://github.com/go-playground/universal-translator/tree/master/_examples/full-with-files)
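Since the usage docs are only linked above, here is a minimal sketch of registering and looking up a translation programmatically, assuming the public `ut.New` / `GetTranslator` / `Add` / `T` API and the `en` sub-package of locales (the key and text are made up):

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-playground/locales/en"
	ut "github.com/go-playground/universal-translator"
)

func main() {
	english := en.New()             // locales.Translator for English
	uni := ut.New(english, english) // fallback translator + supported locales
	trans, found := uni.GetTranslator("en")
	if !found {
		log.Fatal("translator not found")
	}

	// Plain substitution translation; {0} is replaced by the first parameter.
	if err := trans.Add("days-left", "You have {0} days left.", false); err != nil {
		log.Fatal(err)
	}

	msg, err := trans.T("days-left", "3")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(msg) // You have 3 days left.
}
```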
File formatting
--------------
Plain substitution, Cardinal, Ordinal and Range translations can all be contained within the same file(s);
they are only separated here for easy viewing.
##### Examples:
- [Formats](https://github.com/go-playground/universal-translator/tree/master/_examples/file-formats)
##### Basic Makeup
NOTE: not all fields are needed for all translation types, see [examples](https://github.com/go-playground/universal-translator/tree/master/_examples/file-formats)
```json
{
"locale": "en",
"key": "days-left",
"trans": "You have {0} day left.",
"type": "Cardinal",
"rule": "One",
"override": false
}
```
|Field|Description|
|---|---|
|locale|The locale for which the translation is for.|
|key|The translation key that will be used to store and lookup each translation; normally it is a string or integer.|
|trans|The actual translation text.|
|type|The type of translation: Cardinal, Ordinal, Range, or "" for a plain substitution (not required to be defined if plain is used).|
|rule|The plural rule the translation is for, e.g. One, Two, Few, Many or Other (not required to be defined if plain is used).|
|override|If you wish to override an existing translation that has already been registered, set this to 'true'. 99% of the time there is no need to define it.|
Help With Tests
---------------
To anyone interested in helping or contributing: I sure could use some help creating tests for each language.
Please see issue [here](https://github.com/go-playground/locales/issues/1) for details.
License
------
Distributed under MIT License, please see license file in code for more details.

View File

@@ -1,148 +0,0 @@
package ut
import (
"errors"
"fmt"
"github.com/go-playground/locales"
)
var (
// ErrUnknowTranslation indicates the translation could not be found
ErrUnknowTranslation = errors.New("Unknown Translation")
)
var _ error = new(ErrConflictingTranslation)
var _ error = new(ErrRangeTranslation)
var _ error = new(ErrOrdinalTranslation)
var _ error = new(ErrCardinalTranslation)
var _ error = new(ErrMissingPluralTranslation)
var _ error = new(ErrExistingTranslator)
// ErrExistingTranslator is the error representing a conflicting translator
type ErrExistingTranslator struct {
locale string
}
// Error returns ErrExistingTranslator's internal error text
func (e *ErrExistingTranslator) Error() string {
return fmt.Sprintf("error: conflicting translator for locale '%s'", e.locale)
}
// ErrConflictingTranslation is the error representing a conflicting translation
type ErrConflictingTranslation struct {
locale string
key interface{}
rule locales.PluralRule
text string
}
// Error returns ErrConflictingTranslation's internal error text
func (e *ErrConflictingTranslation) Error() string {
if _, ok := e.key.(string); !ok {
return fmt.Sprintf("error: conflicting key '%#v' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale)
}
return fmt.Sprintf("error: conflicting key '%s' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale)
}
// ErrRangeTranslation is the error representing a range translation error
type ErrRangeTranslation struct {
text string
}
// Error returns ErrRangeTranslation's internal error text
func (e *ErrRangeTranslation) Error() string {
return e.text
}
// ErrOrdinalTranslation is the error representing an ordinal translation error
type ErrOrdinalTranslation struct {
text string
}
// Error returns ErrOrdinalTranslation's internal error text
func (e *ErrOrdinalTranslation) Error() string {
return e.text
}
// ErrCardinalTranslation is the error representing a cardinal translation error
type ErrCardinalTranslation struct {
text string
}
// Error returns ErrCardinalTranslation's internal error text
func (e *ErrCardinalTranslation) Error() string {
return e.text
}
// ErrMissingPluralTranslation is the error signifying a missing translation given
// the locales plural rules.
type ErrMissingPluralTranslation struct {
locale string
key interface{}
rule locales.PluralRule
translationType string
}
// Error returns ErrMissingPluralTranslation's internal error text
func (e *ErrMissingPluralTranslation) Error() string {
if _, ok := e.key.(string); !ok {
return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%#v' and locale '%s'", e.translationType, e.rule, e.key, e.locale)
}
return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%s' and locale '%s'", e.translationType, e.rule, e.key, e.locale)
}
// ErrMissingBracket is the error representing a missing bracket in a translation
// eg. This is a {0 <-- missing ending '}'
type ErrMissingBracket struct {
locale string
key interface{}
text string
}
// Error returns ErrMissingBracket error message
func (e *ErrMissingBracket) Error() string {
return fmt.Sprintf("error: missing bracket '{}', in translation. locale: '%s' key: '%v' text: '%s'", e.locale, e.key, e.text)
}
// ErrBadParamSyntax is the error representing a bad parameter definition in a translation
// eg. This is a {must-be-int}
type ErrBadParamSyntax struct {
locale string
param string
key interface{}
text string
}
// Error returns ErrBadParamSyntax error message
func (e *ErrBadParamSyntax) Error() string {
return fmt.Sprintf("error: bad parameter syntax, missing parameter '%s' in translation. locale: '%s' key: '%v' text: '%s'", e.param, e.locale, e.key, e.text)
}
// import/export errors
// ErrMissingLocale is the error representing an expected locale that could
// not be found aka locale not registered with the UniversalTranslator Instance
type ErrMissingLocale struct {
locale string
}
// Error returns ErrMissingLocale's internal error text
func (e *ErrMissingLocale) Error() string {
return fmt.Sprintf("error: locale '%s' not registered.", e.locale)
}
// ErrBadPluralDefinition is the error representing an incorrect plural definition
// usually found within translations defined within files during the import process.
type ErrBadPluralDefinition struct {
tl translation
}
// Error returns ErrBadPluralDefinition's internal error text
func (e *ErrBadPluralDefinition) Error() string {
return fmt.Sprintf("error: bad plural definition '%#v'", e.tl)
}
@@ -1,5 +0,0 @@
module github.com/go-playground/universal-translator
go 1.13
require github.com/go-playground/locales v0.13.0
@@ -1,4 +0,0 @@
github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1,274 +0,0 @@
package ut
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"io"
"github.com/go-playground/locales"
)
type translation struct {
Locale string `json:"locale"`
Key interface{} `json:"key"` // either string or integer
Translation string `json:"trans"`
PluralType string `json:"type,omitempty"`
PluralRule string `json:"rule,omitempty"`
OverrideExisting bool `json:"override,omitempty"`
}
const (
cardinalType = "Cardinal"
ordinalType = "Ordinal"
rangeType = "Range"
)
// ImportExportFormat is the format of the file import or export
type ImportExportFormat uint8
// supported Export Formats
const (
FormatJSON ImportExportFormat = iota
)
// Export writes the translations out to a file on disk.
//
// NOTE: this currently only works with string or int translations keys.
func (t *UniversalTranslator) Export(format ImportExportFormat, dirname string) error {
_, err := os.Stat(dirname)
fmt.Println(dirname, err, os.IsNotExist(err))
if err != nil {
if !os.IsNotExist(err) {
return err
}
if err = os.MkdirAll(dirname, 0744); err != nil {
return err
}
}
// build up translations
var trans []translation
var b []byte
var ext string
for _, locale := range t.translators {
for k, v := range locale.(*translator).translations {
trans = append(trans, translation{
Locale: locale.Locale(),
Key: k,
Translation: v.text,
})
}
for k, pluralTrans := range locale.(*translator).cardinalTanslations {
for i, plural := range pluralTrans {
// leave enough for all plural rules
// but not all are set for all languages.
if plural == nil {
continue
}
trans = append(trans, translation{
Locale: locale.Locale(),
Key: k.(string),
Translation: plural.text,
PluralType: cardinalType,
PluralRule: locales.PluralRule(i).String(),
})
}
}
for k, pluralTrans := range locale.(*translator).ordinalTanslations {
for i, plural := range pluralTrans {
// leave enough for all plural rules
// but not all are set for all languages.
if plural == nil {
continue
}
trans = append(trans, translation{
Locale: locale.Locale(),
Key: k.(string),
Translation: plural.text,
PluralType: ordinalType,
PluralRule: locales.PluralRule(i).String(),
})
}
}
for k, pluralTrans := range locale.(*translator).rangeTanslations {
for i, plural := range pluralTrans {
// leave enough for all plural rules
// but not all are set for all languages.
if plural == nil {
continue
}
trans = append(trans, translation{
Locale: locale.Locale(),
Key: k.(string),
Translation: plural.text,
PluralType: rangeType,
PluralRule: locales.PluralRule(i).String(),
})
}
}
switch format {
case FormatJSON:
b, err = json.MarshalIndent(trans, "", " ")
ext = ".json"
}
if err != nil {
return err
}
err = ioutil.WriteFile(filepath.Join(dirname, fmt.Sprintf("%s%s", locale.Locale(), ext)), b, 0644)
if err != nil {
return err
}
trans = trans[0:0]
}
return nil
}
// Import reads the translations out of a file or directory on disk.
//
// NOTE: this currently only works with string or int translations keys.
func (t *UniversalTranslator) Import(format ImportExportFormat, dirnameOrFilename string) error {
fi, err := os.Stat(dirnameOrFilename)
if err != nil {
return err
}
processFn := func(filename string) error {
f, err := os.Open(filename)
if err != nil {
return err
}
defer f.Close()
return t.ImportByReader(format, f)
}
if !fi.IsDir() {
return processFn(dirnameOrFilename)
}
// recursively go through directory
walker := func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
return nil
}
switch format {
case FormatJSON:
// skip non JSON files
if filepath.Ext(info.Name()) != ".json" {
return nil
}
}
return processFn(path)
}
return filepath.Walk(dirnameOrFilename, walker)
}
// ImportByReader imports the translations found within the contents read from the supplied reader.
//
// NOTE: generally used when assets have been embedded into the binary and are already in memory.
func (t *UniversalTranslator) ImportByReader(format ImportExportFormat, reader io.Reader) error {
b, err := ioutil.ReadAll(reader)
if err != nil {
return err
}
var trans []translation
switch format {
case FormatJSON:
err = json.Unmarshal(b, &trans)
}
if err != nil {
return err
}
for _, tl := range trans {
locale, found := t.FindTranslator(tl.Locale)
if !found {
return &ErrMissingLocale{locale: tl.Locale}
}
pr := stringToPR(tl.PluralRule)
if pr == locales.PluralRuleUnknown {
err = locale.Add(tl.Key, tl.Translation, tl.OverrideExisting)
if err != nil {
return err
}
continue
}
switch tl.PluralType {
case cardinalType:
err = locale.AddCardinal(tl.Key, tl.Translation, pr, tl.OverrideExisting)
case ordinalType:
err = locale.AddOrdinal(tl.Key, tl.Translation, pr, tl.OverrideExisting)
case rangeType:
err = locale.AddRange(tl.Key, tl.Translation, pr, tl.OverrideExisting)
default:
return &ErrBadPluralDefinition{tl: tl}
}
if err != nil {
return err
}
}
return nil
}
func stringToPR(s string) locales.PluralRule {
switch s {
case "One":
return locales.PluralRuleOne
case "Two":
return locales.PluralRuleTwo
case "Few":
return locales.PluralRuleFew
case "Many":
return locales.PluralRuleMany
case "Other":
return locales.PluralRuleOther
default:
return locales.PluralRuleUnknown
}
}
Binary file not shown
@@ -1,420 +0,0 @@
package ut
import (
"fmt"
"strconv"
"strings"
"github.com/go-playground/locales"
)
const (
paramZero = "{0}"
paramOne = "{1}"
unknownTranslation = ""
)
// Translator is universal translators
// translator instance which is a thin wrapper
// around locales.Translator instance providing
// some extra functionality
type Translator interface {
locales.Translator
// adds a normal translation for a particular language/locale
// {#} is the only replacement type accepted and may be repeated ad infinitum
// eg. one: '{0} day left' other: '{0} days left'
Add(key interface{}, text string, override bool) error
// adds a cardinal plural translation for a particular language/locale
// {0} is the only replacement type accepted and only one variable is accepted as
// multiple cannot be used for a plural rule determination, unless it is a range;
// see AddRange below.
// eg. in locale 'en' one: '{0} day left' other: '{0} days left'
AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error
// adds an ordinal plural translation for a particular language/locale
// {0} is the only replacement type accepted and only one variable is accepted as
// multiple cannot be used for a plural rule determination, unless it is a range;
// see AddRange below.
// eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring'
// - 1st, 2nd, 3rd...
AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error
// adds a range plural translation for a particular language/locale
// {0} and {1} are the only replacement types accepted and only these are accepted.
// eg. in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left'
AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error
// creates the translation for the locale given the 'key' and params passed in
T(key interface{}, params ...string) (string, error)
// creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments
// and param passed in
C(key interface{}, num float64, digits uint64, param string) (string, error)
// creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments
// and param passed in
O(key interface{}, num float64, digits uint64, param string) (string, error)
// creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and
// 'digit2' arguments and 'param1' and 'param2' passed in
R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error)
// VerifyTranslations checks to ensure that no plural rules have been
// missed within the translations.
VerifyTranslations() error
}
var _ Translator = new(translator)
var _ locales.Translator = new(translator)
type translator struct {
locales.Translator
translations map[interface{}]*transText
cardinalTanslations map[interface{}][]*transText // array index is mapped to locales.PluralRule index + the locales.PluralRuleUnknown
ordinalTanslations map[interface{}][]*transText
rangeTanslations map[interface{}][]*transText
}
type transText struct {
text string
indexes []int
}
func newTranslator(trans locales.Translator) Translator {
return &translator{
Translator: trans,
translations: make(map[interface{}]*transText), // translation text broken up by byte index
cardinalTanslations: make(map[interface{}][]*transText),
ordinalTanslations: make(map[interface{}][]*transText),
rangeTanslations: make(map[interface{}][]*transText),
}
}
// Add adds a normal translation for a particular language/locale
// {#} is the only replacement type accepted and may be repeated ad infinitum
// eg. one: '{0} day left' other: '{0} days left'
func (t *translator) Add(key interface{}, text string, override bool) error {
if _, ok := t.translations[key]; ok && !override {
return &ErrConflictingTranslation{locale: t.Locale(), key: key, text: text}
}
lb := strings.Count(text, "{")
rb := strings.Count(text, "}")
if lb != rb {
return &ErrMissingBracket{locale: t.Locale(), key: key, text: text}
}
trans := &transText{
text: text,
}
var idx int
for i := 0; i < lb; i++ {
s := "{" + strconv.Itoa(i) + "}"
idx = strings.Index(text, s)
if idx == -1 {
return &ErrBadParamSyntax{locale: t.Locale(), param: s, key: key, text: text}
}
trans.indexes = append(trans.indexes, idx)
trans.indexes = append(trans.indexes, idx+len(s))
}
t.translations[key] = trans
return nil
}
// AddCardinal adds a cardinal plural translation for a particular language/locale
// {0} is the only replacement type accepted and only one variable is accepted as
// multiple cannot be used for a plural rule determination, unless it is a range;
// see AddRange below.
// eg. in locale 'en' one: '{0} day left' other: '{0} days left'
func (t *translator) AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error {
var verified bool
// verify plural rule exists for locale
for _, pr := range t.PluralsCardinal() {
if pr == rule {
verified = true
break
}
}
if !verified {
return &ErrCardinalTranslation{text: fmt.Sprintf("error: cardinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)}
}
tarr, ok := t.cardinalTanslations[key]
if ok {
// verify not adding a conflicting record
if len(tarr) > 0 && tarr[rule] != nil && !override {
return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
}
} else {
tarr = make([]*transText, 7, 7)
t.cardinalTanslations[key] = tarr
}
trans := &transText{
text: text,
indexes: make([]int, 2, 2),
}
tarr[rule] = trans
idx := strings.Index(text, paramZero)
if idx == -1 {
tarr[rule] = nil
return &ErrCardinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddCardinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
}
trans.indexes[0] = idx
trans.indexes[1] = idx + len(paramZero)
return nil
}
// AddOrdinal adds an ordinal plural translation for a particular language/locale
// {0} is the only replacement type accepted and only one variable is accepted as
// multiple cannot be used for a plural rule determination, unless it is a range;
// see AddRange below.
// eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring' - 1st, 2nd, 3rd...
func (t *translator) AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error {
var verified bool
// verify plural rule exists for locale
for _, pr := range t.PluralsOrdinal() {
if pr == rule {
verified = true
break
}
}
if !verified {
return &ErrOrdinalTranslation{text: fmt.Sprintf("error: ordinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)}
}
tarr, ok := t.ordinalTanslations[key]
if ok {
// verify not adding a conflicting record
if len(tarr) > 0 && tarr[rule] != nil && !override {
return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
}
} else {
tarr = make([]*transText, 7, 7)
t.ordinalTanslations[key] = tarr
}
trans := &transText{
text: text,
indexes: make([]int, 2, 2),
}
tarr[rule] = trans
idx := strings.Index(text, paramZero)
if idx == -1 {
tarr[rule] = nil
return &ErrOrdinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddOrdinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
}
trans.indexes[0] = idx
trans.indexes[1] = idx + len(paramZero)
return nil
}
// AddRange adds a range plural translation for a particular language/locale
// {0} and {1} are the only replacement types accepted and only these are accepted.
// eg. in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left'
func (t *translator) AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error {
var verified bool
// verify plural rule exists for locale
for _, pr := range t.PluralsRange() {
if pr == rule {
verified = true
break
}
}
if !verified {
return &ErrRangeTranslation{text: fmt.Sprintf("error: range plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)}
}
tarr, ok := t.rangeTanslations[key]
if ok {
// verify not adding a conflicting record
if len(tarr) > 0 && tarr[rule] != nil && !override {
return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
}
} else {
tarr = make([]*transText, 7, 7)
t.rangeTanslations[key] = tarr
}
trans := &transText{
text: text,
indexes: make([]int, 4, 4),
}
tarr[rule] = trans
idx := strings.Index(text, paramZero)
if idx == -1 {
tarr[rule] = nil
return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, are you sure you're adding a Range Translation? locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
}
trans.indexes[0] = idx
trans.indexes[1] = idx + len(paramZero)
idx = strings.Index(text, paramOne)
if idx == -1 {
tarr[rule] = nil
return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, a Range Translation requires two parameters. locale: '%s' key: '%v' text: '%s'", paramOne, t.Locale(), key, text)}
}
trans.indexes[2] = idx
trans.indexes[3] = idx + len(paramOne)
return nil
}
// T creates the translation for the locale given the 'key' and params passed in
func (t *translator) T(key interface{}, params ...string) (string, error) {
trans, ok := t.translations[key]
if !ok {
return unknownTranslation, ErrUnknowTranslation
}
b := make([]byte, 0, 64)
var start, end, count int
for i := 0; i < len(trans.indexes); i++ {
end = trans.indexes[i]
b = append(b, trans.text[start:end]...)
b = append(b, params[count]...)
i++
start = trans.indexes[i]
count++
}
b = append(b, trans.text[start:]...)
return string(b), nil
}
// C creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in
func (t *translator) C(key interface{}, num float64, digits uint64, param string) (string, error) {
tarr, ok := t.cardinalTanslations[key]
if !ok {
return unknownTranslation, ErrUnknowTranslation
}
rule := t.CardinalPluralRule(num, digits)
trans := tarr[rule]
b := make([]byte, 0, 64)
b = append(b, trans.text[:trans.indexes[0]]...)
b = append(b, param...)
b = append(b, trans.text[trans.indexes[1]:]...)
return string(b), nil
}
// O creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in
func (t *translator) O(key interface{}, num float64, digits uint64, param string) (string, error) {
tarr, ok := t.ordinalTanslations[key]
if !ok {
return unknownTranslation, ErrUnknowTranslation
}
rule := t.OrdinalPluralRule(num, digits)
trans := tarr[rule]
b := make([]byte, 0, 64)
b = append(b, trans.text[:trans.indexes[0]]...)
b = append(b, param...)
b = append(b, trans.text[trans.indexes[1]:]...)
return string(b), nil
}
// R creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and 'digit2' arguments
// and 'param1' and 'param2' passed in
func (t *translator) R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error) {
tarr, ok := t.rangeTanslations[key]
if !ok {
return unknownTranslation, ErrUnknowTranslation
}
rule := t.RangePluralRule(num1, digits1, num2, digits2)
trans := tarr[rule]
b := make([]byte, 0, 64)
b = append(b, trans.text[:trans.indexes[0]]...)
b = append(b, param1...)
b = append(b, trans.text[trans.indexes[1]:trans.indexes[2]]...)
b = append(b, param2...)
b = append(b, trans.text[trans.indexes[3]:]...)
return string(b), nil
}
// VerifyTranslations checks to ensure that no plural rules have been
// missed within the translations.
func (t *translator) VerifyTranslations() error {
for k, v := range t.cardinalTanslations {
for _, rule := range t.PluralsCardinal() {
if v[rule] == nil {
return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "plural", rule: rule, key: k}
}
}
}
for k, v := range t.ordinalTanslations {
for _, rule := range t.PluralsOrdinal() {
if v[rule] == nil {
return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "ordinal", rule: rule, key: k}
}
}
}
for k, v := range t.rangeTanslations {
for _, rule := range t.PluralsRange() {
if v[rule] == nil {
return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "range", rule: rule, key: k}
}
}
}
return nil
}
@@ -1,113 +0,0 @@
package ut
import (
"strings"
"github.com/go-playground/locales"
)
// UniversalTranslator holds all locale & translation data
type UniversalTranslator struct {
translators map[string]Translator
fallback Translator
}
// New returns a new UniversalTranslator instance set with
// the fallback locale and locales it should support
func New(fallback locales.Translator, supportedLocales ...locales.Translator) *UniversalTranslator {
t := &UniversalTranslator{
translators: make(map[string]Translator),
}
for _, v := range supportedLocales {
trans := newTranslator(v)
t.translators[strings.ToLower(trans.Locale())] = trans
if fallback.Locale() == v.Locale() {
t.fallback = trans
}
}
if t.fallback == nil && fallback != nil {
t.fallback = newTranslator(fallback)
}
return t
}
// FindTranslator tries to find a Translator based on an array of locales
// and returns the first one it can find, otherwise returns the
// fallback translator.
func (t *UniversalTranslator) FindTranslator(locales ...string) (trans Translator, found bool) {
for _, locale := range locales {
if trans, found = t.translators[strings.ToLower(locale)]; found {
return
}
}
return t.fallback, false
}
// GetTranslator returns the specified translator for the given locale,
// or fallback if not found
func (t *UniversalTranslator) GetTranslator(locale string) (trans Translator, found bool) {
if trans, found = t.translators[strings.ToLower(locale)]; found {
return
}
return t.fallback, false
}
// GetFallback returns the fallback locale
func (t *UniversalTranslator) GetFallback() Translator {
return t.fallback
}
// AddTranslator adds the supplied translator, if it already exists the override param
// will be checked and if false an error will be returned, otherwise the translator will be
// overridden; if the fallback matches the supplied translator it will be overridden as well
// NOTE: this is normally only used when translator is embedded within a library
func (t *UniversalTranslator) AddTranslator(translator locales.Translator, override bool) error {
lc := strings.ToLower(translator.Locale())
_, ok := t.translators[lc]
if ok && !override {
return &ErrExistingTranslator{locale: translator.Locale()}
}
trans := newTranslator(translator)
if t.fallback.Locale() == translator.Locale() {
// because it's optional to have a fallback, I don't impose that limitation
// don't know why you wouldn't but...
if !override {
return &ErrExistingTranslator{locale: translator.Locale()}
}
t.fallback = trans
}
t.translators[lc] = trans
return nil
}
// VerifyTranslations runs through all locales and identifies any issues
// eg. missing plural rules for a locale
func (t *UniversalTranslator) VerifyTranslations() (err error) {
for _, trans := range t.translators {
err = trans.VerifyTranslations()
if err != nil {
return
}
}
return
}
@@ -1,29 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
*.test
*.out
*.txt
cover.html
README.html
@@ -1,29 +0,0 @@
language: go
go:
- 1.13.7
- tip
matrix:
allow_failures:
- go: tip
notifications:
email:
recipients: dean.karn@gmail.com
on_success: change
on_failure: always
before_install:
- go install github.com/mattn/goveralls
- mkdir -p $GOPATH/src/gopkg.in
- ln -s $GOPATH/src/github.com/$TRAVIS_REPO_SLUG $GOPATH/src/gopkg.in/validator.v9
# Only clone the most recent commit.
git:
depth: 1
script:
- go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./...
after_success: |
[ $TRAVIS_GO_VERSION = 1.13.7 ] &&
goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN
@@ -1,22 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015 Dean Karn
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -1,18 +0,0 @@
GOCMD=GO111MODULE=on go
linters-install:
@golangci-lint --version >/dev/null 2>&1 || { \
echo "installing linting tools..."; \
curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s v1.21.0; \
}
lint: linters-install
$(PWD)/bin/golangci-lint run
test:
$(GOCMD) test -cover -race ./...
bench:
$(GOCMD) test -bench=. -benchmem ./...
.PHONY: test lint linters-install
@@ -1,292 +0,0 @@
Package validator
================
<img align="right" src="https://raw.githubusercontent.com/go-playground/validator/v9/logo.png">[![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
![Project status](https://img.shields.io/badge/version-10.3.0-green.svg)
[![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator)
[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator)
[![GoDoc](https://godoc.org/github.com/go-playground/validator?status.svg)](https://pkg.go.dev/github.com/go-playground/validator/v10)
![License](https://img.shields.io/dub/l/vibe-d.svg)
Package validator implements value validations for structs and individual fields based on tags.
It has the following **unique** features:
- Cross Field and Cross Struct validations by using validation tags or custom validators.
- Slice, Array and Map diving, which allows any or all levels of a multidimensional field to be validated.
- Ability to dive into both map keys and values for validation (see the short sketch after this list)
- Handles the interface type by determining its underlying type prior to validation.
- Handles custom field types such as sql driver Valuer; see [Valuer](https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29)
- Alias validation tags, which allow several validations to be mapped to a single tag for easier definition of validations on structs
- Extraction of custom defined Field Name e.g. can specify to extract the JSON name while validating and have it available in the resulting FieldError
- Customizable i18n aware error messages.
- Default validator for the [gin](https://github.com/gin-gonic/gin) web framework; upgrading from v8 to v9 in gin see [here](https://github.com/go-playground/validator/tree/master/_examples/gin-upgrading-overriding)
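As a quick illustration of the diving features listed above, a minimal hedged sketch (the `Team` struct and its fields are assumptions for illustration only):
```go
type Team struct {
	// dive applies the following tags to each element of the slice
	Members []string `validate:"required,dive,email"`

	// keys ... endkeys validates the map keys; the tags after endkeys apply to the values
	Roles map[string]string `validate:"dive,keys,alphanum,endkeys,required,oneof=admin member"`
}
```
Passing a `Team` value to `validate.Struct(...)` then validates every slice element, map key and map value against the listed tags.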
Installation
------------
Use go get.
go get github.com/go-playground/validator/v10
Then import the validator package into your own code.
import "github.com/go-playground/validator/v10"
Error Return Value
-------
Validation functions return type error
They return type error to avoid the issue discussed in the following, where err is always != nil:
* http://stackoverflow.com/a/29138676/3158232
* https://github.com/go-playground/validator/issues/134
Validator only returns InvalidValidationError for bad validation input, nil, or ValidationErrors as type error; so in your code all you need to do is check whether the returned error is nil, and if it's not, check whether it is an InvalidValidationError (if necessary; most of the time it isn't), then type-assert it to ValidationErrors like so:
```go
err := validate.Struct(mystruct)
validationErrors := err.(validator.ValidationErrors)
```
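A slightly fuller sketch of that pattern (the `User` struct and its field values here are made up purely for illustration):
```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type User struct {
	Email string `validate:"required,email"`
	Age   uint8  `validate:"gte=18"`
}

func main() {
	validate := validator.New()

	err := validate.Struct(User{Email: "not-an-email", Age: 16})
	if err != nil {
		// Bad input to the validator itself (eg. a nil pointer) comes back
		// as *InvalidValidationError rather than ValidationErrors.
		if _, ok := err.(*validator.InvalidValidationError); ok {
			fmt.Println(err)
			return
		}
		for _, fe := range err.(validator.ValidationErrors) {
			fmt.Println(fe.Namespace(), "failed on the", fe.Tag(), "tag")
		}
	}
}
```
Checking for `*InvalidValidationError` first keeps the later type assertion to `ValidationErrors` safe.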
Usage and documentation
------
Please see https://godoc.org/github.com/go-playground/validator for detailed usage docs.
##### Examples:
- [Simple](https://github.com/go-playground/validator/blob/master/_examples/simple/main.go)
- [Custom Field Types](https://github.com/go-playground/validator/blob/master/_examples/custom/main.go)
- [Struct Level](https://github.com/go-playground/validator/blob/master/_examples/struct-level/main.go)
- [Translations & Custom Errors](https://github.com/go-playground/validator/blob/master/_examples/translations/main.go)
- [Gin upgrade and/or override validator](https://github.com/go-playground/validator/tree/v9/_examples/gin-upgrading-overriding)
- [wash - an example application putting it all together](https://github.com/bluesuncorp/wash)
Baked-in Validations
------
### Fields:
| Tag | Description |
| - | - |
| eqcsfield | Field Equals Another Field (relative)|
| eqfield | Field Equals Another Field |
| fieldcontains | NOT DOCUMENTED IN doc.go |
| fieldexcludes | NOT DOCUMENTED IN doc.go |
| gtcsfield | Field Greater Than Another Relative Field |
| gtecsfield | Field Greater Than or Equal To Another Relative Field |
| gtefield | Field Greater Than or Equal To Another Field |
| gtfield | Field Greater Than Another Field |
| ltcsfield | Less Than Another Relative Field |
| ltecsfield | Less Than or Equal To Another Relative Field |
| ltefield | Less Than or Equal To Another Field |
| ltfield | Less Than Another Field |
| necsfield | Field Does Not Equal Another Field (relative) |
| nefield | Field Does Not Equal Another Field |
### Network:
| Tag | Description |
| - | - |
| cidr | Classless Inter-Domain Routing CIDR |
| cidrv4 | Classless Inter-Domain Routing CIDRv4 |
| cidrv6 | Classless Inter-Domain Routing CIDRv6 |
| datauri | Data URL |
| fqdn | Fully Qualified Domain Name (FQDN) |
| hostname | Hostname RFC 952 |
| hostname_port | HostPort |
| hostname_rfc1123 | Hostname RFC 1123 |
| ip | Internet Protocol Address IP |
| ip4_addr | Internet Protocol Address IPv4 |
| ip6_addr |Internet Protocol Address IPv6 |
| ip_addr | Internet Protocol Address IP |
| ipv4 | Internet Protocol Address IPv4 |
| ipv6 | Internet Protocol Address IPv6 |
| mac | Media Access Control Address MAC |
| tcp4_addr | Transmission Control Protocol Address TCPv4 |
| tcp6_addr | Transmission Control Protocol Address TCPv6 |
| tcp_addr | Transmission Control Protocol Address TCP |
| udp4_addr | User Datagram Protocol Address UDPv4 |
| udp6_addr | User Datagram Protocol Address UDPv6 |
| udp_addr | User Datagram Protocol Address UDP |
| unix_addr | Unix domain socket end point Address |
| uri | URI String |
| url | URL String |
| url_encoded | URL Encoded |
| urn_rfc2141 | Urn RFC 2141 String |
### Strings:
| Tag | Description |
| - | - |
| alpha | Alpha Only |
| alphanum | Alphanumeric |
| alphanumunicode | Alphanumeric Unicode |
| alphaunicode | Alpha Unicode |
| ascii | ASCII |
| contains | Contains |
| containsany | Contains Any |
| containsrune | Contains Rune |
| lowercase | Lowercase |
| multibyte | Multi-Byte Characters |
| number | NOT DOCUMENTED IN doc.go |
| numeric | Numeric |
| printascii | Printable ASCII |
| startswith | Starts With |
| uppercase | Uppercase |
### Format:
| Tag | Description |
| - | - |
| base64 | Base64 String |
| base64url | Base64URL String |
| btc_addr | Bitcoin Address |
| btc_addr_bech32 | Bitcoin Bech32 Address (segwit) |
| datetime | Datetime |
| email | E-mail String |
| eth_addr | Ethereum Address |
| hexadecimal | Hexadecimal String |
| hexcolor | Hexcolor String |
| hsl | HSL String |
| hsla | HSLA String |
| html | HTML Tags |
| html_encoded | HTML Encoded |
| isbn | International Standard Book Number |
| isbn10 | International Standard Book Number 10 |
| isbn13 | International Standard Book Number 13 |
| json | JSON |
| latitude | Latitude |
| longitude | Longitude |
| rgb | RGB String |
| rgba | RGBA String |
| ssn | Social Security Number SSN |
| uuid | Universally Unique Identifier UUID |
| uuid3 | Universally Unique Identifier UUID v3 |
| uuid3_rfc4122 | Universally Unique Identifier UUID v3 RFC4122 |
| uuid4 | Universally Unique Identifier UUID v4 |
| uuid4_rfc4122 | Universally Unique Identifier UUID v4 RFC4122 |
| uuid5 | Universally Unique Identifier UUID v5 |
| uuid5_rfc4122 | Universally Unique Identifier UUID v5 RFC4122 |
| uuid_rfc4122 | Universally Unique Identifier UUID RFC4122 |
### Comparisons:
| Tag | Description |
| - | - |
| eq | Equals |
| gt | Greater than|
| gte |Greater than or equal |
| lt | Less Than |
| lte | Less Than or Equal |
| ne | Not Equal |
### Other:
| Tag | Description |
| - | - |
| dir | Directory |
| e164 | NOT DOCUMENTED IN doc.go |
| endswith | Ends With |
| excludes | Excludes |
| excludesall | Excludes All |
| excludesrune | Excludes Rune |
| file | File path |
| isdefault | Is Default |
| len | Length |
| max | Maximum |
| min | Minimum |
| oneof | One Of |
| required | Required |
| required_with | Required With |
| required_with_all | Required With All |
| required_without | Required Without |
| required_without_all | Required Without All |
| unique | Unique |
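To make the tag syntax above concrete, here is a small illustrative fragment (the struct and field names are assumptions, not part of the library); it would be checked with `validate.Struct(form)`:
```go
type SignupForm struct {
	Email           string `validate:"required,email"`
	Password        string `validate:"required,min=8"`
	ConfirmPassword string `validate:"required,eqfield=Password"`
	Homepage        string `validate:"omitempty,url"`
	ServerIP        string `validate:"omitempty,ip4_addr"`
}
```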
Benchmarks
------
###### Run on MacBook Pro (15-inch, 2017) go version go1.10.2 darwin/amd64
```go
goos: darwin
goarch: amd64
pkg: github.com/go-playground/validator
BenchmarkFieldSuccess-8 20000000 83.6 ns/op 0 B/op 0 allocs/op
BenchmarkFieldSuccessParallel-8 50000000 26.8 ns/op 0 B/op 0 allocs/op
BenchmarkFieldFailure-8 5000000 291 ns/op 208 B/op 4 allocs/op
BenchmarkFieldFailureParallel-8 20000000 107 ns/op 208 B/op 4 allocs/op
BenchmarkFieldArrayDiveSuccess-8 2000000 623 ns/op 201 B/op 11 allocs/op
BenchmarkFieldArrayDiveSuccessParallel-8 10000000 237 ns/op 201 B/op 11 allocs/op
BenchmarkFieldArrayDiveFailure-8 2000000 859 ns/op 412 B/op 16 allocs/op
BenchmarkFieldArrayDiveFailureParallel-8 5000000 335 ns/op 413 B/op 16 allocs/op
BenchmarkFieldMapDiveSuccess-8 1000000 1292 ns/op 432 B/op 18 allocs/op
BenchmarkFieldMapDiveSuccessParallel-8 3000000 467 ns/op 432 B/op 18 allocs/op
BenchmarkFieldMapDiveFailure-8 1000000 1082 ns/op 512 B/op 16 allocs/op
BenchmarkFieldMapDiveFailureParallel-8 5000000 425 ns/op 512 B/op 16 allocs/op
BenchmarkFieldMapDiveWithKeysSuccess-8 1000000 1539 ns/op 480 B/op 21 allocs/op
BenchmarkFieldMapDiveWithKeysSuccessParallel-8 3000000 613 ns/op 480 B/op 21 allocs/op
BenchmarkFieldMapDiveWithKeysFailure-8 1000000 1413 ns/op 721 B/op 21 allocs/op
BenchmarkFieldMapDiveWithKeysFailureParallel-8 3000000 575 ns/op 721 B/op 21 allocs/op
BenchmarkFieldCustomTypeSuccess-8 10000000 216 ns/op 32 B/op 2 allocs/op
BenchmarkFieldCustomTypeSuccessParallel-8 20000000 82.2 ns/op 32 B/op 2 allocs/op
BenchmarkFieldCustomTypeFailure-8 5000000 274 ns/op 208 B/op 4 allocs/op
BenchmarkFieldCustomTypeFailureParallel-8 20000000 116 ns/op 208 B/op 4 allocs/op
BenchmarkFieldOrTagSuccess-8 2000000 740 ns/op 16 B/op 1 allocs/op
BenchmarkFieldOrTagSuccessParallel-8 3000000 474 ns/op 16 B/op 1 allocs/op
BenchmarkFieldOrTagFailure-8 3000000 471 ns/op 224 B/op 5 allocs/op
BenchmarkFieldOrTagFailureParallel-8 3000000 414 ns/op 224 B/op 5 allocs/op
BenchmarkStructLevelValidationSuccess-8 10000000 213 ns/op 32 B/op 2 allocs/op
BenchmarkStructLevelValidationSuccessParallel-8 20000000 91.8 ns/op 32 B/op 2 allocs/op
BenchmarkStructLevelValidationFailure-8 3000000 473 ns/op 304 B/op 8 allocs/op
BenchmarkStructLevelValidationFailureParallel-8 10000000 234 ns/op 304 B/op 8 allocs/op
BenchmarkStructSimpleCustomTypeSuccess-8 5000000 385 ns/op 32 B/op 2 allocs/op
BenchmarkStructSimpleCustomTypeSuccessParallel-8 10000000 161 ns/op 32 B/op 2 allocs/op
BenchmarkStructSimpleCustomTypeFailure-8 2000000 640 ns/op 424 B/op 9 allocs/op
BenchmarkStructSimpleCustomTypeFailureParallel-8 5000000 318 ns/op 440 B/op 10 allocs/op
BenchmarkStructFilteredSuccess-8 2000000 597 ns/op 288 B/op 9 allocs/op
BenchmarkStructFilteredSuccessParallel-8 10000000 266 ns/op 288 B/op 9 allocs/op
BenchmarkStructFilteredFailure-8 3000000 454 ns/op 256 B/op 7 allocs/op
BenchmarkStructFilteredFailureParallel-8 10000000 214 ns/op 256 B/op 7 allocs/op
BenchmarkStructPartialSuccess-8 3000000 502 ns/op 256 B/op 6 allocs/op
BenchmarkStructPartialSuccessParallel-8 10000000 225 ns/op 256 B/op 6 allocs/op
BenchmarkStructPartialFailure-8 2000000 702 ns/op 480 B/op 11 allocs/op
BenchmarkStructPartialFailureParallel-8 5000000 329 ns/op 480 B/op 11 allocs/op
BenchmarkStructExceptSuccess-8 2000000 793 ns/op 496 B/op 12 allocs/op
BenchmarkStructExceptSuccessParallel-8 10000000 193 ns/op 240 B/op 5 allocs/op
BenchmarkStructExceptFailure-8 2000000 639 ns/op 464 B/op 10 allocs/op
BenchmarkStructExceptFailureParallel-8 5000000 300 ns/op 464 B/op 10 allocs/op
BenchmarkStructSimpleCrossFieldSuccess-8 3000000 417 ns/op 72 B/op 3 allocs/op
BenchmarkStructSimpleCrossFieldSuccessParallel-8 10000000 163 ns/op 72 B/op 3 allocs/op
BenchmarkStructSimpleCrossFieldFailure-8 2000000 645 ns/op 304 B/op 8 allocs/op
BenchmarkStructSimpleCrossFieldFailureParallel-8 5000000 285 ns/op 304 B/op 8 allocs/op
BenchmarkStructSimpleCrossStructCrossFieldSuccess-8 3000000 588 ns/op 80 B/op 4 allocs/op
BenchmarkStructSimpleCrossStructCrossFieldSuccessParallel-8 10000000 221 ns/op 80 B/op 4 allocs/op
BenchmarkStructSimpleCrossStructCrossFieldFailure-8 2000000 868 ns/op 320 B/op 9 allocs/op
BenchmarkStructSimpleCrossStructCrossFieldFailureParallel-8 5000000 337 ns/op 320 B/op 9 allocs/op
BenchmarkStructSimpleSuccess-8 5000000 260 ns/op 0 B/op 0 allocs/op
BenchmarkStructSimpleSuccessParallel-8 20000000 90.6 ns/op 0 B/op 0 allocs/op
BenchmarkStructSimpleFailure-8 2000000 619 ns/op 424 B/op 9 allocs/op
BenchmarkStructSimpleFailureParallel-8 5000000 296 ns/op 424 B/op 9 allocs/op
BenchmarkStructComplexSuccess-8 1000000 1454 ns/op 128 B/op 8 allocs/op
BenchmarkStructComplexSuccessParallel-8 3000000 579 ns/op 128 B/op 8 allocs/op
BenchmarkStructComplexFailure-8 300000 4140 ns/op 3041 B/op 53 allocs/op
BenchmarkStructComplexFailureParallel-8 1000000 2127 ns/op 3041 B/op 53 allocs/op
BenchmarkOneof-8 10000000 140 ns/op 0 B/op 0 allocs/op
BenchmarkOneofParallel-8 20000000 70.1 ns/op 0 B/op 0 allocs/op
```
Complementary Software
----------------------
Here is a list of software that complements using this library either pre or post validation.
* [form](https://github.com/go-playground/form) - Decodes url.Values into Go value(s) and Encodes Go value(s) into url.Values. Dual Array and Full map support.
* [mold](https://github.com/go-playground/mold) - A general library to help modify or set data within data structures and other objects
How to Contribute
------
Make a pull request...
License
------
Distributed under MIT License, please see license file within the code for more details.
File diff suppressed because it is too large
@@ -1,322 +0,0 @@
package validator
import (
"fmt"
"reflect"
"strings"
"sync"
"sync/atomic"
)
type tagType uint8
const (
typeDefault tagType = iota
typeOmitEmpty
typeIsDefault
typeNoStructLevel
typeStructOnly
typeDive
typeOr
typeKeys
typeEndKeys
)
const (
invalidValidation = "Invalid validation tag on field '%s'"
undefinedValidation = "Undefined validation function '%s' on field '%s'"
keysTagNotDefined = "'" + endKeysTag + "' tag encountered without a corresponding '" + keysTag + "' tag"
)
type structCache struct {
lock sync.Mutex
m atomic.Value // map[reflect.Type]*cStruct
}
func (sc *structCache) Get(key reflect.Type) (c *cStruct, found bool) {
c, found = sc.m.Load().(map[reflect.Type]*cStruct)[key]
return
}
func (sc *structCache) Set(key reflect.Type, value *cStruct) {
m := sc.m.Load().(map[reflect.Type]*cStruct)
nm := make(map[reflect.Type]*cStruct, len(m)+1)
for k, v := range m {
nm[k] = v
}
nm[key] = value
sc.m.Store(nm)
}
type tagCache struct {
lock sync.Mutex
m atomic.Value // map[string]*cTag
}
func (tc *tagCache) Get(key string) (c *cTag, found bool) {
c, found = tc.m.Load().(map[string]*cTag)[key]
return
}
func (tc *tagCache) Set(key string, value *cTag) {
m := tc.m.Load().(map[string]*cTag)
nm := make(map[string]*cTag, len(m)+1)
for k, v := range m {
nm[k] = v
}
nm[key] = value
tc.m.Store(nm)
}
type cStruct struct {
name string
fields []*cField
fn StructLevelFuncCtx
}
type cField struct {
idx int
name string
altName string
namesEqual bool
cTags *cTag
}
type cTag struct {
tag string
aliasTag string
actualAliasTag string
param string
keys *cTag // only populated when using tag's 'keys' and 'endkeys' for map key validation
next *cTag
fn FuncCtx
typeof tagType
hasTag bool
hasAlias bool
hasParam bool // true if parameter used eg. eq= where the equal sign has been set
isBlockEnd bool // indicates the current tag represents the last validation in the block
runValidationWhenNil bool
}
func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStruct {
v.structCache.lock.Lock()
defer v.structCache.lock.Unlock() // leave as defer! because if inner panics, it will never get unlocked otherwise!
typ := current.Type()
// could have been multiple trying to access, but once first is done this ensures struct
// isn't parsed again.
cs, ok := v.structCache.Get(typ)
if ok {
return cs
}
cs = &cStruct{name: sName, fields: make([]*cField, 0), fn: v.structLevelFuncs[typ]}
numFields := current.NumField()
var ctag *cTag
var fld reflect.StructField
var tag string
var customName string
for i := 0; i < numFields; i++ {
fld = typ.Field(i)
if !fld.Anonymous && len(fld.PkgPath) > 0 {
continue
}
tag = fld.Tag.Get(v.tagName)
if tag == skipValidationTag {
continue
}
customName = fld.Name
if v.hasTagNameFunc {
name := v.tagNameFunc(fld)
if len(name) > 0 {
customName = name
}
}
// NOTE: cannot use shared tag cache, because tags may be equal, but things like alias may be different
// and so only struct level caching can be used instead of combined with Field tag caching
if len(tag) > 0 {
ctag, _ = v.parseFieldTagsRecursive(tag, fld.Name, "", false)
} else {
// even if field doesn't have validations need cTag for traversing to potential inner/nested
// elements of the field.
ctag = new(cTag)
}
cs.fields = append(cs.fields, &cField{
idx: i,
name: fld.Name,
altName: customName,
cTags: ctag,
namesEqual: fld.Name == customName,
})
}
v.structCache.Set(typ, cs)
return cs
}
func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias string, hasAlias bool) (firstCtag *cTag, current *cTag) {
var t string
noAlias := len(alias) == 0
tags := strings.Split(tag, tagSeparator)
for i := 0; i < len(tags); i++ {
t = tags[i]
if noAlias {
alias = t
}
// check map for alias and process new tags, otherwise process as usual
if tagsVal, found := v.aliases[t]; found {
if i == 0 {
firstCtag, current = v.parseFieldTagsRecursive(tagsVal, fieldName, t, true)
} else {
next, curr := v.parseFieldTagsRecursive(tagsVal, fieldName, t, true)
current.next, current = next, curr
}
continue
}
var prevTag tagType
if i == 0 {
current = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true, typeof: typeDefault}
firstCtag = current
} else {
prevTag = current.typeof
current.next = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true}
current = current.next
}
switch t {
case diveTag:
current.typeof = typeDive
continue
case keysTag:
current.typeof = typeKeys
if i == 0 || prevTag != typeDive {
panic(fmt.Sprintf("'%s' tag must be immediately preceded by the '%s' tag", keysTag, diveTag))
}
current.typeof = typeKeys
// need to pass along only keys tag
// need to increment i to skip over the keys tags
b := make([]byte, 0, 64)
i++
for ; i < len(tags); i++ {
b = append(b, tags[i]...)
b = append(b, ',')
if tags[i] == endKeysTag {
break
}
}
current.keys, _ = v.parseFieldTagsRecursive(string(b[:len(b)-1]), fieldName, "", false)
continue
case endKeysTag:
current.typeof = typeEndKeys
// if there are more in tags then there was no keysTag defined
// and an error should be thrown
if i != len(tags)-1 {
panic(keysTagNotDefined)
}
return
case omitempty:
current.typeof = typeOmitEmpty
continue
case structOnlyTag:
current.typeof = typeStructOnly
continue
case noStructLevelTag:
current.typeof = typeNoStructLevel
continue
default:
if t == isdefault {
current.typeof = typeIsDefault
}
// if a pipe character is needed within the param you must use the utf8Pipe representation "0x7C"
orVals := strings.Split(t, orSeparator)
for j := 0; j < len(orVals); j++ {
vals := strings.SplitN(orVals[j], tagKeySeparator, 2)
if noAlias {
alias = vals[0]
current.aliasTag = alias
} else {
current.actualAliasTag = t
}
if j > 0 {
current.next = &cTag{aliasTag: alias, actualAliasTag: current.actualAliasTag, hasAlias: hasAlias, hasTag: true}
current = current.next
}
current.hasParam = len(vals) > 1
current.tag = vals[0]
if len(current.tag) == 0 {
panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, fieldName)))
}
if wrapper, ok := v.validations[current.tag]; ok {
current.fn = wrapper.fn
current.runValidationWhenNil = wrapper.runValidatinOnNil
} else {
panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, current.tag, fieldName)))
}
if len(orVals) > 1 {
current.typeof = typeOr
}
if len(vals) > 1 {
current.param = strings.Replace(strings.Replace(vals[1], utf8HexComma, ",", -1), utf8Pipe, "|", -1)
}
}
current.isBlockEnd = true
}
}
return
}
func (v *Validate) fetchCacheTag(tag string) *cTag {
// find cached tag
ctag, found := v.tagCache.Get(tag)
if !found {
v.tagCache.lock.Lock()
defer v.tagCache.lock.Unlock()
// could have been multiple trying to access, but once first is done this ensures tag
// isn't parsed again.
ctag, found = v.tagCache.Get(tag)
if !found {
ctag, _ = v.parseFieldTagsRecursive(tag, "", "", false)
v.tagCache.Set(tag, ctag)
}
}
return ctag
}
File diff suppressed because it is too large
@@ -1,272 +0,0 @@
package validator
import (
"bytes"
"fmt"
"reflect"
"strings"
ut "github.com/go-playground/universal-translator"
)
const (
fieldErrMsg = "Key: '%s' Error:Field validation for '%s' failed on the '%s' tag"
)
// ValidationErrorsTranslations is the translation return type
type ValidationErrorsTranslations map[string]string
// InvalidValidationError describes an invalid argument passed to
// `Struct`, `StructExcept`, `StructPartial` or `Field`
type InvalidValidationError struct {
Type reflect.Type
}
// Error returns InvalidValidationError message
func (e *InvalidValidationError) Error() string {
if e.Type == nil {
return "validator: (nil)"
}
return "validator: (nil " + e.Type.String() + ")"
}
// ValidationErrors is an array of FieldError's
// for use in custom error messages post validation.
type ValidationErrors []FieldError
// Error is intended for use in development + debugging and not intended to be a production error message.
// It allows ValidationErrors to implement the error interface.
// All information to create an error message specific to your application is contained within
// the FieldError found within the ValidationErrors array
func (ve ValidationErrors) Error() string {
buff := bytes.NewBufferString("")
var fe *fieldError
for i := 0; i < len(ve); i++ {
fe = ve[i].(*fieldError)
buff.WriteString(fe.Error())
buff.WriteString("\n")
}
return strings.TrimSpace(buff.String())
}
// Translate translates all of the ValidationErrors
func (ve ValidationErrors) Translate(ut ut.Translator) ValidationErrorsTranslations {
trans := make(ValidationErrorsTranslations)
var fe *fieldError
for i := 0; i < len(ve); i++ {
fe = ve[i].(*fieldError)
// // in case an Anonymous struct was used, ensure that the key
// // would be 'Username' instead of ".Username"
// if len(fe.ns) > 0 && fe.ns[:1] == "." {
// trans[fe.ns[1:]] = fe.Translate(ut)
// continue
// }
trans[fe.ns] = fe.Translate(ut)
}
return trans
}
// FieldError contains all functions to get error details
type FieldError interface {
// returns the validation tag that failed. if the
// validation was an alias, this will return the
// alias name and not the underlying tag that failed.
//
// eg. alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla"
// will return "iscolor"
Tag() string
// returns the validation tag that failed, even if an
// alias the actual tag within the alias will be returned.
// If an 'or' validation fails the entire or will be returned.
//
// eg. alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla"
// will return "hexcolor|rgb|rgba|hsl|hsla"
ActualTag() string
// returns the namespace for the field error, with the tag
// name taking precedence over the field's actual name.
//
// eg. JSON name "User.fname"
//
// See StructNamespace() for a version that returns actual names.
//
// NOTE: this field can be blank when validating a single primitive field
// using validate.Field(...) as there is no way to extract its name
Namespace() string
// returns the namespace for the field error, with the field's
// actual name.
//
// eq. "User.FirstName" see Namespace for comparison
//
// NOTE: this field can be blank when validating a single primitive field
// using validate.Field(...) as there is no way to extract its name
StructNamespace() string
// returns the fields name with the tag name taking precedence over the
// field's actual name.
//
// eg. JSON name "fname"
// see StructField for comparison
Field() string
// returns the field's actual name from the struct, when able to determine.
//
// eq. "FirstName"
// see Field for comparison
StructField() string
// returns the actual field's value in case needed for creating the error
// message
Value() interface{}
// returns the param value, in string form for comparison; this will also
// help with generating an error message
Param() string
// Kind returns the Field's reflect Kind
//
// eg. time.Time's kind is a struct
Kind() reflect.Kind
// Type returns the Field's reflect Type
//
// eg. time.Time's type is time.Time
Type() reflect.Type
// returns the FieldError's translated error
// from the provided 'ut.Translator' and registered 'TranslationFunc'
//
// NOTE: if no registered translator can be found it returns the same as
// calling fe.Error()
Translate(ut ut.Translator) string
}
// compile time interface checks
var _ FieldError = new(fieldError)
var _ error = new(fieldError)
// fieldError contains a single field's validation error along
// with other properties that may be needed for error message creation
// it complies with the FieldError interface
type fieldError struct {
v *Validate
tag string
actualTag string
ns string
structNs string
fieldLen uint8
structfieldLen uint8
value interface{}
param string
kind reflect.Kind
typ reflect.Type
}
// Tag returns the validation tag that failed.
func (fe *fieldError) Tag() string {
return fe.tag
}
// ActualTag returns the validation tag that failed, even if an
// alias the actual tag within the alias will be returned.
func (fe *fieldError) ActualTag() string {
return fe.actualTag
}
// Namespace returns the namespace for the field error, with the tag
// name taking precedence over the field's actual name.
func (fe *fieldError) Namespace() string {
return fe.ns
}
// StructNamespace returns the namespace for the field error, with the field's
// actual name.
func (fe *fieldError) StructNamespace() string {
return fe.structNs
}
// Field returns the field's name with the tag name taking precedence over the
// field's actual name.
func (fe *fieldError) Field() string {
return fe.ns[len(fe.ns)-int(fe.fieldLen):]
// // return fe.field
// fld := fe.ns[len(fe.ns)-int(fe.fieldLen):]
// log.Println("FLD:", fld)
// if len(fld) > 0 && fld[:1] == "." {
// return fld[1:]
// }
// return fld
}
// returns the field's actual name from the struct, when able to determine.
func (fe *fieldError) StructField() string {
// return fe.structField
return fe.structNs[len(fe.structNs)-int(fe.structfieldLen):]
}
// Value returns the actual field's value in case needed for creating the error
// message
func (fe *fieldError) Value() interface{} {
return fe.value
}
// Param returns the param value, in string form for comparison; this will
// also help with generating an error message
func (fe *fieldError) Param() string {
return fe.param
}
// Kind returns the Field's reflect Kind
func (fe *fieldError) Kind() reflect.Kind {
return fe.kind
}
// Type returns the Field's reflect Type
func (fe *fieldError) Type() reflect.Type {
return fe.typ
}
// Error returns the fieldError's error message
func (fe *fieldError) Error() string {
return fmt.Sprintf(fieldErrMsg, fe.ns, fe.Field(), fe.tag)
}
// Translate returns the FieldError's translated error
// from the provided 'ut.Translator' and registered 'TranslationFunc'
//
// NOTE: if no registered translation can be found it returns the same
// as calling fe.Error()
func (fe *fieldError) Translate(ut ut.Translator) string {
m, ok := fe.v.transTagFunc[ut]
if !ok {
return fe.Error()
}
fn, ok := m[fe.tag]
if !ok {
return fe.Error()
}
return fn(ut, fe)
}
@@ -1,119 +0,0 @@
package validator
import "reflect"
// FieldLevel contains all the information and helper functions
// to validate a field
type FieldLevel interface {
// returns the top level struct, if any
Top() reflect.Value
// returns the current fields parent struct, if any or
// the comparison value if called 'VarWithValue'
Parent() reflect.Value
// returns current field for validation
Field() reflect.Value
// returns the field's name with the tag
// name taking precedence over the fields actual name.
FieldName() string
// returns the struct field's name
StructFieldName() string
// returns param for validation against current field
Param() string
// GetTag returns the current validations tag name
GetTag() string
// ExtractType gets the actual underlying type of field value.
// It will dive into pointers, customTypes and return you the
// underlying value and it's kind.
ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool)
// traverses the parent struct to retrieve a specific field denoted by the provided namespace
// in the param and returns the field, field kind and whether it was successful in retrieving
// the field at all.
//
// NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field
// could not be retrieved because it didn't exist.
//
// Deprecated: Use GetStructFieldOK2() instead which also returns whether the value is nullable.
GetStructFieldOK() (reflect.Value, reflect.Kind, bool)
// GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
// the field and namespace allowing more extensibility for validators.
//
// Deprecated: Use GetStructFieldOKAdvanced2() instead which also returns whether the value is nullable.
GetStructFieldOKAdvanced(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool)
// traverses the parent struct to retrieve a specific field denoted by the provided namespace
// in the param and returns the field, field kind, whether it's a nullable type and whether it was successful in retrieving
// the field at all.
//
// NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field
// could not be retrieved because it didn't exist.
GetStructFieldOK2() (reflect.Value, reflect.Kind, bool, bool)
// GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
// the field and namespace allowing more extensibility for validators.
GetStructFieldOKAdvanced2(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool, bool)
}
var _ FieldLevel = new(validate)
// Field returns current field for validation
func (v *validate) Field() reflect.Value {
return v.flField
}
// FieldName returns the field's name with the tag
// name taking precedence over the fields actual name.
func (v *validate) FieldName() string {
return v.cf.altName
}
// GetTag returns the current validations tag name
func (v *validate) GetTag() string {
return v.ct.tag
}
// StructFieldName returns the struct field's name
func (v *validate) StructFieldName() string {
return v.cf.name
}
// Param returns param for validation against current field
func (v *validate) Param() string {
return v.ct.param
}
// GetStructFieldOK retrieves the struct field denoted by the current validation's param, returning the field, its kind and whether it was found
//
// Deprecated: Use GetStructFieldOK2() instead which also return if the value is nullable.
func (v *validate) GetStructFieldOK() (reflect.Value, reflect.Kind, bool) {
current, kind, _, found := v.getStructFieldOKInternal(v.slflParent, v.ct.param)
return current, kind, found
}
// GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
// the field and namespace allowing more extensibility for validators.
//
// Deprecated: Use GetStructFieldOKAdvanced2() instead which also return if the value is nullable.
func (v *validate) GetStructFieldOKAdvanced(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool) {
current, kind, _, found := v.GetStructFieldOKAdvanced2(val, namespace)
return current, kind, found
}
// GetStructFieldOK2 retrieves the struct field denoted by the current validation's param, returning the field, its kind, whether it is nullable and whether it was found
func (v *validate) GetStructFieldOK2() (reflect.Value, reflect.Kind, bool, bool) {
return v.getStructFieldOKInternal(v.slflParent, v.ct.param)
}
// GetStructFieldOKAdvanced2 is the same as GetStructFieldOK2 except that it accepts the parent struct to start looking for
// the field and namespace allowing more extensibility for validators.
func (v *validate) GetStructFieldOKAdvanced2(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool, bool) {
return v.getStructFieldOKInternal(val, namespace)
}
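
The FieldLevel interface above is what every custom validation function receives. A minimal sketch of one wired up through RegisterValidation (defined later in validator_instance.go), assuming the package's Func type has the signature func(fl validator.FieldLevel) bool; the 'maxrunes' tag and Comment struct are illustrative only:

package main

import (
	"fmt"
	"strconv"

	"github.com/go-playground/validator/v10"
)

// maxRunes uses the FieldLevel helpers shown above: Field() for the value under
// validation and Param() for the text after '=' in the tag.
func maxRunes(fl validator.FieldLevel) bool {
	limit, err := strconv.Atoi(fl.Param())
	if err != nil {
		return false
	}
	return len([]rune(fl.Field().String())) <= limit
}

type Comment struct {
	Body string `validate:"maxrunes=10"`
}

func main() {
	v := validator.New()
	if err := v.RegisterValidation("maxrunes", maxRunes); err != nil {
		panic(err)
	}
	fmt.Println(v.Struct(Comment{Body: "short"}))                // <nil>
	fmt.Println(v.Struct(Comment{Body: "much too long a body"})) // fails on 'maxrunes'
}
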

View File

@@ -1,10 +0,0 @@
module github.com/go-playground/validator/v10
go 1.13
require (
github.com/go-playground/assert/v2 v2.0.1
github.com/go-playground/locales v0.13.0
github.com/go-playground/universal-translator v0.17.0
github.com/leodido/go-urn v1.2.0
)

View File

@@ -1,21 +0,0 @@
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

Binary file not shown.


View File

@@ -1,101 +0,0 @@
package validator
import "regexp"
const (
alphaRegexString = "^[a-zA-Z]+$"
alphaNumericRegexString = "^[a-zA-Z0-9]+$"
alphaUnicodeRegexString = "^[\\p{L}]+$"
alphaUnicodeNumericRegexString = "^[\\p{L}\\p{N}]+$"
numericRegexString = "^[-+]?[0-9]+(?:\\.[0-9]+)?$"
numberRegexString = "^[0-9]+$"
hexadecimalRegexString = "^[0-9a-fA-F]+$"
hexcolorRegexString = "^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
rgbRegexString = "^rgb\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*\\)$"
rgbaRegexString = "^rgba\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
hslRegexString = "^hsl\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*\\)$"
hslaRegexString = "^hsla\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
emailRegexString = "^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22))))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
e164RegexString = "^\\+[1-9]?[0-9]{7,14}$"
base64RegexString = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
base64URLRegexString = "^(?:[A-Za-z0-9-_]{4})*(?:[A-Za-z0-9-_]{2}==|[A-Za-z0-9-_]{3}=|[A-Za-z0-9-_]{4})$"
iSBN10RegexString = "^(?:[0-9]{9}X|[0-9]{10})$"
iSBN13RegexString = "^(?:(?:97(?:8|9))[0-9]{10})$"
uUID3RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
uUID4RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
uUID5RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
uUIDRegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
uUID3RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-3[0-9a-fA-F]{3}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$"
uUID4RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-4[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$"
uUID5RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-5[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$"
uUIDRFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$"
aSCIIRegexString = "^[\x00-\x7F]*$"
printableASCIIRegexString = "^[\x20-\x7E]*$"
multibyteRegexString = "[^\x00-\x7F]"
dataURIRegexString = `^data:((?:\w+\/(?:([^;]|;[^;]).)+)?)`
latitudeRegexString = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
longitudeRegexString = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
sSNRegexString = `^[0-9]{3}[ -]?(0[1-9]|[1-9][0-9])[ -]?([1-9][0-9]{3}|[0-9][1-9][0-9]{2}|[0-9]{2}[1-9][0-9]|[0-9]{3}[1-9])$`
hostnameRegexStringRFC952 = `^[a-zA-Z]([a-zA-Z0-9\-]+[\.]?)*[a-zA-Z0-9]$` // https://tools.ietf.org/html/rfc952
hostnameRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*?$` // accepts hostname starting with a digit https://tools.ietf.org/html/rfc1123
fqdnRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{0,62})(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*?(\.[a-zA-Z]{1}[a-zA-Z0-9]{0,62})\.?$` // same as hostnameRegexStringRFC1123 but must contain a non numerical TLD (possibly ending with '.')
btcAddressRegexString = `^[13][a-km-zA-HJ-NP-Z1-9]{25,34}$` // bitcoin address
btcAddressUpperRegexStringBech32 = `^BC1[02-9AC-HJ-NP-Z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32
btcAddressLowerRegexStringBech32 = `^bc1[02-9ac-hj-np-z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32
ethAddressRegexString = `^0x[0-9a-fA-F]{40}$`
ethAddressUpperRegexString = `^0x[0-9A-F]{40}$`
ethAddressLowerRegexString = `^0x[0-9a-f]{40}$`
uRLEncodedRegexString = `(%[A-Fa-f0-9]{2})`
hTMLEncodedRegexString = `&#[x]?([0-9a-fA-F]{2})|(&gt)|(&lt)|(&quot)|(&amp)+[;]?`
hTMLRegexString = `<[/]?([a-zA-Z]+).*?>`
splitParamsRegexString = `'[^']*'|\S+`
)
var (
alphaRegex = regexp.MustCompile(alphaRegexString)
alphaNumericRegex = regexp.MustCompile(alphaNumericRegexString)
alphaUnicodeRegex = regexp.MustCompile(alphaUnicodeRegexString)
alphaUnicodeNumericRegex = regexp.MustCompile(alphaUnicodeNumericRegexString)
numericRegex = regexp.MustCompile(numericRegexString)
numberRegex = regexp.MustCompile(numberRegexString)
hexadecimalRegex = regexp.MustCompile(hexadecimalRegexString)
hexcolorRegex = regexp.MustCompile(hexcolorRegexString)
rgbRegex = regexp.MustCompile(rgbRegexString)
rgbaRegex = regexp.MustCompile(rgbaRegexString)
hslRegex = regexp.MustCompile(hslRegexString)
hslaRegex = regexp.MustCompile(hslaRegexString)
e164Regex = regexp.MustCompile(e164RegexString)
emailRegex = regexp.MustCompile(emailRegexString)
base64Regex = regexp.MustCompile(base64RegexString)
base64URLRegex = regexp.MustCompile(base64URLRegexString)
iSBN10Regex = regexp.MustCompile(iSBN10RegexString)
iSBN13Regex = regexp.MustCompile(iSBN13RegexString)
uUID3Regex = regexp.MustCompile(uUID3RegexString)
uUID4Regex = regexp.MustCompile(uUID4RegexString)
uUID5Regex = regexp.MustCompile(uUID5RegexString)
uUIDRegex = regexp.MustCompile(uUIDRegexString)
uUID3RFC4122Regex = regexp.MustCompile(uUID3RFC4122RegexString)
uUID4RFC4122Regex = regexp.MustCompile(uUID4RFC4122RegexString)
uUID5RFC4122Regex = regexp.MustCompile(uUID5RFC4122RegexString)
uUIDRFC4122Regex = regexp.MustCompile(uUIDRFC4122RegexString)
aSCIIRegex = regexp.MustCompile(aSCIIRegexString)
printableASCIIRegex = regexp.MustCompile(printableASCIIRegexString)
multibyteRegex = regexp.MustCompile(multibyteRegexString)
dataURIRegex = regexp.MustCompile(dataURIRegexString)
latitudeRegex = regexp.MustCompile(latitudeRegexString)
longitudeRegex = regexp.MustCompile(longitudeRegexString)
sSNRegex = regexp.MustCompile(sSNRegexString)
hostnameRegexRFC952 = regexp.MustCompile(hostnameRegexStringRFC952)
hostnameRegexRFC1123 = regexp.MustCompile(hostnameRegexStringRFC1123)
fqdnRegexRFC1123 = regexp.MustCompile(fqdnRegexStringRFC1123)
btcAddressRegex = regexp.MustCompile(btcAddressRegexString)
btcUpperAddressRegexBech32 = regexp.MustCompile(btcAddressUpperRegexStringBech32)
btcLowerAddressRegexBech32 = regexp.MustCompile(btcAddressLowerRegexStringBech32)
ethAddressRegex = regexp.MustCompile(ethAddressRegexString)
ethaddressRegexUpper = regexp.MustCompile(ethAddressUpperRegexString)
ethAddressRegexLower = regexp.MustCompile(ethAddressLowerRegexString)
uRLEncodedRegex = regexp.MustCompile(uRLEncodedRegexString)
hTMLEncodedRegex = regexp.MustCompile(hTMLEncodedRegexString)
hTMLRegex = regexp.MustCompile(hTMLRegexString)
splitParamsRegex = regexp.MustCompile(splitParamsRegexString)
)
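
These are ordinary Go regular expressions, so their behaviour can be checked in isolation. A small standalone sketch; the pattern literal is copied from hexcolorRegexString above, everything else is illustrative:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as hexcolorRegexString above: 3- or 6-digit hex colours with a leading '#'.
	hexcolor := regexp.MustCompile("^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{6})$")

	fmt.Println(hexcolor.MatchString("#fff"))    // true
	fmt.Println(hexcolor.MatchString("#00FF7F")) // true
	fmt.Println(hexcolor.MatchString("00ff7f"))  // false (missing '#')
	fmt.Println(hexcolor.MatchString("#12345"))  // false (must be 3 or 6 hex digits)
}
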

View File

@@ -1,175 +0,0 @@
package validator
import (
"context"
"reflect"
)
// StructLevelFunc accepts all values needed for struct level validation
type StructLevelFunc func(sl StructLevel)
// StructLevelFuncCtx accepts all values needed for struct level validation
// but also allows passing of contextual validation information via context.Context.
type StructLevelFuncCtx func(ctx context.Context, sl StructLevel)
// wrapStructLevelFunc wraps a normal StructLevelFunc and makes it compatible with StructLevelFuncCtx
func wrapStructLevelFunc(fn StructLevelFunc) StructLevelFuncCtx {
return func(ctx context.Context, sl StructLevel) {
fn(sl)
}
}
// StructLevel contains all the information and helper functions
// to validate a struct
type StructLevel interface {
// returns the main validation object, in case one wants to call validations internally.
// this is so you don't have to use anonymous functions to get access to the validate
// instance.
Validator() *Validate
// returns the top level struct, if any
Top() reflect.Value
// returns the current field's parent struct, if any
Parent() reflect.Value
// returns the current struct.
Current() reflect.Value
// ExtractType gets the actual underlying type of field value.
// It will dive into pointers, customTypes and return you the
// underlying value and its kind.
ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool)
// reports an error just by passing the field and tag information
//
// NOTES:
//
// fieldName and altName get appended to the existing namespace that
// validator is on. e.g. pass 'FirstName' or 'Names[0]' depending
// on the nesting
//
// tag can be an existing validation tag or just something you make up
// and process on the flip side; it's up to you.
ReportError(field interface{}, fieldName, structFieldName string, tag, param string)
// reports an error just by passing ValidationErrors
//
// NOTES:
//
// relativeNamespace and relativeActualNamespace get appended to the
// existing namespace that validator is on.
// e.g. pass 'User.FirstName' or 'Users[0].FirstName' depending
// on the nesting. most of the time they will be blank, unless you validate
// at a level lower than the current field depth
ReportValidationErrors(relativeNamespace, relativeActualNamespace string, errs ValidationErrors)
}
var _ StructLevel = new(validate)
// Top returns the top level struct
//
// NOTE: this can be the same as the current struct being validated
// if it is not a nested struct.
//
// this is only called when within Struct and Field Level validation and
// should not be relied upon for an accurate value otherwise.
func (v *validate) Top() reflect.Value {
return v.top
}
// Parent returns the current structs parent
//
// NOTE: this can be the same as the current struct being validated
// if it is not a nested struct.
//
// this is only called when within Struct and Field Level validation and
// should not be relied upon for an accurate value otherwise.
func (v *validate) Parent() reflect.Value {
return v.slflParent
}
// Current returns the current struct.
func (v *validate) Current() reflect.Value {
return v.slCurrent
}
// Validator returns the main validation object, in case one wants to call validations internally.
func (v *validate) Validator() *Validate {
return v.v
}
// ExtractType gets the actual underlying type of field value.
func (v *validate) ExtractType(field reflect.Value) (reflect.Value, reflect.Kind, bool) {
return v.extractTypeInternal(field, false)
}
// ReportError reports an error just by passing the field and tag information
func (v *validate) ReportError(field interface{}, fieldName, structFieldName, tag, param string) {
fv, kind, _ := v.extractTypeInternal(reflect.ValueOf(field), false)
if len(structFieldName) == 0 {
structFieldName = fieldName
}
v.str1 = string(append(v.ns, fieldName...))
if v.v.hasTagNameFunc || fieldName != structFieldName {
v.str2 = string(append(v.actualNs, structFieldName...))
} else {
v.str2 = v.str1
}
if kind == reflect.Invalid {
v.errs = append(v.errs,
&fieldError{
v: v.v,
tag: tag,
actualTag: tag,
ns: v.str1,
structNs: v.str2,
fieldLen: uint8(len(fieldName)),
structfieldLen: uint8(len(structFieldName)),
param: param,
kind: kind,
},
)
return
}
v.errs = append(v.errs,
&fieldError{
v: v.v,
tag: tag,
actualTag: tag,
ns: v.str1,
structNs: v.str2,
fieldLen: uint8(len(fieldName)),
structfieldLen: uint8(len(structFieldName)),
value: fv.Interface(),
param: param,
kind: kind,
typ: fv.Type(),
},
)
}
// ReportValidationErrors reports ValidationErrors obtained from running validations within the Struct Level validation.
//
// NOTE: this function prepends the current namespace to the relative ones.
func (v *validate) ReportValidationErrors(relativeNamespace, relativeStructNamespace string, errs ValidationErrors) {
var err *fieldError
for i := 0; i < len(errs); i++ {
err = errs[i].(*fieldError)
err.ns = string(append(append(v.ns, relativeNamespace...), err.ns...))
err.structNs = string(append(append(v.actualNs, relativeStructNamespace...), err.structNs...))
v.errs = append(v.errs, err)
}
}
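
RegisterStructValidation (defined further down in validator_instance.go) attaches a StructLevelFunc like the one described above to a type. A minimal sketch, with an illustrative User struct where at least one of the two name fields must be set:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type User struct {
	FirstName string
	LastName  string
}

// userStructLevel uses the StructLevel helpers shown above: Current() for the
// struct under validation and ReportError() to record a failure against a field.
func userStructLevel(sl validator.StructLevel) {
	user := sl.Current().Interface().(User)
	if len(user.FirstName) == 0 && len(user.LastName) == 0 {
		sl.ReportError(user.FirstName, "FirstName", "FirstName", "fnameorlname", "")
		sl.ReportError(user.LastName, "LastName", "LastName", "fnameorlname", "")
	}
}

func main() {
	v := validator.New()
	v.RegisterStructValidation(userStructLevel, User{})

	fmt.Println(v.Struct(User{FirstName: "Ada"})) // <nil>
	fmt.Println(v.Struct(User{}))                 // errors reported for FirstName and LastName
}
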

View File

@@ -1,11 +0,0 @@
package validator
import ut "github.com/go-playground/universal-translator"
// TranslationFunc is the function type used to register or override
// custom translations
type TranslationFunc func(ut ut.Translator, fe FieldError) string
// RegisterTranslationsFunc allows for registering of translations
// for a 'ut.Translator' for use within the 'TranslationFunc'
type RegisterTranslationsFunc func(ut ut.Translator) error

View File

@@ -1,266 +0,0 @@
package validator
import (
"reflect"
"strconv"
"strings"
)
// extractTypeInternal gets the actual underlying type of field value.
// It will dive into pointers, customTypes and return you the
// underlying value and its kind.
func (v *validate) extractTypeInternal(current reflect.Value, nullable bool) (reflect.Value, reflect.Kind, bool) {
BEGIN:
switch current.Kind() {
case reflect.Ptr:
nullable = true
if current.IsNil() {
return current, reflect.Ptr, nullable
}
current = current.Elem()
goto BEGIN
case reflect.Interface:
nullable = true
if current.IsNil() {
return current, reflect.Interface, nullable
}
current = current.Elem()
goto BEGIN
case reflect.Invalid:
return current, reflect.Invalid, nullable
default:
if v.v.hasCustomFuncs {
if fn, ok := v.v.customFuncs[current.Type()]; ok {
current = reflect.ValueOf(fn(current))
goto BEGIN
}
}
return current, current.Kind(), nullable
}
}
// getStructFieldOKInternal traverses a struct to retrieve a specific field denoted by the provided namespace and
// returns the field, field kind and whether it was successful in retrieving the field at all.
//
// NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field
// could not be retrieved because it didn't exist.
func (v *validate) getStructFieldOKInternal(val reflect.Value, namespace string) (current reflect.Value, kind reflect.Kind, nullable bool, found bool) {
BEGIN:
current, kind, nullable = v.ExtractType(val)
if kind == reflect.Invalid {
return
}
if namespace == "" {
found = true
return
}
switch kind {
case reflect.Ptr, reflect.Interface:
return
case reflect.Struct:
typ := current.Type()
fld := namespace
var ns string
if typ != timeType {
idx := strings.Index(namespace, namespaceSeparator)
if idx != -1 {
fld = namespace[:idx]
ns = namespace[idx+1:]
} else {
ns = ""
}
bracketIdx := strings.Index(fld, leftBracket)
if bracketIdx != -1 {
fld = fld[:bracketIdx]
ns = namespace[bracketIdx:]
}
val = current.FieldByName(fld)
namespace = ns
goto BEGIN
}
case reflect.Array, reflect.Slice:
idx := strings.Index(namespace, leftBracket)
idx2 := strings.Index(namespace, rightBracket)
arrIdx, _ := strconv.Atoi(namespace[idx+1 : idx2])
if arrIdx >= current.Len() {
return
}
startIdx := idx2 + 1
if startIdx < len(namespace) {
if namespace[startIdx:startIdx+1] == namespaceSeparator {
startIdx++
}
}
val = current.Index(arrIdx)
namespace = namespace[startIdx:]
goto BEGIN
case reflect.Map:
idx := strings.Index(namespace, leftBracket) + 1
idx2 := strings.Index(namespace, rightBracket)
endIdx := idx2
if endIdx+1 < len(namespace) {
if namespace[endIdx+1:endIdx+2] == namespaceSeparator {
endIdx++
}
}
key := namespace[idx:idx2]
switch current.Type().Key().Kind() {
case reflect.Int:
i, _ := strconv.Atoi(key)
val = current.MapIndex(reflect.ValueOf(i))
namespace = namespace[endIdx+1:]
case reflect.Int8:
i, _ := strconv.ParseInt(key, 10, 8)
val = current.MapIndex(reflect.ValueOf(int8(i)))
namespace = namespace[endIdx+1:]
case reflect.Int16:
i, _ := strconv.ParseInt(key, 10, 16)
val = current.MapIndex(reflect.ValueOf(int16(i)))
namespace = namespace[endIdx+1:]
case reflect.Int32:
i, _ := strconv.ParseInt(key, 10, 32)
val = current.MapIndex(reflect.ValueOf(int32(i)))
namespace = namespace[endIdx+1:]
case reflect.Int64:
i, _ := strconv.ParseInt(key, 10, 64)
val = current.MapIndex(reflect.ValueOf(i))
namespace = namespace[endIdx+1:]
case reflect.Uint:
i, _ := strconv.ParseUint(key, 10, 0)
val = current.MapIndex(reflect.ValueOf(uint(i)))
namespace = namespace[endIdx+1:]
case reflect.Uint8:
i, _ := strconv.ParseUint(key, 10, 8)
val = current.MapIndex(reflect.ValueOf(uint8(i)))
namespace = namespace[endIdx+1:]
case reflect.Uint16:
i, _ := strconv.ParseUint(key, 10, 16)
val = current.MapIndex(reflect.ValueOf(uint16(i)))
namespace = namespace[endIdx+1:]
case reflect.Uint32:
i, _ := strconv.ParseUint(key, 10, 32)
val = current.MapIndex(reflect.ValueOf(uint32(i)))
namespace = namespace[endIdx+1:]
case reflect.Uint64:
i, _ := strconv.ParseUint(key, 10, 64)
val = current.MapIndex(reflect.ValueOf(i))
namespace = namespace[endIdx+1:]
case reflect.Float32:
f, _ := strconv.ParseFloat(key, 32)
val = current.MapIndex(reflect.ValueOf(float32(f)))
namespace = namespace[endIdx+1:]
case reflect.Float64:
f, _ := strconv.ParseFloat(key, 64)
val = current.MapIndex(reflect.ValueOf(f))
namespace = namespace[endIdx+1:]
case reflect.Bool:
b, _ := strconv.ParseBool(key)
val = current.MapIndex(reflect.ValueOf(b))
namespace = namespace[endIdx+1:]
// reflect.Type = string
default:
val = current.MapIndex(reflect.ValueOf(key))
namespace = namespace[endIdx+1:]
}
goto BEGIN
}
// if we got here there was more namespace left, but we cannot go any deeper
panic("Invalid field namespace")
}
// asInt returns the parameter as an int64
// or panics if it can't convert
func asInt(param string) int64 {
i, err := strconv.ParseInt(param, 0, 64)
panicIf(err)
return i
}
// asUint returns the parameter as a uint64
// or panics if it can't convert
func asUint(param string) uint64 {
i, err := strconv.ParseUint(param, 0, 64)
panicIf(err)
return i
}
// asFloat returns the parameter as a float64
// or panics if it can't convert
func asFloat(param string) float64 {
i, err := strconv.ParseFloat(param, 64)
panicIf(err)
return i
}
// asBool returns the parameter as a bool
// or panics if it can't convert
func asBool(param string) bool {
i, err := strconv.ParseBool(param)
panicIf(err)
return i
}
func panicIf(err error) {
if err != nil {
panic(err.Error())
}
}

View File

@@ -1,477 +0,0 @@
package validator
import (
"context"
"fmt"
"reflect"
"strconv"
)
// per validate construct
type validate struct {
v *Validate
top reflect.Value
ns []byte
actualNs []byte
errs ValidationErrors
includeExclude map[string]struct{} // reset only if StructPartial or StructExcept are called, no need otherwise
ffn FilterFunc
slflParent reflect.Value // StructLevel & FieldLevel
slCurrent reflect.Value // StructLevel & FieldLevel
flField reflect.Value // StructLevel & FieldLevel
cf *cField // StructLevel & FieldLevel
ct *cTag // StructLevel & FieldLevel
misc []byte // misc reusable
str1 string // misc reusable
str2 string // misc reusable
fldIsPointer bool // StructLevel & FieldLevel
isPartial bool
hasExcludes bool
}
// parent and current will be the same the first run of validateStruct
func (v *validate) validateStruct(ctx context.Context, parent reflect.Value, current reflect.Value, typ reflect.Type, ns []byte, structNs []byte, ct *cTag) {
cs, ok := v.v.structCache.Get(typ)
if !ok {
cs = v.v.extractStructCache(current, typ.Name())
}
if len(ns) == 0 && len(cs.name) != 0 {
ns = append(ns, cs.name...)
ns = append(ns, '.')
structNs = append(structNs, cs.name...)
structNs = append(structNs, '.')
}
// ct is nil on top level struct, and structs as fields that have no tag info
// so if nil or if not nil and the structonly tag isn't present
if ct == nil || ct.typeof != typeStructOnly {
var f *cField
for i := 0; i < len(cs.fields); i++ {
f = cs.fields[i]
if v.isPartial {
if v.ffn != nil {
// used with StructFiltered
if v.ffn(append(structNs, f.name...)) {
continue
}
} else {
// used with StructPartial & StructExcept
_, ok = v.includeExclude[string(append(structNs, f.name...))]
if (ok && v.hasExcludes) || (!ok && !v.hasExcludes) {
continue
}
}
}
v.traverseField(ctx, parent, current.Field(f.idx), ns, structNs, f, f.cTags)
}
}
// check if any struct level validations, after all field validations already checked.
// first iteration will have no info about nostructlevel tag, and is checked prior to
// calling the next iteration of validateStruct called from traverseField.
if cs.fn != nil {
v.slflParent = parent
v.slCurrent = current
v.ns = ns
v.actualNs = structNs
cs.fn(ctx, v)
}
}
// traverseField validates any field, be it a struct or single field, ensures its validity and passes it along to be validated via its tag options
func (v *validate) traverseField(ctx context.Context, parent reflect.Value, current reflect.Value, ns []byte, structNs []byte, cf *cField, ct *cTag) {
var typ reflect.Type
var kind reflect.Kind
current, kind, v.fldIsPointer = v.extractTypeInternal(current, false)
switch kind {
case reflect.Ptr, reflect.Interface, reflect.Invalid:
if ct == nil {
return
}
if ct.typeof == typeOmitEmpty || ct.typeof == typeIsDefault {
return
}
if ct.hasTag {
if kind == reflect.Invalid {
v.str1 = string(append(ns, cf.altName...))
if v.v.hasTagNameFunc {
v.str2 = string(append(structNs, cf.name...))
} else {
v.str2 = v.str1
}
v.errs = append(v.errs,
&fieldError{
v: v.v,
tag: ct.aliasTag,
actualTag: ct.tag,
ns: v.str1,
structNs: v.str2,
fieldLen: uint8(len(cf.altName)),
structfieldLen: uint8(len(cf.name)),
param: ct.param,
kind: kind,
},
)
return
}
v.str1 = string(append(ns, cf.altName...))
if v.v.hasTagNameFunc {
v.str2 = string(append(structNs, cf.name...))
} else {
v.str2 = v.str1
}
if !ct.runValidationWhenNil {
v.errs = append(v.errs,
&fieldError{
v: v.v,
tag: ct.aliasTag,
actualTag: ct.tag,
ns: v.str1,
structNs: v.str2,
fieldLen: uint8(len(cf.altName)),
structfieldLen: uint8(len(cf.name)),
value: current.Interface(),
param: ct.param,
kind: kind,
typ: current.Type(),
},
)
return
}
}
case reflect.Struct:
typ = current.Type()
if typ != timeType {
if ct != nil {
if ct.typeof == typeStructOnly {
goto CONTINUE
} else if ct.typeof == typeIsDefault {
// set Field Level fields
v.slflParent = parent
v.flField = current
v.cf = cf
v.ct = ct
if !ct.fn(ctx, v) {
v.str1 = string(append(ns, cf.altName...))
if v.v.hasTagNameFunc {
v.str2 = string(append(structNs, cf.name...))
} else {
v.str2 = v.str1
}
v.errs = append(v.errs,
&fieldError{
v: v.v,
tag: ct.aliasTag,
actualTag: ct.tag,
ns: v.str1,
structNs: v.str2,
fieldLen: uint8(len(cf.altName)),
structfieldLen: uint8(len(cf.name)),
value: current.Interface(),
param: ct.param,
kind: kind,
typ: typ,
},
)
return
}
}
ct = ct.next
}
if ct != nil && ct.typeof == typeNoStructLevel {
return
}
CONTINUE:
// if len == 0 then validating using 'Var' or 'VarWithValue'
// Var - doesn't make much sense to do it that way, should call 'Struct', but no harm...
// VarWithField - this allows for validating against each field within the struct against a specific value
// pretty handy in certain situations
if len(cf.name) > 0 {
ns = append(append(ns, cf.altName...), '.')
structNs = append(append(structNs, cf.name...), '.')
}
v.validateStruct(ctx, current, current, typ, ns, structNs, ct)
return
}
}
if !ct.hasTag {
return
}
typ = current.Type()
OUTER:
for {
if ct == nil {
return
}
switch ct.typeof {
case typeOmitEmpty:
// set Field Level fields
v.slflParent = parent
v.flField = current
v.cf = cf
v.ct = ct
if !v.fldIsPointer && !hasValue(v) {
return
}
ct = ct.next
continue
case typeEndKeys:
return
case typeDive:
ct = ct.next
// traverse slice or map here
// or panic ;)
switch kind {
case reflect.Slice, reflect.Array:
var i64 int64
reusableCF := &cField{}
for i := 0; i < current.Len(); i++ {
i64 = int64(i)
v.misc = append(v.misc[0:0], cf.name...)
v.misc = append(v.misc, '[')
v.misc = strconv.AppendInt(v.misc, i64, 10)
v.misc = append(v.misc, ']')
reusableCF.name = string(v.misc)
if cf.namesEqual {
reusableCF.altName = reusableCF.name
} else {
v.misc = append(v.misc[0:0], cf.altName...)
v.misc = append(v.misc, '[')
v.misc = strconv.AppendInt(v.misc, i64, 10)
v.misc = append(v.misc, ']')
reusableCF.altName = string(v.misc)
}
v.traverseField(ctx, parent, current.Index(i), ns, structNs, reusableCF, ct)
}
case reflect.Map:
var pv string
reusableCF := &cField{}
for _, key := range current.MapKeys() {
pv = fmt.Sprintf("%v", key.Interface())
v.misc = append(v.misc[0:0], cf.name...)
v.misc = append(v.misc, '[')
v.misc = append(v.misc, pv...)
v.misc = append(v.misc, ']')
reusableCF.name = string(v.misc)
if cf.namesEqual {
reusableCF.altName = reusableCF.name
} else {
v.misc = append(v.misc[0:0], cf.altName...)
v.misc = append(v.misc, '[')
v.misc = append(v.misc, pv...)
v.misc = append(v.misc, ']')
reusableCF.altName = string(v.misc)
}
if ct != nil && ct.typeof == typeKeys && ct.keys != nil {
v.traverseField(ctx, parent, key, ns, structNs, reusableCF, ct.keys)
// can be nil when just keys being validated
if ct.next != nil {
v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct.next)
}
} else {
v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct)
}
}
default:
// throw error, if not a slice or map then should not have gotten here
// bad dive tag
panic("dive error! can't dive on a non slice or map")
}
return
case typeOr:
v.misc = v.misc[0:0]
for {
// set Field Level fields
v.slflParent = parent
v.flField = current
v.cf = cf
v.ct = ct
if ct.fn(ctx, v) {
// drain rest of the 'or' values, then continue or leave
for {
ct = ct.next
if ct == nil {
return
}
if ct.typeof != typeOr {
continue OUTER
}
}
}
v.misc = append(v.misc, '|')
v.misc = append(v.misc, ct.tag...)
if ct.hasParam {
v.misc = append(v.misc, '=')
v.misc = append(v.misc, ct.param...)
}
if ct.isBlockEnd || ct.next == nil {
// if we get here, no valid 'or' value and no more tags
v.str1 = string(append(ns, cf.altName...))
if v.v.hasTagNameFunc {
v.str2 = string(append(structNs, cf.name...))
} else {
v.str2 = v.str1
}
if ct.hasAlias {
v.errs = append(v.errs,
&fieldError{
v: v.v,
tag: ct.aliasTag,
actualTag: ct.actualAliasTag,
ns: v.str1,
structNs: v.str2,
fieldLen: uint8(len(cf.altName)),
structfieldLen: uint8(len(cf.name)),
value: current.Interface(),
param: ct.param,
kind: kind,
typ: typ,
},
)
} else {
tVal := string(v.misc)[1:]
v.errs = append(v.errs,
&fieldError{
v: v.v,
tag: tVal,
actualTag: tVal,
ns: v.str1,
structNs: v.str2,
fieldLen: uint8(len(cf.altName)),
structfieldLen: uint8(len(cf.name)),
value: current.Interface(),
param: ct.param,
kind: kind,
typ: typ,
},
)
}
return
}
ct = ct.next
}
default:
// set Field Level fields
v.slflParent = parent
v.flField = current
v.cf = cf
v.ct = ct
if !ct.fn(ctx, v) {
v.str1 = string(append(ns, cf.altName...))
if v.v.hasTagNameFunc {
v.str2 = string(append(structNs, cf.name...))
} else {
v.str2 = v.str1
}
v.errs = append(v.errs,
&fieldError{
v: v.v,
tag: ct.aliasTag,
actualTag: ct.tag,
ns: v.str1,
structNs: v.str2,
fieldLen: uint8(len(cf.altName)),
structfieldLen: uint8(len(cf.name)),
value: current.Interface(),
param: ct.param,
kind: kind,
typ: typ,
},
)
return
}
ct = ct.next
}
}
}

View File

@@ -1,615 +0,0 @@
package validator
import (
"context"
"errors"
"fmt"
"reflect"
"strings"
"sync"
"time"
ut "github.com/go-playground/universal-translator"
)
const (
defaultTagName = "validate"
utf8HexComma = "0x2C"
utf8Pipe = "0x7C"
tagSeparator = ","
orSeparator = "|"
tagKeySeparator = "="
structOnlyTag = "structonly"
noStructLevelTag = "nostructlevel"
omitempty = "omitempty"
isdefault = "isdefault"
requiredWithoutAllTag = "required_without_all"
requiredWithoutTag = "required_without"
requiredWithTag = "required_with"
requiredWithAllTag = "required_with_all"
skipValidationTag = "-"
diveTag = "dive"
keysTag = "keys"
endKeysTag = "endkeys"
requiredTag = "required"
namespaceSeparator = "."
leftBracket = "["
rightBracket = "]"
restrictedTagChars = ".[],|=+()`~!@#$%^&*\\\"/?<>{}"
restrictedAliasErr = "Alias '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation"
restrictedTagErr = "Tag '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation"
)
var (
timeType = reflect.TypeOf(time.Time{})
defaultCField = &cField{namesEqual: true}
)
// FilterFunc is the type used to filter fields using
// StructFiltered(...) function.
// returning true results in the field being filtered/skipped from
// validation
type FilterFunc func(ns []byte) bool
// CustomTypeFunc allows for overriding or adding custom field type handler functions
// field = field value of the type to return a value to be validated
// example: Valuer from the sql driver, see https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29
type CustomTypeFunc func(field reflect.Value) interface{}
// TagNameFunc allows for adding of a custom tag name parser
type TagNameFunc func(field reflect.StructField) string
type internalValidationFuncWrapper struct {
fn FuncCtx
runValidatinOnNil bool
}
// Validate contains the validator settings and cache
type Validate struct {
tagName string
pool *sync.Pool
hasCustomFuncs bool
hasTagNameFunc bool
tagNameFunc TagNameFunc
structLevelFuncs map[reflect.Type]StructLevelFuncCtx
customFuncs map[reflect.Type]CustomTypeFunc
aliases map[string]string
validations map[string]internalValidationFuncWrapper
transTagFunc map[ut.Translator]map[string]TranslationFunc // map[<locale>]map[<tag>]TranslationFunc
tagCache *tagCache
structCache *structCache
}
// New returns a new instance of 'validate' with sane defaults.
func New() *Validate {
tc := new(tagCache)
tc.m.Store(make(map[string]*cTag))
sc := new(structCache)
sc.m.Store(make(map[reflect.Type]*cStruct))
v := &Validate{
tagName: defaultTagName,
aliases: make(map[string]string, len(bakedInAliases)),
validations: make(map[string]internalValidationFuncWrapper, len(bakedInValidators)),
tagCache: tc,
structCache: sc,
}
// must copy alias validators for separate validations to be used in each validator instance
for k, val := range bakedInAliases {
v.RegisterAlias(k, val)
}
// must copy validators for separate validations to be used in each instance
for k, val := range bakedInValidators {
switch k {
// these require that the validation should run even if the value is nil; omitempty still overrides this behaviour
case requiredWithTag, requiredWithAllTag, requiredWithoutTag, requiredWithoutAllTag:
_ = v.registerValidation(k, wrapFunc(val), true, true)
default:
// no need to error check here, baked in will always be valid
_ = v.registerValidation(k, wrapFunc(val), true, false)
}
}
v.pool = &sync.Pool{
New: func() interface{} {
return &validate{
v: v,
ns: make([]byte, 0, 64),
actualNs: make([]byte, 0, 64),
misc: make([]byte, 32),
}
},
}
return v
}
// SetTagName allows for changing of the default tag name of 'validate'
func (v *Validate) SetTagName(name string) {
v.tagName = name
}
// RegisterTagNameFunc registers a function to get alternate names for StructFields.
//
// eg. to use the names which have been specified for JSON representations of structs, rather than normal Go field names:
//
// validate.RegisterTagNameFunc(func(fld reflect.StructField) string {
// name := strings.SplitN(fld.Tag.Get("json"), ",", 2)[0]
// if name == "-" {
// return ""
// }
// return name
// })
func (v *Validate) RegisterTagNameFunc(fn TagNameFunc) {
v.tagNameFunc = fn
v.hasTagNameFunc = true
}
// RegisterValidation adds a validation with the given tag
//
// NOTES:
// - if the key already exists, the previous validation function will be replaced.
// - this method is not thread-safe; it is intended that these all be registered prior to any validation
func (v *Validate) RegisterValidation(tag string, fn Func, callValidationEvenIfNull ...bool) error {
return v.RegisterValidationCtx(tag, wrapFunc(fn), callValidationEvenIfNull...)
}
// RegisterValidationCtx does the same as RegisterValidation but accepts a FuncCtx validation function,
// allowing context.Context validation support.
func (v *Validate) RegisterValidationCtx(tag string, fn FuncCtx, callValidationEvenIfNull ...bool) error {
var nilCheckable bool
if len(callValidationEvenIfNull) > 0 {
nilCheckable = callValidationEvenIfNull[0]
}
return v.registerValidation(tag, fn, false, nilCheckable)
}
func (v *Validate) registerValidation(tag string, fn FuncCtx, bakedIn bool, nilCheckable bool) error {
if len(tag) == 0 {
return errors.New("Function Key cannot be empty")
}
if fn == nil {
return errors.New("Function cannot be empty")
}
_, ok := restrictedTags[tag]
if !bakedIn && (ok || strings.ContainsAny(tag, restrictedTagChars)) {
panic(fmt.Sprintf(restrictedTagErr, tag))
}
v.validations[tag] = internalValidationFuncWrapper{fn: fn, runValidatinOnNil: nilCheckable}
return nil
}
// RegisterAlias registers a mapping of a single validation tag that
// defines a common or complex set of validation(s) to simplify adding validation
// to structs.
//
// NOTE: this function is not thread-safe; it is intended that these all be registered prior to any validation
func (v *Validate) RegisterAlias(alias, tags string) {
_, ok := restrictedTags[alias]
if ok || strings.ContainsAny(alias, restrictedTagChars) {
panic(fmt.Sprintf(restrictedAliasErr, alias))
}
v.aliases[alias] = tags
}
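
A short sketch of how an alias registered this way stands in for its underlying tag list; the 'percent' alias and Stats struct are illustrative, and the example assumes the baked-in 'gte'/'lte' tags:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type Stats struct {
	// 'percent' expands to the alias registered below.
	Completion int `validate:"percent"`
}

func main() {
	v := validator.New()
	// One alias standing in for a common pair of validations.
	v.RegisterAlias("percent", "gte=0,lte=100")

	fmt.Println(v.Struct(Stats{Completion: 42}))  // <nil>
	fmt.Println(v.Struct(Stats{Completion: 150})) // validation error on Completion
}
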
// RegisterStructValidation registers a StructLevelFunc against a number of types.
//
// NOTE:
// - this method is not thread-safe; it is intended that these all be registered prior to any validation
func (v *Validate) RegisterStructValidation(fn StructLevelFunc, types ...interface{}) {
v.RegisterStructValidationCtx(wrapStructLevelFunc(fn), types...)
}
// RegisterStructValidationCtx registers a StructLevelFuncCtx against a number of types and allows passing
// of contextual validation information via context.Context.
//
// NOTE:
// - this method is not thread-safe; it is intended that these all be registered prior to any validation
func (v *Validate) RegisterStructValidationCtx(fn StructLevelFuncCtx, types ...interface{}) {
if v.structLevelFuncs == nil {
v.structLevelFuncs = make(map[reflect.Type]StructLevelFuncCtx)
}
for _, t := range types {
tv := reflect.ValueOf(t)
if tv.Kind() == reflect.Ptr {
t = reflect.Indirect(tv).Interface()
}
v.structLevelFuncs[reflect.TypeOf(t)] = fn
}
}
// RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types
//
// NOTE: this method is not thread-safe; it is intended that these all be registered prior to any validation
func (v *Validate) RegisterCustomTypeFunc(fn CustomTypeFunc, types ...interface{}) {
if v.customFuncs == nil {
v.customFuncs = make(map[reflect.Type]CustomTypeFunc)
}
for _, t := range types {
v.customFuncs[reflect.TypeOf(t)] = fn
}
v.hasCustomFuncs = true
}
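
A minimal sketch following the driver.Valuer example mentioned in the CustomTypeFunc comment above, so that sql.Null* types validate against their underlying values; the DBUser struct is illustrative:

package main

import (
	"database/sql"
	"database/sql/driver"
	"fmt"
	"reflect"

	"github.com/go-playground/validator/v10"
)

// validateValuer unwraps any driver.Valuer (sql.NullString, sql.NullInt64, ...)
// so the registered validations run against the underlying value.
func validateValuer(field reflect.Value) interface{} {
	if valuer, ok := field.Interface().(driver.Valuer); ok {
		if val, err := valuer.Value(); err == nil {
			return val
		}
	}
	return nil
}

type DBUser struct {
	Name sql.NullString `validate:"required"`
}

func main() {
	v := validator.New()
	v.RegisterCustomTypeFunc(validateValuer, sql.NullString{}, sql.NullInt64{})

	fmt.Println(v.Struct(DBUser{Name: sql.NullString{String: "ada", Valid: true}})) // <nil>
	fmt.Println(v.Struct(DBUser{}))                                                 // 'required' fails on Name
}
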
// RegisterTranslation registers translations against the provided tag.
func (v *Validate) RegisterTranslation(tag string, trans ut.Translator, registerFn RegisterTranslationsFunc, translationFn TranslationFunc) (err error) {
if v.transTagFunc == nil {
v.transTagFunc = make(map[ut.Translator]map[string]TranslationFunc)
}
if err = registerFn(trans); err != nil {
return
}
m, ok := v.transTagFunc[trans]
if !ok {
m = make(map[string]TranslationFunc)
v.transTagFunc[trans] = m
}
m[tag] = translationFn
return
}
// Struct validates a struct's exposed fields, and automatically validates nested structs, unless otherwise specified.
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
func (v *Validate) Struct(s interface{}) error {
return v.StructCtx(context.Background(), s)
}
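
As the comment above says, the error usually needs a type assertion before the individual field failures can be inspected. A minimal runnable sketch; the Address struct is illustrative, and 'required' is one of the tag constants listed above:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type Address struct {
	Street string `validate:"required"`
	City   string `validate:"required"`
}

func main() {
	v := validator.New()

	err := v.Struct(Address{Street: "12 Main St"}) // City deliberately left empty
	if err != nil {
		// Bad input (e.g. passing a non-struct) comes back as *InvalidValidationError.
		if _, ok := err.(*validator.InvalidValidationError); ok {
			fmt.Println("bad value passed to Struct:", err)
			return
		}
		// Otherwise it is a ValidationErrors slice; each entry describes one failed field,
		// roughly: Key: 'Address.City' Error:Field validation for 'City' failed on the 'required' tag
		for _, fe := range err.(validator.ValidationErrors) {
			fmt.Println(fe)
		}
	}
}
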
// StructCtx validates a struct's exposed fields, and automatically validates nested structs, unless otherwise specified
// and also allows passing of context.Context for contextual validation information.
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
func (v *Validate) StructCtx(ctx context.Context, s interface{}) (err error) {
val := reflect.ValueOf(s)
top := val
if val.Kind() == reflect.Ptr && !val.IsNil() {
val = val.Elem()
}
if val.Kind() != reflect.Struct || val.Type() == timeType {
return &InvalidValidationError{Type: reflect.TypeOf(s)}
}
// good to validate
vd := v.pool.Get().(*validate)
vd.top = top
vd.isPartial = false
// vd.hasExcludes = false // only need to reset in StructPartial and StructExcept
vd.validateStruct(ctx, top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil)
if len(vd.errs) > 0 {
err = vd.errs
vd.errs = nil
}
v.pool.Put(vd)
return
}
// StructFiltered validates a struct's exposed fields that pass the FilterFunc check and automatically validates
// nested structs, unless otherwise specified.
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
func (v *Validate) StructFiltered(s interface{}, fn FilterFunc) error {
return v.StructFilteredCtx(context.Background(), s, fn)
}
// StructFilteredCtx validates a struct's exposed fields that pass the FilterFunc check and automatically validates
// nested structs, unless otherwise specified and also allows passing of contextual validation information via
// context.Context
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
func (v *Validate) StructFilteredCtx(ctx context.Context, s interface{}, fn FilterFunc) (err error) {
val := reflect.ValueOf(s)
top := val
if val.Kind() == reflect.Ptr && !val.IsNil() {
val = val.Elem()
}
if val.Kind() != reflect.Struct || val.Type() == timeType {
return &InvalidValidationError{Type: reflect.TypeOf(s)}
}
// good to validate
vd := v.pool.Get().(*validate)
vd.top = top
vd.isPartial = true
vd.ffn = fn
// vd.hasExcludes = false // only need to reset in StructPartial and StructExcept
vd.validateStruct(ctx, top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil)
if len(vd.errs) > 0 {
err = vd.errs
vd.errs = nil
}
v.pool.Put(vd)
return
}
// StructPartial validates the fields passed in only, ignoring all others.
// Fields may be provided in a namespaced fashion relative to the struct provided
// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
func (v *Validate) StructPartial(s interface{}, fields ...string) error {
return v.StructPartialCtx(context.Background(), s, fields...)
}
// StructPartialCtx validates the fields passed in only, ignoring all others and allows passing of contextual
// validation information via context.Context
// Fields may be provided in a namespaced fashion relative to the struct provided
// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
func (v *Validate) StructPartialCtx(ctx context.Context, s interface{}, fields ...string) (err error) {
val := reflect.ValueOf(s)
top := val
if val.Kind() == reflect.Ptr && !val.IsNil() {
val = val.Elem()
}
if val.Kind() != reflect.Struct || val.Type() == timeType {
return &InvalidValidationError{Type: reflect.TypeOf(s)}
}
// good to validate
vd := v.pool.Get().(*validate)
vd.top = top
vd.isPartial = true
vd.ffn = nil
vd.hasExcludes = false
vd.includeExclude = make(map[string]struct{})
typ := val.Type()
name := typ.Name()
for _, k := range fields {
flds := strings.Split(k, namespaceSeparator)
if len(flds) > 0 {
vd.misc = append(vd.misc[0:0], name...)
vd.misc = append(vd.misc, '.')
for _, s := range flds {
idx := strings.Index(s, leftBracket)
if idx != -1 {
for idx != -1 {
vd.misc = append(vd.misc, s[:idx]...)
vd.includeExclude[string(vd.misc)] = struct{}{}
idx2 := strings.Index(s, rightBracket)
idx2++
vd.misc = append(vd.misc, s[idx:idx2]...)
vd.includeExclude[string(vd.misc)] = struct{}{}
s = s[idx2:]
idx = strings.Index(s, leftBracket)
}
} else {
vd.misc = append(vd.misc, s...)
vd.includeExclude[string(vd.misc)] = struct{}{}
}
vd.misc = append(vd.misc, '.')
}
}
}
vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil)
if len(vd.errs) > 0 {
err = vd.errs
vd.errs = nil
}
v.pool.Put(vd)
return
}
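
A short sketch of the namespaced-field behaviour described above, together with its mirror image StructExcept; the Profile struct is illustrative:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type Profile struct {
	Name  string `validate:"required"`
	Email string `validate:"required"`
}

func main() {
	v := validator.New()
	p := Profile{Name: "Ada"} // Email deliberately left empty

	// Only the listed fields are validated, so the missing Email is ignored here...
	fmt.Println(v.StructPartial(p, "Name")) // <nil>

	// ...while StructExcept validates everything except the listed fields.
	fmt.Println(v.StructExcept(p, "Name")) // 'required' fails on Email
}
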
// StructExcept validates all fields except the ones passed in.
// Fields may be provided in a namespaced fashion relative to the struct provided
// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
func (v *Validate) StructExcept(s interface{}, fields ...string) error {
return v.StructExceptCtx(context.Background(), s, fields...)
}
// StructExceptCtx validates all fields except the ones passed in and allows passing of contextual
// validation information via context.Context
// Fields may be provided in a namespaced fashion relative to the struct provided
// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
func (v *Validate) StructExceptCtx(ctx context.Context, s interface{}, fields ...string) (err error) {
val := reflect.ValueOf(s)
top := val
if val.Kind() == reflect.Ptr && !val.IsNil() {
val = val.Elem()
}
if val.Kind() != reflect.Struct || val.Type() == timeType {
return &InvalidValidationError{Type: reflect.TypeOf(s)}
}
// good to validate
vd := v.pool.Get().(*validate)
vd.top = top
vd.isPartial = true
vd.ffn = nil
vd.hasExcludes = true
vd.includeExclude = make(map[string]struct{})
typ := val.Type()
name := typ.Name()
for _, key := range fields {
vd.misc = vd.misc[0:0]
if len(name) > 0 {
vd.misc = append(vd.misc, name...)
vd.misc = append(vd.misc, '.')
}
vd.misc = append(vd.misc, key...)
vd.includeExclude[string(vd.misc)] = struct{}{}
}
vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil)
if len(vd.errs) > 0 {
err = vd.errs
vd.errs = nil
}
v.pool.Put(vd)
return
}
// Var validates a single variable using tag style validation.
// eg.
// var i int
// validate.Var(i, "gt=1,lt=10")
//
// WARNING: a struct can be passed for validation eg. time.Time is a struct or
// if you have a custom type with a registered custom type handler, so it must be
// allowed; however unforeseen validations will occur if trying to validate a
// struct that is meant to be passed to 'validate.Struct'
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
// validates Array, Slice and Map fields, which may contain more than one error
func (v *Validate) Var(field interface{}, tag string) error {
return v.VarCtx(context.Background(), field, tag)
}
// VarCtx validates a single variable using tag style validation and allows passing of contextual
// validation information via context.Context.
// eg.
// var i int
// validate.Var(i, "gt=1,lt=10")
//
// WARNING: a struct can be passed for validation eg. time.Time is a struct or
// if you have a custom type with a registered custom type handler, so it must be
// allowed; however unforeseen validations will occur if trying to validate a
// struct that is meant to be passed to 'validate.Struct'
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
// validates Array, Slice and Map fields, which may contain more than one error
func (v *Validate) VarCtx(ctx context.Context, field interface{}, tag string) (err error) {
if len(tag) == 0 || tag == skipValidationTag {
return nil
}
ctag := v.fetchCacheTag(tag)
val := reflect.ValueOf(field)
vd := v.pool.Get().(*validate)
vd.top = val
vd.isPartial = false
vd.traverseField(ctx, val, val, vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag)
if len(vd.errs) > 0 {
err = vd.errs
vd.errs = nil
}
v.pool.Put(vd)
return
}
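
A runnable version of the one-liner from the comment above, with the error actually handled; the values are illustrative:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

func main() {
	v := validator.New()

	i := 5
	fmt.Println(v.Var(i, "gt=1,lt=10")) // <nil>

	i = 42
	if err := v.Var(i, "gt=1,lt=10"); err != nil {
		// A plain variable has no struct namespace, so the failure is
		// reported against an empty field name.
		fmt.Println(err)
	}
}
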
// VarWithValue validates a single variable, against another variable/field's value using tag style validation
// eg.
// s1 := "abcd"
// s2 := "abcd"
// validate.VarWithValue(s1, s2, "eqcsfield") // returns true
//
// WARNING: a struct can be passed for validation eg. time.Time is a struct or
// if you have a custom type with a registered custom type handler, so it must be
// allowed; however unforeseen validations will occur if trying to validate a
// struct that is meant to be passed to 'validate.Struct'
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
// validates Array, Slice and Map fields, which may contain more than one error
func (v *Validate) VarWithValue(field interface{}, other interface{}, tag string) error {
return v.VarWithValueCtx(context.Background(), field, other, tag)
}
// VarWithValueCtx validates a single variable, against another variable/field's value using tag style validation and
// allows passing of contextual validation information via context.Context.
// eg.
// s1 := "abcd"
// s2 := "abcd"
// validate.VarWithValue(s1, s2, "eqcsfield") // returns true
//
// WARNING: a struct can be passed for validation eg. time.Time is a struct or
// if you have a custom type with a registered custom type handler, so it must be
// allowed; however unforeseen validations will occur if trying to validate a
// struct that is meant to be passed to 'validate.Struct'
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
// validates Array, Slice and Map fields, which may contain more than one error
func (v *Validate) VarWithValueCtx(ctx context.Context, field interface{}, other interface{}, tag string) (err error) {
if len(tag) == 0 || tag == skipValidationTag {
return nil
}
ctag := v.fetchCacheTag(tag)
otherVal := reflect.ValueOf(other)
vd := v.pool.Get().(*validate)
vd.top = otherVal
vd.isPartial = false
vd.traverseField(ctx, otherVal, reflect.ValueOf(field), vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag)
if len(vd.errs) > 0 {
err = vd.errs
vd.errs = nil
}
v.pool.Put(vd)
return
}

View File

@@ -1,9 +0,0 @@
language: go
go:
- 1.4.3
- 1.5.3
- tip
script:
- go test -v ./...

View File

@@ -1,10 +0,0 @@
# How to contribute
We definitely welcome patches and contributions to this project!
### Legal requirements
In order to protect both you and ourselves, you will need to sign the
[Contributor License Agreement](https://cla.developers.google.com/clas).
You may have already signed it for other Google projects.

View File

@@ -1,9 +0,0 @@
Paul Borman <borman@google.com>
bmatsuo
shawnps
theory
jboverfelt
dsymonds
cd1
wallclockbuilder
dansouza

View File

@@ -1,27 +0,0 @@
Copyright (c) 2009,2014 Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,19 +0,0 @@
# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
The uuid package generates and inspects UUIDs based on
[RFC 4122](http://tools.ietf.org/html/rfc4122)
and DCE 1.1: Authentication and Security Services.
This package is based on the github.com/pborman/uuid package (previously named
code.google.com/p/go-uuid). It differs from these earlier packages in that
a UUID is a 16 byte array rather than a byte slice. One loss due to this
change is the ability to represent an invalid UUID (vs a NIL UUID).
###### Install
`go get github.com/google/uuid`
###### Documentation
[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here:
http://godoc.org/github.com/google/uuid

80
vendor/github.com/google/uuid/dce.go generated vendored
View File

@@ -1,80 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
"fmt"
"os"
)
// A Domain represents a Version 2 domain
type Domain byte
// Domain constants for DCE Security (Version 2) UUIDs.
const (
Person = Domain(0)
Group = Domain(1)
Org = Domain(2)
)
// NewDCESecurity returns a DCE Security (Version 2) UUID.
//
// The domain should be one of Person, Group or Org.
// On a POSIX system the id should be the user's UID for the Person
// domain and the user's GID for the Group. The meaning of id for
// the domain Org or on non-POSIX systems is site defined.
//
// For a given domain/id pair the same token may be returned for up to
// 7 minutes and 10 seconds.
func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
uuid, err := NewUUID()
if err == nil {
uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
uuid[9] = byte(domain)
binary.BigEndian.PutUint32(uuid[0:], id)
}
return uuid, err
}
// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
// domain with the id returned by os.Getuid.
//
// NewDCESecurity(Person, uint32(os.Getuid()))
func NewDCEPerson() (UUID, error) {
return NewDCESecurity(Person, uint32(os.Getuid()))
}
// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
// domain with the id returned by os.Getgid.
//
// NewDCESecurity(Group, uint32(os.Getgid()))
func NewDCEGroup() (UUID, error) {
return NewDCESecurity(Group, uint32(os.Getgid()))
}
// Domain returns the domain for a Version 2 UUID. Domains are only defined
// for Version 2 UUIDs.
func (uuid UUID) Domain() Domain {
return Domain(uuid[9])
}
// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
// UUIDs.
func (uuid UUID) ID() uint32 {
return binary.BigEndian.Uint32(uuid[0:4])
}
func (d Domain) String() string {
switch d {
case Person:
return "Person"
case Group:
return "Group"
case Org:
return "Org"
}
return fmt.Sprintf("Domain%d", int(d))
}
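For context, a small sketch (not part of the diff) of the DCE Security helpers documented above; the printed values depend on the UID/GID of the running process:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// NewDCEPerson wraps NewDCESecurity(Person, uint32(os.Getuid())).
	person, err := uuid.NewDCEPerson()
	if err != nil {
		panic(err)
	}
	fmt.Println(person, person.Domain(), person.ID())

	// An explicit domain/id pair works the same way.
	custom, err := uuid.NewDCESecurity(uuid.Org, 42)
	if err != nil {
		panic(err)
	}
	fmt.Println(custom, custom.Domain(), custom.ID()) // ends with "Org 42"
}
```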

12
vendor/github.com/google/uuid/doc.go generated vendored

@@ -1,12 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package uuid generates and inspects UUIDs.
//
// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
// Services.
//
// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
// maps or compared directly.
package uuid

1
vendor/github.com/google/uuid/go.mod vendored

@@ -1 +0,0 @@
module github.com/google/uuid

53
vendor/github.com/google/uuid/hash.go generated vendored

@@ -1,53 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"crypto/md5"
"crypto/sha1"
"hash"
)
// Well known namespace IDs and UUIDs
var (
NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
Nil UUID // empty UUID, all zeros
)
// NewHash returns a new UUID derived from the hash of space concatenated with
// data generated by h. The hash should be at least 16 bytes in length. The
// first 16 bytes of the hash are used to form the UUID. The version of the
// UUID will be the lower 4 bits of version. NewHash is used to implement
// NewMD5 and NewSHA1.
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
h.Reset()
h.Write(space[:])
h.Write(data)
s := h.Sum(nil)
var uuid UUID
copy(uuid[:], s)
uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
return uuid
}
// NewMD5 returns a new MD5 (Version 3) UUID based on the
// supplied name space and data. It is the same as calling:
//
// NewHash(md5.New(), space, data, 3)
func NewMD5(space UUID, data []byte) UUID {
return NewHash(md5.New(), space, data, 3)
}
// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
// supplied name space and data. It is the same as calling:
//
// NewHash(sha1.New(), space, data, 5)
func NewSHA1(space UUID, data []byte) UUID {
return NewHash(sha1.New(), space, data, 5)
}
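A brief sketch (not part of this diff) of the name-based constructors above; the same namespace/name pair always produces the same UUID, which is what makes them useful as stable identifiers:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Version 3 (MD5) and Version 5 (SHA1) UUIDs are derived purely from
	// the namespace UUID plus the name bytes, so they are deterministic.
	v3 := uuid.NewMD5(uuid.NameSpaceDNS, []byte("dbhub.io"))
	v5 := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("dbhub.io"))

	fmt.Println(v3.Version(), v3) // identical output on every run
	fmt.Println(v5.Version(), v5)
}
```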

37
vendor/github.com/google/uuid/marshal.go generated vendored

@@ -1,37 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "fmt"
// MarshalText implements encoding.TextMarshaler.
func (uuid UUID) MarshalText() ([]byte, error) {
var js [36]byte
encodeHex(js[:], uuid)
return js[:], nil
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (uuid *UUID) UnmarshalText(data []byte) error {
id, err := ParseBytes(data)
if err == nil {
*uuid = id
}
return err
}
// MarshalBinary implements encoding.BinaryMarshaler.
func (uuid UUID) MarshalBinary() ([]byte, error) {
return uuid[:], nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (uuid *UUID) UnmarshalBinary(data []byte) error {
if len(data) != 16 {
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
}
copy(uuid[:], data)
return nil
}
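Because UUID implements encoding.TextMarshaler and TextUnmarshaler (above), it round-trips cleanly through encoding/json. A short illustrative sketch, separate from the diff:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

type record struct {
	ID uuid.UUID `json:"id"` // marshals as the canonical 36-character string
}

func main() {
	out, err := json.Marshal(record{ID: uuid.New()})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // e.g. {"id":"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"}

	var in record
	if err := json.Unmarshal(out, &in); err != nil {
		panic(err)
	}
	fmt.Println(in.ID) // the same value, recovered from the JSON string
}
```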

90
vendor/github.com/google/uuid/node.go generated vendored

@@ -1,90 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"sync"
)
var (
nodeMu sync.Mutex
ifname string // name of interface being used
nodeID [6]byte // hardware for version 1 UUIDs
zeroID [6]byte // nodeID with only 0's
)
// NodeInterface returns the name of the interface from which the NodeID was
// derived. The interface "user" is returned if the NodeID was set by
// SetNodeID.
func NodeInterface() string {
defer nodeMu.Unlock()
nodeMu.Lock()
return ifname
}
// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
// If name is "" then the first usable interface found will be used or a random
// Node ID will be generated. If a named interface cannot be found then false
// is returned.
//
// SetNodeInterface never fails when name is "".
func SetNodeInterface(name string) bool {
defer nodeMu.Unlock()
nodeMu.Lock()
return setNodeInterface(name)
}
func setNodeInterface(name string) bool {
iname, addr := getHardwareInterface(name) // null implementation for js
if iname != "" && addr != nil {
ifname = iname
copy(nodeID[:], addr)
return true
}
// We found no interfaces with a valid hardware address. If name
// does not specify a specific interface generate a random Node ID
// (section 4.1.6)
if name == "" {
ifname = "random"
randomBits(nodeID[:])
return true
}
return false
}
// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
// if not already set.
func NodeID() []byte {
defer nodeMu.Unlock()
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
nid := nodeID
return nid[:]
}
// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
// of id are used. If id is less than 6 bytes then false is returned and the
// Node ID is not set.
func SetNodeID(id []byte) bool {
if len(id) < 6 {
return false
}
defer nodeMu.Unlock()
nodeMu.Lock()
copy(nodeID[:], id)
ifname = "user"
return true
}
// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
func (uuid UUID) NodeID() []byte {
var node [6]byte
copy(node[:], uuid[10:])
return node[:]
}
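A sketch (not part of this diff) of pinning the node ID used for Version 1 UUIDs; the six example bytes are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// SetNodeID copies the first six bytes and reports false if fewer
	// than six are supplied; afterwards NodeInterface reports "user".
	if !uuid.SetNodeID([]byte{0x02, 0x42, 0xac, 0x11, 0x00, 0x02}) {
		panic("node ID too short")
	}
	fmt.Println(uuid.NodeInterface())

	v1, err := uuid.NewUUID() // Version 1: time + clock sequence + node ID
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", v1.NodeID()) // echoes the six bytes set above
}
```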

12
vendor/github.com/google/uuid/node_js.go generated vendored

@@ -1,12 +0,0 @@
// Copyright 2017 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build js
package uuid
// getHardwareInterface returns nil values for the JS version of the code.
// This removes the "net" dependency, because it is not used in the browser.
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
func getHardwareInterface(name string) (string, []byte) { return "", nil }

33
vendor/github.com/google/uuid/node_net.go generated vendored

@@ -1,33 +0,0 @@
// Copyright 2017 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !js
package uuid
import "net"
var interfaces []net.Interface // cached list of interfaces
// getHardwareInterface returns the name and hardware address of interface name.
// If name is "" then the name and hardware address of one of the system's
// interfaces is returned. If no interfaces are found (name does not exist or
// there are no interfaces) then "", nil is returned.
//
// Only addresses of at least 6 bytes are returned.
func getHardwareInterface(name string) (string, []byte) {
if interfaces == nil {
var err error
interfaces, err = net.Interfaces()
if err != nil {
return "", nil
}
}
for _, ifs := range interfaces {
if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
return ifs.Name, ifs.HardwareAddr
}
}
return "", nil
}

59
vendor/github.com/google/uuid/sql.go generated vendored

@@ -1,59 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"database/sql/driver"
"fmt"
)
// Scan implements sql.Scanner so UUIDs can be read from databases transparently
// Currently, database types that map to string and []byte are supported. Please
// consult database-specific driver documentation for matching types.
func (uuid *UUID) Scan(src interface{}) error {
switch src := src.(type) {
case nil:
return nil
case string:
// if an empty UUID comes from a table, we return a null UUID
if src == "" {
return nil
}
// see Parse for required string format
u, err := Parse(src)
if err != nil {
return fmt.Errorf("Scan: %v", err)
}
*uuid = u
case []byte:
// if an empty UUID comes from a table, we return a null UUID
if len(src) == 0 {
return nil
}
// assumes a simple slice of bytes if 16 bytes
// otherwise attempts to parse
if len(src) != 16 {
return uuid.Scan(string(src))
}
copy((*uuid)[:], src)
default:
return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
}
return nil
}
// Value implements sql.Valuer so that UUIDs can be written to databases
// transparently. Currently, UUIDs map to strings. Please consult
// database-specific driver documentation for matching types.
func (uuid UUID) Value() (driver.Value, error) {
return uuid.String(), nil
}
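To illustrate the Scanner/Valuer pair above, a hypothetical sketch of passing UUIDs straight through database/sql; the table and column names are invented and the `$1` placeholder syntax is driver-dependent:

```go
package main

import (
	"database/sql"

	"github.com/google/uuid"
)

// insertRecord relies on uuid.UUID's Value(): the UUID is written to the
// database as its canonical string form.
func insertRecord(db *sql.DB, owner string) error {
	_, err := db.Exec("INSERT INTO databases (id, owner) VALUES ($1, $2)", uuid.New(), owner)
	return err
}

// lookupID relies on (*uuid.UUID).Scan(): string or []byte columns are
// parsed into the UUID in place.
func lookupID(db *sql.DB, owner string) (uuid.UUID, error) {
	var id uuid.UUID
	err := db.QueryRow("SELECT id FROM databases WHERE owner = $1", owner).Scan(&id)
	return id, err
}

func main() {}
```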

123
vendor/github.com/google/uuid/time.go generated vendored

@@ -1,123 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
"sync"
"time"
)
// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
// 1582.
type Time int64
const (
lillian = 2299160 // Julian day of 15 Oct 1582
unix = 2440587 // Julian day of 1 Jan 1970
epoch = unix - lillian // Days between epochs
g1582 = epoch * 86400 // seconds between epochs
g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
)
var (
timeMu sync.Mutex
lasttime uint64 // last time we returned
clockSeq uint16 // clock sequence for this run
timeNow = time.Now // for testing
)
// UnixTime converts t to the number of seconds and nanoseconds using the Unix
// epoch of 1 Jan 1970.
func (t Time) UnixTime() (sec, nsec int64) {
sec = int64(t - g1582ns100)
nsec = (sec % 10000000) * 100
sec /= 10000000
return sec, nsec
}
// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
// clock sequence as well as adjusting the clock sequence as needed. An error
// is returned if the current time cannot be determined.
func GetTime() (Time, uint16, error) {
defer timeMu.Unlock()
timeMu.Lock()
return getTime()
}
func getTime() (Time, uint16, error) {
t := timeNow()
// If we don't have a clock sequence already, set one.
if clockSeq == 0 {
setClockSequence(-1)
}
now := uint64(t.UnixNano()/100) + g1582ns100
// If time has gone backwards with this clock sequence then we
// increment the clock sequence
if now <= lasttime {
clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
}
lasttime = now
return Time(now), clockSeq, nil
}
// ClockSequence returns the current clock sequence, generating one if not
// already set. The clock sequence is only used for Version 1 UUIDs.
//
// The uuid package does not use global static storage for the clock sequence or
// the last time a UUID was generated. Unless SetClockSequence is used, a new
// random clock sequence is generated the first time a clock sequence is
// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
func ClockSequence() int {
defer timeMu.Unlock()
timeMu.Lock()
return clockSequence()
}
func clockSequence() int {
if clockSeq == 0 {
setClockSequence(-1)
}
return int(clockSeq & 0x3fff)
}
// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
// -1 causes a new sequence to be generated.
func SetClockSequence(seq int) {
defer timeMu.Unlock()
timeMu.Lock()
setClockSequence(seq)
}
func setClockSequence(seq int) {
if seq == -1 {
var b [2]byte
randomBits(b[:]) // clock sequence
seq = int(b[0])<<8 | int(b[1])
}
oldSeq := clockSeq
clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
if oldSeq != clockSeq {
lasttime = 0
}
}
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
// uuid. The time is only defined for version 1 and 2 UUIDs.
func (uuid UUID) Time() Time {
time := int64(binary.BigEndian.Uint32(uuid[0:4]))
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
return Time(time)
}
// ClockSequence returns the clock sequence encoded in uuid.
// The clock sequence is only well defined for version 1 and 2 UUIDs.
func (uuid UUID) ClockSequence() int {
return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
}
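Finally, a small sketch (not part of this diff) of recovering the embedded timestamp and clock sequence from a Version 1 UUID using the functions above:

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	v1, err := uuid.NewUUID() // Version 1 encodes time, clock sequence and node
	if err != nil {
		panic(err)
	}

	// Time() yields 100ns ticks since 15 Oct 1582; UnixTime() rebases that
	// onto the Unix epoch so it can be handed to the time package.
	sec, nsec := v1.Time().UnixTime()
	fmt.Println(time.Unix(sec, nsec).UTC())

	fmt.Println(v1.ClockSequence()) // 14-bit clock sequence for this run
}
```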

Some files were not shown because too many files have changed in this diff.